
v8: upgrade to v3.11.10

v0.9.1-release
Bert Belder, 13 years ago
parent
commit 50464cd4f4
  1. deps/v8/AUTHORS (1)
  2. deps/v8/ChangeLog (311)
  3. deps/v8/DEPS (27)
  4. deps/v8/Makefile (69)
  5. deps/v8/SConstruct (4)
  6. deps/v8/build/armu.gypi (36)
  7. deps/v8/build/common.gypi (126)
  8. deps/v8/build/gyp_v8 (2)
  9. deps/v8/build/mipsu.gypi (33)
  10. deps/v8/build/standalone.gypi (12)
  11. deps/v8/include/v8-profiler.h (85)
  12. deps/v8/include/v8.h (251)
  13. deps/v8/samples/lineprocessor.cc (6)
  14. deps/v8/samples/samples.gyp (8)
  15. deps/v8/samples/shell.cc (7)
  16. deps/v8/src/SConscript (1)
  17. deps/v8/src/allocation-inl.h (4)
  18. deps/v8/src/allocation.h (14)
  19. deps/v8/src/api.cc (175)
  20. deps/v8/src/api.h (11)
  21. deps/v8/src/apiutils.h (9)
  22. deps/v8/src/arguments.h (13)
  23. deps/v8/src/arm/builtins-arm.cc (9)
  24. deps/v8/src/arm/code-stubs-arm.cc (116)
  25. deps/v8/src/arm/codegen-arm.cc (4)
  26. deps/v8/src/arm/debug-arm.cc (4)
  27. deps/v8/src/arm/full-codegen-arm.cc (293)
  28. deps/v8/src/arm/ic-arm.cc (53)
  29. deps/v8/src/arm/lithium-arm.cc (208)
  30. deps/v8/src/arm/lithium-arm.h (116)
  31. deps/v8/src/arm/lithium-codegen-arm.cc (458)
  32. deps/v8/src/arm/lithium-codegen-arm.h (41)
  33. deps/v8/src/arm/lithium-gap-resolver-arm.cc (4)
  34. deps/v8/src/arm/macro-assembler-arm.cc (119)
  35. deps/v8/src/arm/macro-assembler-arm.h (23)
  36. deps/v8/src/arm/regexp-macro-assembler-arm.cc (210)
  37. deps/v8/src/arm/regexp-macro-assembler-arm.h (24)
  38. deps/v8/src/arm/simulator-arm.h (12)
  39. deps/v8/src/arm/stub-cache-arm.cc (340)
  40. deps/v8/src/array.js (112)
  41. deps/v8/src/assembler.cc (95)
  42. deps/v8/src/assembler.h (7)
  43. deps/v8/src/ast.cc (267)
  44. deps/v8/src/ast.h (81)
  45. deps/v8/src/bootstrapper.cc (38)
  46. deps/v8/src/builtins.cc (151)
  47. deps/v8/src/builtins.h (1)
  48. deps/v8/src/bytecodes-irregexp.h (35)
  49. deps/v8/src/code-stubs.cc (51)
  50. deps/v8/src/code-stubs.h (1)
  51. deps/v8/src/codegen.h (6)
  52. deps/v8/src/compiler-intrinsics.h (17)
  53. deps/v8/src/compiler.cc (21)
  54. deps/v8/src/contexts.h (22)
  55. deps/v8/src/conversions-inl.h (4)
  56. deps/v8/src/d8.cc (239)
  57. deps/v8/src/d8.h (6)
  58. deps/v8/src/d8.js (2)
  59. deps/v8/src/dateparser-inl.h (3)
  60. deps/v8/src/debug-agent.cc (38)
  61. deps/v8/src/debug-debugger.js (49)
  62. deps/v8/src/debug.cc (86)
  63. deps/v8/src/debug.h (55)
  64. deps/v8/src/deoptimizer.cc (68)
  65. deps/v8/src/deoptimizer.h (24)
  66. deps/v8/src/double.h (6)
  67. deps/v8/src/elements-kind.cc (134)
  68. deps/v8/src/elements-kind.h (221)
  69. deps/v8/src/elements.cc (537)
  70. deps/v8/src/elements.h (23)
  71. deps/v8/src/extensions/externalize-string-extension.cc (7)
  72. deps/v8/src/extensions/gc-extension.cc (5)
  73. deps/v8/src/factory.cc (76)
  74. deps/v8/src/factory.h (26)
  75. deps/v8/src/flag-definitions.h (16)
  76. deps/v8/src/frames.cc (50)
  77. deps/v8/src/frames.h (12)
  78. deps/v8/src/full-codegen.cc (133)
  79. deps/v8/src/full-codegen.h (54)
  80. deps/v8/src/func-name-inferrer.cc (15)
  81. deps/v8/src/func-name-inferrer.h (10)
  82. deps/v8/src/handles.cc (6)
  83. deps/v8/src/hashmap.h (102)
  84. deps/v8/src/heap-inl.h (27)
  85. deps/v8/src/heap-profiler.cc (50)
  86. deps/v8/src/heap-profiler.h (13)
  87. deps/v8/src/heap.cc (317)
  88. deps/v8/src/heap.h (59)
  89. deps/v8/src/hydrogen-instructions.cc (247)
  90. deps/v8/src/hydrogen-instructions.h (462)
  91. deps/v8/src/hydrogen.cc (1829)
  92. deps/v8/src/hydrogen.h (147)
  93. deps/v8/src/ia32/assembler-ia32.h (3)
  94. deps/v8/src/ia32/builtins-ia32.cc (20)
  95. deps/v8/src/ia32/code-stubs-ia32.cc (90)
  96. deps/v8/src/ia32/codegen-ia32.cc (22)
  97. deps/v8/src/ia32/debug-ia32.cc (39)
  98. deps/v8/src/ia32/deoptimizer-ia32.cc (98)
  99. deps/v8/src/ia32/frames-ia32.h (6)
  100. deps/v8/src/ia32/full-codegen-ia32.cc (307)

deps/v8/AUTHORS (1)

@ -23,6 +23,7 @@ Daniel James <dnljms@gmail.com>
Dineel D Sule <dsule@codeaurora.org>
Erich Ocean <erich.ocean@me.com>
Fedor Indutny <fedor@indutny.com>
Filipe David Manana <fdmanana@gmail.com>
Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>

deps/v8/ChangeLog (311)

@ -1,3 +1,314 @@
2012-06-13: Version 3.11.10
Implemented heap profiler memory usage reporting.
Preserved error message during finally block in try..finally.
(Chromium issue 129171)
Fixed EnsureCanContainElements to properly handle double values.
(issue 2170)
Improved heuristics to keep objects in fast mode with inherited
constructors.
Performance and stability improvements on all platforms.
2012-06-06: Version 3.11.9
Implemented ES5-conformant semantics for inherited setters and read-only
properties. Currently behind --es5_readonly flag, because it breaks
WebKit bindings.
Exposed last seen heap object id via v8 public api.
Performance and stability improvements on all platforms.
2012-05-31: Version 3.11.8
Avoid overdeep recursion in regexp where a guarded expression with a
minimum repetition count is inside another quantifier.
(Chromium issue 129926)
Fixed missing write barrier in store field stub.
(issues 2143, 1465, Chromium issue 129355)
Proxies: Fixed receiver for setters inherited from proxies.
Proxies: Fixed ToStringArray function so that it does not reject some
keys.
(issue 1543)
Performance and stability improvements on all platforms.
2012-05-29: Version 3.11.7
Get better function names in stack traces.
Performance and stability improvements on all platforms.
2012-05-24: Version 3.11.6
Fixed RegExp.prototype.toString for incompatible receivers
(issue 1981).
Performance and stability improvements on all platforms.
2012-05-23: Version 3.11.5
Performance and stability improvements on all platforms.
2012-05-22: Version 3.11.4
Some cleanup to common.gypi. This fixes some host/target combinations
that weren't working in the Make build on Mac.
Handle EINTR in socket functions and continue incomplete sends.
(issue 2098)
Fixed python deprecations. (issue 1391)
Made socket send and receive more robust and return 0 on failure.
(Chromium issue 15719)
Fixed GCC 4.7 (C++11) compilation. (issue 2136)
Set '-m32' option for host and target platforms
Performance and stability improvements on all platforms.
2012-05-18: Version 3.11.3
Disable optimization for functions that have scopes that cannot be
reconstructed from the context chain. (issue 2071)
Define V8_EXPORT to nothing for clients of v8. (Chromium issue 90078)
Correctly check for native error objects. (Chromium issue 2138)
Performance and stability improvements on all platforms.
2012-05-16: Version 3.11.2
Revert r11496. (Chromium issue 128146)
Implement map collection for incremental marking. (issue 1465)
Add toString method to CallSite (which describes a frame of the
stack trace).
2012-05-15: Version 3.11.1
Added a readbuffer function to d8 that reads a file into an ArrayBuffer.
Fix freebsd build. (V8 issue 2126)
Performance and stability improvements on all platforms.
2012-05-11: Version 3.11.0
Fixed compose-discard crasher from r11524 (issue 2123).
Activated new global semantics by default. Global variables can
now shadow properties of the global object (ES5.1 erratum).
Properly set ElementsKind of empty FAST_DOUBLE_ELEMENTS arrays when
transitioning (Chromium issue 117409).
Made Error.prototype.name writable again, as required by the spec and
the web (Chromium issue 69187).
Implemented map collection with incremental marking (issue 1465).
Regexp: Fixed overflow in min-match-length calculation
(Chromium issue 126412).
MIPS: Fixed illegal instruction use on Loongson in code for
Math.random() (issue 2115).
Fixed crash bug in VisitChoice (Chromium issue 126272).
Fixed unsigned-Smi check in MappedArgumentsLookup
(Chromium issue 126414).
Fixed LiveEdit for function with no locals (issue 825).
Fixed register clobbering in LoadIC for interceptors
(Chromium issue 125988).
Implemented clearing of CompareICs (issue 2102).
Performance and stability improvements on all platforms.
2012-05-03: Version 3.10.8
Enabled MIPS cross-compilation.
Ensured reload of elements pointer in StoreFastDoubleElement stub.
(Chromium issue 125515)
Fixed corner cases in truncation behavior when storing to
TypedArrays. (issue 2110)
Fixed failure to properly recognize and report out-of-memory
conditions when allocating code space pages. (Chromium issue
118625)
Fixed idle notifications to perform a round of incremental GCs
after context disposal. (issue 2107)
Fixed preparser for try statement. (issue 2109)
Performance and stability improvements on all platforms.
2012-04-30: Version 3.10.7
Performance and stability improvements on all platforms.
2012-04-26: Version 3.10.6
Fixed some bugs in accessing details of the last regexp match.
Fixed source property of empty RegExp objects. (issue 1982)
Enabled inlining some V8 API functions.
Performance and stability improvements on all platforms.
2012-04-23: Version 3.10.5
Put new global var semantics behind a flag until WebKit tests are
cleaned up.
Enabled stepping into callback passed to builtins.
(Chromium issue 109564)
Performance and stability improvements on all platforms.
2012-04-19: Version 3.10.4
Fixed issues when stressing compaction with WeakMaps.
Fixed missing GVN flag for new-space promotion. (Chromium issue 123919)
Simplify invocation sequence at monomorphic function invocation sites.
(issue 2079)
Performance and stability improvements on all platforms.
2012-04-17: Version 3.10.3
Fixed several bugs in heap profiles (including issue 2078).
Throw syntax errors on illegal escape sequences.
Implemented rudimentary module linking (behind --harmony flag)
Implemented ES5 erratum: Global declarations should shadow
inherited properties.
Made handling of const more consistent when combined with 'eval'
and 'with'.
Fixed V8 on MinGW-x64 (issue 2026).
Performance and stability improvements on all platforms.
2012-04-13: Version 3.10.2
Fixed native ARM build (issues 1744, 539)
Return LOOKUP variable instead of CONTEXT for non-context allocated
outer scope parameters (Chromium issue 119609).
Fixed regular and ElementsKind transitions interfering with each other
(Chromium issue 122271).
Improved performance of keyed loads/stores which have a HeapNumber
index (issues 1388, 1295).
Fixed WeakMap processing for evacuation candidates (issue 2060).
Bailout on possible direct eval calls (Chromium issue 122681).
Do not assume that names of function expressions are context-allocated
(issue 2051).
Performance and stability improvements on all platforms.
2012-04-10: Version 3.10.1
Fixed bug with arguments object in inlined functions (issue 2045).
Fixed performance bug with lazy initialization (Chromium issue
118686).
Added suppport for Mac OS X 64bit builds with GYP.
(Patch contributed by Filipe David Manana <fdmanana@gmail.com>)
Fixed bug with hidden properties (issue 2034).
Fixed a performance bug when reloading pages (Chromium issue 117767,
V8 issue 1902).
Fixed bug when optimizing throw in top-level code (issue 2054).
Fixed two bugs with array literals (issue 2055, Chromium issue 121407).
Fixed bug with Math.min/Math.max with NaN inputs (issue 2056).
Fixed a bug with the new runtime profiler (Chromium issue 121147).
Fixed compilation of V8 using uClibc.
Optimized boot-up memory use.
Optimized regular expressions.
2012-03-30: Version 3.10.0
Fixed store IC writability check in strict mode
(Chromium issue 120099).
Resynchronize timers if the Windows system time was changed.
(Chromium issue 119815)
Removed "-mfloat-abi=hard" from host compiler cflags when building for
hardfp ARM
(https://code.google.com/p/chrome-os-partner/issues/detail?id=8539)
Fixed edge case for case independent regexp character classes
(issue 2032).
Reset function info counters after context disposal.
(Chromium issue 117767, V8 issue 1902)
Fixed missing write barrier in CopyObjectToObjectElements.
(Chromium issue 119926)
Fixed missing bounds check in HasElementImpl.
(Chromium issue 119925)
Performance and stability improvements on all platforms.
2012-03-23: Version 3.9.24
Activated count-based profiler for ARM.

deps/v8/DEPS (27)

@ -0,0 +1,27 @@
# Note: The buildbots evaluate this file with CWD set to the parent
# directory and assume that the root of the checkout is in ./v8/, so
# all paths in here must match this assumption.
deps = {
# Remember to keep the revision in sync with the Makefile.
"v8/build/gyp":
"http://gyp.googlecode.com/svn/trunk@1282",
}
deps_os = {
"win": {
"v8/third_party/cygwin":
"http://src.chromium.org/svn/trunk/deps/third_party/cygwin@66844",
"v8/third_party/python_26":
"http://src.chromium.org/svn/trunk/tools/third_party/python_26@89111",
}
}
hooks = [
{
# A change to a .gyp, .gypi, or to GYP itself should run the generator.
"pattern": ".",
"action": ["python", "v8/build/gyp_v8"],
},
]

deps/v8/Makefile (69)

@ -150,21 +150,21 @@ $(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
$(ARCHES): $(addprefix $$@.,$(MODES))
# Defines how to build a particular target (e.g. ia32.release).
$(BUILDS): $(OUTDIR)/Makefile-$$(basename $$@)
@$(MAKE) -C "$(OUTDIR)" -f Makefile-$(basename $@) \
$(BUILDS): $(OUTDIR)/Makefile.$$(basename $$@)
@$(MAKE) -C "$(OUTDIR)" -f Makefile.$(basename $@) \
CXX="$(CXX)" LINK="$(LINK)" \
BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
python -c "print raw_input().capitalize()") \
builddir="$(shell pwd)/$(OUTDIR)/$@"
native: $(OUTDIR)/Makefile-native
@$(MAKE) -C "$(OUTDIR)" -f Makefile-native \
native: $(OUTDIR)/Makefile.native
@$(MAKE) -C "$(OUTDIR)" -f Makefile.native \
CXX="$(CXX)" LINK="$(LINK)" BUILDTYPE=Release \
builddir="$(shell pwd)/$(OUTDIR)/$@"
# TODO(jkummerow): add "android.debug" when we need it.
android android.release: $(OUTDIR)/Makefile-android
@$(MAKE) -C "$(OUTDIR)" -f Makefile-android \
android android.release: $(OUTDIR)/Makefile.android
@$(MAKE) -C "$(OUTDIR)" -f Makefile.android \
CXX="$(ANDROID_TOOL_PREFIX)-g++" \
AR="$(ANDROID_TOOL_PREFIX)-ar" \
RANLIB="$(ANDROID_TOOL_PREFIX)-ranlib" \
@ -197,55 +197,41 @@ native.check: native
--arch-and-mode=. $(TESTFLAGS)
# Clean targets. You can clean each architecture individually, or everything.
$(addsuffix .clean,$(ARCHES)):
rm -f $(OUTDIR)/Makefile-$(basename $@)
$(addsuffix .clean,$(ARCHES)) android.clean:
rm -f $(OUTDIR)/Makefile.$(basename $@)
rm -rf $(OUTDIR)/$(basename $@).release
rm -rf $(OUTDIR)/$(basename $@).debug
find $(OUTDIR) -regex '.*\(host\|target\)-$(basename $@)\.mk' -delete
find $(OUTDIR) -regex '.*\(host\|target\).$(basename $@)\.mk' -delete
native.clean:
rm -f $(OUTDIR)/Makefile-native
rm -f $(OUTDIR)/Makefile.native
rm -rf $(OUTDIR)/native
find $(OUTDIR) -regex '.*\(host\|target\)-native\.mk' -delete
find $(OUTDIR) -regex '.*\(host\|target\).native\.mk' -delete
android.clean:
rm -f $(OUTDIR)/Makefile-android
rm -rf $(OUTDIR)/android.release
find $(OUTDIR) -regex '.*\(host\|target\)-android\.mk' -delete
clean: $(addsuffix .clean,$(ARCHES)) native.clean
clean: $(addsuffix .clean,$(ARCHES)) native.clean android.clean
# GYP file generation targets.
$(OUTDIR)/Makefile-ia32: $(GYPFILES) $(ENVFILE)
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -Dtarget_arch=ia32 \
-S-ia32 $(GYPFLAGS)
$(OUTDIR)/Makefile-x64: $(GYPFILES) $(ENVFILE)
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -Dtarget_arch=x64 \
-S-x64 $(GYPFLAGS)
$(OUTDIR)/Makefile-arm: $(GYPFILES) $(ENVFILE) build/armu.gypi
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -Ibuild/armu.gypi \
-S-arm $(GYPFLAGS)
$(OUTDIR)/Makefile-mips: $(GYPFILES) $(ENVFILE) build/mipsu.gypi
MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ARCHES))
$(MAKEFILES): $(GYPFILES) $(ENVFILE)
GYP_GENERATORS=make \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -Ibuild/mipsu.gypi \
-S-mips $(GYPFLAGS)
-Ibuild/standalone.gypi --depth=. \
-Dv8_target_arch=$(subst .,,$(suffix $@)) \
-S.$(subst .,,$(suffix $@)) $(GYPFLAGS)
$(OUTDIR)/Makefile-native: $(GYPFILES) $(ENVFILE)
$(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
GYP_GENERATORS=make \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -S-native $(GYPFLAGS)
-Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS)
$(OUTDIR)/Makefile-android: $(GYPFILES) $(ENVFILE) build/android.gypi \
$(OUTDIR)/Makefile.android: $(GYPFILES) $(ENVFILE) build/android.gypi \
must-set-ANDROID_NDK_ROOT
GYP_GENERATORS=make \
CC="${ANDROID_TOOL_PREFIX}-gcc" \
CXX="${ANDROID_TOOL_PREFIX}-g++" \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
-S-android $(GYPFLAGS)
-S.android $(GYPFLAGS)
must-set-ANDROID_NDK_ROOT:
ifndef ANDROID_NDK_ROOT
@ -261,9 +247,10 @@ $(ENVFILE): $(ENVFILE).new
# Stores current GYPFLAGS in a file.
$(ENVFILE).new:
@mkdir -p $(OUTDIR); echo "GYPFLAGS=$(GYPFLAGS)" > $(ENVFILE).new;
@mkdir -p $(OUTDIR); echo "GYPFLAGS=$(GYPFLAGS)" > $(ENVFILE).new; \
echo "CXX=$(CXX)" >> $(ENVFILE).new
# Dependencies.
dependencies:
svn checkout --force http://gyp.googlecode.com/svn/trunk build/gyp \
--revision 1026
--revision 1282

deps/v8/SConstruct (4)

@ -101,14 +101,14 @@ LIBRARY_FLAGS = {
'os:linux': {
'CCFLAGS': ['-ansi'] + GCC_EXTRA_CCFLAGS,
'library:shared': {
'CPPDEFINES': ['V8_SHARED'],
'CPPDEFINES': ['V8_SHARED', 'BUILDING_V8_SHARED'],
'LIBS': ['pthread']
}
},
'os:macos': {
'CCFLAGS': ['-ansi', '-mmacosx-version-min=10.4'],
'library:shared': {
'CPPDEFINES': ['V8_SHARED']
'CPPDEFINES': ['V8_SHARED', 'BUILDING_V8_SHARED'],
}
},
'os:freebsd': {

deps/v8/build/armu.gypi (36)

@ -1,36 +0,0 @@
# Copyright 2011 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
'variables': {
'target_arch': 'ia32',
'v8_target_arch': 'arm',
'armv7': 1,
'arm_neon': 0,
'arm_fpu': 'vfpv3',
},
}

deps/v8/build/common.gypi (126)

@ -110,13 +110,6 @@
['v8_enable_gdbjit==1', {
'defines': ['ENABLE_GDB_JIT_INTERFACE',],
}],
['OS!="mac"', {
# TODO(mark): The OS!="mac" conditional is temporary. It can be
# removed once the Mac Chromium build stops setting target_arch to
# ia32 and instead sets it to mac. Other checks in this file for
# OS=="mac" can be removed at that time as well. This can be cleaned
# up once http://crbug.com/44205 is fixed.
'conditions': [
['v8_target_arch=="arm"', {
'defines': [
'V8_TARGET_ARCH_ARM',
@ -142,37 +135,32 @@
'USE_EABI_HARDFLOAT=1',
'CAN_USE_VFP_INSTRUCTIONS',
],
'cflags': [
'-mfloat-abi=hard',
'target_conditions': [
['_toolset=="target"', {
'cflags': ['-mfloat-abi=hard',],
}],
],
}, {
'defines': [
'USE_EABI_HARDFLOAT=0',
],
}],
# The ARM assembler assumes the host is 32 bits,
# so force building 32-bit host tools.
['host_arch=="x64" or OS=="android"', {
'target_conditions': [
['_toolset=="host"', {
'cflags': ['-m32'],
'ldflags': ['-m32'],
}],
],
}],
],
}],
}], # v8_target_arch=="arm"
['v8_target_arch=="ia32"', {
'defines': [
'V8_TARGET_ARCH_IA32',
],
}],
}], # v8_target_arch=="ia32"
['v8_target_arch=="mips"', {
'defines': [
'V8_TARGET_ARCH_MIPS',
],
'variables': {
'mipscompiler': '<!($(echo ${CXX:-$(which g++)}) -v 2>&1 | grep -q "^Target: mips-" && echo "yes" || echo "no")',
},
'conditions': [
[ 'target_arch=="mips"', {
['mipscompiler=="yes"', {
'target_conditions': [
['_toolset=="target"', {
'cflags': ['-EL'],
@ -218,25 +206,21 @@
['mips_arch_variant=="loongson"', {
'defines': ['_MIPS_ARCH_LOONGSON',],
}],
# The MIPS assembler assumes the host is 32 bits,
# so force building 32-bit host tools.
['host_arch=="x64"', {
'target_conditions': [
['_toolset=="host"', {
'cflags': ['-m32'],
'ldflags': ['-m32'],
}],
],
}],
],
}],
}], # v8_target_arch=="mips"
['v8_target_arch=="x64"', {
'defines': [
'V8_TARGET_ARCH_X64',
],
}],
],
}],
'xcode_settings': {
'ARCHS': [ 'x86_64' ],
},
'msvs_settings': {
'VCLinkerTool': {
'StackReserveSize': '2097152',
},
},
}], # v8_target_arch=="x64"
['v8_use_liveobjectlist=="true"', {
'defines': [
'ENABLE_DEBUGGER_SUPPORT',
@ -254,6 +238,11 @@
'defines': [
'WIN32',
],
'msvs_configuration_attributes': {
'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1',
},
}],
['OS=="win" and v8_enable_prof==1', {
'msvs_settings': {
@ -262,20 +251,9 @@
},
},
}],
['OS=="win" and v8_target_arch=="x64"', {
'msvs_settings': {
'VCLinkerTool': {
'StackReserveSize': '2097152',
},
},
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd"', {
'conditions': [
[ 'target_arch=="ia32"', {
'cflags': [ '-m32' ],
'ldflags': [ '-m32' ],
}],
[ 'v8_no_strict_aliasing==1', {
'cflags': [ '-fno-strict-aliasing' ],
}],
@ -284,6 +262,41 @@
['OS=="solaris"', {
'defines': [ '__C99FEATURES__=1' ], # isinf() etc.
}],
['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="mac" or OS=="android") and \
(v8_target_arch=="arm" or v8_target_arch=="ia32" or \
v8_target_arch=="mips")', {
# Check whether the host compiler and target compiler support the
# '-m32' option and set it if so.
'target_conditions': [
['_toolset=="host"', {
'variables': {
'm32flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo -n "-m32" || true)',
},
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
'xcode_settings': {
'ARCHS': [ 'i386' ],
},
}],
['_toolset=="target"', {
'variables': {
'm32flag': '<!((echo | $(echo ${CXX_target:-${CXX:-$(which g++)}}) -m32 -E - > /dev/null 2>&1) && echo -n "-m32" || true)',
},
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
'xcode_settings': {
'ARCHS': [ 'i386' ],
},
}],
],
}],
['OS=="freebsd" or OS=="openbsd"', {
'cflags': [ '-I/usr/local/include' ],
}],
['OS=="netbsd"', {
'cflags': [ '-I/usr/pkg/include' ],
}],
], # conditions
'configurations': {
'Debug': {
@ -310,14 +323,8 @@
},
},
'conditions': [
['OS=="freebsd" or OS=="openbsd"', {
'cflags': [ '-I/usr/local/include' ],
}],
['OS=="netbsd"', {
'cflags': [ '-I/usr/pkg/include' ],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags': [ '-Wno-unused-parameter',
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
}],
],
@ -345,12 +352,6 @@
}],
],
}],
['OS=="freebsd" or OS=="openbsd"', {
'cflags': [ '-I/usr/local/include' ],
}],
['OS=="netbsd"', {
'cflags': [ '-I/usr/pkg/include' ],
}],
['OS=="mac"', {
'xcode_settings': {
'GCC_OPTIMIZATION_LEVEL': '3', # -O3
@ -363,11 +364,6 @@
},
}], # OS=="mac"
['OS=="win"', {
'msvs_configuration_attributes': {
'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1',
},
'msvs_settings': {
'VCCLCompilerTool': {
'Optimization': '2',

deps/v8/build/gyp_v8 (2)

@ -1,6 +1,6 @@
#!/usr/bin/python
#
# Copyright 2010 the V8 project authors. All rights reserved.
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:

deps/v8/build/mipsu.gypi (33)

@ -1,33 +0,0 @@
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
'variables': {
'target_arch': 'ia32',
'v8_target_arch': 'mips',
},
}

deps/v8/build/standalone.gypi (12)

@ -37,8 +37,9 @@
'variables': {
'variables': {
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
# This handles the Linux platforms we generally deal with.
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or \
OS=="netbsd" or OS=="mac"', {
# This handles the Unix platforms we generally deal with.
# Anything else gets passed through, which probably won't work
# very well; such hosts should pass an explicit target_arch
# to gyp.
@ -46,7 +47,8 @@
'<!(uname -m | sed -e "s/i.86/ia32/;\
s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mips/")',
}, {
# OS!="linux" and OS!="freebsd" and OS!="openbsd" and OS!="netbsd"
# OS!="linux" and OS!="freebsd" and OS!="openbsd" and
# OS!="netbsd" and OS!="mac"
'host_arch%': 'ia32',
}],
],
@ -71,6 +73,10 @@
'want_separate_host_toolset': 0,
}],
],
# Default ARM variable settings.
'armv7%': 1,
'arm_neon%': 0,
'arm_fpu%': 'vfpv3',
},
'target_defaults': {
'default_configuration': 'Debug',

deps/v8/include/v8-profiler.h (85)

@ -64,6 +64,7 @@
*/
namespace v8 {
typedef uint32_t SnapshotObjectId;
/**
* CpuProfileNode represents a node in a call graph.
@ -274,7 +275,7 @@ class V8EXPORT HeapGraphNode {
* Returns node id. For the same heap object, the id remains the same
* across all snapshots.
*/
uint64_t GetId() const;
SnapshotObjectId GetId() const;
/** Returns node's own size, in bytes. */
int GetSelfSize() const;
@ -338,7 +339,7 @@ class V8EXPORT HeapSnapshot {
const HeapGraphNode* GetRoot() const;
/** Returns a node by its id. */
const HeapGraphNode* GetNodeById(uint64_t id) const;
const HeapGraphNode* GetNodeById(SnapshotObjectId id) const;
/** Returns total nodes count in the snapshot. */
int GetNodesCount() const;
@ -346,6 +347,9 @@ class V8EXPORT HeapSnapshot {
/** Returns a node by index. */
const HeapGraphNode* GetNode(int index) const;
/** Returns a max seen JS object Id. */
SnapshotObjectId GetMaxSnapshotJSObjectId() const;
/**
* Deletes the snapshot and removes it from HeapProfiler's list.
* All pointers to nodes, edges and paths previously returned become
@ -364,16 +368,20 @@ class V8EXPORT HeapSnapshot {
* with the following structure:
*
* {
* snapshot: {title: "...", uid: nnn},
* nodes: [
* meta-info (JSON string),
* nodes themselves
* ],
* strings: [strings]
* snapshot: {
* title: "...",
* uid: nnn,
* meta: { meta-info },
* node_count: nnn,
* edge_count: nnn
* },
* nodes: [nodes array],
* edges: [edges array],
* strings: [strings array]
* }
*
* Outgoing node links are stored after each node. Nodes reference strings
* and other nodes by their indexes in corresponding arrays.
* Nodes reference strings, other nodes, and edges by their indexes
* in corresponding arrays.
*/
void Serialize(OutputStream* stream, SerializationFormat format) const;
};
@ -404,6 +412,19 @@ class V8EXPORT HeapProfiler {
/** Returns a profile by uid. */
static const HeapSnapshot* FindSnapshot(unsigned uid);
/**
* Returns SnapshotObjectId for a heap object referenced by |value| if
* it has been seen by the heap profiler, kUnknownObjectId otherwise.
*/
static SnapshotObjectId GetSnapshotObjectId(Handle<Value> value);
/**
* A constant for invalid SnapshotObjectId. GetSnapshotObjectId will return
* it in case heap profiler cannot find id for the object passed as
* parameter. HeapSnapshot::GetNodeById will always return NULL for such id.
*/
static const SnapshotObjectId kUnknownObjectId = 0;
/**
* Takes a heap snapshot and returns it. Title may be an empty string.
* See HeapSnapshot::Type for types description.
@ -413,6 +434,34 @@ class V8EXPORT HeapProfiler {
HeapSnapshot::Type type = HeapSnapshot::kFull,
ActivityControl* control = NULL);
/**
* Starts tracking of heap objects population statistics. After calling
* this method, all heap objects relocations done by the garbage collector
* are being registered.
*/
static void StartHeapObjectsTracking();
/**
* Adds a new time interval entry to the aggregated statistics array. The
* time interval entry contains information on the current heap objects
* population size. The method also updates aggregated statistics and
* reports updates for all previous time intervals via the OutputStream
* object. Updates on each time interval are provided as a stream of the
* HeapStatsUpdate structure instances.
* The return value of the function is the last seen heap object Id.
*
* StartHeapObjectsTracking must be called before the first call to this
* method.
*/
static SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
/**
* Stops tracking of heap objects population statistics, cleans up all
* collected data. StartHeapObjectsTracking must be called again prior to
* calling PushHeapObjectsStats next time.
*/
static void StopHeapObjectsTracking();
/**
* Deletes all snapshots taken. All previously returned pointers to
* snapshots and their contents become invalid after this call.
@ -433,6 +482,9 @@ class V8EXPORT HeapProfiler {
/** Returns the number of currently existing persistent handles. */
static int GetPersistentHandleCount();
/** Returns memory used for profiler internal data and snapshots. */
static size_t GetMemorySizeUsedByProfiler();
};
@ -510,6 +562,19 @@ class V8EXPORT RetainedObjectInfo { // NOLINT
};
/**
* A struct for exporting HeapStats data from V8, using "push" model.
* See HeapProfiler::PushHeapObjectsStats.
*/
struct HeapStatsUpdate {
HeapStatsUpdate(uint32_t index, uint32_t count, uint32_t size)
: index(index), count(count), size(size) { }
uint32_t index; // Index of the time interval that was changed.
uint32_t count; // New value of count field for the interval with this index.
uint32_t size; // New value of size field for the interval with this index.
};
} // namespace v8
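
The heap-objects-tracking API added above is driven from the embedder side. A minimal sketch of how it might be used, assuming the 3.11 headers from this commit and an isolate that is already entered; StatsStream and SampleHeapStats are illustrative names, not part of the API:

  #include <v8.h>
  #include <v8-profiler.h>
  #include <cstdio>

  // OutputStream that just prints the HeapStatsUpdate records pushed by
  // PushHeapObjectsStats; only WriteHeapStatsChunk matters for heap stats.
  class StatsStream : public v8::OutputStream {
   public:
    virtual void EndOfStream() {}
    virtual WriteResult WriteAsciiChunk(char* data, int size) { return kContinue; }
    virtual WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* data, int count) {
      for (int i = 0; i < count; i++) {
        printf("interval %u: count=%u size=%u\n",
               data[i].index, data[i].count, data[i].size);
      }
      return kContinue;
    }
  };

  void SampleHeapStats(v8::Handle<v8::Value> some_object) {
    v8::HeapProfiler::StartHeapObjectsTracking();
    // ... run some JavaScript here ...
    StatsStream stream;
    v8::SnapshotObjectId last_id =
        v8::HeapProfiler::PushHeapObjectsStats(&stream);
    // Object ids are stable across snapshots; kUnknownObjectId means the
    // profiler has not seen this object yet.
    v8::SnapshotObjectId id =
        v8::HeapProfiler::GetSnapshotObjectId(some_object);
    if (id == v8::HeapProfiler::kUnknownObjectId) printf("object not tracked\n");
    printf("last seen heap object id: %u\n", last_id);
    v8::HeapProfiler::StopHeapObjectsTracking();
  }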

deps/v8/include/v8.h (251)

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -62,11 +62,13 @@
#else // _WIN32
// Setup for Linux shared library export. There is no need to distinguish
// between building or using the V8 shared library, but we should not
// export symbols when we are building a static library.
// Setup for Linux shared library export.
#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
#ifdef BUILDING_V8_SHARED
#define V8EXPORT __attribute__ ((visibility("default")))
#else
#define V8EXPORT
#endif
#else // defined(__GNUC__) && (__GNUC__ >= 4)
#define V8EXPORT
#endif // defined(__GNUC__) && (__GNUC__ >= 4)
@ -98,6 +100,7 @@ class Function;
class Date;
class ImplementationUtilities;
class Signature;
class AccessorSignature;
template <class T> class Handle;
template <class T> class Local;
template <class T> class Persistent;
@ -107,6 +110,7 @@ class Data;
class AccessorInfo;
class StackTrace;
class StackFrame;
class Isolate;
namespace internal {
@ -862,13 +866,13 @@ class Value : public Data {
* Returns true if this value is the undefined value. See ECMA-262
* 4.3.10.
*/
V8EXPORT bool IsUndefined() const;
inline bool IsUndefined() const;
/**
* Returns true if this value is the null value. See ECMA-262
* 4.3.11.
*/
V8EXPORT bool IsNull() const;
inline bool IsNull() const;
/**
* Returns true if this value is true.
@ -982,7 +986,11 @@ class Value : public Data {
V8EXPORT bool StrictEquals(Handle<Value> that) const;
private:
inline bool QuickIsUndefined() const;
inline bool QuickIsNull() const;
inline bool QuickIsString() const;
V8EXPORT bool FullIsUndefined() const;
V8EXPORT bool FullIsNull() const;
V8EXPORT bool FullIsString() const;
};
@ -1079,6 +1087,7 @@ class String : public Primitive {
* A zero length string.
*/
V8EXPORT static v8::Local<v8::String> Empty();
inline static v8::Local<v8::String> Empty(Isolate* isolate);
/**
* Returns true if the string is external
@ -1236,8 +1245,7 @@ class String : public Primitive {
* this function should not otherwise delete or modify the resource. Neither
* should the underlying buffer be deallocated or modified except through the
* destructor of the external string resource.
*/
V8EXPORT static Local<String> NewExternal(
*/ V8EXPORT static Local<String> NewExternal(
ExternalAsciiStringResource* resource);
/**
@ -1968,10 +1976,13 @@ class Arguments {
inline Local<Object> Holder() const;
inline bool IsConstructCall() const;
inline Local<Value> Data() const;
inline Isolate* GetIsolate() const;
private:
static const int kDataIndex = 0;
static const int kCalleeIndex = -1;
static const int kHolderIndex = -2;
static const int kIsolateIndex = 0;
static const int kDataIndex = -1;
static const int kCalleeIndex = -2;
static const int kHolderIndex = -3;
friend class ImplementationUtilities;
inline Arguments(internal::Object** implicit_args,
@ -1993,9 +2004,11 @@ class V8EXPORT AccessorInfo {
public:
inline AccessorInfo(internal::Object** args)
: args_(args) { }
inline Isolate* GetIsolate() const;
inline Local<Value> Data() const;
inline Local<Object> This() const;
inline Local<Object> Holder() const;
private:
internal::Object** args_;
};
@ -2277,7 +2290,8 @@ class V8EXPORT FunctionTemplate : public Template {
AccessorSetter setter,
Handle<Value> data,
AccessControl settings,
PropertyAttribute attributes);
PropertyAttribute attributes,
Handle<AccessorSignature> signature);
void SetNamedInstancePropertyHandler(NamedPropertyGetter getter,
NamedPropertySetter setter,
NamedPropertyQuery query,
@ -2335,13 +2349,20 @@ class V8EXPORT ObjectTemplate : public Template {
* cross-context access.
* \param attribute The attributes of the property for which an accessor
* is added.
* \param signature The signature describes valid receivers for the accessor
* and is used to perform implicit instance checks against them. If the
* receiver is incompatible (i.e. is not an instance of the constructor as
* defined by FunctionTemplate::HasInstance()), an implicit TypeError is
* thrown and no callback is invoked.
*/
void SetAccessor(Handle<String> name,
AccessorGetter getter,
AccessorSetter setter = 0,
Handle<Value> data = Handle<Value>(),
AccessControl settings = DEFAULT,
PropertyAttribute attribute = None);
PropertyAttribute attribute = None,
Handle<AccessorSignature> signature =
Handle<AccessorSignature>());
/**
* Sets a named property handler on the object template.
@ -2445,8 +2466,8 @@ class V8EXPORT ObjectTemplate : public Template {
/**
* A Signature specifies which receivers and arguments a function can
* legally be called with.
* A Signature specifies which receivers and arguments are valid
* parameters to a function.
*/
class V8EXPORT Signature : public Data {
public:
@ -2459,6 +2480,19 @@ class V8EXPORT Signature : public Data {
};
/**
* An AccessorSignature specifies which receivers are valid parameters
* to an accessor callback.
*/
class V8EXPORT AccessorSignature : public Data {
public:
static Local<AccessorSignature> New(Handle<FunctionTemplate> receiver =
Handle<FunctionTemplate>());
private:
AccessorSignature();
};
/**
* A utility for determining the type of objects based on the template
* they were constructed from.
@ -2552,6 +2586,11 @@ Handle<Primitive> V8EXPORT Null();
Handle<Boolean> V8EXPORT True();
Handle<Boolean> V8EXPORT False();
inline Handle<Primitive> Undefined(Isolate* isolate);
inline Handle<Primitive> Null(Isolate* isolate);
inline Handle<Boolean> True(Isolate* isolate);
inline Handle<Boolean> False(Isolate* isolate);
/**
* A set of constraints that specifies the limits of the runtime's memory use.
@ -2802,13 +2841,13 @@ class V8EXPORT Isolate {
/**
* Associate embedder-specific data with the isolate
*/
void SetData(void* data);
inline void SetData(void* data);
/**
* Retrive embedder-specific data from the isolate.
* Retrieve embedder-specific data from the isolate.
* Returns NULL if SetData has never been called.
*/
void* GetData();
inline void* GetData();
private:
Isolate();
@ -3153,7 +3192,8 @@ class V8EXPORT V8 {
* that is kept alive by JavaScript objects.
* \returns the adjusted value.
*/
static int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
static intptr_t AdjustAmountOfExternalAllocatedMemory(
intptr_t change_in_bytes);
/**
* Suspends recording of tick samples in the profiler.
@ -3735,6 +3775,12 @@ class V8EXPORT Locker {
};
/**
* A struct for exporting HeapStats data from V8, using "push" model.
*/
struct HeapStatsUpdate;
/**
* An interface for exporting data from V8, using "push" model.
*/
@ -3760,6 +3806,14 @@ class V8EXPORT OutputStream { // NOLINT
* will not be called in case writing was aborted.
*/
virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
/**
* Writes the next chunk of heap stats data into the stream. Writing
* can be stopped by returning kAbort as function result. EndOfStream
* will not be called in case writing was aborted.
*/
virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) {
return kAbort;
};
};
@ -3848,18 +3902,6 @@ const uintptr_t kEncodablePointerMask =
PlatformSmiTagging::kEncodablePointerMask;
const int kPointerToSmiShift = PlatformSmiTagging::kPointerToSmiShift;
template <size_t ptr_size> struct InternalConstants;
// Internal constants for 32-bit systems.
template <> struct InternalConstants<4> {
static const int kStringResourceOffset = 3 * kApiPointerSize;
};
// Internal constants for 64-bit systems.
template <> struct InternalConstants<8> {
static const int kStringResourceOffset = 3 * kApiPointerSize;
};
/**
* This class exports constants and functionality from within v8 that
* is necessary to implement inline functions in the v8 api. Don't
@ -3871,18 +3913,31 @@ class Internals {
// the implementation of v8.
static const int kHeapObjectMapOffset = 0;
static const int kMapInstanceTypeOffset = 1 * kApiPointerSize + kApiIntSize;
static const int kStringResourceOffset =
InternalConstants<kApiPointerSize>::kStringResourceOffset;
static const int kStringResourceOffset = 3 * kApiPointerSize;
static const int kOddballKindOffset = 3 * kApiPointerSize;
static const int kForeignAddressOffset = kApiPointerSize;
static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
static const int kFullStringRepresentationMask = 0x07;
static const int kExternalTwoByteRepresentationTag = 0x02;
static const int kIsolateStateOffset = 0;
static const int kIsolateEmbedderDataOffset = 1 * kApiPointerSize;
static const int kIsolateRootsOffset = 3 * kApiPointerSize;
static const int kUndefinedValueRootIndex = 5;
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
static const int kEmptySymbolRootIndex = 128;
static const int kJSObjectType = 0xaa;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x82;
static const int kForeignType = 0x85;
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
static inline bool HasHeapObjectTag(internal::Object* value) {
return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
kHeapObjectTag);
@ -3902,6 +3957,11 @@ class Internals {
return ReadField<uint8_t>(map, kMapInstanceTypeOffset);
}
static inline int GetOddballKind(internal::Object* obj) {
typedef internal::Object O;
return SmiValue(ReadField<O*>(obj, kOddballKindOffset));
}
static inline void* GetExternalPointerFromSmi(internal::Object* value) {
const uintptr_t address = reinterpret_cast<uintptr_t>(value);
return reinterpret_cast<void*>(address >> kPointerToSmiShift);
@ -3922,6 +3982,28 @@ class Internals {
return representation == kExternalTwoByteRepresentationTag;
}
static inline bool IsInitialized(v8::Isolate* isolate) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) + kIsolateStateOffset;
return *reinterpret_cast<int*>(addr) == 1;
}
static inline void SetEmbedderData(v8::Isolate* isolate, void* data) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
kIsolateEmbedderDataOffset;
*reinterpret_cast<void**>(addr) = data;
}
static inline void* GetEmbedderData(v8::Isolate* isolate) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
kIsolateEmbedderDataOffset;
return *reinterpret_cast<void**>(addr);
}
static inline internal::Object** GetRoot(v8::Isolate* isolate, int index) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) + kIsolateRootsOffset;
return reinterpret_cast<internal::Object**>(addr + index * kApiPointerSize);
}
template <typename T>
static inline T ReadField(Object* ptr, int offset) {
uint8_t* addr = reinterpret_cast<uint8_t*>(ptr) + offset - kHeapObjectTag;
@ -4048,6 +4130,11 @@ Local<Value> Arguments::Data() const {
}
Isolate* Arguments::GetIsolate() const {
return *reinterpret_cast<Isolate**>(&implicit_args_[kIsolateIndex]);
}
bool Arguments::IsConstructCall() const {
return is_construct_call_;
}
@ -4160,6 +4247,15 @@ String* String::Cast(v8::Value* value) {
}
Local<String> String::Empty(Isolate* isolate) {
typedef internal::Object* S;
typedef internal::Internals I;
if (!I::IsInitialized(isolate)) return Empty();
S* slot = I::GetRoot(isolate, I::kEmptySymbolRootIndex);
return Local<String>(reinterpret_cast<String*>(slot));
}
String::ExternalStringResource* String::GetExternalStringResource() const {
typedef internal::Object O;
typedef internal::Internals I;
@ -4178,6 +4274,42 @@ String::ExternalStringResource* String::GetExternalStringResource() const {
}
bool Value::IsUndefined() const {
#ifdef V8_ENABLE_CHECKS
return FullIsUndefined();
#else
return QuickIsUndefined();
#endif
}
bool Value::QuickIsUndefined() const {
typedef internal::Object O;
typedef internal::Internals I;
O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
if (!I::HasHeapObjectTag(obj)) return false;
if (I::GetInstanceType(obj) != I::kOddballType) return false;
return (I::GetOddballKind(obj) == I::kUndefinedOddballKind);
}
bool Value::IsNull() const {
#ifdef V8_ENABLE_CHECKS
return FullIsNull();
#else
return QuickIsNull();
#endif
}
bool Value::QuickIsNull() const {
typedef internal::Object O;
typedef internal::Internals I;
O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
if (!I::HasHeapObjectTag(obj)) return false;
if (I::GetInstanceType(obj) != I::kOddballType) return false;
return (I::GetOddballKind(obj) == I::kNullOddballKind);
}
bool Value::IsString() const {
#ifdef V8_ENABLE_CHECKS
return FullIsString();
@ -4283,6 +4415,11 @@ External* External::Cast(v8::Value* value) {
}
Isolate* AccessorInfo::GetIsolate() const {
return *reinterpret_cast<Isolate**>(&args_[-3]);
}
Local<Value> AccessorInfo::Data() const {
return Local<Value>(reinterpret_cast<Value*>(&args_[-2]));
}
@ -4298,6 +4435,54 @@ Local<Object> AccessorInfo::Holder() const {
}
Handle<Primitive> Undefined(Isolate* isolate) {
typedef internal::Object* S;
typedef internal::Internals I;
if (!I::IsInitialized(isolate)) return Undefined();
S* slot = I::GetRoot(isolate, I::kUndefinedValueRootIndex);
return Handle<Primitive>(reinterpret_cast<Primitive*>(slot));
}
Handle<Primitive> Null(Isolate* isolate) {
typedef internal::Object* S;
typedef internal::Internals I;
if (!I::IsInitialized(isolate)) return Null();
S* slot = I::GetRoot(isolate, I::kNullValueRootIndex);
return Handle<Primitive>(reinterpret_cast<Primitive*>(slot));
}
Handle<Boolean> True(Isolate* isolate) {
typedef internal::Object* S;
typedef internal::Internals I;
if (!I::IsInitialized(isolate)) return True();
S* slot = I::GetRoot(isolate, I::kTrueValueRootIndex);
return Handle<Boolean>(reinterpret_cast<Boolean*>(slot));
}
Handle<Boolean> False(Isolate* isolate) {
typedef internal::Object* S;
typedef internal::Internals I;
if (!I::IsInitialized(isolate)) return False();
S* slot = I::GetRoot(isolate, I::kFalseValueRootIndex);
return Handle<Boolean>(reinterpret_cast<Boolean*>(slot));
}
void Isolate::SetData(void* data) {
typedef internal::Internals I;
I::SetEmbedderData(this, data);
}
void* Isolate::GetData() {
typedef internal::Internals I;
return I::GetEmbedderData(this);
}
/**
* \example shell.cc
* A simple shell that takes a list of expressions on the
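
Several of the v8.h additions above (Arguments::GetIsolate, AccessorInfo::GetIsolate, the isolate-taking Undefined/Null/True/False and String::Empty overloads, the now-inlined Isolate::SetData/GetData, and AccessorSignature) are aimed at embedder callbacks. A rough sketch of how they combine, assuming the caller holds a HandleScope; PerIsolateData, Ping, GetX, and Setup are made-up names for illustration:

  #include <v8.h>
  #include <cstddef>

  struct PerIsolateData { int calls; };  // embedder-defined, illustrative only

  static v8::Handle<v8::Value> Ping(const v8::Arguments& args) {
    v8::Isolate* isolate = args.GetIsolate();  // new in this API version
    PerIsolateData* data = static_cast<PerIsolateData*>(isolate->GetData());
    if (data != NULL) data->calls++;
    // Isolate-scoped fast paths avoid an Isolate::Current() lookup.
    if (args.Length() == 0) return v8::Undefined(isolate);
    return v8::String::Empty(isolate);
  }

  static v8::Handle<v8::Value> GetX(v8::Local<v8::String> name,
                                    const v8::AccessorInfo& info) {
    return v8::Undefined(info.GetIsolate());
  }

  void Setup(v8::Isolate* isolate, v8::Handle<v8::ObjectTemplate> global) {
    isolate->SetData(new PerIsolateData());  // now an inline accessor
    global->Set(v8::String::New("ping"), v8::FunctionTemplate::New(Ping));

    // An AccessorSignature restricts which receivers may reach the accessor;
    // incompatible receivers get an implicit TypeError instead of a callback.
    v8::Handle<v8::FunctionTemplate> point = v8::FunctionTemplate::New();
    v8::Handle<v8::AccessorSignature> sig = v8::AccessorSignature::New(point);
    point->InstanceTemplate()->SetAccessor(v8::String::New("x"), GetX, 0,
                                           v8::Handle<v8::Value>(),
                                           v8::DEFAULT, v8::None, sig);
    global->Set(v8::String::New("Point"), point);
  }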

deps/v8/samples/lineprocessor.cc (6)

@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -434,9 +434,9 @@ v8::Handle<v8::String> ReadLine() {
}
if (res == NULL) {
v8::Handle<v8::Primitive> t = v8::Undefined();
return reinterpret_cast<v8::Handle<v8::String>&>(t);
return v8::Handle<v8::String>(v8::String::Cast(*t));
}
// remove newline char
// Remove newline char
for (char* pos = buffer; *pos != '\0'; pos++) {
if (*pos == '\n') {
*pos = '\0';

deps/v8/samples/samples.gyp (8)

@ -1,4 +1,4 @@
# Copyright 2011 the V8 project authors. All rights reserved.
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@ -48,6 +48,12 @@
'sources': [
'process.cc',
],
},
{
'target_name': 'lineprocessor',
'sources': [
'lineprocessor.cc',
],
}
],
}

deps/v8/samples/shell.cc (7)

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -67,6 +67,8 @@ static bool run_shell;
int main(int argc, char* argv[]) {
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
run_shell = (argc == 1);
int result;
{
v8::HandleScope handle_scope;
v8::Persistent<v8::Context> context = CreateShellContext();
if (context.IsEmpty()) {
@ -74,10 +76,11 @@ int main(int argc, char* argv[]) {
return 1;
}
context->Enter();
int result = RunMain(argc, argv);
result = RunMain(argc, argv);
if (run_shell) RunShell(context);
context->Exit();
context.Dispose();
}
v8::V8::Dispose();
return result;
}
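
The shell.cc change above is about teardown order: the handle scope and the persistent context now live in an inner block, so both are released before V8::Dispose() runs. The same pattern in isolation, as a minimal sketch of an embedder's main():

  #include <v8.h>

  int main(int argc, char* argv[]) {
    v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
    int result = 0;
    {  // handles and the context must be gone before V8::Dispose()
      v8::HandleScope handle_scope;
      v8::Persistent<v8::Context> context = v8::Context::New();
      context->Enter();
      // ... evaluate scripts, store an exit code in result ...
      context->Exit();
      context.Dispose();
    }
    v8::V8::Dispose();
    return result;
  }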

deps/v8/src/SConscript (1)

@ -68,6 +68,7 @@ SOURCES = {
diy-fp.cc
dtoa.cc
elements.cc
elements-kind.cc
execution.cc
factory.cc
flags.cc

deps/v8/src/allocation-inl.h (4)

@ -34,12 +34,12 @@ namespace v8 {
namespace internal {
void* PreallocatedStorage::New(size_t size) {
void* PreallocatedStorageAllocationPolicy::New(size_t size) {
return Isolate::Current()->PreallocatedStorageNew(size);
}
void PreallocatedStorage::Delete(void* p) {
void PreallocatedStorageAllocationPolicy::Delete(void* p) {
return Isolate::Current()->PreallocatedStorageDelete(p);
}

deps/v8/src/allocation.h (14)

@ -104,7 +104,7 @@ char* StrNDup(const char* str, int n);
// and free. Used as the default policy for lists.
class FreeStoreAllocationPolicy {
public:
INLINE(static void* New(size_t size)) { return Malloced::New(size); }
INLINE(void* New(size_t size)) { return Malloced::New(size); }
INLINE(static void Delete(void* p)) { Malloced::Delete(p); }
};
@ -117,12 +117,6 @@ class PreallocatedStorage {
explicit PreallocatedStorage(size_t size);
size_t size() { return size_; }
// TODO(isolates): Get rid of these-- we'll have to change the allocator
// interface to include a pointer to an isolate to do this
// efficiently.
static inline void* New(size_t size);
static inline void Delete(void* p);
private:
size_t size_;
PreallocatedStorage* previous_;
@ -137,6 +131,12 @@ class PreallocatedStorage {
};
struct PreallocatedStorageAllocationPolicy {
INLINE(void* New(size_t size));
INLINE(static void Delete(void* ptr));
};
} } // namespace v8::internal
#endif // V8_ALLOCATION_H_
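
The allocation.h change above turns the policy's New() into an ordinary member function instead of a static one, so an allocation policy object can carry state (the new PreallocatedStorageAllocationPolicy forwards to the current isolate's preallocated storage). A minimal sketch of a policy with the same shape, using illustrative names; real policies live inside V8 and are consumed by its container templates:

  #include <cstdlib>

  // Same interface shape as FreeStoreAllocationPolicy after this change:
  // an instance method New() plus a static Delete().
  struct MallocAllocationPolicy {
    void* New(std::size_t size) { return std::malloc(size); }
    static void Delete(void* p) { std::free(p); }
  };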

deps/v8/src/api.cc (175)

@ -512,6 +512,16 @@ void RegisteredExtension::Register(RegisteredExtension* that) {
}
void RegisteredExtension::UnregisterAll() {
RegisteredExtension* re = first_extension_;
while (re != NULL) {
RegisteredExtension* next = re->next();
delete re;
re = next;
}
}
void RegisterExtension(Extension* that) {
RegisteredExtension* extension = new RegisteredExtension(that);
RegisteredExtension::Register(extension);
@ -980,6 +990,12 @@ Local<Signature> Signature::New(Handle<FunctionTemplate> receiver,
}
Local<AccessorSignature> AccessorSignature::New(
Handle<FunctionTemplate> receiver) {
return Utils::AccessorSignatureToLocal(Utils::OpenHandle(*receiver));
}
Local<TypeSwitch> TypeSwitch::New(Handle<FunctionTemplate> type) {
Handle<FunctionTemplate> types[1] = { type };
return TypeSwitch::New(1, types);
@ -1047,7 +1063,8 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
AccessorSetter setter,
v8::Handle<Value> data,
v8::AccessControl settings,
v8::PropertyAttribute attributes) {
v8::PropertyAttribute attributes,
v8::Handle<AccessorSignature> signature) {
i::Handle<i::AccessorInfo> obj = FACTORY->NewAccessorInfo();
ASSERT(getter != NULL);
SET_FIELD_WRAPPED(obj, set_getter, getter);
@ -1059,6 +1076,9 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
if (settings & PROHIBITS_OVERWRITING) obj->set_prohibits_overwriting(true);
obj->set_property_attributes(static_cast<PropertyAttributes>(attributes));
if (!signature.IsEmpty()) {
obj->set_expected_receiver_type(*Utils::OpenHandle(*signature));
}
return obj;
}
@ -1069,7 +1089,8 @@ void FunctionTemplate::AddInstancePropertyAccessor(
AccessorSetter setter,
v8::Handle<Value> data,
v8::AccessControl settings,
v8::PropertyAttribute attributes) {
v8::PropertyAttribute attributes,
v8::Handle<AccessorSignature> signature) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate,
"v8::FunctionTemplate::AddInstancePropertyAccessor()")) {
@ -1078,9 +1099,9 @@ void FunctionTemplate::AddInstancePropertyAccessor(
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(name,
getter, setter, data,
settings, attributes);
i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(name, getter, setter, data,
settings, attributes,
signature);
i::Handle<i::Object> list(Utils::OpenHandle(this)->property_accessors());
if (list->IsUndefined()) {
list = NeanderArray().value();
@ -1265,7 +1286,8 @@ void ObjectTemplate::SetAccessor(v8::Handle<String> name,
AccessorSetter setter,
v8::Handle<Value> data,
AccessControl settings,
PropertyAttribute attribute) {
PropertyAttribute attribute,
v8::Handle<AccessorSignature> signature) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return;
ENTER_V8(isolate);
@ -1279,7 +1301,8 @@ void ObjectTemplate::SetAccessor(v8::Handle<String> name,
setter,
data,
settings,
attribute);
attribute,
signature);
}
@ -2091,17 +2114,21 @@ bool StackFrame::IsConstructor() const {
// --- D a t a ---
bool Value::IsUndefined() const {
bool Value::FullIsUndefined() const {
if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsUndefined()")) {
return false;
}
return Utils::OpenHandle(this)->IsUndefined();
bool result = Utils::OpenHandle(this)->IsUndefined();
ASSERT_EQ(result, QuickIsUndefined());
return result;
}
bool Value::IsNull() const {
bool Value::FullIsNull() const {
if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsNull()")) return false;
return Utils::OpenHandle(this)->IsNull();
bool result = Utils::OpenHandle(this)->IsNull();
ASSERT_EQ(result, QuickIsNull());
return result;
}
@ -2799,9 +2826,13 @@ bool v8::Object::ForceDelete(v8::Handle<Value> key) {
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
// When turning on access checks for a global object deoptimize all functions
// as optimized code does not always handle access checks.
i::Deoptimizer::DeoptimizeGlobalObject(*self);
// When deleting a property on the global object using ForceDelete
// deoptimize all functions as optimized code does not check for the hole
// value with DontDelete properties. We have to deoptimize all contexts
// because of possible cross-context inlined functions.
if (self->IsJSGlobalProxy() || self->IsGlobalObject()) {
i::Deoptimizer::DeoptimizeAll();
}
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj = i::ForceDeleteProperty(self, key_obj);
@ -3061,9 +3092,10 @@ bool Object::SetAccessor(Handle<String> name,
ON_BAILOUT(isolate, "v8::Object::SetAccessor()", return false);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name,
getter, setter, data,
settings, attributes);
v8::Handle<AccessorSignature> signature;
i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name, getter, setter, data,
settings, attributes,
signature);
bool fast = Utils::OpenHandle(this)->HasFastProperties();
i::Handle<i::Object> result = i::SetAccessor(Utils::OpenHandle(this), info);
if (result.is_null() || result->IsUndefined()) return false;
@ -4612,7 +4644,9 @@ void* External::Value() const {
Local<String> v8::String::Empty() {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::String::Empty()");
if (!EnsureInitializedForIsolate(isolate, "v8::String::Empty()")) {
return v8::Local<String>();
}
LOG_API(isolate, "String::Empty()");
return Utils::ToLocal(isolate->factory()->empty_symbol());
}
@ -5020,7 +5054,7 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Array::CloneElementAt()", return Local<Object>());
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
if (!self->HasFastElements()) {
if (!self->HasFastObjectElements()) {
return Local<Object>();
}
i::FixedArray* elms = i::FixedArray::cast(self->elements());
@ -5198,7 +5232,7 @@ void V8::AddImplicitReferences(Persistent<Object> parent,
}
int V8::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
intptr_t V8::AdjustAmountOfExternalAllocatedMemory(intptr_t change_in_bytes) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::V8::AdjustAmountOfExternalAllocatedMemory()")) {
return 0;
@ -5378,17 +5412,6 @@ void Isolate::Exit() {
}
void Isolate::SetData(void* data) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->SetData(data);
}
void* Isolate::GetData() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
return isolate->GetData();
}
String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
: str_(NULL), length_(0) {
i::Isolate* isolate = i::Isolate::Current();
@ -5988,7 +6011,7 @@ Handle<Value> HeapGraphEdge::GetName() const {
const HeapGraphNode* HeapGraphEdge::GetFromNode() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapGraphEdge::GetFromNode");
const i::HeapEntry* from = ToInternal(this)->From();
const i::HeapEntry* from = ToInternal(this)->from();
return reinterpret_cast<const HeapGraphNode*>(from);
}
@ -6022,7 +6045,7 @@ Handle<String> HeapGraphNode::GetName() const {
}
uint64_t HeapGraphNode::GetId() const {
SnapshotObjectId HeapGraphNode::GetId() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapGraphNode::GetId");
return ToInternal(this)->id();
@ -6036,13 +6059,6 @@ int HeapGraphNode::GetSelfSize() const {
}
int HeapGraphNode::GetRetainedSize() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainedSize");
return ToInternal(this)->retained_size();
}
int HeapGraphNode::GetChildrenCount() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetChildrenCount");
@ -6054,29 +6070,7 @@ const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetChild");
return reinterpret_cast<const HeapGraphEdge*>(
&ToInternal(this)->children()[index]);
}
int HeapGraphNode::GetRetainersCount() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainersCount");
return ToInternal(this)->retainers().length();
}
const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainer");
return reinterpret_cast<const HeapGraphEdge*>(
ToInternal(this)->retainers()[index]);
}
const HeapGraphNode* HeapGraphNode::GetDominatorNode() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetDominatorNode");
return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->dominator());
ToInternal(this)->children()[index]);
}
@ -6137,18 +6131,18 @@ const HeapGraphNode* HeapSnapshot::GetRoot() const {
}
const HeapGraphNode* HeapSnapshot::GetNodeById(uint64_t id) const {
const HeapGraphNode* HeapSnapshot::GetNodeById(SnapshotObjectId id) const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodeById");
return reinterpret_cast<const HeapGraphNode*>(
ToInternal(this)->GetEntryById(static_cast<i::SnapshotObjectId>(id)));
ToInternal(this)->GetEntryById(id));
}
int HeapSnapshot::GetNodesCount() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodesCount");
return ToInternal(this)->entries()->length();
return ToInternal(this)->entries().length();
}
@ -6156,7 +6150,14 @@ const HeapGraphNode* HeapSnapshot::GetNode(int index) const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetNode");
return reinterpret_cast<const HeapGraphNode*>(
ToInternal(this)->entries()->at(index));
&ToInternal(this)->entries().at(index));
}
SnapshotObjectId HeapSnapshot::GetMaxSnapshotJSObjectId() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetMaxSnapshotJSObjectId");
return ToInternal(this)->max_snapshot_js_object_id();
}
@ -6201,6 +6202,14 @@ const HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
}
SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Value> value) {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshotObjectId");
i::Handle<i::Object> obj = Utils::OpenHandle(*value);
return i::HeapProfiler::GetSnapshotObjectId(obj);
}
const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
HeapSnapshot::Type type,
ActivityControl* control) {
@ -6220,6 +6229,27 @@ const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
}
void HeapProfiler::StartHeapObjectsTracking() {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapProfiler::StartHeapObjectsTracking");
i::HeapProfiler::StartHeapObjectsTracking();
}
void HeapProfiler::StopHeapObjectsTracking() {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapProfiler::StopHeapObjectsTracking");
i::HeapProfiler::StopHeapObjectsTracking();
}
SnapshotObjectId HeapProfiler::PushHeapObjectsStats(OutputStream* stream) {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapProfiler::PushHeapObjectsStats");
return i::HeapProfiler::PushHeapObjectsStats(stream);
}
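StartHeapObjectsTracking(), PushHeapObjectsStats() and GetSnapshotObjectId() form the new heap-tracking surface that backs live object statistics in the profiler. A hedged usage sketch, assuming the WriteHeapStatsChunk hook added to v8::OutputStream in this same upgrade (StatsLogger and the tracked value are illustrative):

  class StatsLogger : public v8::OutputStream {
   public:
    virtual void EndOfStream() {}
    virtual WriteResult WriteAsciiChunk(char* data, int size) { return kContinue; }
    virtual WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* data, int count) {
      return kContinue;  // one HeapStatsUpdate {index, count, size} per changed interval
    }
  };

  v8::HeapProfiler::StartHeapObjectsTracking();
  // ... run some JavaScript ...
  StatsLogger logger;
  v8::SnapshotObjectId seen = v8::HeapProfiler::PushHeapObjectsStats(&logger);
  v8::Handle<v8::Value> tracked = v8::String::New("tracked");
  v8::SnapshotObjectId id = v8::HeapProfiler::GetSnapshotObjectId(tracked);
  v8::HeapProfiler::StopHeapObjectsTracking();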
void HeapProfiler::DeleteAllSnapshots() {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapProfiler::DeleteAllSnapshots");
@ -6240,6 +6270,11 @@ int HeapProfiler::GetPersistentHandleCount() {
}
size_t HeapProfiler::GetMemorySizeUsedByProfiler() {
return i::HeapProfiler::GetMemorySizeUsedByProfiler();
}
v8::Testing::StressType internal::Testing::stress_type_ =
v8::Testing::kStressTypeOpt;
@ -6267,7 +6302,11 @@ static void SetFlagsFromString(const char* flags) {
void Testing::PrepareStressRun(int run) {
static const char* kLazyOptimizations =
"--prepare-always-opt --nolimit-inlining --noalways-opt";
"--prepare-always-opt "
"--max-inlined-source-size=999999 "
"--max-inlined-nodes=999999 "
"--max-inlined-nodes-cumulative=999999 "
"--noalways-opt";
static const char* kForcedOptimizations = "--always-opt";
// If deoptimization is stressed, turn on frequent deoptimization. If no value

11
deps/v8/src/api.h

@ -105,13 +105,13 @@ NeanderArray::NeanderArray(v8::internal::Handle<v8::internal::Object> obj)
v8::internal::Object* NeanderObject::get(int offset) {
ASSERT(value()->HasFastElements());
ASSERT(value()->HasFastObjectElements());
return v8::internal::FixedArray::cast(value()->elements())->get(offset);
}
void NeanderObject::set(int offset, v8::internal::Object* value) {
ASSERT(value_->HasFastElements());
ASSERT(value_->HasFastObjectElements());
v8::internal::FixedArray::cast(value_->elements())->set(offset, value);
}
@ -146,6 +146,7 @@ class RegisteredExtension {
public:
explicit RegisteredExtension(Extension* extension);
static void Register(RegisteredExtension* that);
static void UnregisterAll();
Extension* extension() { return extension_; }
RegisteredExtension* next() { return next_; }
RegisteredExtension* next_auto() { return next_auto_; }
@ -199,6 +200,8 @@ class Utils {
v8::internal::Handle<v8::internal::ObjectTemplateInfo> obj);
static inline Local<Signature> ToLocal(
v8::internal::Handle<v8::internal::SignatureInfo> obj);
static inline Local<AccessorSignature> AccessorSignatureToLocal(
v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
static inline Local<TypeSwitch> ToLocal(
v8::internal::Handle<v8::internal::TypeSwitchInfo> obj);
@ -232,6 +235,8 @@ class Utils {
OpenHandle(const v8::Context* context);
static inline v8::internal::Handle<v8::internal::SignatureInfo>
OpenHandle(const v8::Signature* sig);
static inline v8::internal::Handle<v8::internal::FunctionTemplateInfo>
OpenHandle(const v8::AccessorSignature* sig);
static inline v8::internal::Handle<v8::internal::TypeSwitchInfo>
OpenHandle(const v8::TypeSwitch* that);
static inline v8::internal::Handle<v8::internal::Foreign>
@ -275,6 +280,7 @@ MAKE_TO_LOCAL(ToLocal, Foreign, External)
MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
MAKE_TO_LOCAL(AccessorSignatureToLocal, FunctionTemplateInfo, AccessorSignature)
MAKE_TO_LOCAL(ToLocal, TypeSwitchInfo, TypeSwitch)
MAKE_TO_LOCAL(MessageToLocal, Object, Message)
MAKE_TO_LOCAL(StackTraceToLocal, JSArray, StackTrace)
@ -299,6 +305,7 @@ MAKE_OPEN_HANDLE(Template, TemplateInfo)
MAKE_OPEN_HANDLE(FunctionTemplate, FunctionTemplateInfo)
MAKE_OPEN_HANDLE(ObjectTemplate, ObjectTemplateInfo)
MAKE_OPEN_HANDLE(Signature, SignatureInfo)
MAKE_OPEN_HANDLE(AccessorSignature, FunctionTemplateInfo)
MAKE_OPEN_HANDLE(TypeSwitch, TypeSwitchInfo)
MAKE_OPEN_HANDLE(Data, Object)
MAKE_OPEN_HANDLE(RegExp, JSRegExp)

9
deps/v8/src/apiutils.h

@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -40,14 +40,17 @@ class ImplementationUtilities {
}
// Packs additional parameters for the NewArguments function. |implicit_args|
// is a pointer to the last element of a 3-element array controlled by GC.
// is a pointer to the last element of a 4-element array controlled by GC.

static void PrepareArgumentsData(internal::Object** implicit_args,
internal::Isolate* isolate,
internal::Object* data,
internal::JSFunction* callee,
internal::Object* holder) {
implicit_args[v8::Arguments::kDataIndex] = data;
implicit_args[v8::Arguments::kCalleeIndex] = callee;
implicit_args[v8::Arguments::kHolderIndex] = holder;
implicit_args[v8::Arguments::kIsolateIndex] =
reinterpret_cast<internal::Object*>(isolate);
}
static v8::Arguments NewArguments(internal::Object** implicit_args,
@ -55,6 +58,8 @@ class ImplementationUtilities {
bool is_construct_call) {
ASSERT(implicit_args[v8::Arguments::kCalleeIndex]->IsJSFunction());
ASSERT(implicit_args[v8::Arguments::kHolderIndex]->IsHeapObject());
// The implicit isolate argument is not tagged and looks like a SMI.
ASSERT(implicit_args[v8::Arguments::kIsolateIndex]->IsSmi());
return v8::Arguments(implicit_args, argv, argc, is_construct_call);
}

13
deps/v8/src/arguments.h

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -91,9 +91,11 @@ class CustomArguments : public Relocatable {
Object* data,
Object* self,
JSObject* holder) : Relocatable(isolate) {
values_[2] = self;
values_[1] = holder;
values_[0] = data;
ASSERT(reinterpret_cast<Object*>(isolate)->IsSmi());
values_[3] = self;
values_[2] = holder;
values_[1] = data;
values_[0] = reinterpret_cast<Object*>(isolate);
}
inline explicit CustomArguments(Isolate* isolate) : Relocatable(isolate) {
@ -106,8 +108,9 @@ class CustomArguments : public Relocatable {
void IterateInstance(ObjectVisitor* v);
Object** end() { return values_ + ARRAY_SIZE(values_) - 1; }
private:
Object* values_[3];
Object* values_[4];
};
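Both hunks make room for a fourth implicit value: the isolate is stored untagged in the arguments block (hence the assertion that it merely looks like a Smi to GC-visiting code). For callback authors this is what makes a per-invocation isolate accessor cheap. A hedged callback sketch, assuming the GetIsolate() accessor exposed on v8::Arguments in this version:

  static v8::Handle<v8::Value> Ping(const v8::Arguments& args) {
    // Reads the kIsolateIndex slot packed by PrepareArgumentsData above,
    // avoiding the slower thread-local Isolate::GetCurrent() lookup.
    v8::Isolate* isolate = args.GetIsolate();
    (void)isolate;  // e.g. look up per-isolate embedder state here
    return v8::True();
  }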

9
deps/v8/src/arm/builtins-arm.cc

@ -114,7 +114,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Label* gc_required) {
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
__ LoadInitialArrayMap(array_function, scratch2, scratch1);
__ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@ -208,7 +208,8 @@ static void AllocateJSArray(MacroAssembler* masm,
bool fill_with_hole,
Label* gc_required) {
// Load the initial map from the array function.
__ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
__ LoadInitialArrayMap(array_function, scratch2,
elements_array_storage, fill_with_hole);
if (FLAG_debug_code) { // Assert that array size is not zero.
__ tst(array_size, array_size);
@ -440,10 +441,10 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ b(call_generic_code);
__ bind(&not_double);
// Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
// Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
// r3: JSArray
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
r2,
r9,

116
deps/v8/src/arm/code-stubs-arm.cc

@ -3737,9 +3737,13 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Compute the return address in lr to return to after the jump below. Pc is
// already at '+ 8' from the current instruction but return is after three
// instructions so add another 4 to pc to get the return address.
{
// Prevent literal pool emission before return address.
Assembler::BlockConstPoolScope block_const_pool(masm);
masm->add(lr, pc, Operand(4));
__ str(lr, MemOperand(sp, 0));
masm->Jump(r5);
}
if (always_allocate) {
// It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
@ -3956,6 +3960,12 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.
__ jmp(&invoke);
// Block literal pool emission whilst taking the position of the handler
// entry. This avoids making the assumption that literal pools are always
// emitted after an instruction is emitted, rather than before.
{
Assembler::BlockConstPoolScope block_const_pool(masm);
__ bind(&handler_entry);
handler_offset_ = handler_entry.pos();
// Caught exception: Store result (exception) in the pending exception
@ -3964,6 +3974,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// signal the existence of the JSEntry frame.
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
}
__ str(r0, MemOperand(ip));
__ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
__ b(&exit);
@ -4006,9 +4017,13 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Branch and link to JSEntryTrampoline. We don't use the double underscore
// macro for the add instruction because we don't want the coverage tool
// inserting instructions here after we read the pc.
// inserting instructions here after we read the pc. We block literal pool
// emission for the same reason.
{
Assembler::BlockConstPoolScope block_const_pool(masm);
__ mov(lr, Operand(pc));
masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
}
// Unlink this frame from the handler chain.
__ PopTryHandler();
@ -4824,27 +4839,32 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
// Isolates: note we add an additional parameter here (isolate pointer).
const int kRegExpExecuteArguments = 8;
const int kRegExpExecuteArguments = 9;
const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
// Stack pointer now points to cell where return address is to be written.
// Arguments are before that on the stack or in registers.
// Argument 8 (sp[16]): Pass current isolate address.
// Argument 9 (sp[20]): Pass current isolate address.
__ mov(r0, Operand(ExternalReference::isolate_address()));
__ str(r0, MemOperand(sp, 4 * kPointerSize));
__ str(r0, MemOperand(sp, 5 * kPointerSize));
// Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
// Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
__ mov(r0, Operand(1));
__ str(r0, MemOperand(sp, 3 * kPointerSize));
__ str(r0, MemOperand(sp, 4 * kPointerSize));
// Argument 6 (sp[8]): Start (high end) of backtracking stack memory area.
// Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
__ mov(r0, Operand(address_of_regexp_stack_memory_address));
__ ldr(r0, MemOperand(r0, 0));
__ mov(r2, Operand(address_of_regexp_stack_memory_size));
__ ldr(r2, MemOperand(r2, 0));
__ add(r0, r0, Operand(r2));
__ str(r0, MemOperand(sp, 3 * kPointerSize));
// Argument 6: Set the number of capture registers to zero to force global
// regexps to behave as non-global. This does not affect non-global regexps.
__ mov(r0, Operand(0));
__ str(r0, MemOperand(sp, 2 * kPointerSize));
// Argument 5 (sp[4]): static offsets vector buffer.
@ -4893,7 +4913,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
__ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
__ cmp(r0, Operand(1));
// We expect exactly one result since we force the called regexp to behave
// as non-global.
__ b(eq, &success);
Label failure;
__ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
@ -5169,9 +5191,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
__ b(ne, &call);
// Patch the receiver on the stack with the global receiver object.
__ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
__ str(r2, MemOperand(sp, argc_ * kPointerSize));
__ ldr(r3, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset));
__ str(r3, MemOperand(sp, argc_ * kPointerSize));
__ bind(&call);
}
@ -5179,9 +5201,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// r1: pushed function (to be verified)
__ JumpIfSmi(r1, &non_function);
// Get the map of the function object.
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
__ b(ne, &slow);
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
}
// Fast-case: Invoke the function now.
// r1: pushed function
ParameterCount actual(argc_);
@ -5205,8 +5231,17 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Slow-case: Non-function called.
__ bind(&slow);
if (RecordCallTarget()) {
// If there is a call target cache, mark it megamorphic in the
// non-function case. MegamorphicSentinel is an immortal immovable
// object (undefined) so no write barrier is needed.
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->undefined_value());
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
}
// Check for function proxy.
__ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
__ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(ne, &non_function);
__ push(r1); // put proxy as additional argument
__ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));
@ -5873,36 +5908,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r2: result string length
__ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
__ cmp(r2, Operand(r4, ASR, 1));
// Return original string.
__ b(eq, &return_r0);
// Longer than original string's length or negative: unsafe arguments.
__ b(hi, &runtime);
// Shorter than original string's length: an actual substring.
Label result_longer_than_two;
// Check for special case of two character ASCII string, in which case
// we do a lookup in the symbol table first.
__ cmp(r2, Operand(2));
__ b(gt, &result_longer_than_two);
__ b(lt, &runtime);
__ JumpIfInstanceTypeIsNotSequentialAscii(r1, r1, &runtime);
// Get the two characters forming the sub string.
__ add(r0, r0, Operand(r3));
__ ldrb(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
__ ldrb(r4, FieldMemOperand(r0, SeqAsciiString::kHeaderSize + 1));
// Try to lookup two character string in symbol table.
Label make_two_character_string;
StringHelper::GenerateTwoCharacterSymbolTableProbe(
masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
__ jmp(&return_r0);
// r2: result string length.
// r3: two characters combined into halfword in little endian byte order.
__ bind(&make_two_character_string);
__ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
__ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
__ jmp(&return_r0);
__ bind(&result_longer_than_two);
// Deal with different string types: update the index if necessary
// and put the underlying string into r5.
// r0: original string
@ -6816,6 +6827,10 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
__ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
RelocInfo::CODE_TARGET));
// Prevent literal pool emission during calculation of return address.
Assembler::BlockConstPoolScope block_const_pool(masm);
// Push return address (accessible to GC through exit frame pc).
// Note that using pc with str is deprecated.
Label start;
@ -7106,8 +7121,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
{ REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// ElementsTransitionGenerator::GenerateMapChangeElementTransition
// and ElementsTransitionGenerator::GenerateSmiToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
{ REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
@ -7176,8 +7191,13 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
// forth between a compare instructions (a nop in this position) and the
// real branch when we start and stop incremental heap marking.
// See RecordWriteStub::Patch for details.
{
// Block literal pool emission, as the position of these two instructions
// is assumed by the patching code.
Assembler::BlockConstPoolScope block_const_pool(masm);
__ b(&skip_to_incremental_noncompacting);
__ b(&skip_to_incremental_compacting);
}
if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
__ RememberedSetHelper(object_,
@ -7370,9 +7390,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label fast_elements;
__ CheckFastElements(r2, r5, &double_elements);
// FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
// FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
__ JumpIfSmi(r0, &smi_element);
__ CheckFastSmiOnlyElements(r2, r5, &fast_elements);
__ CheckFastSmiElements(r2, r5, &fast_elements);
// Store into the array literal requires an elements transition. Call into
// the runtime.
@ -7384,7 +7404,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ Push(r5, r4);
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
// Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
// Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
__ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
@ -7395,8 +7415,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
// Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
// FAST_ELEMENTS, and value is Smi.
// Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
// and value is Smi.
__ bind(&smi_element);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
__ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));

4
deps/v8/src/arm/codegen-arm.cc

@ -73,7 +73,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
@ -96,7 +96,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
}
void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, Label* fail) {
// ----------- S t a t e -------------
// -- r0 : value

4
deps/v8/src/arm/debug-arm.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -125,6 +125,8 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
Assembler::kDebugBreakSlotInstructions);
}
const bool Debug::FramePaddingLayout::kIsSupported = false;
#define __ ACCESS_MASM(masm)

293
deps/v8/src/arm/full-codegen-arm.cc

@ -73,9 +73,6 @@ class JumpPatchSite BASE_EMBEDDED {
Assembler::BlockConstPoolScope block_const_pool(masm_);
__ bind(&patch_site_);
__ cmp(reg, Operand(reg));
// Don't use b(al, ...) as that might emit the constant pool right after the
// branch. After patching when the branch is no longer unconditional
// execution can continue into the constant pool.
__ b(eq, target); // Always taken before patched.
}
@ -90,6 +87,8 @@ class JumpPatchSite BASE_EMBEDDED {
}
void EmitPatchInfo() {
// Block literal pool emission whilst recording patch site information.
Assembler::BlockConstPoolScope block_const_pool(masm_);
if (patch_site_.is_bound()) {
int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
Register reg;
@ -112,13 +111,6 @@ class JumpPatchSite BASE_EMBEDDED {
};
// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
int FullCodeGenerator::self_optimization_header_size() {
UNREACHABLE();
return 24;
}
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the
@ -275,11 +267,11 @@ void FullCodeGenerator::Generate() {
// For named function expressions, declare the function name as a
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableProxy* proxy = scope()->function();
ASSERT(proxy->var()->mode() == CONST ||
proxy->var()->mode() == CONST_HARMONY);
ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
EmitDeclaration(proxy, proxy->var()->mode(), NULL);
VariableDeclaration* function = scope()->function();
ASSERT(function->proxy()->var()->mode() == CONST ||
function->proxy()->var()->mode() == CONST_HARMONY);
ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
}
VisitDeclarations(scope()->declarations());
}
@ -351,6 +343,8 @@ static const int kBackEdgeDistanceDivisor = 142;
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
// Block literal pools whilst emitting stack check code.
Assembler::BlockConstPoolScope block_const_pool(masm_);
Label ok;
if (FLAG_count_based_interrupts) {
@ -789,62 +783,52 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
}
void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* function) {
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
// Check that we're not inside a with or catch context.
__ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
__ CompareRoot(r1, Heap::kWithContextMapRootIndex);
__ Check(ne, "Declaration in with context.");
__ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
__ Check(ne, "Declaration in catch context.");
}
}
void FullCodeGenerator::VisitVariableDeclaration(
VariableDeclaration* declaration) {
// If it was not possible to allocate the variable at compile time, we
// need to "declare" it at runtime to make sure it actually exists in the
// local context.
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
bool binding_needs_init = (function == NULL) &&
(mode == CONST || mode == CONST_HARMONY || mode == LET);
bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
switch (variable->location()) {
case Variable::UNALLOCATED:
++global_count_;
globals_->Add(variable->name(), zone());
globals_->Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
: isolate()->factory()->undefined_value(),
zone());
break;
case Variable::PARAMETER:
case Variable::LOCAL:
if (function != NULL) {
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ str(result_register(), StackOperand(variable));
} else if (binding_needs_init) {
Comment cmnt(masm_, "[ Declaration");
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ str(ip, StackOperand(variable));
}
break;
case Variable::CONTEXT:
// The variable in the decl always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
// Check that we're not inside a with or catch context.
__ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
__ CompareRoot(r1, Heap::kWithContextMapRootIndex);
__ Check(ne, "Declaration in with context.");
__ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
__ Check(ne, "Declaration in catch context.");
}
if (function != NULL) {
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ str(result_register(), ContextOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(cp,
offset,
result_register(),
r2,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
} else if (binding_needs_init) {
Comment cmnt(masm_, "[ Declaration");
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ str(ip, ContextOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
@ -853,13 +837,11 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
break;
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ Declaration");
Comment cmnt(masm_, "[ VariableDeclaration");
__ mov(r2, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
ASSERT(mode == VAR ||
mode == CONST ||
mode == CONST_HARMONY ||
mode == LET);
ASSERT(mode == VAR || mode == LET ||
mode == CONST || mode == CONST_HARMONY);
PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
? READ_ONLY : NONE;
__ mov(r1, Operand(Smi::FromInt(attr)));
@ -867,11 +849,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (function != NULL) {
__ Push(cp, r2, r1);
// Push initial value for function declaration.
VisitForStackValue(function);
} else if (binding_needs_init) {
if (hole_init) {
__ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
__ Push(cp, r2, r1, r0);
} else {
@ -885,6 +863,122 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
}
void FullCodeGenerator::VisitFunctionDeclaration(
FunctionDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED: {
globals_->Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
Compiler::BuildFunctionInfo(declaration->fun(), script());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_->Add(function, zone());
break;
}
case Variable::PARAMETER:
case Variable::LOCAL: {
Comment cmnt(masm_, "[ FunctionDeclaration");
VisitForAccumulatorValue(declaration->fun());
__ str(result_register(), StackOperand(variable));
break;
}
case Variable::CONTEXT: {
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
__ str(result_register(), ContextOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(cp,
offset,
result_register(),
r2,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
break;
}
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
__ mov(r2, Operand(variable->name()));
__ mov(r1, Operand(Smi::FromInt(NONE)));
__ Push(cp, r2, r1);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
__ CallRuntime(Runtime::kDeclareContextSlot, 4);
break;
}
}
}
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
Handle<JSModule> instance = declaration->module()->interface()->Instance();
ASSERT(!instance.is_null());
switch (variable->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ ModuleDeclaration");
globals_->Add(variable->name(), zone());
globals_->Add(instance, zone());
Visit(declaration->module());
break;
}
case Variable::CONTEXT: {
Comment cmnt(masm_, "[ ModuleDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ mov(r1, Operand(instance));
__ str(r1, ContextOperand(cp, variable->index()));
Visit(declaration->module());
break;
}
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::LOOKUP:
UNREACHABLE();
}
}
void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED:
// TODO(rossberg)
break;
case Variable::CONTEXT: {
Comment cmnt(masm_, "[ ImportDeclaration");
EmitDebugCheckDeclarationContext(variable);
// TODO(rossberg)
break;
}
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::LOOKUP:
UNREACHABLE();
}
}
void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
// TODO(rossberg)
}
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
// The context is the first argument.
@ -1511,7 +1605,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code is emitted.
expr->CalculateEmitStore();
expr->CalculateEmitStore(zone());
AccessorTable accessor_table(isolate()->zone());
for (int i = 0; i < expr->properties()->length(); i++) {
@ -1609,7 +1703,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
@ -1630,8 +1724,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
ASSERT(constant_elements_kind == FAST_ELEMENTS ||
constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode = has_fast_elements
? FastCloneShallowArrayStub::CLONE_ELEMENTS
@ -1659,7 +1752,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
if (constant_elements_kind == FAST_ELEMENTS) {
if (IsFastObjectElementsKind(constant_elements_kind)) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ ldr(r6, MemOperand(sp)); // Copy of array literal.
__ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
@ -2271,6 +2364,18 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
}
// Record source position for debugger.
SetSourcePosition(expr->position());
// Record call targets in unoptimized code, but not in the snapshot.
if (!Serializer::enabled()) {
flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
RecordTypeFeedbackCell(expr->id(), cell);
__ mov(r2, Operand(cell));
}
CallFunctionStub stub(arg_count, flags);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
@ -3564,7 +3669,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
__ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
__ add(string_length, string_length, Operand(scratch1));
__ add(string_length, string_length, Operand(scratch1), SetCC);
__ b(vs, &bailout);
__ cmp(element, elements_end);
__ b(lt, &loop);
@ -3601,7 +3706,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ b(ne, &bailout);
__ tst(scratch2, Operand(0x80000000));
__ b(ne, &bailout);
__ add(string_length, string_length, Operand(scratch2));
__ add(string_length, string_length, Operand(scratch2), SetCC);
__ b(vs, &bailout);
__ SmiUntag(string_length);
@ -4357,7 +4462,8 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
Scope* declaration_scope = scope()->DeclarationScope();
if (declaration_scope->is_global_scope()) {
if (declaration_scope->is_global_scope() ||
declaration_scope->is_module_scope()) {
// Contexts nested in the global context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
@ -4388,14 +4494,55 @@ void FullCodeGenerator::EnterFinallyBlock() {
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
STATIC_ASSERT(kSmiTag == 0);
__ add(r1, r1, Operand(r1)); // Convert to smi.
// Store result register while executing finally block.
__ push(r1);
// Store pending message while executing finally block.
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(ip, Operand(pending_message_obj));
__ ldr(r1, MemOperand(ip));
__ push(r1);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ mov(ip, Operand(has_pending_message));
__ ldr(r1, MemOperand(ip));
__ push(r1);
ExternalReference pending_message_script =
ExternalReference::address_of_pending_message_script(isolate());
__ mov(ip, Operand(pending_message_script));
__ ldr(r1, MemOperand(ip));
__ push(r1);
}
void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(r1));
// Restore pending message from stack.
__ pop(r1);
ExternalReference pending_message_script =
ExternalReference::address_of_pending_message_script(isolate());
__ mov(ip, Operand(pending_message_script));
__ str(r1, MemOperand(ip));
__ pop(r1);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ mov(ip, Operand(has_pending_message));
__ str(r1, MemOperand(ip));
__ pop(r1);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(ip, Operand(pending_message_obj));
__ str(r1, MemOperand(ip));
// Restore result register from stack.
__ pop(r1);
// Uncook return address and return.
__ pop(result_register());
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);

53
deps/v8/src/arm/ic-arm.cc

@ -1249,7 +1249,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in r0.
if (!FLAG_trace_elements_transitions) {
Label fail;
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
__ mov(r0, r2);
__ Ret();
__ bind(&fail);
@ -1462,27 +1462,27 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
__ b(ne, &non_double_value);
// Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
// Value is a double. Transition FAST_SMI_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS,
receiver_map,
r4,
&slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
receiver_map,
r4,
&slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@ -1690,12 +1690,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
// Activate inlined smi code.
if (previous_state == UNINITIALIZED) {
PatchInlinedSmiCode(address());
PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
}
}
void PatchInlinedSmiCode(Address address) {
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
Address cmp_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@ -1729,34 +1729,31 @@ void PatchInlinedSmiCode(Address address) {
Instr instr_at_patch = Assembler::instr_at(patch_address);
Instr branch_instr =
Assembler::instr_at(patch_address + Instruction::kInstrSize);
ASSERT(Assembler::IsCmpRegister(instr_at_patch));
ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
Assembler::GetRm(instr_at_patch).code());
ASSERT(Assembler::IsBranch(branch_instr));
if (Assembler::GetCondition(branch_instr) == eq) {
// This is patching a "jump if not smi" site to be active.
// Changing
// This is patching a conditional "jump if not smi/jump if smi" site.
// Enabling by changing from
// cmp rx, rx
// b eq, <target>
// b eq/ne, <target>
// to
// tst rx, #kSmiTagMask
// b ne, <target>
// b ne/eq, <target>
// and vice-versa to be disabled again.
CodePatcher patcher(patch_address, 2);
Register reg = Assembler::GetRn(instr_at_patch);
if (check == ENABLE_INLINED_SMI_CHECK) {
ASSERT(Assembler::IsCmpRegister(instr_at_patch));
ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
Assembler::GetRm(instr_at_patch).code());
patcher.masm()->tst(reg, Operand(kSmiTagMask));
} else {
ASSERT(check == DISABLE_INLINED_SMI_CHECK);
ASSERT(Assembler::IsTstImmediate(instr_at_patch));
patcher.masm()->cmp(reg, reg);
}
ASSERT(Assembler::IsBranch(branch_instr));
if (Assembler::GetCondition(branch_instr) == eq) {
patcher.EmitCondition(ne);
} else {
ASSERT(Assembler::GetCondition(branch_instr) == ne);
// This is patching a "jump if smi" site to be active.
// Changing
// cmp rx, rx
// b ne, <target>
// to
// tst rx, #kSmiTagMask
// b eq, <target>
CodePatcher patcher(patch_address, 2);
Register reg = Assembler::GetRn(instr_at_patch);
patcher.masm()->tst(reg, Operand(kSmiTagMask));
patcher.EmitCondition(eq);
}
}
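The rewrite folds the enable and disable directions into one path: the register is recovered from whichever instruction currently sits at the patch site, then the cmp/tst instruction and the branch condition are swapped as a pair. Conceptually the two states are (a sketch, with the smi constants inlined rather than taken from V8 headers):

  #include <stdint.h>
  //   disabled: cmp rx, rx           + b eq/ne, <target>
  //   enabled:  tst rx, #kSmiTagMask + b ne/eq, <target>
  // The tst form is the real smi test; with kSmiTag == 0 a smi has its low bit clear.
  const intptr_t kSmiTag = 0, kSmiTagMask = 1;
  inline bool IsSmiTagged(intptr_t value) {
    return (value & kSmiTagMask) == kSmiTag;
  }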

208
deps/v8/src/arm/lithium-arm.cc

@ -108,22 +108,17 @@ void LInstruction::PrintTo(StringStream* stream) {
}
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
void LInstruction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
for (int i = 0; i < inputs_.length(); i++) {
for (int i = 0; i < InputCount(); i++) {
if (i > 0) stream->Add(" ");
inputs_[i]->PrintTo(stream);
InputAt(i)->PrintTo(stream);
}
}
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
for (int i = 0; i < results_.length(); i++) {
if (i > 0) stream->Add(" ");
results_[i]->PrintTo(stream);
}
void LInstruction::PrintOutputOperandTo(StringStream* stream) {
if (HasResult()) result()->PrintTo(stream);
}
@ -416,9 +411,9 @@ LChunk::LChunk(CompilationInfo* info, HGraph* graph)
: spill_slot_count_(0),
info_(info),
graph_(graph),
instructions_(32),
pointer_maps_(8),
inlined_closures_(1) {
instructions_(32, graph->zone()),
pointer_maps_(8, graph->zone()),
inlined_closures_(1, graph->zone()) {
}
@ -432,9 +427,9 @@ int LChunk::GetNextSpillIndex(bool is_double) {
LOperand* LChunk::GetNextSpillSlot(bool is_double) {
int index = GetNextSpillIndex(is_double);
if (is_double) {
return LDoubleStackSlot::Create(index);
return LDoubleStackSlot::Create(index, zone());
} else {
return LStackSlot::Create(index);
return LStackSlot::Create(index, zone());
}
}
@ -479,23 +474,23 @@ void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
int index = -1;
if (instr->IsControl()) {
instructions_.Add(gap);
instructions_.Add(gap, zone());
index = instructions_.length();
instructions_.Add(instr);
instructions_.Add(instr, zone());
} else {
index = instructions_.length();
instructions_.Add(instr);
instructions_.Add(gap);
instructions_.Add(instr, zone());
instructions_.Add(gap, zone());
}
if (instr->HasPointerMap()) {
pointer_maps_.Add(instr->pointer_map());
pointer_maps_.Add(instr->pointer_map(), zone());
instr->pointer_map()->set_lithium_position(index);
}
}
LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
return LConstantOperand::Create(constant->id());
return LConstantOperand::Create(constant->id(), zone());
}
@ -534,7 +529,8 @@ int LChunk::NearestGapPos(int index) const {
void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
GetGapAt(index)->GetOrCreateParallelMove(
LGap::START, zone())->AddMove(from, to, zone());
}
@ -732,22 +728,6 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
}
LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
LInstruction* instr, int ast_id) {
ASSERT(instruction_pending_deoptimization_environment_ == NULL);
ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
instruction_pending_deoptimization_environment_ = instr;
pending_deoptimization_ast_id_ = ast_id;
return instr;
}
void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
instruction_pending_deoptimization_environment_ = NULL;
pending_deoptimization_ast_id_ = AstNode::kNoNumber;
}
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
@ -760,8 +740,10 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
if (hinstr->HasObservableSideEffects()) {
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
instr = SetInstructionPendingDeoptimizationEnvironment(
instr, sim->ast_id());
ASSERT(instruction_pending_deoptimization_environment_ == NULL);
ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
instruction_pending_deoptimization_environment_ = instr;
pending_deoptimization_ast_id_ = sim->ast_id();
}
// If instruction does not have side-effects lazy deoptimization
@ -779,15 +761,9 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
}
LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
instr->MarkAsSaveDoubles();
return instr;
}
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
instr->set_pointer_map(new(zone()) LPointerMap(position_));
instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
return instr;
}
@ -1010,7 +986,8 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
hydrogen_env->parameter_count(),
argument_count_,
value_count,
outer);
outer,
zone());
int argument_index = *argument_index_accumulator;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@ -1295,6 +1272,7 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
ASSERT(instr->value()->representation().IsInteger32());
ASSERT(instr->representation().IsInteger32());
if (instr->HasNoUses()) return NULL;
LOperand* value = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LBitNotI(value));
}
@ -1319,6 +1297,76 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
uint32_t divisor_abs = abs(divisor);
// Dividing by 0, 1, and powers of 2 is easy.
// Note that IsPowerOf2(0) returns true;
ASSERT(IsPowerOf2(0) == true);
if (IsPowerOf2(divisor_abs)) return true;
// We have magic numbers for a few specific divisors.
// Details and proofs can be found in:
// - Hacker's Delight, Henry S. Warren, Jr.
// - The PowerPC Compiler Writer’s Guide
// and probably many others.
//
// We handle
// <divisor with magic numbers> * <power of 2>
// but not
// <divisor with magic numbers> * <other divisor with magic numbers>
int32_t power_of_2_factor =
CompilerIntrinsics::CountTrailingZeros(divisor_abs);
DivMagicNumbers magic_numbers =
DivMagicNumberFor(divisor_abs >> power_of_2_factor);
if (magic_numbers.M != InvalidDivMagicNumber.M) return true;
return false;
}
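HasMagicNumberForDivisor() gates the new strength reduction for division by a constant: powers of two are always cheap, and a handful of other divisors have precomputed "magic" multipliers (Hacker's Delight) that replace the division with a multiply-high and a fix-up. A hedged worked example of the underlying idea, truncating division by 3 on 32-bit values (the constant is the classic one; V8's DivMagicNumberFor table itself is not shown):

  #include <stdint.h>
  int32_t DivideBy3(int32_t n) {
    const int64_t kMagic = 0x55555556;                     // ceil(2^32 / 3)
    int32_t q = static_cast<int32_t>((kMagic * n) >> 32);  // multiply-high
    q += (n < 0);                                          // round toward zero for negatives
    return q;                                              // == n / 3 for all int32_t n
  }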
HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
// A value with an integer representation does not need to be transformed.
if (dividend->representation().IsInteger32()) {
return dividend;
// A change from an integer32 can be replaced by the integer32 value.
} else if (dividend->IsChange() &&
HChange::cast(dividend)->from().IsInteger32()) {
return HChange::cast(dividend)->value();
}
return NULL;
}
HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
// Only optimize when we have magic numbers for the divisor.
// The standard integer division routine is usually slower than transitioning
// to VFP.
if (divisor->IsConstant() &&
HConstant::cast(divisor)->HasInteger32Value()) {
HConstant* constant_val = HConstant::cast(divisor);
int32_t int32_val = constant_val->Integer32Value();
if (LChunkBuilder::HasMagicNumberForDivisor(int32_val)) {
return constant_val->CopyToRepresentation(Representation::Integer32(),
divisor->block()->zone());
}
}
return NULL;
}
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
HValue* right = instr->right();
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegisterOrConstant(right);
LOperand* remainder = TempRegister();
ASSERT(right->IsConstant() &&
HConstant::cast(right)->HasInteger32Value() &&
HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value()));
return AssignEnvironment(DefineAsRegister(
new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
}
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
@ -1612,7 +1660,8 @@ LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), r0);
LDateField* result = new LDateField(object, FixedTemp(r1), instr->index());
LDateField* result =
new(zone()) LDateField(object, FixedTemp(r1), instr->index());
return MarkAsCall(DefineFixed(result, r0), instr);
}
@ -1661,10 +1710,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegisterAtStart(instr->value());
bool needs_check = !instr->value()->type().IsSmi();
LInstruction* res = NULL;
if (!needs_check) {
res = DefineAsRegister(new(zone()) LSmiUntag(value, needs_check));
if (instr->value()->type().IsSmi()) {
res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
} else {
LOperand* temp1 = TempRegister();
LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
@ -1753,9 +1801,9 @@ LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
}
LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LInstruction* result = new(zone()) LCheckMap(value);
LInstruction* result = new(zone()) LCheckMaps(value);
return AssignEnvironment(result);
}
@ -2037,8 +2085,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
ElementsKind from_kind = instr->original_map()->elements_kind();
ElementsKind to_kind = instr->transitioned_map()->elements_kind();
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
@ -2059,16 +2108,28 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* obj = needs_write_barrier
? UseTempRegister(instr->object())
bool needs_write_barrier_for_map = !instr->transition().is_null() &&
instr->NeedsWriteBarrierForMap();
LOperand* obj;
if (needs_write_barrier) {
obj = instr->is_in_object()
? UseRegister(instr->object())
: UseTempRegister(instr->object());
} else {
obj = needs_write_barrier_for_map
? UseRegister(instr->object())
: UseRegisterAtStart(instr->object());
}
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())
: UseRegister(instr->value());
return new(zone()) LStoreNamedField(obj, val);
// We need a temporary register for write barrier of the map field.
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
return new(zone()) LStoreNamedField(obj, val, temp);
}
@ -2111,7 +2172,8 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
LAllocateObject* result = new LAllocateObject(TempRegister(), TempRegister());
LAllocateObject* result =
new(zone()) LAllocateObject(TempRegister(), TempRegister());
return AssignPointerMap(DefineAsRegister(result));
}
@ -2242,9 +2304,12 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
if (pending_deoptimization_ast_id_ == instr->ast_id()) {
LInstruction* result = new(zone()) LLazyBailout;
result = AssignEnvironment(result);
// Store the lazy deopt environment with the instruction if needed. Right
// now it is only used for LInstanceOfKnownGlobal.
instruction_pending_deoptimization_environment_->
set_deoptimization_environment(result->environment());
ClearInstructionPendingDeoptimizationEnvironment();
SetDeferredLazyDeoptimizationEnvironment(result->environment());
instruction_pending_deoptimization_environment_ = NULL;
pending_deoptimization_ast_id_ = AstNode::kNoNumber;
return result;
}
@ -2271,8 +2336,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
undefined,
instr->call_kind(),
instr->is_construct());
if (instr->arguments() != NULL) {
inner->Bind(instr->arguments(), graph()->GetArgumentsObject());
if (instr->arguments_var() != NULL) {
inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
}
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
@ -2281,10 +2346,21 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
LInstruction* pop = NULL;
HEnvironment* env = current_block_->last_environment();
if (instr->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
argument_count_ -= argument_count;
}
HEnvironment* outer = current_block_->last_environment()->
DiscardInlined(false);
current_block_->UpdateEnvironment(outer);
return NULL;
return pop;
}

116
deps/v8/src/arm/lithium-arm.h

@ -72,7 +72,7 @@ class LCodeGen;
V(CheckFunction) \
V(CheckInstanceType) \
V(CheckNonSmi) \
V(CheckMap) \
V(CheckMaps) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
V(ClampDToUint8) \
@ -132,6 +132,7 @@ class LCodeGen;
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MathFloorOfDiv) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@ -179,7 +180,8 @@ class LCodeGen;
V(CheckMapValue) \
V(LoadFieldByIndex) \
V(DateField) \
V(WrapReceiver)
V(WrapReceiver) \
V(Drop)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@ -203,15 +205,14 @@ class LInstruction: public ZoneObject {
LInstruction()
: environment_(NULL),
hydrogen_value_(NULL),
is_call_(false),
is_save_doubles_(false) { }
is_call_(false) { }
virtual ~LInstruction() { }
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
virtual void PrintTo(StringStream* stream);
virtual void PrintDataTo(StringStream* stream) = 0;
virtual void PrintOutputOperandTo(StringStream* stream) = 0;
virtual void PrintDataTo(StringStream* stream);
virtual void PrintOutputOperandTo(StringStream* stream);
enum Opcode {
// Declare a unique enum value for each instruction.
@ -246,22 +247,12 @@ class LInstruction: public ZoneObject {
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
void set_deoptimization_environment(LEnvironment* env) {
deoptimization_environment_.set(env);
}
LEnvironment* deoptimization_environment() const {
return deoptimization_environment_.get();
}
bool HasDeoptimizationEnvironment() const {
return deoptimization_environment_.is_set();
}
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
void MarkAsCall() { is_call_ = true; }
void MarkAsSaveDoubles() { is_save_doubles_ = true; }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return is_call_; }
bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
virtual bool HasResult() const = 0;
virtual LOperand* result() = 0;
@ -282,9 +273,7 @@ class LInstruction: public ZoneObject {
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
SetOncePointer<LEnvironment> deoptimization_environment_;
bool is_call_;
bool is_save_doubles_;
};
@ -306,9 +295,6 @@ class LTemplateInstruction: public LInstruction {
int TempCount() { return T; }
LOperand* TempAt(int i) { return temps_[i]; }
virtual void PrintDataTo(StringStream* stream);
virtual void PrintOutputOperandTo(StringStream* stream);
protected:
EmbeddedContainer<LOperand*, R> results_;
EmbeddedContainer<LOperand*, I> inputs_;
@ -347,8 +333,10 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
LAST_INNER_POSITION = AFTER
};
LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
if (parallel_moves_[pos] == NULL) {
parallel_moves_[pos] = new(zone) LParallelMove(zone);
}
return parallel_moves_[pos];
}
@ -534,9 +522,8 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
public:
LArgumentsElements() { }
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
};
@ -582,6 +569,21 @@ class LDivI: public LTemplateInstruction<1, 2, 0> {
};
class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
public:
LMathFloorOfDiv(LOperand* left,
LOperand* right,
LOperand* temp = NULL) {
inputs_[0] = left;
inputs_[1] = right;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
};
class LMulI: public LTemplateInstruction<1, 2, 1> {
public:
LMulI(LOperand* left, LOperand* right, LOperand* temp) {
@ -834,6 +836,15 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
Handle<JSFunction> function() const { return hydrogen()->function(); }
LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
return lazy_deopt_env_;
}
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
lazy_deopt_env_ = env;
}
private:
LEnvironment* lazy_deopt_env_;
};
@ -1227,6 +1238,7 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@ -1243,13 +1255,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
LOperand* key) {
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
}
@ -1263,6 +1275,7 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@ -1378,6 +1391,19 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
class LDrop: public LTemplateInstruction<0, 0, 0> {
public:
explicit LDrop(int count) : count_(count) { }
int count() const { return count_; }
DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
private:
int count_;
};
class LThisFunction: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
@ -1460,6 +1486,7 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
};
@ -1659,11 +1686,12 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
public:
LStoreNamedField(LOperand* obj, LOperand* val) {
LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp) {
inputs_[0] = obj;
inputs_[1] = val;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
@ -1717,6 +1745,7 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@ -1739,6 +1768,9 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
@ -1781,6 +1813,7 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@ -1889,14 +1922,14 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
};
class LCheckMap: public LTemplateInstruction<0, 1, 0> {
class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMap(LOperand* value) {
explicit LCheckMaps(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
DECLARE_HYDROGEN_ACCESSOR(CheckMap)
DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
};
@ -2236,9 +2269,11 @@ class LChunk: public ZoneObject {
}
void AddInlinedClosure(Handle<JSFunction> closure) {
inlined_closures_.Add(closure);
inlined_closures_.Add(closure, zone());
}
Zone* zone() const { return graph_->zone(); }
private:
int spill_slot_count_;
CompilationInfo* info_;
@ -2255,7 +2290,7 @@ class LChunkBuilder BASE_EMBEDDED {
: chunk_(NULL),
info_(info),
graph_(graph),
zone_(graph->isolate()->zone()),
zone_(graph->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
@ -2274,6 +2309,10 @@ class LChunkBuilder BASE_EMBEDDED {
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
static bool HasMagicNumberForDivisor(int32_t divisor);
static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
private:
enum Status {
UNUSED,
@ -2369,11 +2408,6 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
LInstruction* MarkAsSaveDoubles(LInstruction* instr);
LInstruction* SetInstructionPendingDeoptimizationEnvironment(
LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment();
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
int* argument_index_accumulator);

458
deps/v8/src/arm/lithium-codegen-arm.cc

@ -571,6 +571,9 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
LInstruction* instr,
SafepointMode safepoint_mode) {
ASSERT(instr != NULL);
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
__ Call(code, mode);
@ -631,14 +634,15 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
++jsframe_count;
}
}
Translation translation(&translations_, frame_count, jsframe_count);
Translation translation(&translations_, frame_count, jsframe_count,
zone());
WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
environment->Register(deoptimization_index,
translation.index(),
(mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
deoptimizations_.Add(environment);
deoptimizations_.Add(environment, zone());
}
}
@ -670,7 +674,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
// jump entry if this is the case.
if (deopt_jump_table_.is_empty() ||
(deopt_jump_table_.last().address != entry)) {
deopt_jump_table_.Add(JumpTableEntry(entry));
deopt_jump_table_.Add(JumpTableEntry(entry), zone());
}
__ b(cc, &deopt_jump_table_.last().label);
}
@ -715,7 +719,7 @@ int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
for (int i = 0; i < deoptimization_literals_.length(); ++i) {
if (deoptimization_literals_[i].is_identical_to(literal)) return i;
}
deoptimization_literals_.Add(literal);
deoptimization_literals_.Add(literal, zone());
return result;
}
@ -761,14 +765,14 @@ void LCodeGen::RecordSafepoint(
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
safepoint.DefinePointerSlot(pointer->index());
safepoint.DefinePointerSlot(pointer->index(), zone());
} else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
safepoint.DefinePointerRegister(ToRegister(pointer));
safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
if (kind & Safepoint::kWithRegisters) {
// Register cp always contains a pointer to the context.
safepoint.DefinePointerRegister(cp);
safepoint.DefinePointerRegister(cp, zone());
}
}
@ -780,7 +784,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
LPointerMap empty_pointers(RelocInfo::kNoPosition);
LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
RecordSafepoint(&empty_pointers, deopt_mode);
}
@ -1034,6 +1038,100 @@ void LCodeGen::DoModI(LModI* instr) {
}
void LCodeGen::EmitSignedIntegerDivisionByConstant(
Register result,
Register dividend,
int32_t divisor,
Register remainder,
Register scratch,
LEnvironment* environment) {
ASSERT(!AreAliased(dividend, scratch, ip));
ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
uint32_t divisor_abs = abs(divisor);
int32_t power_of_2_factor =
CompilerIntrinsics::CountTrailingZeros(divisor_abs);
switch (divisor_abs) {
case 0:
DeoptimizeIf(al, environment);
return;
case 1:
if (divisor > 0) {
__ Move(result, dividend);
} else {
__ rsb(result, dividend, Operand(0), SetCC);
DeoptimizeIf(vs, environment);
}
// Compute the remainder.
__ mov(remainder, Operand(0));
return;
default:
if (IsPowerOf2(divisor_abs)) {
// Branch and condition free code for integer division by a power
// of two.
int32_t power = WhichPowerOf2(divisor_abs);
if (power > 1) {
__ mov(scratch, Operand(dividend, ASR, power - 1));
}
__ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
__ mov(result, Operand(scratch, ASR, power));
// Negate if necessary.
// We don't need to check for overflow because the case '-1' is
// handled separately.
if (divisor < 0) {
ASSERT(divisor != -1);
__ rsb(result, result, Operand(0));
}
// Compute the remainder.
if (divisor > 0) {
__ sub(remainder, dividend, Operand(result, LSL, power));
} else {
__ add(remainder, dividend, Operand(result, LSL, power));
}
return;
} else {
// Use magic numbers for a few specific divisors.
// Details and proofs can be found in:
// - Hacker's Delight, Henry S. Warren, Jr.
// - The PowerPC Compiler Writer’s Guide
// and probably many others.
//
// We handle
// <divisor with magic numbers> * <power of 2>
// but not
// <divisor with magic numbers> * <other divisor with magic numbers>
DivMagicNumbers magic_numbers =
DivMagicNumberFor(divisor_abs >> power_of_2_factor);
// Branch and condition free code for integer division by a power
// of two.
const int32_t M = magic_numbers.M;
const int32_t s = magic_numbers.s + power_of_2_factor;
__ mov(ip, Operand(M));
__ smull(ip, scratch, dividend, ip);
if (M < 0) {
__ add(scratch, scratch, Operand(dividend));
}
if (s > 0) {
__ mov(scratch, Operand(scratch, ASR, s));
}
__ add(result, scratch, Operand(dividend, LSR, 31));
if (divisor < 0) __ rsb(result, result, Operand(0));
// Compute the remainder.
__ mov(ip, Operand(divisor));
// This sequence could be replaced with 'mls' when
// it gets implemented.
__ mul(scratch, result, ip);
__ sub(remainder, dividend, scratch);
}
}
}
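The routine above is the generated-code form of the Hacker's Delight trick its comment cites. For reference, a minimal standalone C++ sketch of the same multiply/shift/fix-up sequence for a single divisor; DivideBy5 and its hard-coded (M, s) pair for divisor 5 are illustrative only, while the real code obtains the pair from DivMagicNumberFor():

#include <cassert>
#include <cstdint>

// Truncating division by 5 via the classic magic pair M = 0x66666667, s = 1.
// Other divisors need their own (M, s) pair; this helper exists only to
// illustrate the technique, it is not part of V8.
int32_t DivideBy5(int32_t dividend) {
  const int32_t M = 0x66666667;
  const int s = 1;
  int64_t product = static_cast<int64_t>(M) * dividend;   // like smull
  int32_t high = static_cast<int32_t>(product >> 32);     // high 32 bits
  // Arithmetic shift, then add the dividend's sign bit (the LSR #31 above).
  return (high >> s) +
         static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
}

int main() {
  assert(DivideBy5(10) == 2);
  assert(DivideBy5(7) == 1);
  assert(DivideBy5(-7) == -1);
  assert(DivideBy5(-10) == -2);
  return 0;
}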
void LCodeGen::DoDivI(LDivI* instr) {
class DeferredDivI: public LDeferredCode {
public:
@ -1096,7 +1194,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Call the stub. The numbers in r0 and r1 have
// to be tagged to Smis. If that is not possible, deoptimize.
DeferredDivI* deferred = new DeferredDivI(this, instr);
DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr);
__ TrySmiTag(left, &deoptimize, scratch);
__ TrySmiTag(right, &deoptimize, scratch);
@ -1115,6 +1213,34 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
const Register result = ToRegister(instr->result());
const Register left = ToRegister(instr->InputAt(0));
const Register remainder = ToRegister(instr->TempAt(0));
const Register scratch = scratch0();
// We only optimize this for division by constants, because the standard
// integer division routine is usually slower than transitioning to VFP.
// This could be optimized on processors with SDIV available.
ASSERT(instr->InputAt(1)->IsConstantOperand());
int32_t divisor = ToInteger32(LConstantOperand::cast(instr->InputAt(1)));
if (divisor < 0) {
__ cmp(left, Operand(0));
DeoptimizeIf(eq, instr->environment());
}
EmitSignedIntegerDivisionByConstant(result,
left,
divisor,
remainder,
scratch,
instr->environment());
// We performed a truncating division. Correct the result if necessary.
__ cmp(remainder, Operand(0));
__ teq(remainder, Operand(divisor), ne);
__ sub(result, result, Operand(1), LeaveCC, mi);
}
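The cmp/teq/sub tail above converts the truncating quotient into a flooring one: when the remainder is non-zero and its sign differs from the divisor's, the quotient is decremented. A hedged C++ equivalent of that correction (FloorDiv is an illustrative name, not a V8 helper):

#include <cassert>

int FloorDiv(int dividend, int divisor) {
  int quotient = dividend / divisor;        // C++ division truncates toward 0
  int remainder = dividend - quotient * divisor;
  if (remainder != 0 && ((remainder ^ divisor) < 0)) {
    --quotient;                             // round toward negative infinity
  }
  return quotient;
}

int main() {
  assert(FloorDiv(7, 2) == 3);
  assert(FloorDiv(-7, 2) == -4);
  assert(FloorDiv(7, -2) == -4);
  assert(FloorDiv(-7, -2) == 3);
  return 0;
}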
template<int T>
void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
Token::Value op) {
@ -1562,6 +1688,9 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
BinaryOpStub stub(instr->op(), NO_OVERWRITE);
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ nop(); // Signals no inlined code.
}
@ -2174,7 +2303,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
};
DeferredInstanceOfKnownGlobal* deferred;
deferred = new DeferredInstanceOfKnownGlobal(this, instr);
deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
Register object = ToRegister(instr->InputAt(0));
@ -2193,6 +2322,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Label cache_miss;
Register map = temp;
__ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
{
// Block constant pool emission to ensure the positions of instructions are
// as expected by the patcher. See InstanceofStub::Generate().
Assembler::BlockConstPoolScope block_const_pool(masm());
__ bind(deferred->map_check()); // Label for calculating code patching.
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch with
@ -2207,6 +2340,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// root array to force relocation to be able to later patch
// with true or false.
__ mov(result, Operand(factory()->the_hole_value()));
}
__ b(&done);
// The inlined call site cache did not match. Check null and string before
@ -2267,8 +2401,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
ASSERT(instr->HasDeoptimizationEnvironment());
LEnvironment* env = instr->deoptimization_environment();
LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
// Put the result value into the result register slot and
// restore all registers.
@ -2438,12 +2571,12 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
Handle<String> name) {
Handle<String> name,
LEnvironment* env) {
LookupResult lookup(isolate());
type->LookupInDescriptors(NULL, *name, &lookup);
ASSERT(lookup.IsFound() &&
(lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
if (lookup.type() == FIELD) {
ASSERT(lookup.IsFound() || lookup.IsCacheable());
if (lookup.IsFound() && lookup.type() == FIELD) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
int offset = index * kPointerSize;
if (index < 0) {
@ -2455,9 +2588,23 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
__ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
__ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
}
} else {
} else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
__ LoadHeapObject(result, function);
} else {
// Negative lookup.
// Check prototypes.
HeapObject* current = HeapObject::cast((*type)->prototype());
Heap* heap = type->GetHeap();
while (current != heap->null_value()) {
Handle<HeapObject> link(current);
__ LoadHeapObject(result, link);
__ ldr(result, FieldMemOperand(result, HeapObject::kMapOffset));
__ cmp(result, Operand(Handle<Map>(JSObject::cast(current)->map())));
DeoptimizeIf(ne, env);
current = HeapObject::cast(current->map()->prototype());
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
}
}
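In rough terms, the negative-lookup branch added above walks the prototype chain and keeps undefined as the answer only while every prototype still carries the map recorded at compile time; any mismatch deoptimizes. A plain C++ sketch under an assumed, much-simplified object model (none of these types are V8's):

#include <cassert>
#include <cstddef>

struct Map { int id; };              // stand-in for a hidden class
struct HeapObject {
  const Map* map;
  const HeapObject* prototype;       // nullptr terminates the chain
};

// Returns true when "undefined" is still a safe answer; the generated code
// instead emits DeoptimizeIf(ne, env) at the point this returns false.
bool NegativeLookupStillValid(const HeapObject* chain,
                              const Map* const expected[], std::size_t count) {
  const HeapObject* current = chain;
  for (std::size_t i = 0; i < count && current != nullptr; ++i) {
    if (current->map != expected[i]) return false;
    current = current->prototype;
  }
  return true;
}

int main() {
  Map object_map{1}, proto_map{2}, other_map{3};
  HeapObject root{&proto_map, nullptr};
  HeapObject receiver_proto{&object_map, &root};
  const Map* expected[] = {&object_map, &proto_map};
  assert(NegativeLookupStillValid(&receiver_proto, expected, 2));
  receiver_proto.map = &other_map;   // a prototype's map changed
  assert(!NegativeLookupStillValid(&receiver_proto, expected, 2));
  return 0;
}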
@ -2465,44 +2612,46 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
Register object_map = scratch0();
int map_count = instr->hydrogen()->types()->length();
bool need_generic = instr->hydrogen()->need_generic();
if (map_count == 0 && !need_generic) {
DeoptimizeIf(al, instr->environment());
return;
}
Handle<String> name = instr->hydrogen()->name();
if (map_count == 0) {
ASSERT(instr->hydrogen()->need_generic());
__ mov(r2, Operand(name));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
Label done;
__ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
for (int i = 0; i < map_count - 1; ++i) {
__ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
for (int i = 0; i < map_count; ++i) {
bool last = (i == map_count - 1);
Handle<Map> map = instr->hydrogen()->types()->at(i);
Label check_passed;
__ CompareMap(
object_map, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
if (last && !need_generic) {
DeoptimizeIf(ne, instr->environment());
__ bind(&check_passed);
EmitLoadFieldOrConstantFunction(
result, object, map, name, instr->environment());
} else {
Label next;
__ cmp(scratch, Operand(map));
__ b(ne, &next);
EmitLoadFieldOrConstantFunction(result, object, map, name);
__ bind(&check_passed);
EmitLoadFieldOrConstantFunction(
result, object, map, name, instr->environment());
__ b(&done);
__ bind(&next);
}
Handle<Map> map = instr->hydrogen()->types()->last();
__ cmp(scratch, Operand(map));
if (instr->hydrogen()->need_generic()) {
Label generic;
__ b(ne, &generic);
EmitLoadFieldOrConstantFunction(result, object, map, name);
__ b(&done);
__ bind(&generic);
}
if (need_generic) {
__ mov(r2, Operand(name));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
DeoptimizeIf(ne, instr->environment());
EmitLoadFieldOrConstantFunction(result, object, map, name);
}
__ bind(&done);
}
}
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
@ -2579,8 +2728,10 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
__ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
__ ubfx(scratch, scratch, Map::kElementsKindShift,
Map::kElementsKindBitCount);
__ cmp(scratch, Operand(FAST_ELEMENTS));
__ b(eq, &done);
__ cmp(scratch, Operand(GetInitialFastElementsKind()));
__ b(lt, &fail);
__ cmp(scratch, Operand(TERMINAL_FAST_ELEMENTS_KIND));
__ b(le, &done);
__ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
__ b(lt, &fail);
__ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
@ -2627,15 +2778,22 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
// Load the result.
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
__ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
uint32_t offset = FixedArray::kHeaderSize +
(instr->additional_index() << kPointerSizeLog2);
__ ldr(result, FieldMemOperand(scratch, offset));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ tst(result, Operand(kSmiTagMask));
DeoptimizeIf(ne, instr->environment());
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ cmp(result, scratch);
DeoptimizeIf(eq, instr->environment());
}
}
}
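The additional_index() plumbing introduced here simply folds a constant extra index into the element offset. With assumed 32-bit constants (4-byte pointers, an 8-byte FixedArray header), the effective offset from the start of the array is:

#include <cassert>
#include <cstdint>

constexpr int kPointerSizeLog2 = 2;       // 4-byte pointers on 32-bit ARM
constexpr int kFixedArrayHeaderSize = 8;  // assumed: map word + length word

uint32_t FastElementOffset(uint32_t key, uint32_t additional_index) {
  // header + ((key + additional_index) << kPointerSizeLog2)
  return kFixedArrayHeaderSize +
         ((key + additional_index) << kPointerSizeLog2);
}

int main() {
  assert(FastElementOffset(0, 0) == 8);
  assert(FastElementOffset(3, 0) == 8 + 12);
  assert(FastElementOffset(3, 1) == 8 + 16);  // constant extra index folded in
  return 0;
}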
void LCodeGen::DoLoadKeyedFastDoubleElement(
@ -2659,18 +2817,21 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
}
Operand operand = key_is_constant
? Operand(constant_key * (1 << shift_size) +
? Operand(((constant_key + instr->additional_index()) << shift_size) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(elements, elements, operand);
if (!key_is_constant) {
__ add(elements, elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
(instr->additional_index() << shift_size)));
}
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
}
__ vldr(result, elements, 0);
}
@ -2692,26 +2853,33 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
key = ToRegister(instr->key());
}
int shift_size = ElementsKindToShiftSize(elements_kind);
int additional_offset = instr->additional_index() << shift_size;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
? Operand(constant_key * (1 << shift_size))
? Operand(constant_key << shift_size)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vldr(result.low(), scratch0(), 0);
__ vldr(result.low(), scratch0(), additional_offset);
__ vcvt_f64_f32(result, result.low());
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vldr(result, scratch0(), 0);
__ vldr(result, scratch0(), additional_offset);
}
} else {
Register result = ToRegister(instr->result());
if (instr->additional_index() != 0 && !key_is_constant) {
__ add(scratch0(), key, Operand(instr->additional_index()));
}
MemOperand mem_operand(key_is_constant
? MemOperand(external_pointer, constant_key * (1 << shift_size))
: MemOperand(external_pointer, key, LSL, shift_size));
? MemOperand(external_pointer,
(constant_key << shift_size) + additional_offset)
: (instr->additional_index() == 0
? MemOperand(external_pointer, key, LSL, shift_size)
: MemOperand(external_pointer, scratch0(), LSL, shift_size)));
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
__ ldrsb(result, mem_operand);
@ -2739,9 +2907,12 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -2764,6 +2935,9 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register scratch = scratch0();
Register result = ToRegister(instr->result());
if (instr->hydrogen()->from_inlined()) {
__ sub(result, sp, Operand(2 * kPointerSize));
} else {
// Check if the calling frame is an arguments adaptor frame.
Label done, adapted;
__ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@ -2775,6 +2949,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
__ mov(result, fp, LeaveCC, ne);
__ mov(result, scratch, LeaveCC, eq);
}
}
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
@ -2882,7 +3057,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ b(ne, &loop);
__ bind(&invoke);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
@ -2907,6 +3082,11 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
}
void LCodeGen::DoDrop(LDrop* instr) {
__ Drop(instr->count());
}
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
__ LoadHeapObject(result, instr->hydrogen()->closure());
@ -2953,7 +3133,8 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int arity,
LInstruction* instr,
CallKind call_kind) {
CallKind call_kind,
R1State r1_state) {
bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
function->shared()->formal_parameter_count() == arity;
@ -2961,7 +3142,10 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
RecordPosition(pointers->position());
if (can_invoke_directly) {
if (r1_state == R1_UNINITIALIZED) {
__ LoadHeapObject(r1, function);
}
// Change context if needed.
bool change_context =
(info()->closure()->context() != function->context()) ||
@ -3000,7 +3184,8 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
CallKnownFunction(instr->function(),
instr->arity(),
instr,
CALL_AS_METHOD);
CALL_AS_METHOD,
R1_UNINITIALIZED);
}
@ -3109,7 +3294,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
} else {
// Representation is tagged.
DeferredMathAbsTaggedHeapNumber* deferred =
new DeferredMathAbsTaggedHeapNumber(this, instr);
new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
Register input = ToRegister(instr->InputAt(0));
// Smi check.
__ JumpIfNotSmi(input, deferred->entry());
@ -3286,7 +3471,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
LRandom* instr_;
};
DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
// Having marked this instruction as a call we can use any
// registers.
@ -3424,13 +3609,21 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(ToRegister(instr->function()).is(r1));
ASSERT(instr->HasPointerMap());
ASSERT(instr->HasDeoptimizationEnvironment());
if (instr->known_function().is_null()) {
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
__ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
CallKnownFunction(instr->known_function(),
instr->arity(),
instr,
CALL_AS_METHOD,
R1_CONTAINS_TARGET);
}
}
@ -3485,7 +3678,11 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
CallKnownFunction(instr->target(),
instr->arity(),
instr,
CALL_AS_FUNCTION,
R1_UNINITIALIZED);
}
@ -3515,6 +3712,18 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (!instr->transition().is_null()) {
__ mov(scratch, Operand(instr->transition()));
__ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
Register temp = ToRegister(instr->TempAt(0));
// Update the write barrier for the map field.
__ RecordWriteField(object,
HeapObject::kMapOffset,
scratch,
temp,
kLRHasBeenSaved,
kSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
}
// Do the store.
@ -3583,10 +3792,16 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
int offset =
ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
(ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
+ FixedArray::kHeaderSize;
__ str(value, FieldMemOperand(elements, offset));
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
if (instr->additional_index() != 0) {
__ add(scratch,
scratch,
Operand(instr->additional_index() << kPointerSizeLog2));
}
__ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
}
@ -3615,7 +3830,6 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
Register scratch = scratch0();
bool key_is_constant = instr->key()->IsConstantOperand();
int constant_key = 0;
Label not_nan;
// Calculate the effective address of the slot in the array to store the
// double value.
@ -3629,7 +3843,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
}
int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
Operand operand = key_is_constant
? Operand(constant_key * (1 << shift_size) +
? Operand((constant_key << shift_size) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(scratch, elements, operand);
@ -3638,14 +3852,16 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
}
if (instr->NeedsCanonicalization()) {
// Check for NaN. All NaNs must be canonicalized.
__ VFPCompareAndSetFlags(value, value);
// Only load canonical NaN if the comparison above set the overflow.
__ Vmov(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double(), vs);
__ Vmov(value,
FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
vs);
}
__ bind(&not_nan);
__ vstr(value, scratch, 0);
__ vstr(value, scratch, instr->additional_index() << shift_size);
}
@ -3666,25 +3882,33 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
key = ToRegister(instr->key());
}
int shift_size = ElementsKindToShiftSize(elements_kind);
int additional_offset = instr->additional_index() << shift_size;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister value(ToDoubleRegister(instr->value()));
Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
Operand operand(key_is_constant ? Operand(constant_key << shift_size)
: Operand(key, LSL, shift_size));
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
__ vstr(double_scratch0().low(), scratch0(), 0);
__ vstr(double_scratch0().low(), scratch0(), additional_offset);
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vstr(value, scratch0(), 0);
__ vstr(value, scratch0(), additional_offset);
}
} else {
Register value(ToRegister(instr->value()));
if (instr->additional_index() != 0 && !key_is_constant) {
__ add(scratch0(), key, Operand(instr->additional_index()));
}
MemOperand mem_operand(key_is_constant
? MemOperand(external_pointer, constant_key * (1 << shift_size))
: MemOperand(external_pointer, key, LSL, shift_size));
? MemOperand(external_pointer,
((constant_key + instr->additional_index())
<< shift_size))
: (instr->additional_index() == 0
? MemOperand(external_pointer, key, LSL, shift_size)
: MemOperand(external_pointer, scratch0(), LSL, shift_size)));
switch (elements_kind) {
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
@ -3703,7 +3927,10 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3740,20 +3967,22 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ cmp(scratch, Operand(from_map));
__ b(ne, &not_applicable);
__ mov(new_map_reg, Operand(to_map));
if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
__ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, kLRHasBeenSaved, kDontSaveFPRegs);
} else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
to_kind == FAST_DOUBLE_ELEMENTS) {
} else if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(r2));
ASSERT(new_map_reg.is(r3));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
} else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
} else if (IsFastDoubleElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(r2));
ASSERT(new_map_reg.is(r3));
@ -3787,7 +4016,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
};
DeferredStringCharCodeAt* deferred =
new DeferredStringCharCodeAt(this, instr);
new(zone()) DeferredStringCharCodeAt(this, instr);
StringCharLoadGenerator::Generate(masm(),
ToRegister(instr->string()),
@ -3842,7 +4071,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
};
DeferredStringCharFromCode* deferred =
new DeferredStringCharFromCode(this, instr);
new(zone()) DeferredStringCharFromCode(this, instr);
ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
Register char_code = ToRegister(instr->char_code());
@ -3916,7 +4145,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
Register src = ToRegister(instr->InputAt(0));
Register dst = ToRegister(instr->result());
DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
__ SmiTag(dst, src, SetCC);
__ b(vs, deferred->entry());
__ bind(deferred->exit());
@ -3987,7 +4216,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Register temp1 = ToRegister(instr->TempAt(0));
Register temp2 = ToRegister(instr->TempAt(1));
DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
@ -4189,7 +4418,7 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(input);
DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
// Optimistically untag the input.
// If the input is a HeapObject, SmiUntag will set the carry flag.
@ -4338,14 +4567,22 @@ void LCodeGen::DoCheckMapCommon(Register reg,
}
void LCodeGen::DoCheckMap(LCheckMap* instr) {
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
Register scratch = scratch0();
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
Handle<Map> map = instr->hydrogen()->map();
DoCheckMapCommon(reg, scratch, map, instr->hydrogen()->mode(),
instr->environment());
Label success;
SmallMapList* map_set = instr->hydrogen()->map_set();
for (int i = 0; i < map_set->length() - 1; i++) {
Handle<Map> map = map_set->at(i);
__ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP);
__ b(eq, &success);
}
Handle<Map> map = map_set->last();
DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
__ bind(&success);
}
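The new DoCheckMaps compares the receiver's map against a whole map set rather than a single map: every map except the last branches straight to the success label, and only a miss on the final map deoptimizes. A compact, illustrative C++ shape of that control flow (not V8's types; the throw stands in for the deopt):

#include <cstddef>
#include <stdexcept>
#include <vector>

// object_map: the receiver's current map; map_set: the maps collected by the
// type oracle (assumed non-empty).
void CheckMaps(const void* object_map,
               const std::vector<const void*>& map_set) {
  for (std::size_t i = 0; i + 1 < map_set.size(); ++i) {
    if (object_map == map_set[i]) return;       // __ b(eq, &success)
  }
  if (object_map != map_set.back()) {
    throw std::runtime_error("deopt: unexpected map");
  }
}

int main() {
  int a, b, c, d;
  CheckMaps(&b, {&a, &b, &c});      // matches the second map: success
  try {
    CheckMaps(&d, {&a, &b, &c});    // no match: would trigger a deopt
    return 1;
  } catch (const std::runtime_error&) {
    return 0;
  }
}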
@ -4441,7 +4678,8 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
LAllocateObject* instr_;
};
DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
DeferredAllocateObject* deferred =
new(zone()) DeferredAllocateObject(this, instr);
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->TempAt(0));
@ -4464,6 +4702,14 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
deferred->entry(),
TAG_OBJECT);
__ bind(deferred->exit());
if (FLAG_debug_code) {
Label is_in_new_space;
__ JumpIfInNewSpace(result, scratch, &is_in_new_space);
__ Abort("Allocated object is not in new-space");
__ bind(&is_in_new_space);
}
// Load the initial map.
Register map = scratch;
__ LoadHeapObject(map, constructor);
@ -4482,14 +4728,14 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
__ str(scratch, FieldMemOperand(result, property_offset));
}
}
__ bind(deferred->exit());
}
void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
Register result = ToRegister(instr->result());
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
Handle<Map> initial_map(constructor->initial_map());
int instance_size = initial_map->instance_size();
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
@ -4497,9 +4743,9 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
__ mov(result, Operand(0));
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ LoadHeapObject(r0, constructor);
__ mov(r0, Operand(Smi::FromInt(instance_size)));
__ push(r0);
CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr);
CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
__ StoreToSafepointRegisterSlot(r0, result);
}
@ -4511,8 +4757,9 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
// already been converted to FAST_ELEMENTS.
if (boilerplate_elements_kind != FAST_ELEMENTS) {
// already been converted to TERMINAL_FAST_ELEMENTS_KIND.
if (CanTransitionToMoreGeneralFastElementsKind(
boilerplate_elements_kind, true)) {
__ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
// Load map into r2.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
@ -4633,9 +4880,10 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
__ str(r2, FieldMemOperand(result, total_offset + 4));
}
} else if (elements->IsFixedArray()) {
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
for (int i = 0; i < elements_length; i++) {
int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
Handle<Object> value = JSObject::GetElement(object, i);
Handle<Object> value(fast_elements->get(i));
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ add(r2, result, Operand(*offset));
@ -4659,6 +4907,24 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
int size = instr->hydrogen()->total_size();
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate()->GetElementsKind();
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
// already been converted to TERMINAL_FAST_ELEMENTS_KIND.
if (CanTransitionToMoreGeneralFastElementsKind(
boilerplate_elements_kind, true)) {
__ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
// Load map into r2.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
// Load the map's "bit field 2".
__ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
__ cmp(r2, Operand(boilerplate_elements_kind));
DeoptimizeIf(ne, instr->environment());
}
// Allocate all objects that are part of the literal in one big
// allocation. This avoids multiple limit checks.
@ -4923,6 +5189,8 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
int current_pc = masm()->pc_offset();
int patch_size = Deoptimizer::patch_size();
if (current_pc < last_lazy_deopt_pc_ + patch_size) {
// Block literal pool emission for duration of padding.
Assembler::BlockConstPoolScope block_const_pool(masm());
int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
while (padding_size > 0) {
@ -4954,7 +5222,7 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
Register strict = scratch0();
__ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
__ Push(object, key, strict);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
@ -4967,7 +5235,7 @@ void LCodeGen::DoIn(LIn* instr) {
Register obj = ToRegister(instr->object());
Register key = ToRegister(instr->key());
__ Push(key, obj);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
@ -5017,7 +5285,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
DeferredStackCheck* deferred_stack_check =
new DeferredStackCheck(this, instr);
new(zone()) DeferredStackCheck(this, instr);
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(lo, deferred_stack_check->entry());

41
deps/v8/src/arm/lithium-codegen-arm.h

@ -43,22 +43,26 @@ class SafepointGenerator;
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info,
Zone* zone)
: chunk_(chunk),
masm_(assembler),
info_(info),
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
deoptimizations_(4),
deopt_jump_table_(4),
deoptimization_literals_(8),
deoptimizations_(4, zone),
deopt_jump_table_(4, zone),
deoptimization_literals_(8, zone),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
deferred_(8),
translations_(zone),
deferred_(8, zone),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
safepoints_(zone),
zone_(zone),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
@ -71,6 +75,7 @@ class LCodeGen BASE_EMBEDDED {
Isolate* isolate() const { return info_->isolate(); }
Factory* factory() const { return isolate()->factory(); }
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
// Support for converting LOperands to assembler types.
// LOperand must be a register.
@ -176,7 +181,7 @@ class LCodeGen BASE_EMBEDDED {
void Abort(const char* format, ...);
void Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Code generation passes. Returns true if code generation should
// continue.
@ -215,12 +220,18 @@ class LCodeGen BASE_EMBEDDED {
int argc,
LInstruction* instr);
enum R1State {
R1_UNINITIALIZED,
R1_CONTAINS_TARGET
};
// Generate a direct call to a known function. Expects the function
// to be in r1.
void CallKnownFunction(Handle<JSFunction> function,
int arity,
LInstruction* instr,
CallKind call_kind);
CallKind call_kind,
R1State r1_state);
void LoadHeapObject(Register result, Handle<HeapObject> object);
@ -308,7 +319,8 @@ class LCodeGen BASE_EMBEDDED {
void EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
Handle<String> name);
Handle<String> name,
LEnvironment* env);
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
@ -317,6 +329,17 @@ class LCodeGen BASE_EMBEDDED {
Register source,
int* offset);
// Emit optimized code for integer division.
// Inputs are signed.
// All registers are clobbered.
// If 'remainder' is no_reg, it is not computed.
void EmitSignedIntegerDivisionByConstant(Register result,
Register dividend,
int32_t divisor,
Register remainder,
Register scratch,
LEnvironment* environment);
struct JumpTableEntry {
explicit inline JumpTableEntry(Address entry)
: label(),
@ -349,6 +372,8 @@ class LCodeGen BASE_EMBEDDED {
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
Zone* zone_;
// Compiles a set of parallel moves into a sequential list of moves.
LGapResolver resolver_;

4
deps/v8/src/arm/lithium-gap-resolver-arm.cc

@ -36,7 +36,7 @@ namespace internal {
static const Register kSavedValueRegister = { 9 };
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner), moves_(32), root_index_(0), in_cycle_(false),
: cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
saved_destination_(NULL) { }
@ -79,7 +79,7 @@ void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) moves_.Add(move);
if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
}
Verify();
}

119
deps/v8/src/arm/macro-assembler-arm.cc

@ -1868,10 +1868,12 @@ void MacroAssembler::CompareRoot(Register obj,
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
STATIC_ASSERT(FAST_ELEMENTS == 1);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
b(hi, fail);
}
@ -1879,22 +1881,25 @@ void MacroAssembler::CheckFastElements(Register map,
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
STATIC_ASSERT(FAST_ELEMENTS == 1);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
b(ls, fail);
cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
b(hi, fail);
}
void MacroAssembler::CheckFastSmiOnlyElements(Register map,
void MacroAssembler::CheckFastSmiElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
b(hi, fail);
}
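These STATIC_ASSERTs exist because the helpers rely on the renumbered fast elements kinds being contiguous from zero, so a single unsigned comparison covers the whole group. A reduced sketch, where the enum values follow the asserts but the real bit-field packing and the rest of the enum are omitted:

#include <cassert>

enum ElementsKind {
  FAST_SMI_ELEMENTS = 0,
  FAST_HOLEY_SMI_ELEMENTS = 1,
  FAST_ELEMENTS = 2,
  FAST_HOLEY_ELEMENTS = 3,
  FAST_DOUBLE_ELEMENTS = 4   // first kind outside the smi/object fast range
};

// One comparison replaces four equality checks, mirroring the single
// cmp / b(hi, fail) pair emitted by CheckFastElements.
bool IsFastSmiOrObjectKind(ElementsKind kind) {
  return static_cast<unsigned>(kind) <= FAST_HOLEY_ELEMENTS;
}

bool IsFastSmiKind(ElementsKind kind) {
  return static_cast<unsigned>(kind) <= FAST_HOLEY_SMI_ELEMENTS;
}

int main() {
  assert(IsFastSmiOrObjectKind(FAST_HOLEY_ELEMENTS));
  assert(!IsFastSmiOrObjectKind(FAST_DOUBLE_ELEMENTS));
  assert(IsFastSmiKind(FAST_HOLEY_SMI_ELEMENTS));
  assert(!IsFastSmiKind(FAST_ELEMENTS));
  return 0;
}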
@ -1995,24 +2000,27 @@ void MacroAssembler::CompareMap(Register obj,
Label* early_success,
CompareMapMode mode) {
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
cmp(scratch, Operand(map));
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
Map* transitioned_fast_element_map(
map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
ASSERT(transitioned_fast_element_map == NULL ||
map->elements_kind() != FAST_ELEMENTS);
if (transitioned_fast_element_map != NULL) {
b(eq, early_success);
cmp(scratch, Operand(Handle<Map>(transitioned_fast_element_map)));
CompareMap(scratch, map, early_success, mode);
}
Map* transitioned_double_map(
map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
ASSERT(transitioned_double_map == NULL ||
map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
if (transitioned_double_map != NULL) {
void MacroAssembler::CompareMap(Register obj_map,
Handle<Map> map,
Label* early_success,
CompareMapMode mode) {
cmp(obj_map, Operand(map));
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
ElementsKind kind = map->elements_kind();
if (IsFastElementsKind(kind)) {
bool packed = IsFastPackedElementsKind(kind);
Map* current_map = *map;
while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
kind = GetNextMoreGeneralFastElementsKind(kind, packed);
current_map = current_map->LookupElementsTransitionMap(kind);
if (!current_map) break;
b(eq, early_success);
cmp(scratch, Operand(Handle<Map>(transitioned_double_map)));
cmp(obj_map, Operand(Handle<Map>(current_map)));
}
}
}
}
@ -2865,28 +2873,38 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check that the function's map is the same as the expected cached map.
int expected_index =
Context::GetContextMapIndexFromElementsKind(expected_kind);
ldr(ip, MemOperand(scratch, Context::SlotOffset(expected_index)));
cmp(map_in_out, ip);
ldr(scratch,
MemOperand(scratch,
Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
size_t offset = expected_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
cmp(map_in_out, scratch);
b(ne, no_map_match);
// Use the transitioned cached map.
int trans_index =
Context::GetContextMapIndexFromElementsKind(transitioned_kind);
ldr(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
offset = transitioned_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
ldr(map_in_out, FieldMemOperand(scratch, offset));
}
void MacroAssembler::LoadInitialArrayMap(
Register function_in, Register scratch, Register map_out) {
Register function_in, Register scratch,
Register map_out, bool can_have_holes) {
ASSERT(!function_in.is(map_out));
Label done;
ldr(map_out, FieldMemOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
kind,
map_out,
scratch,
&done);
} else if (can_have_holes) {
LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_HOLEY_SMI_ELEMENTS,
map_out,
scratch,
&done);
@ -3710,22 +3728,35 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
}
bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
if (r1.is(r2)) return true;
if (r1.is(r3)) return true;
if (r1.is(r4)) return true;
if (r2.is(r3)) return true;
if (r2.is(r4)) return true;
if (r3.is(r4)) return true;
return false;
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
Register reg3,
Register reg4,
Register reg5,
Register reg6) {
int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid();
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();
if (reg2.is_valid()) regs |= reg2.bit();
if (reg3.is_valid()) regs |= reg3.bit();
if (reg4.is_valid()) regs |= reg4.bit();
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
int n_of_non_aliasing_regs = NumRegs(regs);
return n_of_valid_regs != n_of_non_aliasing_regs;
}
#endif
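The rewritten AreAliased replaces the pairwise comparisons with a bitmask: OR each valid register's bit into a RegList and compare the population count with the number of valid registers; any duplicate collapses two bits into one. A standalone equivalent, where AnyAliased and the use of -1 for no_reg are assumptions made for illustration:

#include <bitset>
#include <cassert>
#include <initializer_list>

bool AnyAliased(std::initializer_list<int> reg_codes) {
  unsigned mask = 0;
  int valid = 0;
  for (int code : reg_codes) {
    if (code < 0) continue;            // -1 plays the role of no_reg
    ++valid;
    mask |= 1u << code;                // regs |= regN.bit()
  }
  // A duplicate register sets the same bit twice, so the counts diverge.
  return valid != static_cast<int>(std::bitset<32>(mask).count());
}

int main() {
  assert(!AnyAliased({0, 1, 2}));        // r0, r1, r2: all distinct
  assert(AnyAliased({0, 1, 1}));         // r1 passed twice
  assert(!AnyAliased({4, -1, 7, -1}));   // unused slots are ignored
  return 0;
}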
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
size_(instructions * Assembler::kInstrSize),
masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
masm_(NULL, address, size_ + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.

23
deps/v8/src/arm/macro-assembler-arm.h

@ -85,7 +85,14 @@ enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
bool AreAliased(Register r1, Register r2, Register r3, Register r4);
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
Register reg3 = no_reg,
Register reg4 = no_reg,
Register reg5 = no_reg,
Register reg6 = no_reg);
#endif
// MacroAssembler implements a collection of frequently used macros.
@ -505,7 +512,8 @@ class MacroAssembler: public Assembler {
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
Register map_out);
Register map_out,
bool can_have_holes);
void LoadGlobalFunction(int index, Register function);
@ -795,7 +803,7 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
void CheckFastSmiOnlyElements(Register map,
void CheckFastSmiElements(Register map,
Register scratch,
Label* fail);
@ -823,6 +831,13 @@ class MacroAssembler: public Assembler {
Label* early_success,
CompareMapMode mode = REQUIRE_EXACT_MAP);
// As above, but the map of the object is already loaded into the register
// which is preserved by the code generated.
void CompareMap(Register obj_map,
Handle<Map> map,
Label* early_success,
CompareMapMode mode = REQUIRE_EXACT_MAP);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
// heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
@ -1321,7 +1336,6 @@ class MacroAssembler: public Assembler {
};
#ifdef ENABLE_DEBUGGER_SUPPORT
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
@ -1351,7 +1365,6 @@ class CodePatcher {
int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
};
#endif // ENABLE_DEBUGGER_SUPPORT
// -----------------------------------------------------------------------------

210
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -43,28 +43,31 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
* - r4 : Temporarily stores the index of capture start after a matching pass
* for a global regexp.
* - r5 : Pointer to current code object (Code*) including heap object tag.
* - r6 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - r7 : Currently loaded character. Must be loaded using
* LoadCurrentCharacter before using any of the dispatch methods.
* - r8 : points to tip of backtrack stack
* - r8 : Points to tip of backtrack stack
* - r9 : Unused, might be used by C code and expected unchanged.
* - r10 : End of input (points to byte after last character in input).
* - r11 : Frame pointer. Used to access arguments, local variables and
* RegExp registers.
* - r12 : IP register, used by assembler. Very volatile.
* - r13/sp : points to tip of C stack.
* - r13/sp : Points to tip of C stack.
*
* The remaining registers are free for computations.
* Each call to a public method should retain this convention.
*
* The stack will have the following structure:
* - fp[52] Isolate* isolate (Address of the current isolate)
* - fp[48] direct_call (if 1, direct call from JavaScript code,
* - fp[56] Isolate* isolate (address of the current isolate)
* - fp[52] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
* - fp[44] stack_area_base (High end of the memory area to use as
* - fp[48] stack_area_base (high end of the memory area to use as
* backtracking stack).
* - fp[44] capture array size (may fit multiple sets of matches)
* - fp[40] int* capture_array (int[num_saved_registers_], for output).
* - fp[36] secondary link/return address used by native call.
* --- sp when called ---
@ -72,16 +75,17 @@ namespace internal {
* - fp[28] old frame pointer (r11).
* - fp[0..24] backup of registers r4..r10.
* --- frame pointer ----
* - fp[-4] end of input (Address of end of string).
* - fp[-8] start of input (Address of first character in string).
* - fp[-4] end of input (address of end of string).
* - fp[-8] start of input (address of first character in string).
* - fp[-12] start index (character index of start).
* - fp[-16] void* input_string (location of a handle containing the string).
* - fp[-20] Offset of location before start of input (effectively character
* - fp[-20] success counter (only for global regexps to count matches).
* - fp[-24] Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a
* non-position.
* - fp[-24] At start (if 1, we are starting at the start of the
* - fp[-28] At start (if 1, we are starting at the start of the
* string, otherwise 0)
* - fp[-28] register 0 (Only positions must be stored in the first
* - fp[-32] register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
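A quick consistency sketch (not part of the patch): the chained constants added in regexp-macro-assembler-arm.h further down reproduce the fp[...] parameter slots listed in this layout comment. kReturnAddress = 32 is an assumption inferred from the fp[28] old-frame-pointer and fp[36] secondary-link entries.

// Standalone C++ check that the new constants line up with the frame layout above.
constexpr int kPointerSize            = 4;                                       // 32-bit ARM
constexpr int kReturnAddress          = 32;                                      // fp[32] (assumed: saved lr)
constexpr int kSecondaryReturnAddress = kReturnAddress + kPointerSize;           // fp[36] secondary link
constexpr int kRegisterOutput         = kSecondaryReturnAddress + kPointerSize;  // fp[40] capture_array
constexpr int kNumOutputRegisters     = kRegisterOutput + kPointerSize;          // fp[44] capture array size
constexpr int kStackHighEnd           = kNumOutputRegisters + kPointerSize;      // fp[48] stack_area_base
constexpr int kDirectCall             = kStackHighEnd + kPointerSize;            // fp[52] direct_call
constexpr int kIsolate                = kDirectCall + kPointerSize;              // fp[56] isolate
static_assert(kIsolate == 56, "matches the fp[56] Isolate* slot in the comment");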
@ -115,8 +119,10 @@ namespace internal {
RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
Mode mode,
int registers_to_save)
: masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
int registers_to_save,
Zone* zone)
: NativeRegExpMacroAssembler(zone),
masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@ -197,9 +203,9 @@ void RegExpMacroAssemblerARM::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
__ ldr(r0, MemOperand(frame_pointer(), kAtStart));
__ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
__ cmp(r0, Operand(0, RelocInfo::NONE));
BranchOrBacktrack(eq, &not_at_start);
BranchOrBacktrack(ne, &not_at_start);
// If we did, are we still at the start of the input?
__ ldr(r1, MemOperand(frame_pointer(), kInputStart));
@ -212,9 +218,9 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
__ ldr(r0, MemOperand(frame_pointer(), kAtStart));
__ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
__ cmp(r0, Operand(0, RelocInfo::NONE));
BranchOrBacktrack(eq, on_not_at_start);
BranchOrBacktrack(ne, on_not_at_start);
// If we did, are we still at the start of the input?
__ ldr(r1, MemOperand(frame_pointer(), kInputStart));
__ add(r0, end_of_input_address(), Operand(current_input_offset()));
@ -432,16 +438,6 @@ void RegExpMacroAssemblerARM::CheckNotBackReference(
}
void RegExpMacroAssemblerARM::CheckNotRegistersEqual(int reg1,
int reg2,
Label* on_not_equal) {
__ ldr(r0, register_location(reg1));
__ ldr(r1, register_location(reg2));
__ cmp(r0, r1);
BranchOrBacktrack(ne, on_not_equal);
}
void RegExpMacroAssemblerARM::CheckNotCharacter(unsigned c,
Label* on_not_equal) {
__ cmp(current_character(), Operand(c));
@ -452,8 +448,12 @@ void RegExpMacroAssemblerARM::CheckNotCharacter(unsigned c,
void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c,
uint32_t mask,
Label* on_equal) {
if (c == 0) {
__ tst(current_character(), Operand(mask));
} else {
__ and_(r0, current_character(), Operand(mask));
__ cmp(r0, Operand(c));
}
BranchOrBacktrack(eq, on_equal);
}
@ -461,8 +461,12 @@ void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c,
void RegExpMacroAssemblerARM::CheckNotCharacterAfterAnd(unsigned c,
unsigned mask,
Label* on_not_equal) {
if (c == 0) {
__ tst(current_character(), Operand(mask));
} else {
__ and_(r0, current_character(), Operand(mask));
__ cmp(r0, Operand(c));
}
BranchOrBacktrack(ne, on_not_equal);
}
@ -480,6 +484,44 @@ void RegExpMacroAssemblerARM::CheckNotCharacterAfterMinusAnd(
}
void RegExpMacroAssemblerARM::CheckCharacterInRange(
uc16 from,
uc16 to,
Label* on_in_range) {
__ sub(r0, current_character(), Operand(from));
__ cmp(r0, Operand(to - from));
BranchOrBacktrack(ls, on_in_range); // Unsigned lower-or-same condition.
}
void RegExpMacroAssemblerARM::CheckCharacterNotInRange(
uc16 from,
uc16 to,
Label* on_not_in_range) {
__ sub(r0, current_character(), Operand(from));
__ cmp(r0, Operand(to - from));
BranchOrBacktrack(hi, on_not_in_range); // Unsigned higher condition.
}
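CheckCharacterInRange and CheckCharacterNotInRange use the usual trick of collapsing a two-sided range test into one subtraction plus a single unsigned comparison, which is exactly what the ls/hi condition codes encode. The same idea on the host, as a minimal sketch:

#include <cstdint>
#include <cassert>

// Returns true when from <= c <= to. If c < from, c - from wraps around to a
// large unsigned value and the comparison fails, so one unsigned compare
// covers both bounds.
bool InRange(uint16_t c, uint16_t from, uint16_t to) {
  return static_cast<uint16_t>(c - from) <= static_cast<uint16_t>(to - from);
}

int main() {
  assert(InRange('m', 'a', 'z'));
  assert(!InRange('A', 'a', 'z'));
  return 0;
}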
void RegExpMacroAssemblerARM::CheckBitInTable(
Handle<ByteArray> table,
Label* on_bit_set) {
__ mov(r0, Operand(table));
if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) {
__ and_(r1, current_character(), Operand(kTableSize - 1));
__ add(r1, r1, Operand(ByteArray::kHeaderSize - kHeapObjectTag));
} else {
__ add(r1,
current_character(),
Operand(ByteArray::kHeaderSize - kHeapObjectTag));
}
__ ldrb(r0, MemOperand(r0, r1));
__ cmp(r0, Operand(0));
BranchOrBacktrack(ne, on_bit_set);
}
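CheckBitInTable looks the current character up in a small per-class byte table; when the subject is ASCII and the table mask already covers the whole ASCII range, the masking step can be skipped. A rough host-side equivalent (kTableSize = 128 and the 0x7f ASCII limit are assumptions matching kTableMask and String::kMaxAsciiCharCode):

#include <cstdint>

constexpr int kTableSizeBits = 7;
constexpr int kTableSize     = 1 << kTableSizeBits;  // assumed value of kTableSize
constexpr int kTableMask     = kTableSize - 1;
constexpr int kMaxAsciiChar  = 0x7f;                 // assumed String::kMaxAsciiCharCode

// True when the byte for this character is set in the character-class table.
bool BitInTable(const uint8_t* table, uint16_t current_character, bool ascii_mode) {
  if (ascii_mode && kTableMask == kMaxAsciiChar) {
    // ASCII input can never index past the table, so no masking is needed.
    return table[current_character] != 0;
  }
  return table[current_character & kTableMask] != 0;
}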
bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned
@ -609,6 +651,7 @@ void RegExpMacroAssemblerARM::Fail() {
Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
Label return_r0;
// Finalize code - write the entry point code now we know how many
// registers we need.
@ -632,8 +675,9 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ add(frame_pointer(), sp, Operand(4 * kPointerSize));
__ mov(r0, Operand(0, RelocInfo::NONE));
__ push(r0); // Make room for success counter and initialize it to 0.
__ push(r0); // Make room for "position - 1" constant (value is irrelevant).
__ push(r0); // Make room for "at start" constant (value is irrelevant).
// Check if we have space on the stack for registers.
Label stack_limit_hit;
Label stack_ok;
@ -652,13 +696,13 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ mov(r0, Operand(EXCEPTION));
__ jmp(&exit_label_);
__ jmp(&return_r0);
__ bind(&stack_limit_hit);
CallCheckStackGuardState(r0);
__ cmp(r0, Operand(0, RelocInfo::NONE));
// If returned value is non-zero, we exit with the returned value as result.
__ b(ne, &exit_label_);
__ b(ne, &return_r0);
__ bind(&stack_ok);
@ -679,16 +723,26 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// position registers.
__ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
// Determine whether the start index is zero, that is at the start of the
// string, and store that value in a local variable.
__ cmp(r1, Operand(0));
__ mov(r1, Operand(1), LeaveCC, eq);
__ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne);
__ str(r1, MemOperand(frame_pointer(), kAtStart));
// Initialize code pointer register
__ mov(code_pointer(), Operand(masm_->CodeObject()));
Label load_char_start_regexp, start_regexp;
// Load newline if index is at start, previous character otherwise.
__ cmp(r1, Operand(0, RelocInfo::NONE));
__ b(ne, &load_char_start_regexp);
__ mov(current_character(), Operand('\n'), LeaveCC, eq);
__ jmp(&start_regexp);
// Global regexp restarts matching here.
__ bind(&load_char_start_regexp);
// Load previous char as initial value of current character register.
LoadCurrentCharacterUnchecked(-1, 1);
__ bind(&start_regexp);
// Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1
if (num_saved_registers_ > 8) {
// Address of register 0.
__ add(r1, frame_pointer(), Operand(kRegisterZero));
__ mov(r2, Operand(num_saved_registers_));
@ -697,23 +751,17 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ str(r0, MemOperand(r1, kPointerSize, NegPostIndex));
__ sub(r2, r2, Operand(1), SetCC);
__ b(ne, &init_loop);
} else {
for (int i = 0; i < num_saved_registers_; i++) {
__ str(r0, register_location(i));
}
}
}
// Initialize backtrack stack pointer.
__ ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
// Initialize code pointer register
__ mov(code_pointer(), Operand(masm_->CodeObject()));
// Load previous char as initial value of current character register.
Label at_start;
__ ldr(r0, MemOperand(frame_pointer(), kAtStart));
__ cmp(r0, Operand(0, RelocInfo::NONE));
__ b(ne, &at_start);
LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
__ jmp(&start_label_);
__ bind(&at_start);
__ mov(current_character(), Operand('\n'));
__ jmp(&start_label_);
__ jmp(&start_label_);
// Exit code:
if (success_label_.is_linked()) {
@ -740,6 +788,10 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
for (int i = 0; i < num_saved_registers_; i += 2) {
__ ldr(r2, register_location(i));
__ ldr(r3, register_location(i + 1));
if (i == 0 && global_with_zero_length_check()) {
// Keep capture start in r4 for the zero-length check later.
__ mov(r4, r2);
}
if (mode_ == UC16) {
__ add(r2, r1, Operand(r2, ASR, 1));
__ add(r3, r1, Operand(r3, ASR, 1));
@ -751,10 +803,58 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ str(r3, MemOperand(r0, kPointerSize, PostIndex));
}
}
if (global()) {
// Restart matching if the regular expression is flagged as global.
__ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
__ ldr(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
__ ldr(r2, MemOperand(frame_pointer(), kRegisterOutput));
// Increment success counter.
__ add(r0, r0, Operand(1));
__ str(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ sub(r1, r1, Operand(num_saved_registers_));
// Check whether we have enough room for another set of capture results.
__ cmp(r1, Operand(num_saved_registers_));
__ b(lt, &return_r0);
__ str(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
// Advance the location for output.
__ add(r2, r2, Operand(num_saved_registers_ * kPointerSize));
__ str(r2, MemOperand(frame_pointer(), kRegisterOutput));
// Prepare r0 to initialize registers with its value in the next run.
__ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// r4: capture start index
__ cmp(current_input_offset(), r4);
// Not a zero-length match, restart.
__ b(ne, &load_char_start_regexp);
// Offset from the end is zero if we already reached the end.
__ cmp(current_input_offset(), Operand(0));
__ b(eq, &exit_label_);
// Advance current position after a zero-length match.
__ add(current_input_offset(),
current_input_offset(),
Operand((mode_ == UC16) ? 2 : 1));
}
__ b(&load_char_start_regexp);
} else {
__ mov(r0, Operand(SUCCESS));
}
}
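The block above is what lets one generated code object serve global regexps: after every successful match it increments the counter at kSuccessfulCaptures, consumes one set of slots from the output array, and jumps back to load_char_start_regexp, with a zero-length match advanced by one character (two bytes in UC16 mode) so the loop cannot spin forever. The same control flow as host-side pseudologic (all names here are illustrative, not V8 APIs):

// Result of one pass of the generated matcher (illustrative, not a V8 type):
// 'matched' mirrors reaching success_label_, 'capture_start' the value kept in
// r4, and 'end_pos' the input position after the match.
struct OnePass { bool matched; int capture_start; int end_pos; };

// Control flow of the global-regexp restart block, sketched on the host.
int MatchGlobal(OnePass (*match_once)(int start_pos, int* out),
                int* out, int output_slots, int captures_per_match,
                int input_length, bool uc16) {
  int successful = 0;                              // kSuccessfulCaptures
  int pos = 0;
  for (;;) {
    OnePass r = match_once(pos, out);
    if (!r.matched) break;                         // no further match
    ++successful;
    output_slots -= captures_per_match;            // kNumOutputRegisters shrinks
    if (output_slots < captures_per_match) break;  // no room for another set
    out += captures_per_match;                     // advance kRegisterOutput
    pos = r.end_pos;
    if (pos == r.capture_start) {                  // zero-length match
      if (pos == input_length) break;              // already at the end of input
      pos += uc16 ? 2 : 1;                         // step past it before restarting
    }
  }
  return successful;
}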
// Exit and return r0
__ bind(&exit_label_);
if (global()) {
__ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
}
__ bind(&return_r0);
// Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
// Restore registers r4..r11 and return (restoring lr to pc).
@ -776,7 +876,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ cmp(r0, Operand(0, RelocInfo::NONE));
// If returning non-zero, we should end execution with the given
// result as return value.
__ b(ne, &exit_label_);
__ b(ne, &return_r0);
// String might have moved: Reload end of string from frame.
__ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
@ -813,7 +913,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ bind(&exit_with_exception);
// Exit with Result EXCEPTION(-1) to signal thrown exception.
__ mov(r0, Operand(EXCEPTION));
__ jmp(&exit_label_);
__ jmp(&return_r0);
}
CodeDesc code_desc;
@ -968,8 +1068,9 @@ void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) {
}
void RegExpMacroAssemblerARM::Succeed() {
bool RegExpMacroAssemblerARM::Succeed() {
__ jmp(&success_label_);
return global();
}
@ -1261,8 +1362,9 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
if (cp_offset != 0) {
__ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
offset = r0;
// r4 is not being used to store the capture start index at this point.
__ add(r4, current_input_offset(), Operand(cp_offset * char_size()));
offset = r4;
}
// The ldr, str, ldrh, strh instructions can do unaligned accesses, if the CPU
// and the operating system running on the target allow it.

24
deps/v8/src/arm/regexp-macro-assembler-arm.h

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -45,7 +45,7 @@ class RegExpMacroAssemblerARM: public RegExpMacroAssembler {
#else // V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerARM(Mode mode, int registers_to_save);
RegExpMacroAssemblerARM(Mode mode, int registers_to_save, Zone* zone);
virtual ~RegExpMacroAssemblerARM();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
@ -70,7 +70,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
Label* on_no_match);
virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
unsigned mask,
@ -79,6 +78,14 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
uc16 minus,
uc16 mask,
Label* on_not_equal);
virtual void CheckCharacterInRange(uc16 from,
uc16 to,
Label* on_in_range);
virtual void CheckCharacterNotInRange(uc16 from,
uc16 to,
Label* on_not_in_range);
virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
// Checks whether the given offset from the current position is before
// the end of the string.
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
@ -105,7 +112,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
virtual void Succeed();
virtual bool Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
@ -129,7 +136,8 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
// Stack parameters placed by caller.
static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
@ -141,10 +149,10 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
static const int kInputString = kStartIndex - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kInputStartMinusOne = kInputString - kPointerSize;
static const int kAtStart = kInputStartMinusOne - kPointerSize;
static const int kSuccessfulCaptures = kInputString - kPointerSize;
static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kAtStart - kPointerSize;
static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;

12
deps/v8/src/arm/simulator-arm.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -49,16 +49,16 @@ namespace internal {
(entry(p0, p1, p2, p3, p4))
typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
void*, int*, Address, int, Isolate*);
void*, int*, int, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type arm_regexp_matcher.
// The fifth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
(FUNCTION_CAST<arm_regexp_matcher>(entry)( \
p0, p1, p2, p3, NULL, p4, p5, p6, p7))
p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
@ -401,9 +401,9 @@ class Simulator {
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
Simulator::current(Isolate::Current())->Call( \
entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \

340
deps/v8/src/arm/stub-cache-arm.cc

@ -435,22 +435,59 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Handle<JSObject> object,
int index,
Handle<Map> transition,
Handle<String> name,
Register receiver_reg,
Register name_reg,
Register scratch,
Register scratch1,
Register scratch2,
Label* miss_label) {
// r0 : value
Label exit;
LookupResult lookup(masm->isolate());
object->Lookup(*name, &lookup);
if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
// In sloppy mode, we could just return the value and be done. However, we
// might be in strict mode, where we have to throw. Since we cannot tell,
// go into slow case unconditionally.
__ jmp(miss_label);
return;
}
// Check that the map of the object hasn't changed.
CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
: REQUIRE_EXACT_MAP;
__ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label,
__ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
DO_SMI_CHECK, mode);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
__ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
__ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
}
// Check that we are allowed to write this.
if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
JSObject* holder;
if (lookup.IsFound()) {
holder = lookup.holder();
} else {
// Find the top object.
holder = *object;
do {
holder = JSObject::cast(holder->GetPrototype());
} while (holder->GetPrototype()->IsJSObject());
}
// We need an extra register, so push name_reg and restore it afterwards.
__ push(name_reg);
Label miss_pop, done_check;
CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
scratch1, scratch2, name, &miss_pop);
__ jmp(&done_check);
__ bind(&miss_pop);
__ pop(name_reg);
__ jmp(miss_label);
__ bind(&done_check);
__ pop(name_reg);
}
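When a map transition is involved, the stub now determines the relevant holder up front, either the object the lookup found the property on or the last JSObject on the receiver's prototype chain, and emits map checks for everything in between, so any later change on the chain falls back to the miss handler. The holder selection alone, sketched with hypothetical types (not V8 classes); like the loop above, it assumes the receiver's prototype is a JSObject:

struct Obj { Obj* prototype; bool is_js_object; };

Obj* FindHolder(Obj* object, Obj* lookup_holder) {
  if (lookup_holder != nullptr) return lookup_holder;  // lookup.IsFound() case
  Obj* holder = object;
  do {
    holder = holder->prototype;                        // step to the prototype first
  } while (holder->prototype != nullptr && holder->prototype->is_js_object);
  return holder;                                       // top JSObject of the chain
}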
// Stub never generated for non-global objects that require access
@ -473,10 +510,20 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
}
if (!transition.is_null()) {
// Update the map of the object; no write barrier updating is
// needed because the map is never in new space.
__ mov(ip, Operand(transition));
__ str(ip, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
// Update the map of the object.
__ mov(scratch1, Operand(transition));
__ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
// Update the write barrier for the map field and pass the now unused
// name_reg as scratch register.
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
scratch1,
name_reg,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
// Adjust for the number of properties stored in the object. Even in the
@ -498,15 +545,16 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ RecordWriteField(receiver_reg,
offset,
name_reg,
scratch,
scratch1,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array
__ ldr(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ str(r0, FieldMemOperand(scratch, offset));
__ ldr(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ str(r0, FieldMemOperand(scratch1, offset));
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(r0, &exit);
@ -514,7 +562,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
__ mov(name_reg, r0);
__ RecordWriteField(scratch,
__ RecordWriteField(scratch1,
offset,
name_reg,
receiver_reg,
@ -582,6 +630,8 @@ static void PushInterceptorArguments(MacroAssembler* masm,
__ push(holder);
__ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
__ push(scratch);
__ mov(scratch, Operand(ExternalReference::isolate_address()));
__ push(scratch);
}
@ -596,7 +646,7 @@ static void CompileCallLoadPropertyWithInterceptor(
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
masm->isolate());
__ mov(r0, Operand(5));
__ mov(r0, Operand(6));
__ mov(r1, Operand(ref));
CEntryStub stub(1);
@ -604,9 +654,9 @@ static void CompileCallLoadPropertyWithInterceptor(
}
static const int kFastApiCallArguments = 3;
static const int kFastApiCallArguments = 4;
// Reserves space for the extra arguments to FastHandleApiCall in the
// Reserves space for the extra arguments to API function in the
// caller's frame.
//
// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
@ -632,7 +682,8 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
// -- sp[0] : holder (set by CheckPrototypes)
// -- sp[4] : callee JS function
// -- sp[8] : call data
// -- sp[12] : last JS argument
// -- sp[12] : isolate
// -- sp[16] : last JS argument
// -- ...
// -- sp[(argc + 3) * 4] : first JS argument
// -- sp[(argc + 4) * 4] : receiver
@ -642,7 +693,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
__ LoadHeapObject(r5, function);
__ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
// Pass the additional arguments FastHandleApiCall expects.
// Pass the additional arguments.
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data(api_call_info->data());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
@ -651,13 +702,15 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
} else {
__ Move(r6, call_data);
}
// Store JS function and call data.
__ stm(ib, sp, r5.bit() | r6.bit());
__ mov(r7, Operand(ExternalReference::isolate_address()));
// Store JS function, call data and isolate.
__ stm(ib, sp, r5.bit() | r6.bit() | r7.bit());
// r2 points to call data as expected by Arguments
// (refer to layout above).
__ add(r2, sp, Operand(2 * kPointerSize));
// Prepare arguments.
__ add(r2, sp, Operand(3 * kPointerSize));
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
const int kApiStackSpace = 4;
FrameScope frame_scope(masm, StackFrame::MANUAL);
@ -666,9 +719,9 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
// r0 = v8::Arguments&
// Arguments is after the return address.
__ add(r0, sp, Operand(1 * kPointerSize));
// v8::Arguments::implicit_args = data
// v8::Arguments::implicit_args_
__ str(r2, MemOperand(r0, 0 * kPointerSize));
// v8::Arguments::values = last argument
// v8::Arguments::values_
__ add(ip, r2, Operand(argc * kPointerSize));
__ str(ip, MemOperand(r0, 1 * kPointerSize));
// v8::Arguments::length_ = argc
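GenerateFastApiDirectCall reserves kApiStackSpace (four) words and fills them so that, reinterpreted, they form the v8::Arguments object handed to the API callback. Roughly the shape being assumed (illustrative only; the real fields are private members of v8::Arguments and the last one is a bool):

// Sketch of how the four reserved stack words are laid out.
struct ArgumentsShape {
  void** implicit_args;      // offset 0: r2 -> holder / callee / call data / isolate block
  void** values;             // offset 1 * kPointerSize: address of the last JS argument
  int    length;             // offset 2 * kPointerSize: argc
  int    is_construct_call;  // remaining reserved word (assumed cleared to 0)
};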
@ -845,7 +898,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ CallExternalReference(
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
masm->isolate()),
5);
6);
// Restore the name_ register.
__ pop(name_);
// Leave the internal frame.
@ -1204,7 +1257,9 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
} else {
__ Move(scratch3, Handle<Object>(callback->data()));
}
__ Push(reg, scratch3, name_reg);
__ Push(reg, scratch3);
__ mov(scratch3, Operand(ExternalReference::isolate_address()));
__ Push(scratch3, name_reg);
__ mov(r0, sp); // r0 = Handle<String>
const int kApiStackSpace = 1;
@ -1216,7 +1271,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
__ str(scratch2, MemOperand(sp, 1 * kPointerSize));
__ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
const int kStackUnwindSpace = 4;
const int kStackUnwindSpace = 5;
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
ExternalReference ref =
@ -1252,8 +1307,9 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsAccessorInfo()) {
compile_followup_inline =
AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
compile_followup_inline = callback->getter() != NULL &&
callback->IsCompatibleReceiver(*object);
}
}
@ -1344,20 +1400,19 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
if (!receiver.is(holder_reg)) {
ASSERT(scratch1.is(holder_reg));
__ Push(receiver, holder_reg);
__ ldr(scratch3,
FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
__ Push(scratch3, scratch2, name_reg);
} else {
__ push(receiver);
__ push(holder_reg);
}
__ ldr(scratch3,
FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
__ Push(holder_reg, scratch3, scratch2, name_reg);
}
__ mov(scratch1, Operand(ExternalReference::isolate_address()));
__ Push(scratch3, scratch1, scratch2, name_reg);
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
masm()->isolate());
__ TailCallExternalReference(ref, 5, 1);
__ TailCallExternalReference(ref, 6, 1);
}
} else { // !compile_followup_inline
// Call the runtime system to load the interceptor.
@ -1371,7 +1426,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
masm()->isolate());
__ TailCallExternalReference(ref, 5, 1);
__ TailCallExternalReference(ref, 6, 1);
}
}
@ -1575,16 +1630,29 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiOnlyElements(r3, r7, &call_builtin);
__ CheckFastSmiElements(r3, r7, &call_builtin);
// r1: receiver
// r3: map
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
Label try_holey_map;
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
r3,
r7,
&try_holey_map);
__ mov(r2, receiver);
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm());
__ jmp(&fast_object);
__ bind(&try_holey_map);
__ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
FAST_HOLEY_ELEMENTS,
r3,
r7,
&call_builtin);
__ mov(r2, receiver);
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm());
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(r3, r3, &call_builtin);
@ -1739,7 +1807,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
// We can't address the last element in one operation. Compute the more
// expensive shift first, and use an offset later on.
__ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
__ ldr(r0, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
__ cmp(r0, r6);
__ b(eq, &call_builtin);
@ -1747,7 +1815,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
__ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Fill with the hole.
__ str(r6, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag));
__ str(r6, FieldMemOperand(elements, FixedArray::kHeaderSize));
__ Drop(argc + 1);
__ Ret();
@ -2539,7 +2607,13 @@ Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
// -----------------------------------
Label miss;
GenerateStoreField(masm(), object, index, transition, r1, r2, r3, &miss);
GenerateStoreField(masm(),
object,
index,
transition,
name,
r1, r2, r3, r4,
&miss);
__ bind(&miss);
Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
@ -2594,6 +2668,51 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
Handle<JSObject> receiver,
Handle<JSFunction> setter,
Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
// -- r2 : name
// -- lr : return address
// -----------------------------------
Label miss;
// Check that the map of the object hasn't changed.
__ CheckMap(r1, r3, Handle<Map>(receiver->map()), &miss, DO_SMI_CHECK,
ALLOW_ELEMENT_TRANSITION_MAPS);
{
FrameScope scope(masm(), StackFrame::INTERNAL);
// Save value register, so we can restore it later.
__ push(r0);
// Call the JavaScript setter with the receiver and the value on the stack.
__ Push(r1, r0);
ParameterCount actual(1);
__ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
CALL_AS_METHOD);
// We have to return the passed value, not the return value of the setter.
__ pop(r0);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
__ Ret();
__ bind(&miss);
Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(CALLBACKS, name);
}
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> receiver,
Handle<String> name) {
@ -2761,6 +2880,44 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
}
Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
// -- lr : return address
// -----------------------------------
Label miss;
// Check that the maps haven't changed.
__ JumpIfSmi(r0, &miss);
CheckPrototypes(receiver, r0, holder, r3, r4, r1, name, &miss);
{
FrameScope scope(masm(), StackFrame::INTERNAL);
// Call the JavaScript getter with the receiver on the stack.
__ push(r0);
ParameterCount actual(0);
__ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
CALL_AS_METHOD);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
__ Ret();
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
return GetCode(CALLBACKS, name);
}
Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<JSFunction> value,
@ -3085,7 +3242,13 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
// r3 is used as scratch register. r1 and r2 keep their values if a jump to
// the miss label is generated.
GenerateStoreField(masm(), object, index, transition, r2, r1, r3, &miss);
GenerateStoreField(masm(),
object,
index,
transition,
name,
r2, r1, r3, r4,
&miss);
__ bind(&miss);
__ DecrementCounter(counters->keyed_store_field(), 1, r3, r4);
@ -3366,8 +3529,11 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3377,6 +3543,44 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
}
static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register key,
Register scratch0,
Register scratch1,
DwVfpRegister double_scratch0,
Label* fail) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
Label key_ok;
// Check for smi or a smi inside a heap number. We convert the heap
// number and check if the conversion is exact and fits into the smi
// range.
__ JumpIfSmi(key, &key_ok);
__ CheckMap(key,
scratch0,
Heap::kHeapNumberMapRootIndex,
fail,
DONT_DO_SMI_CHECK);
__ sub(ip, key, Operand(kHeapObjectTag));
__ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
__ EmitVFPTruncate(kRoundToZero,
double_scratch0.low(),
double_scratch0,
scratch0,
scratch1,
kCheckForInexactConversion);
__ b(ne, fail);
__ vmov(scratch0, double_scratch0.low());
__ TrySmiTag(scratch0, fail, scratch1);
__ mov(key, scratch0);
__ bind(&key_ok);
} else {
// Check that the key is a smi.
__ JumpIfNotSmi(key, fail);
}
}
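GenerateSmiKeyCheck is what lets the keyed stubs below accept a heap-number key, provided it truncates to an integer exactly and fits in a smi; anything else still jumps to the miss label. The acceptance rule, sketched on the host (the 31-bit smi range is an assumption for 32-bit ARM):

#include <cmath>
#include <cstdint>

// Accepts the key if it is a double that converts to an integer exactly and
// fits the smi range; writes the converted value to *smi_out on success.
bool KeyConvertsToSmi(double key, int32_t* smi_out) {
  double truncated = std::trunc(key);
  if (truncated != key) return false;            // inexact conversion: bail out
  if (truncated < -(1 << 30) || truncated >= (1 << 30)) return false;  // outside smi range
  *smi_out = static_cast<int32_t>(truncated);
  return true;
}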
void KeyedLoadStubCompiler::GenerateLoadExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
@ -3393,8 +3597,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
__ JumpIfNotSmi(key, &miss_force_generic);
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key, r4, r5, d1, &miss_force_generic);
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
// r3: elements array
@ -3453,8 +3657,11 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3724,8 +3931,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
__ JumpIfNotSmi(key, &miss_force_generic);
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key, r4, r5, d1, &miss_force_generic);
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
@ -3794,8 +4001,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
break;
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3858,8 +4068,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3998,8 +4211,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -4050,8 +4266,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
__ JumpIfNotSmi(r0, &miss_force_generic);
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, r0, r4, r5, d1, &miss_force_generic);
// Get the elements array.
__ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
@ -4102,8 +4318,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
__ JumpIfNotSmi(key_reg, &miss_force_generic);
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
// Get the elements array.
__ ldr(elements_reg,
@ -4178,10 +4394,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
__ JumpIfNotSmi(key_reg, &miss_force_generic);
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
if (IsFastSmiElementsKind(elements_kind)) {
__ JumpIfNotSmi(value_reg, &transition_elements_kind);
}
@ -4209,7 +4425,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
DONT_DO_SMI_CHECK);
__ bind(&finish_store);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
if (IsFastSmiElementsKind(elements_kind)) {
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@ -4219,7 +4435,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value_reg, MemOperand(scratch));
} else {
ASSERT(elements_kind == FAST_ELEMENTS);
ASSERT(IsFastObjectElementsKind(elements_kind));
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@ -4345,7 +4561,9 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
__ JumpIfNotSmi(key_reg, &miss_force_generic);
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));

112
deps/v8/src/array.js

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -465,15 +465,19 @@ function ArrayPush() {
}
// Returns an array containing the array elements of the object followed
// by the array elements of each argument in order. See ECMA-262,
// section 15.4.4.7.
function ArrayConcat(arg1) { // length == 1
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.concat"]);
}
var array = ToObject(this);
var arg_count = %_ArgumentsLength();
var arrays = new InternalArray(1 + arg_count);
arrays[0] = this;
arrays[0] = array;
for (var i = 0; i < arg_count; i++) {
arrays[i + 1] = %_Arguments(i);
}
@ -1027,14 +1031,29 @@ function ArrayFilter(f, receiver) {
var result = new $Array();
var accumulator = new InternalArray();
var accumulator_length = 0;
if (%DebugCallbackSupportsStepping(f)) {
for (var i = 0; i < length; i++) {
if (i in array) {
var element = array[i];
// Prepare break slots for debugger step in.
%DebugPrepareStepInIfStepping(f);
if (%_CallFunction(receiver, element, i, array, f)) {
accumulator[accumulator_length++] = element;
}
}
}
} else {
// This is a duplicate of the previous loop sans debug stepping.
for (var i = 0; i < length; i++) {
if (i in array) {
var element = array[i];
if (%_CallFunction(receiver, element, i, array, f)) {
accumulator[accumulator_length++] = element;
}
}
}
// End of duplicate.
}
%MoveArrayContents(accumulator, result);
return result;
}
@ -1059,13 +1078,25 @@ function ArrayForEach(f, receiver) {
} else if (!IS_SPEC_OBJECT(receiver)) {
receiver = ToObject(receiver);
}
if (%DebugCallbackSupportsStepping(f)) {
for (var i = 0; i < length; i++) {
if (i in array) {
var element = array[i];
// Prepare break slots for debugger step in.
%DebugPrepareStepInIfStepping(f);
%_CallFunction(receiver, element, i, array, f);
}
}
} else {
// This is a duplicate of the previous loop sans debug stepping.
for (var i = 0; i < length; i++) {
if (i in array) {
var element = array[i];
%_CallFunction(receiver, element, i, array, f);
}
}
// End of duplicate.
}
}
@ -1091,12 +1122,25 @@ function ArraySome(f, receiver) {
receiver = ToObject(receiver);
}
if (%DebugCallbackSupportsStepping(f)) {
for (var i = 0; i < length; i++) {
if (i in array) {
var element = array[i];
// Prepare break slots for debugger step in.
%DebugPrepareStepInIfStepping(f);
if (%_CallFunction(receiver, element, i, array, f)) return true;
}
}
} else {
// This is a duplicate of the previous loop sans debug stepping.
for (var i = 0; i < length; i++) {
if (i in array) {
var element = array[i];
if (%_CallFunction(receiver, element, i, array, f)) return true;
}
}
// End of duplicate.
}
return false;
}
@ -1121,12 +1165,25 @@ function ArrayEvery(f, receiver) {
receiver = ToObject(receiver);
}
if (%DebugCallbackSupportsStepping(f)) {
for (var i = 0; i < length; i++) {
if (i in array) {
var element = array[i];
// Prepare break slots for debugger step in.
%DebugPrepareStepInIfStepping(f);
if (!%_CallFunction(receiver, element, i, array, f)) return false;
}
}
} else {
// This is a duplicate of the previous loop sans debug stepping.
for (var i = 0; i < length; i++) {
if (i in array) {
var element = array[i];
if (!%_CallFunction(receiver, element, i, array, f)) return false;
}
}
// End of duplicate.
}
return true;
}
@ -1152,12 +1209,25 @@ function ArrayMap(f, receiver) {
var result = new $Array();
var accumulator = new InternalArray(length);
if (%DebugCallbackSupportsStepping(f)) {
for (var i = 0; i < length; i++) {
if (i in array) {
var element = array[i];
// Prepare break slots for debugger step in.
%DebugPrepareStepInIfStepping(f);
accumulator[i] = %_CallFunction(receiver, element, i, array, f);
}
}
} else {
// This is a duplicate of the previous loop sans debug stepping.
for (var i = 0; i < length; i++) {
if (i in array) {
var element = array[i];
accumulator[i] = %_CallFunction(receiver, element, i, array, f);
}
}
// End of duplicate.
}
%MoveArrayContents(accumulator, result);
return result;
}
@ -1311,12 +1381,28 @@ function ArrayReduce(callback, current) {
}
var receiver = %GetDefaultReceiver(callback);
if (%DebugCallbackSupportsStepping(callback)) {
for (; i < length; i++) {
if (i in array) {
var element = array[i];
current = %_CallFunction(receiver, current, element, i, array, callback);
// Prepare break slots for debugger step in.
%DebugPrepareStepInIfStepping(callback);
current =
%_CallFunction(receiver, current, element, i, array, callback);
}
}
} else {
// This is a duplicate of the previous loop sans debug stepping.
for (; i < length; i++) {
if (i in array) {
var element = array[i];
current =
%_CallFunction(receiver, current, element, i, array, callback);
}
}
// End of duplicate.
}
return current;
}
@ -1348,11 +1434,27 @@ function ArrayReduceRight(callback, current) {
}
var receiver = %GetDefaultReceiver(callback);
if (%DebugCallbackSupportsStepping(callback)) {
for (; i >= 0; i--) {
if (i in array) {
var element = array[i];
current = %_CallFunction(receiver, current, element, i, array, callback);
// Prepare break slots for debugger step in.
%DebugPrepareStepInIfStepping(callback);
current =
%_CallFunction(receiver, current, element, i, array, callback);
}
}
} else {
// This is a duplicate of the previous loop sans debug stepping.
for (; i >= 0; i--) {
if (i in array) {
var element = array[i];
current =
%_CallFunction(receiver, current, element, i, array, callback);
}
}
// End of duplicate.
}
return current;
}

95
deps/v8/src/assembler.cc

@ -99,21 +99,7 @@ struct DoubleConstant BASE_EMBEDDED {
double the_hole_nan;
};
struct InitializeDoubleConstants {
static void Construct(DoubleConstant* double_constants) {
double_constants->min_int = kMinInt;
double_constants->one_half = 0.5;
double_constants->minus_zero = -0.0;
double_constants->uint8_max_value = 255;
double_constants->zero = 0.0;
double_constants->canonical_non_hole_nan = OS::nan_value();
double_constants->the_hole_nan = BitCast<double>(kHoleNanInt64);
double_constants->negative_infinity = -V8_INFINITY;
}
};
static LazyInstance<DoubleConstant, InitializeDoubleConstants>::type
double_constants = LAZY_INSTANCE_INITIALIZER;
static DoubleConstant double_constants;
const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
@ -726,6 +712,18 @@ void RelocInfo::Verify() {
// -----------------------------------------------------------------------------
// Implementation of ExternalReference
void ExternalReference::SetUp() {
double_constants.min_int = kMinInt;
double_constants.one_half = 0.5;
double_constants.minus_zero = -0.0;
double_constants.uint8_max_value = 255;
double_constants.zero = 0.0;
double_constants.canonical_non_hole_nan = OS::nan_value();
double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
double_constants.negative_infinity = -V8_INFINITY;
}
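Replacing the LazyInstance with a plain static struct makes the initialization point explicit: ExternalReference::SetUp() has to run during engine initialization before any of the address_of_*() accessors below hand out pointers into double_constants. The same pattern in miniature (hypothetical names, not the V8 API):

struct Constants { double one_half; double minus_zero; };
static Constants g_constants;            // plain static storage, like double_constants

void SetUpConstants() {                  // analogue of ExternalReference::SetUp()
  g_constants.one_half = 0.5;
  g_constants.minus_zero = -0.0;
}

const void* AddressOfOneHalf() {         // analogue of address_of_one_half()
  // Callers rely on SetUpConstants() having run during engine initialization.
  return &g_constants.one_half;
}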
ExternalReference::ExternalReference(Builtins::CFunctionId id, Isolate* isolate)
: address_(Redirect(isolate, Builtins::c_function_address(id))) {}
@ -957,51 +955,66 @@ ExternalReference ExternalReference::scheduled_exception_address(
}
ExternalReference ExternalReference::address_of_pending_message_obj(
Isolate* isolate) {
return ExternalReference(isolate->pending_message_obj_address());
}
ExternalReference ExternalReference::address_of_has_pending_message(
Isolate* isolate) {
return ExternalReference(isolate->has_pending_message_address());
}
ExternalReference ExternalReference::address_of_pending_message_script(
Isolate* isolate) {
return ExternalReference(isolate->pending_message_script_address());
}
ExternalReference ExternalReference::address_of_min_int() {
return ExternalReference(reinterpret_cast<void*>(
&double_constants.Pointer()->min_int));
return ExternalReference(reinterpret_cast<void*>(&double_constants.min_int));
}
ExternalReference ExternalReference::address_of_one_half() {
return ExternalReference(reinterpret_cast<void*>(
&double_constants.Pointer()->one_half));
return ExternalReference(reinterpret_cast<void*>(&double_constants.one_half));
}
ExternalReference ExternalReference::address_of_minus_zero() {
return ExternalReference(reinterpret_cast<void*>(
&double_constants.Pointer()->minus_zero));
return ExternalReference(
reinterpret_cast<void*>(&double_constants.minus_zero));
}
ExternalReference ExternalReference::address_of_zero() {
return ExternalReference(reinterpret_cast<void*>(
&double_constants.Pointer()->zero));
return ExternalReference(reinterpret_cast<void*>(&double_constants.zero));
}
ExternalReference ExternalReference::address_of_uint8_max_value() {
return ExternalReference(reinterpret_cast<void*>(
&double_constants.Pointer()->uint8_max_value));
return ExternalReference(
reinterpret_cast<void*>(&double_constants.uint8_max_value));
}
ExternalReference ExternalReference::address_of_negative_infinity() {
return ExternalReference(reinterpret_cast<void*>(
&double_constants.Pointer()->negative_infinity));
return ExternalReference(
reinterpret_cast<void*>(&double_constants.negative_infinity));
}
ExternalReference ExternalReference::address_of_canonical_non_hole_nan() {
return ExternalReference(reinterpret_cast<void*>(
&double_constants.Pointer()->canonical_non_hole_nan));
return ExternalReference(
reinterpret_cast<void*>(&double_constants.canonical_non_hole_nan));
}
ExternalReference ExternalReference::address_of_the_hole_nan() {
return ExternalReference(reinterpret_cast<void*>(
&double_constants.Pointer()->the_hole_nan));
return ExternalReference(
reinterpret_cast<void*>(&double_constants.the_hole_nan));
}
@ -1138,6 +1151,12 @@ ExternalReference ExternalReference::math_log_double_function(
}
ExternalReference ExternalReference::page_flags(Page* page) {
return ExternalReference(reinterpret_cast<Address>(page) +
MemoryChunk::kFlagsOffset);
}
// Helper function to compute x^y, where y is known to be an
// integer. Uses binary decomposition to limit the number of
// multiplications; see the discussion in "Hacker's Delight" by Henry
@ -1158,6 +1177,20 @@ double power_double_int(double x, int y) {
double power_double_double(double x, double y) {
#ifdef __MINGW64_VERSION_MAJOR
// MinGW64 has a custom implementation for pow. This handles certain
// special cases that are different.
if ((x == 0.0 || isinf(x)) && isfinite(y)) {
double f;
if (modf(y, &f) != 0.0) return ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
}
if (x == 2.0) {
int y_int = static_cast<int>(y);
if (y == y_int) return ldexp(1.0, y_int);
}
#endif
// The checks for special cases can be dropped in ia32 because it has already
// been done in generated code before bailing out here.
if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) return OS::nan_value();
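The MinGW64 branch compensates for a pow() whose handling of zero and infinite bases differs from what V8 expects, and short-circuits exact powers of two through ldexp. The same two checks, runnable outside V8:

#include <cmath>
#include <cstdio>

// Mirrors the MinGW64 special cases above before falling back to libm pow().
double PowSpecialCases(double x, double y) {
  if ((x == 0.0 || std::isinf(x)) && std::isfinite(y)) {
    double ipart;
    if (std::modf(y, &ipart) != 0.0) {
      // Non-integer exponent with a zero or infinite base.
      return ((x == 0.0) ^ (y > 0)) ? HUGE_VAL : 0.0;
    }
  }
  if (x == 2.0) {
    int y_int = static_cast<int>(y);
    if (y == y_int) return std::ldexp(1.0, y_int);  // 2^y without calling pow
  }
  return std::pow(x, y);
}

int main() {
  std::printf("%g %g\n", PowSpecialCases(2.0, 10.0), PowSpecialCases(0.0, 0.5));
  return 0;
}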

7
deps/v8/src/assembler.h

@ -539,6 +539,8 @@ class ExternalReference BASE_EMBEDDED {
DIRECT_GETTER_CALL
};
static void SetUp();
typedef void* ExternalReferenceRedirector(void* original, Type type);
ExternalReference(Builtins::CFunctionId id, Isolate* isolate);
@ -638,6 +640,9 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference handle_scope_level_address();
static ExternalReference scheduled_exception_address(Isolate* isolate);
static ExternalReference address_of_pending_message_obj(Isolate* isolate);
static ExternalReference address_of_has_pending_message(Isolate* isolate);
static ExternalReference address_of_pending_message_script(Isolate* isolate);
// Static variables containing common double constants.
static ExternalReference address_of_min_int();
@ -654,6 +659,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference math_tan_double_function(Isolate* isolate);
static ExternalReference math_log_double_function(Isolate* isolate);
static ExternalReference page_flags(Page* page);
Address address() const {return reinterpret_cast<Address>(address_);}
#ifdef ENABLE_DEBUGGER_SUPPORT

267
deps/v8/src/ast.cc

@ -242,8 +242,11 @@ bool IsEqualNumber(void* first, void* second) {
}
void ObjectLiteral::CalculateEmitStore() {
ZoneHashMap table(Literal::Match);
void ObjectLiteral::CalculateEmitStore(Zone* zone) {
ZoneAllocationPolicy allocator(zone);
ZoneHashMap table(Literal::Match, ZoneHashMap::kDefaultHashMapCapacity,
allocator);
for (int i = properties()->length() - 1; i >= 0; i--) {
ObjectLiteral::Property* property = properties()->at(i);
Literal* literal = property->key();
@ -252,23 +255,23 @@ void ObjectLiteral::CalculateEmitStore() {
// If the key of a computed property is in the table, do not emit
// a store for the property later.
if (property->kind() == ObjectLiteral::Property::COMPUTED &&
table.Lookup(literal, hash, false) != NULL) {
table.Lookup(literal, hash, false, allocator) != NULL) {
property->set_emit_store(false);
} else {
// Add key to the table.
table.Lookup(literal, hash, true);
table.Lookup(literal, hash, true, allocator);
}
}
}
void TargetCollector::AddTarget(Label* target) {
void TargetCollector::AddTarget(Label* target, Zone* zone) {
// Add the label to the collector, but discard duplicates.
int length = targets_.length();
for (int i = 0; i < length; i++) {
if (targets_[i] == target) return;
}
targets_.Add(target);
targets_.Add(target, zone);
}
@ -397,7 +400,8 @@ bool FunctionDeclaration::IsInlineable() const {
// ----------------------------------------------------------------------------
// Recording of type feedback
void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
Zone* zone) {
// Record type feedback from the oracle in the AST.
is_uninitialized_ = oracle->LoadIsUninitialized(this);
if (is_uninitialized_) return;
@ -421,15 +425,17 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
} else if (oracle->LoadIsBuiltin(this, Builtins::kKeyedLoadIC_String)) {
is_string_access_ = true;
} else if (is_monomorphic_) {
receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this));
receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this),
zone);
} else if (oracle->LoadIsMegamorphicWithTypeInfo(this)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism);
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_);
}
}
void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle,
Zone* zone) {
Property* prop = target()->AsProperty();
ASSERT(prop != NULL);
is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
@ -441,22 +447,23 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
oracle->StoreReceiverTypes(this, name, &receiver_types_);
} else if (is_monomorphic_) {
// Record receiver type for monomorphic keyed stores.
receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this));
receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this), zone);
} else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism);
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_);
}
}
void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
Zone* zone) {
is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
receiver_types_.Clear();
if (is_monomorphic_) {
// Record receiver type for monomorphic keyed stores.
receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this));
receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this), zone);
} else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism);
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_);
}
}
@ -507,7 +514,6 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
// We don't know the target.
return false;
case MAP_TRANSITION:
case ELEMENTS_TRANSITION:
case CONSTANT_TRANSITION:
case NULL_DESCRIPTOR:
// Perhaps something interesting is up in the prototype chain...
@ -784,7 +790,7 @@ bool RegExpCapture::IsAnchoredAtEnd() {
// output formats are alike.
class RegExpUnparser: public RegExpVisitor {
public:
RegExpUnparser();
explicit RegExpUnparser(Zone* zone);
void VisitCharacterRange(CharacterRange that);
SmartArrayPointer<const char> ToString() { return stream_.ToCString(); }
#define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, void* data);
@ -794,10 +800,11 @@ class RegExpUnparser: public RegExpVisitor {
StringStream* stream() { return &stream_; }
HeapStringAllocator alloc_;
StringStream stream_;
Zone* zone_;
};
RegExpUnparser::RegExpUnparser() : stream_(&alloc_) {
RegExpUnparser::RegExpUnparser(Zone* zone) : stream_(&alloc_), zone_(zone) {
}
@ -837,9 +844,9 @@ void* RegExpUnparser::VisitCharacterClass(RegExpCharacterClass* that,
if (that->is_negated())
stream()->Add("^");
stream()->Add("[");
for (int i = 0; i < that->ranges()->length(); i++) {
for (int i = 0; i < that->ranges(zone_)->length(); i++) {
if (i > 0) stream()->Add(" ");
VisitCharacterRange(that->ranges()->at(i));
VisitCharacterRange(that->ranges(zone_)->at(i));
}
stream()->Add("]");
return NULL;
@ -941,8 +948,8 @@ void* RegExpUnparser::VisitEmpty(RegExpEmpty* that, void* data) {
}
SmartArrayPointer<const char> RegExpTree::ToString() {
RegExpUnparser unparser;
SmartArrayPointer<const char> RegExpTree::ToString(Zone* zone) {
RegExpUnparser unparser(zone);
Accept(&unparser, NULL);
return unparser.ToString();
}
@ -962,6 +969,14 @@ RegExpDisjunction::RegExpDisjunction(ZoneList<RegExpTree*>* alternatives)
}
static int IncreaseBy(int previous, int increase) {
if (RegExpTree::kInfinity - previous < increase) {
return RegExpTree::kInfinity;
} else {
return previous + increase;
}
}
RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
: nodes_(nodes) {
ASSERT(nodes->length() > 1);
@ -969,13 +984,10 @@ RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
max_match_ = 0;
for (int i = 0; i < nodes->length(); i++) {
RegExpTree* node = nodes->at(i);
min_match_ += node->min_match();
int node_min_match = node->min_match();
min_match_ = IncreaseBy(min_match_, node_min_match);
int node_max_match = node->max_match();
if (kInfinity - max_match_ < node_max_match) {
max_match_ = kInfinity;
} else {
max_match_ += node->max_match();
}
max_match_ = IncreaseBy(max_match_, node_max_match);
}
}
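The clamp that the old code applied only to max_match_ is factored out into IncreaseBy and now guards min_match_ as well, so both running totals saturate at RegExpTree::kInfinity instead of overflowing. A minimal standalone sketch of that behaviour (the kInfinity value below is an assumption standing in for V8's constant):

#include <cassert>
#include <climits>

static const int kInfinity = INT_MAX;  // assumption: stands in for RegExpTree::kInfinity

static int IncreaseBy(int previous, int increase) {
  // Saturating add: once the running total would pass kInfinity, it sticks there.
  if (kInfinity - previous < increase) return kInfinity;
  return previous + increase;
}

int main() {
  assert(IncreaseBy(10, 5) == 15);                     // ordinary addition
  assert(IncreaseBy(kInfinity - 3, 10) == kInfinity);  // clamps instead of wrapping
  assert(IncreaseBy(kInfinity, 1) == kInfinity);       // stays saturated
  return 0;
}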
@ -993,138 +1005,78 @@ CaseClause::CaseClause(Isolate* isolate,
}
#define INCREASE_NODE_COUNT(NodeType) \
#define REGULAR_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
}
INCREASE_NODE_COUNT(VariableDeclaration)
INCREASE_NODE_COUNT(FunctionDeclaration)
INCREASE_NODE_COUNT(ModuleDeclaration)
INCREASE_NODE_COUNT(ImportDeclaration)
INCREASE_NODE_COUNT(ExportDeclaration)
INCREASE_NODE_COUNT(ModuleLiteral)
INCREASE_NODE_COUNT(ModuleVariable)
INCREASE_NODE_COUNT(ModulePath)
INCREASE_NODE_COUNT(ModuleUrl)
INCREASE_NODE_COUNT(Block)
INCREASE_NODE_COUNT(ExpressionStatement)
INCREASE_NODE_COUNT(EmptyStatement)
INCREASE_NODE_COUNT(IfStatement)
INCREASE_NODE_COUNT(ContinueStatement)
INCREASE_NODE_COUNT(BreakStatement)
INCREASE_NODE_COUNT(ReturnStatement)
INCREASE_NODE_COUNT(Conditional)
INCREASE_NODE_COUNT(Literal)
INCREASE_NODE_COUNT(ObjectLiteral)
INCREASE_NODE_COUNT(Assignment)
INCREASE_NODE_COUNT(Throw)
INCREASE_NODE_COUNT(Property)
INCREASE_NODE_COUNT(UnaryOperation)
INCREASE_NODE_COUNT(CountOperation)
INCREASE_NODE_COUNT(BinaryOperation)
INCREASE_NODE_COUNT(CompareOperation)
INCREASE_NODE_COUNT(ThisFunction)
INCREASE_NODE_COUNT(Call)
INCREASE_NODE_COUNT(CallNew)
#undef INCREASE_NODE_COUNT
void AstConstructionVisitor::VisitWithStatement(WithStatement* node) {
increase_node_count();
add_flag(kDontOptimize);
add_flag(kDontInline);
}
void AstConstructionVisitor::VisitSwitchStatement(SwitchStatement* node) {
increase_node_count();
add_flag(kDontInline);
}
void AstConstructionVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
increase_node_count();
add_flag(kDontSelfOptimize);
}
void AstConstructionVisitor::VisitWhileStatement(WhileStatement* node) {
increase_node_count();
add_flag(kDontSelfOptimize);
}
void AstConstructionVisitor::VisitForStatement(ForStatement* node) {
increase_node_count();
add_flag(kDontSelfOptimize);
}
void AstConstructionVisitor::VisitForInStatement(ForInStatement* node) {
increase_node_count();
add_flag(kDontSelfOptimize);
}
void AstConstructionVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
increase_node_count();
add_flag(kDontOptimize);
add_flag(kDontInline);
}
void AstConstructionVisitor::VisitTryFinallyStatement(
TryFinallyStatement* node) {
increase_node_count();
add_flag(kDontOptimize);
add_flag(kDontInline);
}
void AstConstructionVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
increase_node_count();
add_flag(kDontOptimize);
add_flag(kDontInline);
}
void AstConstructionVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
increase_node_count();
add_flag(kDontInline);
}
void AstConstructionVisitor::VisitSharedFunctionInfoLiteral(
SharedFunctionInfoLiteral* node) {
increase_node_count();
add_flag(kDontOptimize);
add_flag(kDontInline);
}
void AstConstructionVisitor::VisitVariableProxy(VariableProxy* node) {
increase_node_count();
// In theory, we'd have to add:
// if(node->var()->IsLookupSlot()) { add_flag(kDontInline); }
// However, node->var() is usually not bound yet at VariableProxy creation
// time, and LOOKUP variables only result from constructs that cannot
// be inlined anyway.
}
void AstConstructionVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
increase_node_count();
add_flag(kDontInline); // TODO(1322): Allow materialized literals.
#define DONT_OPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
add_flag(kDontOptimize); \
add_flag(kDontInline); \
add_flag(kDontSelfOptimize); \
}
void AstConstructionVisitor::VisitArrayLiteral(ArrayLiteral* node) {
increase_node_count();
add_flag(kDontInline); // TODO(1322): Allow materialized literals.
#define DONT_INLINE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
add_flag(kDontInline); \
}
#define DONT_SELFOPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
add_flag(kDontSelfOptimize); \
}
REGULAR_NODE(VariableDeclaration)
REGULAR_NODE(FunctionDeclaration)
REGULAR_NODE(Block)
REGULAR_NODE(ExpressionStatement)
REGULAR_NODE(EmptyStatement)
REGULAR_NODE(IfStatement)
REGULAR_NODE(ContinueStatement)
REGULAR_NODE(BreakStatement)
REGULAR_NODE(ReturnStatement)
REGULAR_NODE(SwitchStatement)
REGULAR_NODE(Conditional)
REGULAR_NODE(Literal)
REGULAR_NODE(ObjectLiteral)
REGULAR_NODE(Assignment)
REGULAR_NODE(Throw)
REGULAR_NODE(Property)
REGULAR_NODE(UnaryOperation)
REGULAR_NODE(CountOperation)
REGULAR_NODE(BinaryOperation)
REGULAR_NODE(CompareOperation)
REGULAR_NODE(ThisFunction)
REGULAR_NODE(Call)
REGULAR_NODE(CallNew)
// In theory, for VariableProxy we'd have to add:
// if (node->var()->IsLookupSlot()) add_flag(kDontInline);
// But node->var() is usually not bound yet at VariableProxy creation time, and
// LOOKUP variables only result from constructs that cannot be inlined anyway.
REGULAR_NODE(VariableProxy)
DONT_OPTIMIZE_NODE(ModuleDeclaration)
DONT_OPTIMIZE_NODE(ImportDeclaration)
DONT_OPTIMIZE_NODE(ExportDeclaration)
DONT_OPTIMIZE_NODE(ModuleLiteral)
DONT_OPTIMIZE_NODE(ModuleVariable)
DONT_OPTIMIZE_NODE(ModulePath)
DONT_OPTIMIZE_NODE(ModuleUrl)
DONT_OPTIMIZE_NODE(WithStatement)
DONT_OPTIMIZE_NODE(TryCatchStatement)
DONT_OPTIMIZE_NODE(TryFinallyStatement)
DONT_OPTIMIZE_NODE(DebuggerStatement)
DONT_OPTIMIZE_NODE(SharedFunctionInfoLiteral)
DONT_INLINE_NODE(FunctionLiteral)
DONT_INLINE_NODE(RegExpLiteral) // TODO(1322): Allow materialized literals.
DONT_INLINE_NODE(ArrayLiteral) // TODO(1322): Allow materialized literals.
DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)
DONT_SELFOPTIMIZE_NODE(ForStatement)
DONT_SELFOPTIMIZE_NODE(ForInStatement)
void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
increase_node_count();
@ -1142,6 +1094,11 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
}
}
#undef REGULAR_NODE
#undef DONT_OPTIMIZE_NODE
#undef DONT_INLINE_NODE
#undef DONT_SELFOPTIMIZE_NODE
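The long list of hand-written visitor bodies above is replaced by four macro families; which macro a node type is listed under determines the flags it contributes. As an illustration of the technique (stub types and a stubbed flag setter, not V8's real classes), a self-contained sketch of how one DONT_INLINE_NODE expansion behaves:

#include <cstdio>

struct FunctionLiteral {};  // stand-in for the real AST node type

struct AstConstructionVisitorSketch {
  int node_count = 0;
  bool dont_inline = false;
  void increase_node_count() { ++node_count; }
  void add_flag_dont_inline() { dont_inline = true; }

// Same shape as DONT_INLINE_NODE above, with a stubbed-out flag setter.
#define DONT_INLINE_NODE(NodeType)        \
  void Visit##NodeType(NodeType* node) {  \
    increase_node_count();                \
    add_flag_dont_inline();               \
  }

  DONT_INLINE_NODE(FunctionLiteral)
#undef DONT_INLINE_NODE
};

int main() {
  AstConstructionVisitorSketch v;
  FunctionLiteral fn;
  v.VisitFunctionLiteral(&fn);  // body was generated by the macro
  std::printf("nodes=%d dont_inline=%d\n", v.node_count, static_cast<int>(v.dont_inline));
  return 0;
}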
Handle<String> Literal::ToString() {
if (handle_->IsString()) return Handle<String>::cast(handle_);

81
deps/v8/src/ast.h

@ -266,16 +266,17 @@ class Statement: public AstNode {
class SmallMapList {
public:
SmallMapList() {}
explicit SmallMapList(int capacity) : list_(capacity) {}
SmallMapList(int capacity, Zone* zone) : list_(capacity, zone) {}
void Reserve(int capacity) { list_.Reserve(capacity); }
void Reserve(int capacity, Zone* zone) { list_.Reserve(capacity, zone); }
void Clear() { list_.Clear(); }
void Sort() { list_.Sort(); }
bool is_empty() const { return list_.is_empty(); }
int length() const { return list_.length(); }
void Add(Handle<Map> handle) {
list_.Add(handle.location());
void Add(Handle<Map> handle, Zone* zone) {
list_.Add(handle.location(), zone);
}
Handle<Map> at(int i) const {
@ -415,13 +416,15 @@ class Block: public BreakableStatement {
public:
DECLARE_NODE_TYPE(Block)
void AddStatement(Statement* statement) { statements_.Add(statement); }
void AddStatement(Statement* statement, Zone* zone) {
statements_.Add(statement, zone);
}
ZoneList<Statement*>* statements() { return &statements_; }
bool is_initializer_block() const { return is_initializer_block_; }
Scope* block_scope() const { return block_scope_; }
void set_block_scope(Scope* block_scope) { block_scope_ = block_scope; }
Scope* scope() const { return scope_; }
void set_scope(Scope* scope) { scope_ = scope; }
protected:
template<class> friend class AstNodeFactory;
@ -429,17 +432,18 @@ class Block: public BreakableStatement {
Block(Isolate* isolate,
ZoneStringList* labels,
int capacity,
bool is_initializer_block)
bool is_initializer_block,
Zone* zone)
: BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
statements_(capacity),
statements_(capacity, zone),
is_initializer_block_(is_initializer_block),
block_scope_(NULL) {
scope_(NULL) {
}
private:
ZoneList<Statement*> statements_;
bool is_initializer_block_;
Scope* block_scope_;
Scope* scope_;
};
@ -594,7 +598,7 @@ class Module: public AstNode {
Interface* interface() const { return interface_; }
protected:
Module() : interface_(Interface::NewModule()) {}
explicit Module(Zone* zone) : interface_(Interface::NewModule(zone)) {}
explicit Module(Interface* interface) : interface_(interface) {}
private:
@ -607,6 +611,7 @@ class ModuleLiteral: public Module {
DECLARE_NODE_TYPE(ModuleLiteral)
Block* body() const { return body_; }
Handle<Context> context() const { return context_; }
protected:
template<class> friend class AstNodeFactory;
@ -618,6 +623,7 @@ class ModuleLiteral: public Module {
private:
Block* body_;
Handle<Context> context_;
};
@ -647,8 +653,9 @@ class ModulePath: public Module {
protected:
template<class> friend class AstNodeFactory;
ModulePath(Module* module, Handle<String> name)
: module_(module),
ModulePath(Module* module, Handle<String> name, Zone* zone)
: Module(zone),
module_(module),
name_(name) {
}
@ -667,7 +674,8 @@ class ModuleUrl: public Module {
protected:
template<class> friend class AstNodeFactory;
explicit ModuleUrl(Handle<String> url) : url_(url) {
ModuleUrl(Handle<String> url, Zone* zone)
: Module(zone), url_(url) {
}
private:
@ -1095,12 +1103,12 @@ class IfStatement: public Statement {
// stack in the compiler; this should probably be reworked.
class TargetCollector: public AstNode {
public:
TargetCollector() : targets_(0) { }
explicit TargetCollector(Zone* zone) : targets_(0, zone) { }
// Adds a jump target to the collector. The collector stores a pointer not
// a copy of the target to make binding work, so make sure not to pass in
// references to something on the stack.
void AddTarget(Label* target);
void AddTarget(Label* target, Zone* zone);
// Virtual behaviour. TargetCollectors are never part of the AST.
virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
@ -1358,7 +1366,7 @@ class ObjectLiteral: public MaterializedLiteral {
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code is emitted.
void CalculateEmitStore();
void CalculateEmitStore(Zone* zone);
enum Flags {
kNoFlags = 0,
@ -1523,7 +1531,7 @@ class Property: public Expression {
bool IsFunctionPrototype() const { return is_function_prototype_; }
// Type feedback information.
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
virtual bool IsMonomorphic() { return is_monomorphic_; }
virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
bool IsArrayLength() { return is_array_length_; }
@ -1796,7 +1804,7 @@ class CountOperation: public Expression {
virtual void MarkAsStatement() { is_prefix_ = true; }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
virtual bool IsMonomorphic() { return is_monomorphic_; }
virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
@ -1949,7 +1957,7 @@ class Assignment: public Expression {
void mark_block_end() { block_end_ = true; }
// Type feedback information.
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
virtual bool IsMonomorphic() { return is_monomorphic_; }
virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
@ -2208,8 +2216,8 @@ class RegExpTree: public ZoneObject {
// Returns the interval of registers used for captures within this
// expression.
virtual Interval CaptureRegisters() { return Interval::Empty(); }
virtual void AppendToText(RegExpText* text);
SmartArrayPointer<const char> ToString();
virtual void AppendToText(RegExpText* text, Zone* zone);
SmartArrayPointer<const char> ToString(Zone* zone);
#define MAKE_ASTYPE(Name) \
virtual RegExp##Name* As##Name(); \
virtual bool Is##Name();
@ -2294,7 +2302,7 @@ class CharacterSet BASE_EMBEDDED {
explicit CharacterSet(ZoneList<CharacterRange>* ranges)
: ranges_(ranges),
standard_set_type_(0) {}
ZoneList<CharacterRange>* ranges();
ZoneList<CharacterRange>* ranges(Zone* zone);
uc16 standard_set_type() { return standard_set_type_; }
void set_standard_set_type(uc16 special_set_type) {
standard_set_type_ = special_set_type;
@ -2325,11 +2333,11 @@ class RegExpCharacterClass: public RegExpTree {
virtual bool IsTextElement() { return true; }
virtual int min_match() { return 1; }
virtual int max_match() { return 1; }
virtual void AppendToText(RegExpText* text);
virtual void AppendToText(RegExpText* text, Zone* zone);
CharacterSet character_set() { return set_; }
// TODO(lrn): Remove need for complex version of is_standard that
// recognizes a mangled standard set and just do { return set_.is_special(); }
bool is_standard();
bool is_standard(Zone* zone);
// Returns a value representing the standard character set if is_standard()
// returns true.
// Currently used values are:
@ -2342,7 +2350,7 @@ class RegExpCharacterClass: public RegExpTree {
// . : non-unicode non-newline
// * : All characters
uc16 standard_type() { return set_.standard_set_type(); }
ZoneList<CharacterRange>* ranges() { return set_.ranges(); }
ZoneList<CharacterRange>* ranges(Zone* zone) { return set_.ranges(zone); }
bool is_negated() { return is_negated_; }
private:
@ -2362,7 +2370,7 @@ class RegExpAtom: public RegExpTree {
virtual bool IsTextElement() { return true; }
virtual int min_match() { return data_.length(); }
virtual int max_match() { return data_.length(); }
virtual void AppendToText(RegExpText* text);
virtual void AppendToText(RegExpText* text, Zone* zone);
Vector<const uc16> data() { return data_; }
int length() { return data_.length(); }
private:
@ -2372,7 +2380,7 @@ class RegExpAtom: public RegExpTree {
class RegExpText: public RegExpTree {
public:
RegExpText() : elements_(2), length_(0) {}
explicit RegExpText(Zone* zone) : elements_(2, zone), length_(0) {}
virtual void* Accept(RegExpVisitor* visitor, void* data);
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
RegExpNode* on_success);
@ -2381,9 +2389,9 @@ class RegExpText: public RegExpTree {
virtual bool IsTextElement() { return true; }
virtual int min_match() { return length_; }
virtual int max_match() { return length_; }
virtual void AppendToText(RegExpText* text);
void AddElement(TextElement elm) {
elements_.Add(elm);
virtual void AppendToText(RegExpText* text, Zone* zone);
void AddElement(TextElement elm, Zone* zone) {
elements_.Add(elm, zone);
length_ += elm.length();
}
ZoneList<TextElement>* elements() { return &elements_; }
@ -2691,20 +2699,21 @@ class AstNodeFactory BASE_EMBEDDED {
}
ModulePath* NewModulePath(Module* origin, Handle<String> name) {
ModulePath* module = new(zone_) ModulePath(origin, name);
ModulePath* module = new(zone_) ModulePath(origin, name, zone_);
VISIT_AND_RETURN(ModulePath, module)
}
ModuleUrl* NewModuleUrl(Handle<String> url) {
ModuleUrl* module = new(zone_) ModuleUrl(url);
ModuleUrl* module = new(zone_) ModuleUrl(url, zone_);
VISIT_AND_RETURN(ModuleUrl, module)
}
Block* NewBlock(ZoneStringList* labels,
int capacity,
bool is_initializer_block) {
bool is_initializer_block,
Zone* zone) {
Block* block = new(zone_) Block(
isolate_, labels, capacity, is_initializer_block);
isolate_, labels, capacity, is_initializer_block, zone);
VISIT_AND_RETURN(Block, block)
}

38
deps/v8/src/bootstrapper.cc

@ -484,8 +484,8 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
global_context()->set_initial_object_prototype(*prototype);
SetPrototype(object_fun, prototype);
object_function_map->
set_instance_descriptors(heap->empty_descriptor_array());
object_function_map->set_instance_descriptors(
heap->empty_descriptor_array());
}
// Allocate the empty function as the prototype for function ECMAScript
@ -516,12 +516,10 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
function_instance_map_writable_prototype_->set_prototype(*empty_function);
// Allocate the function map first and then patch the prototype later
Handle<Map> empty_fm = factory->CopyMapDropDescriptors(
function_without_prototype_map);
empty_fm->set_instance_descriptors(
function_without_prototype_map->instance_descriptors());
empty_fm->set_prototype(global_context()->object_function()->prototype());
empty_function->set_map(*empty_fm);
Handle<Map> empty_function_map = CreateFunctionMap(DONT_ADD_PROTOTYPE);
empty_function_map->set_prototype(
global_context()->object_function()->prototype());
empty_function->set_map(*empty_function_map);
return empty_function;
}
@ -1011,7 +1009,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
proto_map->set_prototype(global_context()->initial_object_prototype());
Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
heap->empty_string());
heap->query_colon_symbol());
proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
heap->false_value());
proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex,
@ -1094,7 +1092,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Check the state of the object.
ASSERT(result->HasFastProperties());
ASSERT(result->HasFastElements());
ASSERT(result->HasFastObjectElements());
#endif
}
@ -1187,7 +1185,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Check the state of the object.
ASSERT(result->HasFastProperties());
ASSERT(result->HasFastElements());
ASSERT(result->HasFastObjectElements());
#endif
}
@ -1634,10 +1632,11 @@ bool Genesis::InstallNatives() {
// through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT
// transition easy to trap. Moreover, they rarely are smi-only.
MaybeObject* maybe_map =
array_function->initial_map()->CopyDropTransitions();
array_function->initial_map()->CopyDropTransitions(
DescriptorArray::MAY_BE_SHARED);
Map* new_map;
if (!maybe_map->To<Map>(&new_map)) return false;
new_map->set_elements_kind(FAST_ELEMENTS);
if (!maybe_map->To(&new_map)) return false;
new_map->set_elements_kind(FAST_HOLEY_ELEMENTS);
array_function->set_initial_map(new_map);
// Make "length" magic on instances.
@ -2094,14 +2093,10 @@ bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
Handle<JSFunction> function
= Handle<JSFunction>(JSFunction::cast(function_object));
builtins->set_javascript_builtin(id, *function);
Handle<SharedFunctionInfo> shared
= Handle<SharedFunctionInfo>(function->shared());
if (!SharedFunctionInfo::EnsureCompiled(shared, CLEAR_EXCEPTION)) {
if (!JSFunction::CompileLazy(function, CLEAR_EXCEPTION)) {
return false;
}
// Set the code object on the function object.
function->ReplaceCode(function->shared()->code());
builtins->set_javascript_builtin_code(id, shared->code());
builtins->set_javascript_builtin_code(id, function->shared()->code());
}
return true;
}
@ -2159,7 +2154,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
Handle<DescriptorArray> descs =
Handle<DescriptorArray>(from->map()->instance_descriptors());
for (int i = 0; i < descs->number_of_descriptors(); i++) {
PropertyDetails details = PropertyDetails(descs->GetDetails(i));
PropertyDetails details = descs->GetDetails(i);
switch (details.type()) {
case FIELD: {
HandleScope inner;
@ -2197,7 +2192,6 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
break;
}
case MAP_TRANSITION:
case ELEMENTS_TRANSITION:
case CONSTANT_TRANSITION:
case NULL_DESCRIPTOR:
// Ignore non-properties.

151
deps/v8/src/builtins.cc

@ -200,9 +200,12 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
array->set_elements(heap->empty_fixed_array());
if (!FLAG_smi_only_arrays) {
Context* global_context = isolate->context()->global_context();
if (array->GetElementsKind() == FAST_SMI_ONLY_ELEMENTS &&
!global_context->object_js_array_map()->IsUndefined()) {
array->set_map(Map::cast(global_context->object_js_array_map()));
if (array->GetElementsKind() == GetInitialFastElementsKind() &&
!global_context->js_array_maps()->IsUndefined()) {
FixedArray* map_array =
FixedArray::cast(global_context->js_array_maps());
array->set_map(Map::cast(map_array->
get(TERMINAL_FAST_ELEMENTS_KIND)));
}
}
} else {
@ -222,6 +225,13 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
{ MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
if (!maybe_obj->ToObject(&fixed_array)) return maybe_obj;
}
ElementsKind elements_kind = array->GetElementsKind();
if (!IsFastHoleyElementsKind(elements_kind)) {
elements_kind = GetHoleyElementsKind(elements_kind);
MaybeObject* maybe_array =
array->TransitionElementsKind(elements_kind);
if (maybe_array->IsFailure()) return maybe_array;
}
// We do not use SetContent to skip the unnecessary elements type check.
array->set_elements(FixedArray::cast(fixed_array));
array->set_length(Smi::cast(obj));
@ -250,7 +260,7 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
// Allocate an appropriately typed elements array.
MaybeObject* maybe_elms;
ElementsKind elements_kind = array->GetElementsKind();
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
if (IsFastDoubleElementsKind(elements_kind)) {
maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
number_of_elements);
} else {
@ -261,13 +271,15 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
// Fill in the content
switch (array->GetElementsKind()) {
case FAST_SMI_ONLY_ELEMENTS: {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ELEMENTS: {
FixedArray* smi_elms = FixedArray::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
smi_elms->set(index, (*args)[index+1], SKIP_WRITE_BARRIER);
}
break;
}
case FAST_HOLEY_ELEMENTS:
case FAST_ELEMENTS: {
AssertNoAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
@ -277,6 +289,7 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
}
break;
}
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
@ -412,7 +425,7 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
HeapObject* elms = array->elements();
Map* map = elms->map();
if (map == heap->fixed_array_map()) {
if (args == NULL || array->HasFastElements()) return elms;
if (args == NULL || array->HasFastObjectElements()) return elms;
if (array->HasFastDoubleElements()) {
ASSERT(elms == heap->empty_fixed_array());
MaybeObject* maybe_transition =
@ -422,7 +435,7 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
}
} else if (map == heap->fixed_cow_array_map()) {
MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
if (args == NULL || array->HasFastElements() ||
if (args == NULL || array->HasFastObjectElements() ||
maybe_writable_result->IsFailure()) {
return maybe_writable_result;
}
@ -516,8 +529,8 @@ BUILTIN(ArrayPush) {
}
FixedArray* new_elms = FixedArray::cast(obj);
CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
new_elms, FAST_ELEMENTS, 0, len);
ElementsKind kind = array->GetElementsKind();
CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, 0, len);
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
@ -588,7 +601,7 @@ BUILTIN(ArrayShift) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastTypeElements());
ASSERT(array->HasFastSmiOrObjectElements());
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
@ -630,7 +643,7 @@ BUILTIN(ArrayUnshift) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastTypeElements());
ASSERT(array->HasFastSmiOrObjectElements());
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
@ -652,8 +665,8 @@ BUILTIN(ArrayUnshift) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
new_elms, FAST_ELEMENTS, to_add, len);
ElementsKind kind = array->GetElementsKind();
CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, to_add, len);
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
@ -682,7 +695,7 @@ BUILTIN(ArraySlice) {
int len = -1;
if (receiver->IsJSArray()) {
JSArray* array = JSArray::cast(receiver);
if (!array->HasFastTypeElements() ||
if (!array->HasFastSmiOrObjectElements() ||
!IsJSArrayFastElementMovingAllowed(heap, array)) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@ -698,7 +711,7 @@ BUILTIN(ArraySlice) {
bool is_arguments_object_with_fast_elements =
receiver->IsJSObject()
&& JSObject::cast(receiver)->map() == arguments_map
&& JSObject::cast(receiver)->HasFastTypeElements();
&& JSObject::cast(receiver)->HasFastSmiOrObjectElements();
if (!is_arguments_object_with_fast_elements) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@ -763,9 +776,9 @@ BUILTIN(ArraySlice) {
JSArray* result_array;
if (!maybe_array->To(&result_array)) return maybe_array;
CopyObjectToObjectElements(elms, FAST_ELEMENTS, k,
CopyObjectToObjectElements(elms, elements_kind, k,
FixedArray::cast(result_array->elements()),
FAST_ELEMENTS, 0, result_len);
elements_kind, 0, result_len);
return result_array;
}
@ -786,7 +799,7 @@ BUILTIN(ArraySplice) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastTypeElements());
ASSERT(array->HasFastSmiOrObjectElements());
int len = Smi::cast(array->length())->value();
@ -837,9 +850,9 @@ BUILTIN(ArraySplice) {
{
// Fill newly created array.
CopyObjectToObjectElements(elms, FAST_ELEMENTS, actual_start,
CopyObjectToObjectElements(elms, elements_kind, actual_start,
FixedArray::cast(result_array->elements()),
FAST_ELEMENTS, 0, actual_delete_count);
elements_kind, 0, actual_delete_count);
}
int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
@ -888,12 +901,13 @@ BUILTIN(ArraySplice) {
{
// Copy the part before actual_start as is.
CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
new_elms, FAST_ELEMENTS, 0, actual_start);
ElementsKind kind = array->GetElementsKind();
CopyObjectToObjectElements(elms, kind, 0,
new_elms, kind, 0, actual_start);
const int to_copy = len - actual_delete_count - actual_start;
CopyObjectToObjectElements(elms, FAST_ELEMENTS,
CopyObjectToObjectElements(elms, kind,
actual_start + actual_delete_count,
new_elms, FAST_ELEMENTS,
new_elms, kind,
actual_start + item_count, to_copy);
}
@ -940,11 +954,12 @@ BUILTIN(ArrayConcat) {
// and calculating total length.
int n_arguments = args.length();
int result_len = 0;
ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
ElementsKind elements_kind = GetInitialFastElementsKind();
for (int i = 0; i < n_arguments; i++) {
Object* arg = args[i];
if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements()
|| JSArray::cast(arg)->GetPrototype() != array_proto) {
if (!arg->IsJSArray() ||
!JSArray::cast(arg)->HasFastSmiOrObjectElements() ||
JSArray::cast(arg)->GetPrototype() != array_proto) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
@ -961,10 +976,20 @@ BUILTIN(ArrayConcat) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
if (!JSArray::cast(arg)->HasFastSmiOnlyElements()) {
if (!JSArray::cast(arg)->HasFastSmiElements()) {
if (IsFastSmiElementsKind(elements_kind)) {
if (IsFastHoleyElementsKind(elements_kind)) {
elements_kind = FAST_HOLEY_ELEMENTS;
} else {
elements_kind = FAST_ELEMENTS;
}
}
}
if (JSArray::cast(arg)->HasFastHoleyElements()) {
elements_kind = GetHoleyElementsKind(elements_kind);
}
}
// Allocate result.
JSArray* result_array;
@ -982,8 +1007,8 @@ BUILTIN(ArrayConcat) {
JSArray* array = JSArray::cast(args[i]);
int len = Smi::cast(array->length())->value();
FixedArray* elms = FixedArray::cast(array->elements());
CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
result_elms, FAST_ELEMENTS,
CopyObjectToObjectElements(elms, elements_kind, 0,
result_elms, elements_kind,
start_pos, len);
start_pos += len;
}
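ArrayConcat now computes the result's elements kind by widening over all arguments: an argument that is not smi-only promotes a smi kind to an object kind, and any holey argument makes the result holey. A standalone model of that widening rule (hypothetical enum and helpers, not V8's ElementsKind API):

#include <cassert>

enum Kind { SMI, HOLEY_SMI, OBJECT, HOLEY_OBJECT };  // hypothetical, not V8's ElementsKind

static bool IsSmiKind(Kind k)   { return k == SMI || k == HOLEY_SMI; }
static bool IsHoleyKind(Kind k) { return k == HOLEY_SMI || k == HOLEY_OBJECT; }
static Kind ToHoley(Kind k)     { return IsSmiKind(k) ? HOLEY_SMI : HOLEY_OBJECT; }

// One argument's contribution: non-smi contents widen a smi kind to an object kind,
// and holes make the running kind holey.
static Kind Widen(Kind result, bool arg_smi_only, bool arg_holey) {
  if (!arg_smi_only && IsSmiKind(result)) {
    result = IsHoleyKind(result) ? HOLEY_OBJECT : OBJECT;
  }
  if (arg_holey) result = ToHoley(result);
  return result;
}

int main() {
  Kind kind = SMI;                                                   // initial fast kind stand-in
  kind = Widen(kind, /*arg_smi_only=*/true,  /*arg_holey=*/false);   // e.g. [1, 2]
  kind = Widen(kind, /*arg_smi_only=*/false, /*arg_holey=*/true);    // e.g. ['a', , 'b']
  assert(kind == HOLEY_OBJECT);
  return 0;
}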
@ -1103,7 +1128,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
CustomArguments custom(isolate);
v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
data_obj, *function, raw_holder);
isolate, data_obj, *function, raw_holder);
v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
custom.end(),
@ -1143,68 +1168,6 @@ BUILTIN(HandleApiCallConstruct) {
}
#ifdef DEBUG
static void VerifyTypeCheck(Handle<JSObject> object,
Handle<JSFunction> function) {
ASSERT(function->shared()->IsApiFunction());
FunctionTemplateInfo* info = function->shared()->get_api_func_data();
if (info->signature()->IsUndefined()) return;
SignatureInfo* signature = SignatureInfo::cast(info->signature());
Object* receiver_type = signature->receiver();
if (receiver_type->IsUndefined()) return;
FunctionTemplateInfo* type = FunctionTemplateInfo::cast(receiver_type);
ASSERT(object->IsInstanceOf(type));
}
#endif
BUILTIN(FastHandleApiCall) {
ASSERT(!CalledAsConstructor(isolate));
Heap* heap = isolate->heap();
const bool is_construct = false;
// We expect four more arguments: callback, function, call data, and holder.
const int args_length = args.length() - 4;
ASSERT(args_length >= 0);
Object* callback_obj = args[args_length];
v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
&args[args_length + 1],
&args[0] - 1,
args_length - 1,
is_construct);
#ifdef DEBUG
VerifyTypeCheck(Utils::OpenHandle(*new_args.Holder()),
Utils::OpenHandle(*new_args.Callee()));
#endif
HandleScope scope(isolate);
Object* result;
v8::Handle<v8::Value> value;
{
// Leaving JavaScript.
VMState state(isolate, EXTERNAL);
ExternalCallbackScope call_scope(isolate,
v8::ToCData<Address>(callback_obj));
v8::InvocationCallback callback =
v8::ToCData<v8::InvocationCallback>(callback_obj);
value = callback(new_args);
}
if (value.IsEmpty()) {
result = heap->undefined_value();
} else {
result = *reinterpret_cast<Object**>(*value);
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
return result;
}
// Helper function to handle calls to non-function objects created through the
// API. The object can be called as either a constructor (using new) or just as
// a function (without new).
@ -1243,7 +1206,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
CustomArguments custom(isolate);
v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
call_data->data(), constructor, obj);
isolate, call_data->data(), constructor, obj);
v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
custom.end(),
&args[0] - 1,

1
deps/v8/src/builtins.h

@ -56,7 +56,6 @@ enum BuiltinExtraArguments {
V(ArrayConcat, NO_EXTRA_ARGUMENTS) \
\
V(HandleApiCall, NEEDS_CALLED_FUNCTION) \
V(FastHandleApiCall, NO_EXTRA_ARGUMENTS) \
V(HandleApiCallConstruct, NEEDS_CALLED_FUNCTION) \
V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS) \
V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS) \

35
deps/v8/src/bytecodes-irregexp.h

@ -72,24 +72,23 @@ V(AND_CHECK_4_CHARS, 27, 16) /* bc8 pad24 uint32 uint32 addr32 */ \
V(AND_CHECK_CHAR, 28, 12) /* bc8 pad8 uint16 uint32 addr32 */ \
V(AND_CHECK_NOT_4_CHARS, 29, 16) /* bc8 pad24 uint32 uint32 addr32 */ \
V(AND_CHECK_NOT_CHAR, 30, 12) /* bc8 pad8 uint16 uint32 addr32 */ \
V(MINUS_AND_CHECK_NOT_CHAR, 31, 12) /* bc8 pad8 uc16 uc16 addr32 */ \
V(CHECK_LT, 32, 8) /* bc8 pad8 uc16 addr32 */ \
V(CHECK_GT, 33, 8) /* bc8 pad8 uc16 addr32 */ \
V(CHECK_NOT_BACK_REF, 34, 8) /* bc8 reg_idx24 addr32 */ \
V(CHECK_NOT_BACK_REF_NO_CASE, 35, 8) /* bc8 reg_idx24 addr32 */ \
V(CHECK_NOT_REGS_EQUAL, 36, 12) /* bc8 regidx24 reg_idx32 addr32 */ \
V(LOOKUP_MAP1, 37, 12) /* bc8 pad8 start16 bit_map_addr32 addr32 */ \
V(LOOKUP_MAP2, 38, 96) /* bc8 pad8 start16 half_nibble_map_addr32* */ \
V(LOOKUP_MAP8, 39, 96) /* bc8 pad8 start16 byte_map addr32* */ \
V(LOOKUP_HI_MAP8, 40, 96) /* bc8 start24 byte_map_addr32 addr32* */ \
V(CHECK_REGISTER_LT, 41, 12) /* bc8 reg_idx24 value32 addr32 */ \
V(CHECK_REGISTER_GE, 42, 12) /* bc8 reg_idx24 value32 addr32 */ \
V(CHECK_REGISTER_EQ_POS, 43, 8) /* bc8 reg_idx24 addr32 */ \
V(CHECK_AT_START, 44, 8) /* bc8 pad24 addr32 */ \
V(CHECK_NOT_AT_START, 45, 8) /* bc8 pad24 addr32 */ \
V(CHECK_GREEDY, 46, 8) /* bc8 pad24 addr32 */ \
V(ADVANCE_CP_AND_GOTO, 47, 8) /* bc8 offset24 addr32 */ \
V(SET_CURRENT_POSITION_FROM_END, 48, 4) /* bc8 idx24 */
V(MINUS_AND_CHECK_NOT_CHAR, 31, 12) /* bc8 pad8 uc16 uc16 uc16 addr32 */ \
V(CHECK_CHAR_IN_RANGE, 32, 12) /* bc8 pad24 uc16 uc16 addr32 */ \
V(CHECK_CHAR_NOT_IN_RANGE, 33, 12) /* bc8 pad24 uc16 uc16 addr32 */ \
V(CHECK_BIT_IN_TABLE, 34, 24) /* bc8 pad24 addr32 bits128 */ \
V(CHECK_LT, 35, 8) /* bc8 pad8 uc16 addr32 */ \
V(CHECK_GT, 36, 8) /* bc8 pad8 uc16 addr32 */ \
V(CHECK_NOT_BACK_REF, 37, 8) /* bc8 reg_idx24 addr32 */ \
V(CHECK_NOT_BACK_REF_NO_CASE, 38, 8) /* bc8 reg_idx24 addr32 */ \
V(CHECK_NOT_REGS_EQUAL, 39, 12) /* bc8 regidx24 reg_idx32 addr32 */ \
V(CHECK_REGISTER_LT, 40, 12) /* bc8 reg_idx24 value32 addr32 */ \
V(CHECK_REGISTER_GE, 41, 12) /* bc8 reg_idx24 value32 addr32 */ \
V(CHECK_REGISTER_EQ_POS, 42, 8) /* bc8 reg_idx24 addr32 */ \
V(CHECK_AT_START, 43, 8) /* bc8 pad24 addr32 */ \
V(CHECK_NOT_AT_START, 44, 8) /* bc8 pad24 addr32 */ \
V(CHECK_GREEDY, 45, 8) /* bc8 pad24 addr32 */ \
V(ADVANCE_CP_AND_GOTO, 46, 8) /* bc8 offset24 addr32 */ \
V(SET_CURRENT_POSITION_FROM_END, 47, 4) /* bc8 idx24 */
#define DECLARE_BYTECODES(name, code, length) \
static const int BC_##name = code;

51
deps/v8/src/code-stubs.cc

@ -73,21 +73,12 @@ SmartArrayPointer<const char> CodeStub::GetName() {
void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
code->set_major_key(MajorKey());
Isolate* isolate = masm->isolate();
SmartArrayPointer<const char> name = GetName();
PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name));
GDBJIT(AddCode(GDBJITInterface::STUB, *name, code));
Counters* counters = isolate->counters();
counters->total_stubs_code_size()->Increment(code->instruction_size());
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) {
code->Disassemble(*name);
PrintF("\n");
}
#endif
}
@ -125,8 +116,16 @@ Handle<Code> CodeStub::GetCode() {
GetICState());
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
RecordCodeGeneration(*new_object, &masm);
new_object->set_major_key(MajorKey());
FinishCode(new_object);
RecordCodeGeneration(*new_object, &masm);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) {
new_object->Disassemble(*GetName());
PrintF("\n");
}
#endif
if (UseSpecialCache()) {
AddToSpecialCache(new_object);
@ -263,10 +262,13 @@ void JSEntryStub::FinishCode(Handle<Code> code) {
void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
break;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm);
break;
case EXTERNAL_BYTE_ELEMENTS:
@ -293,7 +295,9 @@ void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS: {
case FAST_HOLEY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS: {
KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_js_array_,
elements_kind_,
@ -301,6 +305,7 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
}
break;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
is_js_array_,
grow_mode_);
@ -431,24 +436,32 @@ bool ToBooleanStub::Types::CanBeUndetectable() const {
void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
Label fail;
ASSERT(!IsFastHoleyElementsKind(from_) || IsFastHoleyElementsKind(to_));
if (!FLAG_trace_elements_transitions) {
if (to_ == FAST_ELEMENTS) {
if (from_ == FAST_SMI_ONLY_ELEMENTS) {
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
} else if (from_ == FAST_DOUBLE_ELEMENTS) {
if (IsFastSmiOrObjectElementsKind(to_)) {
if (IsFastSmiOrObjectElementsKind(from_)) {
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm);
} else if (IsFastDoubleElementsKind(from_)) {
ASSERT(!IsFastSmiElementsKind(to_));
ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
} else {
UNREACHABLE();
}
KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_jsarray_,
FAST_ELEMENTS,
to_,
grow_mode_);
} else if (from_ == FAST_SMI_ONLY_ELEMENTS && to_ == FAST_DOUBLE_ELEMENTS) {
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
} else if (IsFastSmiElementsKind(from_) &&
IsFastDoubleElementsKind(to_)) {
ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
is_jsarray_,
grow_mode_);
} else if (IsFastDoubleElementsKind(from_)) {
ASSERT(to_ == FAST_HOLEY_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm);
} else {
UNREACHABLE();
}

1
deps/v8/src/code-stubs.h

@ -498,6 +498,7 @@ class ICCompareStub: public CodeStub {
virtual void FinishCode(Handle<Code> code) {
code->set_compare_state(state_);
code->set_compare_operation(op_);
}
virtual CodeStub::Major MajorKey() { return CompareIC; }

6
deps/v8/src/codegen.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -95,8 +95,8 @@ UnaryMathFunction CreateSqrtFunction();
class ElementsTransitionGenerator : public AllStatic {
public:
static void GenerateSmiOnlyToObject(MacroAssembler* masm);
static void GenerateSmiOnlyToDouble(MacroAssembler* masm, Label* fail);
static void GenerateMapChangeElementsTransition(MacroAssembler* masm);
static void GenerateSmiToDouble(MacroAssembler* masm, Label* fail);
static void GenerateDoubleToObject(MacroAssembler* masm, Label* fail);
private:

17
deps/v8/src/compiler-intrinsics.h

@ -40,6 +40,9 @@ class CompilerIntrinsics {
// Returns number of zero bits following most significant 1 bit.
// Undefined for zero value.
INLINE(static int CountLeadingZeros(uint32_t value));
// Returns the number of bits set.
INLINE(static int CountSetBits(uint32_t value));
};
#ifdef __GNUC__
@ -51,6 +54,10 @@ int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
return __builtin_clz(value);
}
int CompilerIntrinsics::CountSetBits(uint32_t value) {
return __builtin_popcount(value);
}
#elif defined(_MSC_VER)
#pragma intrinsic(_BitScanForward)
@ -68,6 +75,16 @@ int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
return 31 - static_cast<int>(result);
}
int CompilerIntrinsics::CountSetBits(uint32_t value) {
// Manually count set bits.
value = ((value >> 1) & 0x55555555) + (value & 0x55555555);
value = ((value >> 2) & 0x33333333) + (value & 0x33333333);
value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
return value;
}
#else
#error Unsupported compiler
#endif
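The MSVC fallback is the classic pairwise ("SWAR") bit count: each statement folds adjacent partial sums, going from 1-bit fields to 2-bit, 4-bit, 8-bit, 16-bit, and finally one 32-bit total. A self-contained copy with a few sanity checks (standalone sketch, not the V8 header):

#include <cassert>
#include <cstdint>

static int CountSetBits(uint32_t value) {
  value = ((value >> 1)  & 0x55555555) + (value & 0x55555555);  // sums of bit pairs
  value = ((value >> 2)  & 0x33333333) + (value & 0x33333333);  // sums per nibble
  value = ((value >> 4)  & 0x0f0f0f0f) + (value & 0x0f0f0f0f);  // sums per byte
  value = ((value >> 8)  & 0x00ff00ff) + (value & 0x00ff00ff);  // sums per half-word
  value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);  // one 32-bit total
  return static_cast<int>(value);
}

int main() {
  assert(CountSetBits(0x00000000u) == 0);
  assert(CountSetBits(0x80000001u) == 2);
  assert(CountSetBits(0xffffffffu) == 32);
  return 0;
}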

21
deps/v8/src/compiler.cc

@ -294,8 +294,9 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
}
Handle<Context> global_context(info->closure()->context()->global_context());
TypeFeedbackOracle oracle(code, global_context, info->isolate());
HGraphBuilder builder(info, &oracle);
TypeFeedbackOracle oracle(code, global_context, info->isolate(),
info->isolate()->zone());
HGraphBuilder builder(info, &oracle, info->isolate()->zone());
HPhase phase(HPhase::kTotal);
HGraph* graph = builder.CreateGraph();
if (info->isolate()->has_pending_exception()) {
@ -304,7 +305,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
}
if (graph != NULL) {
Handle<Code> optimized_code = graph->Compile(info);
Handle<Code> optimized_code = graph->Compile(info, graph->zone());
if (!optimized_code.is_null()) {
info->SetCode(optimized_code);
FinishOptimization(info->closure(), start);
@ -346,7 +347,8 @@ bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) {
// the compilation info is set if compilation succeeded.
bool succeeded = MakeCode(info);
if (!info->shared_info().is_null()) {
Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope(),
info->isolate()->zone());
info->shared_info()->set_scope_info(*scope_info);
}
return succeeded;
@ -420,7 +422,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
lit->name(),
lit->materialized_literal_count(),
info->code(),
ScopeInfo::Create(info->scope()));
ScopeInfo::Create(info->scope(), info->isolate()->zone()));
ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
Compiler::SetFunctionInfo(result, lit, true, script);
@ -462,7 +464,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
script, Debugger::NO_AFTER_COMPILE_FLAGS);
#endif
live_edit_tracker.RecordFunctionInfo(result, lit);
live_edit_tracker.RecordFunctionInfo(result, lit, isolate->zone());
return result;
}
@ -651,7 +653,8 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
// info initialization is important since set_scope_info might
// trigger a GC, causing the ASSERT below to be invalid if the code
// was flushed. By setting the code object last we avoid this.
Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
Handle<ScopeInfo> scope_info =
ScopeInfo::Create(info->scope(), info->isolate()->zone());
shared->set_scope_info(*scope_info);
shared->set_code(*code);
if (!function.is_null()) {
@ -728,7 +731,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
} else if ((V8::UseCrankshaft() && MakeCrankshaftCode(&info)) ||
(!V8::UseCrankshaft() && FullCodeGenerator::MakeCode(&info))) {
ASSERT(!info.code().is_null());
scope_info = ScopeInfo::Create(info.scope());
scope_info = ScopeInfo::Create(info.scope(), info.isolate()->zone());
} else {
return Handle<SharedFunctionInfo>::null();
}
@ -747,7 +750,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
// the resulting function.
SetExpectedNofPropertiesFromEstimate(result,
literal->expected_property_count());
live_edit_tracker.RecordFunctionInfo(result, literal);
live_edit_tracker.RecordFunctionInfo(result, literal, info.isolate()->zone());
return result;
}

22
deps/v8/src/contexts.h

@ -106,9 +106,7 @@ enum BindingFlags {
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
V(SMI_JS_ARRAY_MAP_INDEX, Object, smi_js_array_map) \
V(DOUBLE_JS_ARRAY_MAP_INDEX, Object, double_js_array_map) \
V(OBJECT_JS_ARRAY_MAP_INDEX, Object, object_js_array_map) \
V(JS_ARRAY_MAPS_INDEX, Object, js_array_maps) \
V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
V(JSON_OBJECT_INDEX, JSObject, json_object) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
@ -248,9 +246,7 @@ class Context: public FixedArray {
OBJECT_FUNCTION_INDEX,
INTERNAL_ARRAY_FUNCTION_INDEX,
ARRAY_FUNCTION_INDEX,
SMI_JS_ARRAY_MAP_INDEX,
DOUBLE_JS_ARRAY_MAP_INDEX,
OBJECT_JS_ARRAY_MAP_INDEX,
JS_ARRAY_MAPS_INDEX,
DATE_FUNCTION_INDEX,
JSON_OBJECT_INDEX,
REGEXP_FUNCTION_INDEX,
@ -373,18 +369,6 @@ class Context: public FixedArray {
Object* OptimizedFunctionsListHead();
void ClearOptimizedFunctions();
static int GetContextMapIndexFromElementsKind(
ElementsKind elements_kind) {
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
return Context::DOUBLE_JS_ARRAY_MAP_INDEX;
} else if (elements_kind == FAST_ELEMENTS) {
return Context::OBJECT_JS_ARRAY_MAP_INDEX;
} else {
ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
return Context::SMI_JS_ARRAY_MAP_INDEX;
}
}
#define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \
void set_##name(type* value) { \
ASSERT(IsGlobalContext()); \
@ -397,7 +381,7 @@ class Context: public FixedArray {
GLOBAL_CONTEXT_FIELDS(GLOBAL_CONTEXT_FIELD_ACCESSORS)
#undef GLOBAL_CONTEXT_FIELD_ACCESSORS
// Lookup the the slot called name, starting with the current context.
// Lookup the slot called name, starting with the current context.
// There are three possibilities:
//
// 1) result->IsContext():

4
deps/v8/src/conversions-inl.h

@ -228,9 +228,7 @@ double InternalStringToIntDouble(UnicodeCache* unicode_cache,
}
ASSERT(number != 0);
// The double could be constructed faster from number (mantissa), exponent
// and sign. Assuming it's a rare case, simpler code is used.
return static_cast<double>(negative ? -number : number) * pow(2.0, exponent);
return ldexp(static_cast<double>(negative ? -number : number), exponent);
}
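The replacement uses ldexp, which scales a value by a power of two directly rather than going through the general-purpose pow routine; for this conversion both expressions produce the same double. A quick standalone check (illustrative values only):

#include <cassert>
#include <cmath>

int main() {
  // std::ldexp(m, e) computes m * 2^e directly; same result as the old
  // "value * pow(2.0, exponent)" form for these exact cases.
  assert(std::ldexp(3.0, 4) == 48.0);
  assert(std::ldexp(-5.0, 3) == -40.0);
  assert(std::ldexp(1.0, -2) == 0.25);
  return 0;
}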

239
deps/v8/src/d8.cc

@ -26,7 +26,8 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef USING_V8_SHARED // Defined when linking against shared lib on Windows.
// Defined when linking against shared lib on Windows.
#if defined(USING_V8_SHARED) && !defined(V8_SHARED)
#define V8_SHARED
#endif
@ -315,151 +316,143 @@ static size_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
}
const char kArrayBufferReferencePropName[] = "_is_array_buffer_";
const char kArrayBufferMarkerPropName[] = "_array_buffer_ref_";
const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_";
Handle<Value> Shell::CreateExternalArray(const Arguments& args,
ExternalArrayType type,
size_t element_size) {
TryCatch try_catch;
bool is_array_buffer_construct = element_size == 0;
if (is_array_buffer_construct) {
type = v8::kExternalByteArray;
element_size = 1;
}
ASSERT(element_size == 1 || element_size == 2 || element_size == 4 ||
element_size == 8);
if (args.Length() == 0) {
return ThrowException(
String::New("Array constructor must have at least one "
"parameter."));
Handle<Value> Shell::CreateExternalArrayBuffer(int32_t length) {
static const int32_t kMaxSize = 0x7fffffff;
// Make sure the total size fits into a (signed) int.
if (length < 0 || length > kMaxSize) {
return ThrowException(String::New("ArrayBuffer exceeds maximum size (2G)"));
}
bool first_arg_is_array_buffer =
args[0]->IsObject() &&
args[0]->ToObject()->Get(
String::New(kArrayBufferMarkerPropName))->IsTrue();
// Currently, only the following constructors are supported:
// TypedArray(unsigned long length)
// TypedArray(ArrayBuffer buffer,
// optional unsigned long byteOffset,
// optional unsigned long length)
if (args.Length() > 3) {
return ThrowException(
String::New("Array constructor from ArrayBuffer must "
"have 1-3 parameters."));
uint8_t* data = new uint8_t[length];
if (data == NULL) {
return ThrowException(String::New("Memory allocation failed."));
}
memset(data, 0, length);
Local<Value> length_value = (args.Length() < 3)
? (first_arg_is_array_buffer
? args[0]->ToObject()->Get(String::New("length"))
: args[0])
: args[2];
size_t length = convertToUint(length_value, &try_catch);
if (try_catch.HasCaught()) return try_catch.Exception();
Handle<Object> buffer = Object::New();
buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True());
Persistent<Object> persistent_array = Persistent<Object>::New(buffer);
persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
persistent_array.MarkIndependent();
V8::AdjustAmountOfExternalAllocatedMemory(length);
void* data = NULL;
size_t offset = 0;
buffer->SetIndexedPropertiesToExternalArrayData(
data, v8::kExternalByteArray, length);
buffer->Set(String::New("byteLength"), Int32::New(length), ReadOnly);
Handle<Object> array = Object::New();
if (first_arg_is_array_buffer) {
Handle<Object> derived_from = args[0]->ToObject();
data = derived_from->GetIndexedPropertiesExternalArrayData();
return buffer;
}
size_t array_buffer_length = convertToUint(
derived_from->Get(String::New("length")),
&try_catch);
if (try_catch.HasCaught()) return try_catch.Exception();
if (data == NULL && array_buffer_length != 0) {
Handle<Value> Shell::CreateExternalArrayBuffer(const Arguments& args) {
if (args.Length() == 0) {
return ThrowException(
String::New("ArrayBuffer doesn't have data"));
String::New("ArrayBuffer constructor must have one parameter."));
}
if (args.Length() > 1) {
offset = convertToUint(args[1], &try_catch);
TryCatch try_catch;
int32_t length = convertToUint(args[0], &try_catch);
if (try_catch.HasCaught()) return try_catch.Exception();
// The given byteOffset must be a multiple of the element size of the
// specific type, otherwise an exception is raised.
if (offset % element_size != 0) {
return ThrowException(
String::New("offset must be multiple of element_size"));
}
return CreateExternalArrayBuffer(length);
}
if (offset > array_buffer_length) {
Handle<Value> Shell::CreateExternalArray(const Arguments& args,
ExternalArrayType type,
int32_t element_size) {
TryCatch try_catch;
ASSERT(element_size == 1 || element_size == 2 ||
element_size == 4 || element_size == 8);
// Currently, only the following constructors are supported:
// TypedArray(unsigned long length)
// TypedArray(ArrayBuffer buffer,
// optional unsigned long byteOffset,
// optional unsigned long length)
Handle<Object> buffer;
int32_t length;
int32_t byteLength;
int32_t byteOffset;
if (args.Length() == 0) {
return ThrowException(
String::New("byteOffset must be less than ArrayBuffer length."));
}
String::New("Array constructor must have at least one parameter."));
}
if (args[0]->IsObject() &&
!args[0]->ToObject()->GetHiddenValue(
String::New(kArrayBufferMarkerPropName)).IsEmpty()) {
buffer = args[0]->ToObject();
int32_t bufferLength =
convertToUint(buffer->Get(String::New("byteLength")), &try_catch);
if (try_catch.HasCaught()) return try_catch.Exception();
if (args.Length() == 2) {
// If length is not explicitly specified, the length of the ArrayBuffer
// minus the byteOffset must be a multiple of the element size of the
// specific type, or an exception is raised.
length = array_buffer_length - offset;
if (args.Length() < 2 || args[1]->IsUndefined()) {
byteOffset = 0;
} else {
byteOffset = convertToUint(args[1], &try_catch);
if (try_catch.HasCaught()) return try_catch.Exception();
if (byteOffset > bufferLength) {
return ThrowException(String::New("byteOffset out of bounds"));
}
if (args.Length() != 3) {
if (length % element_size != 0) {
if (byteOffset % element_size != 0) {
return ThrowException(
String::New("ArrayBuffer length minus the byteOffset must be a "
"multiple of the element size"));
String::New("byteOffset must be multiple of element_size"));
}
length /= element_size;
}
// If a given byteOffset and length references an area beyond the end of
// the ArrayBuffer an exception is raised.
if (offset + (length * element_size) > array_buffer_length) {
if (args.Length() < 3 || args[2]->IsUndefined()) {
byteLength = bufferLength - byteOffset;
length = byteLength / element_size;
if (byteLength % element_size != 0) {
return ThrowException(
String::New("length references an area beyond the end of the "
"ArrayBuffer"));
}
// Hold a reference to the ArrayBuffer so its buffer doesn't get collected.
array->Set(String::New(kArrayBufferReferencePropName), args[0], ReadOnly);
String::New("buffer size must be multiple of element_size"));
}
if (is_array_buffer_construct) {
array->Set(String::New(kArrayBufferMarkerPropName), True(), ReadOnly);
} else {
length = convertToUint(args[2], &try_catch);
if (try_catch.HasCaught()) return try_catch.Exception();
byteLength = length * element_size;
if (byteOffset + byteLength > bufferLength) {
return ThrowException(String::New("length out of bounds"));
}
Persistent<Object> persistent_array = Persistent<Object>::New(array);
persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
persistent_array.MarkIndependent();
if (data == NULL && length != 0) {
data = calloc(length, element_size);
if (data == NULL) {
return ThrowException(String::New("Memory allocation failed."));
}
} else {
length = convertToUint(args[0], &try_catch);
byteLength = length * element_size;
byteOffset = 0;
Handle<Value> result = CreateExternalArrayBuffer(byteLength);
if (!result->IsObject()) return result;
buffer = result->ToObject();
}
void* data = buffer->GetIndexedPropertiesExternalArrayData();
ASSERT(data != NULL);
Handle<Object> array = Object::New();
array->SetIndexedPropertiesToExternalArrayData(
reinterpret_cast<uint8_t*>(data) + offset, type,
static_cast<int>(length));
array->Set(String::New("length"),
Int32::New(static_cast<int32_t>(length)), ReadOnly);
array->Set(String::New("BYTES_PER_ELEMENT"),
Int32::New(static_cast<int32_t>(element_size)));
static_cast<uint8_t*>(data) + byteOffset, type, length);
array->Set(String::New("byteLength"), Int32::New(byteLength), ReadOnly);
array->Set(String::New("byteOffset"), Int32::New(byteOffset), ReadOnly);
array->Set(String::New("length"), Int32::New(length), ReadOnly);
array->Set(String::New("BYTES_PER_ELEMENT"), Int32::New(element_size));
array->Set(String::New("buffer"), buffer, ReadOnly);
return array;
}
void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
HandleScope scope;
Handle<String> prop_name = String::New(kArrayBufferReferencePropName);
Handle<Object> converted_object = object->ToObject();
Local<Value> prop_value = converted_object->Get(prop_name);
if (data != NULL && !prop_value->IsObject()) {
free(data);
}
int32_t length =
object->ToObject()->Get(String::New("byteLength"))->Uint32Value();
V8::AdjustAmountOfExternalAllocatedMemory(-length);
delete[] static_cast<uint8_t*>(data);
object.Dispose();
}
Handle<Value> Shell::ArrayBuffer(const Arguments& args) {
return CreateExternalArray(args, v8::kExternalByteArray, 0);
return CreateExternalArrayBuffer(args);
}
@ -806,8 +799,8 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
global_template->Set(String::New("print"), FunctionTemplate::New(Print));
global_template->Set(String::New("write"), FunctionTemplate::New(Write));
global_template->Set(String::New("read"), FunctionTemplate::New(Read));
global_template->Set(String::New("readbinary"),
FunctionTemplate::New(ReadBinary));
global_template->Set(String::New("readbuffer"),
FunctionTemplate::New(ReadBuffer));
global_template->Set(String::New("readline"),
FunctionTemplate::New(ReadLine));
global_template->Set(String::New("load"), FunctionTemplate::New(Load));
@ -977,8 +970,8 @@ void Shell::OnExit() {
printf("+--------------------------------------------+-------------+\n");
delete [] counters;
}
if (counters_file_ != NULL)
delete counters_file_;
delete counter_map_;
}
#endif // V8_SHARED
@ -1026,20 +1019,30 @@ static char* ReadChars(const char* name, int* size_out) {
}
Handle<Value> Shell::ReadBinary(const Arguments& args) {
Handle<Value> Shell::ReadBuffer(const Arguments& args) {
ASSERT(sizeof(char) == sizeof(uint8_t)); // NOLINT
String::Utf8Value filename(args[0]);
int size;
int length;
if (*filename == NULL) {
return ThrowException(String::New("Error loading file"));
}
char* chars = ReadChars(*filename, &size);
if (chars == NULL) {
uint8_t* data = reinterpret_cast<uint8_t*>(ReadChars(*filename, &length));
if (data == NULL) {
return ThrowException(String::New("Error reading file"));
}
// We skip checking the string for UTF8 characters and use it raw as
// backing store for the external string with 8-bit characters.
BinaryResource* resource = new BinaryResource(chars, size);
return String::NewExternal(resource);
Handle<Object> buffer = Object::New();
buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True());
Persistent<Object> persistent_buffer = Persistent<Object>::New(buffer);
persistent_buffer.MakeWeak(data, ExternalArrayWeakCallback);
persistent_buffer.MarkIndependent();
V8::AdjustAmountOfExternalAllocatedMemory(length);
buffer->SetIndexedPropertiesToExternalArrayData(
data, kExternalUnsignedByteArray, length);
buffer->Set(String::New("byteLength"),
Int32::New(static_cast<int32_t>(length)), ReadOnly);
return buffer;
}
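On the consuming side, the object readbuffer returns carries its payload as external indexed properties, so the raw bytes can be reached without copying. A short sketch of a hypothetical embedder helper, assuming the same v8 3.x API used above:

static void PrintBufferInfo(Handle<Object> buffer) {
  // Backing store and element count were attached with
  // SetIndexedPropertiesToExternalArrayData() in ReadBuffer().
  uint8_t* data = static_cast<uint8_t*>(
      buffer->GetIndexedPropertiesExternalArrayData());
  int length = buffer->GetIndexedPropertiesExternalArrayDataLength();
  printf("readbuffer result: %d bytes at %p\n", length,
         static_cast<void*>(data));
}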
@ -1203,7 +1206,7 @@ void SourceGroup::Execute() {
Handle<String> SourceGroup::ReadFile(const char* name) {
int size;
const char* chars = ReadChars(name, &size);
char* chars = ReadChars(name, &size);
if (chars == NULL) return Handle<String>();
Handle<String> result = String::New(chars, size);
delete[] chars;

6
deps/v8/src/d8.h

@ -307,7 +307,7 @@ class Shell : public i::AllStatic {
static Handle<Value> EnableProfiler(const Arguments& args);
static Handle<Value> DisableProfiler(const Arguments& args);
static Handle<Value> Read(const Arguments& args);
static Handle<Value> ReadBinary(const Arguments& args);
static Handle<Value> ReadBuffer(const Arguments& args);
static Handle<String> ReadFromStdin();
static Handle<Value> ReadLine(const Arguments& args) {
return ReadFromStdin();
@ -383,9 +383,11 @@ class Shell : public i::AllStatic {
static void RunShell();
static bool SetOptions(int argc, char* argv[]);
static Handle<ObjectTemplate> CreateGlobalTemplate();
static Handle<Value> CreateExternalArrayBuffer(int32_t size);
static Handle<Value> CreateExternalArrayBuffer(const Arguments& args);
static Handle<Value> CreateExternalArray(const Arguments& args,
ExternalArrayType type,
size_t element_size);
int32_t element_size);
static void ExternalArrayWeakCallback(Persistent<Value> object, void* data);
};

2
deps/v8/src/d8.js

@ -2174,7 +2174,7 @@ function DebugResponseDetails(response) {
}
var current_line = from_line + num;
spacer = maxdigits - (1 + Math.floor(log10(current_line)));
var spacer = maxdigits - (1 + Math.floor(log10(current_line)));
if (current_line == Debug.State.currentSourceLine + 1) {
for (var i = 0; i < maxdigits; i++) {
result += '>';

3
deps/v8/src/dateparser-inl.h

@ -148,6 +148,9 @@ bool DateParser::Parse(Vector<Char> str,
} else {
// Garbage words are illegal if a number has been read.
if (has_read_number) return false;
// The first number has to be separated from garbage words by
// whitespace or other separators.
if (scanner.Peek().IsNumber()) return false;
}
} else if (token.IsAsciiSign() && (tz.IsUTC() || !time.IsEmpty())) {
// Parse UTC offset (only after UTC or time).

38
deps/v8/src/debug-agent.cc

@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -157,7 +157,9 @@ void DebuggerAgent::OnSessionClosed(DebuggerAgentSession* session) {
ScopedLock with(session_access_);
ASSERT(session == session_);
if (session == session_) {
CloseSession();
session_->Shutdown();
delete session_;
session_ = NULL;
}
}
@ -247,7 +249,7 @@ SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
while (!(c == '\n' && prev_c == '\r')) {
prev_c = c;
received = conn->Receive(&c, 1);
if (received <= 0) {
if (received == 0) {
PrintF("Error %d\n", Socket::LastError());
return SmartArrayPointer<char>();
}
@ -323,41 +325,41 @@ bool DebuggerAgentUtil::SendConnectMessage(const Socket* conn,
const char* embedding_host) {
static const int kBufferSize = 80;
char buffer[kBufferSize]; // Sending buffer.
bool ok;
int len;
int r;
// Send the header.
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"Type: connect\r\n");
r = conn->Send(buffer, len);
if (r != len) return false;
ok = conn->Send(buffer, len);
if (!ok) return false;
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"V8-Version: %s\r\n", v8::V8::GetVersion());
r = conn->Send(buffer, len);
if (r != len) return false;
ok = conn->Send(buffer, len);
if (!ok) return false;
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"Protocol-Version: 1\r\n");
r = conn->Send(buffer, len);
if (r != len) return false;
ok = conn->Send(buffer, len);
if (!ok) return false;
if (embedding_host != NULL) {
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"Embedding-Host: %s\r\n", embedding_host);
r = conn->Send(buffer, len);
if (r != len) return false;
ok = conn->Send(buffer, len);
if (!ok) return false;
}
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"%s: 0\r\n", kContentLength);
r = conn->Send(buffer, len);
if (r != len) return false;
ok = conn->Send(buffer, len);
if (!ok) return false;
// Terminate header with empty line.
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
r = conn->Send(buffer, len);
if (r != len) return false;
ok = conn->Send(buffer, len);
if (!ok) return false;
// No body for connect message.
@ -397,7 +399,7 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn,
uint16_t character = message[i];
buffer_position +=
unibrow::Utf8::Encode(buffer + buffer_position, character, previous);
ASSERT(buffer_position < kBufferSize);
ASSERT(buffer_position <= kBufferSize);
// Send buffer if full or last character is encoded.
if (kBufferSize - buffer_position <
@ -454,7 +456,7 @@ int DebuggerAgentUtil::ReceiveAll(const Socket* conn, char* data, int len) {
int total_received = 0;
while (total_received < len) {
int received = conn->Receive(data + total_received, len - total_received);
if (received <= 0) {
if (received == 0) {
return total_received;
}
total_received += received;

49
deps/v8/src/debug-debugger.js

@ -1957,7 +1957,7 @@ DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) {
frame_index = request.arguments.frameNumber;
if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
return response.failed('Invalid frame number');
throw new Error('Invalid frame number');
}
return this.exec_state_.frame(frame_index);
} else {
@ -1966,20 +1966,44 @@ DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
};
DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
// Gets scope host object from request. It is either a function
// ('functionHandle' argument must be specified) or a stack frame
// ('frameNumber' may be specified and the current frame is taken by default).
DebugCommandProcessor.prototype.scopeHolderForScopeRequest_ =
function(request) {
if (request.arguments && "functionHandle" in request.arguments) {
if (!IS_NUMBER(request.arguments.functionHandle)) {
throw new Error('Function handle must be a number');
}
var function_mirror = LookupMirror(request.arguments.functionHandle);
if (!function_mirror) {
throw new Error('Failed to find function object by handle');
}
if (!function_mirror.isFunction()) {
throw new Error('Value of non-function type is found by handle');
}
return function_mirror;
} else {
// No frames no scopes.
if (this.exec_state_.frameCount() == 0) {
return response.failed('No scopes');
throw new Error('No scopes');
}
// Get the frame for which the scopes are requested.
var frame = this.frameForScopeRequest_(request);
return frame;
}
}
// Fill all scopes for this frame.
var total_scopes = frame.scopeCount();
DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
var scope_holder = this.scopeHolderForScopeRequest_(request);
// Fill all scopes for this frame or function.
var total_scopes = scope_holder.scopeCount();
var scopes = [];
for (var i = 0; i < total_scopes; i++) {
scopes.push(frame.scope(i));
scopes.push(scope_holder.scope(i));
}
response.body = {
fromScope: 0,
@ -1991,24 +2015,19 @@ DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
// No frames no scopes.
if (this.exec_state_.frameCount() == 0) {
return response.failed('No scopes');
}
// Get the frame for which the scope is requested.
var frame = this.frameForScopeRequest_(request);
// Get the frame or function for which the scope is requested.
var scope_holder = this.scopeHolderForScopeRequest_(request);
// With no scope argument just return top scope.
var scope_index = 0;
if (request.arguments && !IS_UNDEFINED(request.arguments.number)) {
scope_index = %ToNumber(request.arguments.number);
if (scope_index < 0 || frame.scopeCount() <= scope_index) {
if (scope_index < 0 || scope_holder.scopeCount() <= scope_index) {
return response.failed('Invalid scope number');
}
}
response.body = frame.scope(scope_index);
response.body = scope_holder.scope(scope_index);
};
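With scopeHolderForScopeRequest_ in place, a scopes or scope request may now name a function mirror handle instead of a stack frame. A hypothetical protocol request illustrating the new argument (the seq and handle values are made up):

{ "seq": 117, "type": "request", "command": "scopes",
  "arguments": { "functionHandle": 42 } }

The older frame-based form ("arguments": { "frameNumber": 0 }) keeps working, and with neither argument the current frame is used, as before.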

86
deps/v8/src/debug.cc

@ -892,6 +892,16 @@ void Debug::Iterate(ObjectVisitor* v) {
}
void Debug::PutValuesOnStackAndDie(int start,
Address c_entry_fp,
Address last_fp,
Address larger_fp,
int count,
int end) {
OS::Abort();
}
Object* Debug::Break(Arguments args) {
Heap* heap = isolate_->heap();
HandleScope scope(isolate_);
@ -984,11 +994,34 @@ Object* Debug::Break(Arguments args) {
// Count frames until target frame
int count = 0;
JavaScriptFrameIterator it(isolate_);
while (!it.done() && it.frame()->fp() != thread_local_.last_fp_) {
while (!it.done() && it.frame()->fp() < thread_local_.last_fp_) {
count++;
it.Advance();
}
// Catch the cases that would lead to crashes and capture
// - C entry FP at which to start stack crawl.
// - FP of the frame at which we plan to stop stepping out (last FP).
// - current FP that's larger than last FP.
// - Counter for the number of steps to step out.
if (it.done()) {
// We crawled the entire stack, never reaching last_fp_.
PutValuesOnStackAndDie(0xBEEEEEEE,
frame->fp(),
thread_local_.last_fp_,
NULL,
count,
0xFEEEEEEE);
} else if (it.frame()->fp() != thread_local_.last_fp_) {
// We crawled over last_fp_, without getting a match.
PutValuesOnStackAndDie(0xBEEEEEEE,
frame->fp(),
thread_local_.last_fp_,
it.frame()->fp(),
count,
0xFEEEEEEE);
}
// If we found original frame
if (it.frame()->fp() == thread_local_.last_fp_) {
if (step_count > 1) {
@ -1418,7 +1451,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// Remember source position and frame to handle step next.
thread_local_.last_statement_position_ =
debug_info->code()->SourceStatementPosition(frame->pc());
thread_local_.last_fp_ = frame->fp();
thread_local_.last_fp_ = frame->UnpaddedFP();
} else {
// If there's restarter frame on top of the stack, just get the pointer
// to function which is going to be restarted.
@ -1487,7 +1520,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// propagated on the next Debug::Break.
thread_local_.last_statement_position_ =
debug_info->code()->SourceStatementPosition(frame->pc());
thread_local_.last_fp_ = frame->fp();
thread_local_.last_fp_ = frame->UnpaddedFP();
}
// Step in or Step in min
@ -1522,7 +1555,7 @@ bool Debug::StepNextContinue(BreakLocationIterator* break_location_iterator,
// Continue if we are still on the same frame and in the same statement.
int current_statement_position =
break_location_iterator->code()->SourceStatementPosition(frame->pc());
return thread_local_.last_fp_ == frame->fp() &&
return thread_local_.last_fp_ == frame->UnpaddedFP() &&
thread_local_.last_statement_position_ == current_statement_position;
}
@ -1723,7 +1756,7 @@ void Debug::ClearOneShot() {
void Debug::ActivateStepIn(StackFrame* frame) {
ASSERT(!StepOutActive());
thread_local_.step_into_fp_ = frame->fp();
thread_local_.step_into_fp_ = frame->UnpaddedFP();
}
@ -1734,7 +1767,7 @@ void Debug::ClearStepIn() {
void Debug::ActivateStepOut(StackFrame* frame) {
ASSERT(!StepInActive());
thread_local_.step_out_fp_ = frame->fp();
thread_local_.step_out_fp_ = frame->UnpaddedFP();
}
@ -1751,20 +1784,19 @@ void Debug::ClearStepNext() {
// Helper function to compile full code for debugging. This code will
// have debug break slots and deoptimization
// information. Deoptimization information is required in case that an
// optimized version of this function is still activated on the
// stack. It will also make sure that the full code is compiled with
// the same flags as the previous version - that is flags which can
// change the code generated. The current method of mapping from
// already compiled full code without debug break slots to full code
// with debug break slots depends on the generated code is otherwise
// exactly the same.
static bool CompileFullCodeForDebugging(Handle<SharedFunctionInfo> shared,
// have debug break slots and deoptimization information. Deoptimization
// information is required in case that an optimized version of this
// function is still activated on the stack. It will also make sure that
// the full code is compiled with the same flags as the previous version,
// that is flags which can change the code generated. The current method
// of mapping from already compiled full code without debug break slots
// to full code with debug break slots depends on the generated code being
// otherwise exactly the same.
static bool CompileFullCodeForDebugging(Handle<JSFunction> function,
Handle<Code> current_code) {
ASSERT(!current_code->has_debug_break_slots());
CompilationInfo info(shared);
CompilationInfo info(function);
info.MarkCompilingForDebugging(current_code);
ASSERT(!info.shared_info()->is_compiled());
ASSERT(!info.isolate()->has_pending_exception());
@ -1776,7 +1808,7 @@ static bool CompileFullCodeForDebugging(Handle<SharedFunctionInfo> shared,
info.isolate()->clear_pending_exception();
#if DEBUG
if (result) {
Handle<Code> new_code(shared->code());
Handle<Code> new_code(function->shared()->code());
ASSERT(new_code->has_debug_break_slots());
ASSERT(current_code->is_compiled_optimizable() ==
new_code->is_compiled_optimizable());
@ -1857,13 +1889,6 @@ static void RedirectActivationsToRecompiledCodeOnThread(
// break slots.
debug_break_slot_count++;
}
if (frame_code->has_self_optimization_header() &&
!new_code->has_self_optimization_header()) {
delta -= FullCodeGenerator::self_optimization_header_size();
} else {
ASSERT(frame_code->has_self_optimization_header() ==
new_code->has_self_optimization_header());
}
int debug_break_slot_bytes =
debug_break_slot_count * Assembler::kDebugBreakSlotLength;
if (FLAG_trace_deopt) {
@ -1987,6 +2012,7 @@ void Debug::PrepareForBreakPoints() {
// patch the return address to run in the new compiled code.
for (int i = 0; i < active_functions.length(); i++) {
Handle<JSFunction> function = active_functions[i];
Handle<SharedFunctionInfo> shared(function->shared());
if (function->code()->kind() == Code::FUNCTION &&
function->code()->has_debug_break_slots()) {
@ -1994,7 +2020,6 @@ void Debug::PrepareForBreakPoints() {
continue;
}
Handle<SharedFunctionInfo> shared(function->shared());
// If recompilation is not possible just skip it.
if (shared->is_toplevel() ||
!shared->allows_lazy_compilation() ||
@ -2014,7 +2039,7 @@ void Debug::PrepareForBreakPoints() {
isolate_->debugger()->force_debugger_active();
isolate_->debugger()->set_force_debugger_active(true);
ASSERT(current_code->kind() == Code::FUNCTION);
CompileFullCodeForDebugging(shared, current_code);
CompileFullCodeForDebugging(function, current_code);
isolate_->debugger()->set_force_debugger_active(
prev_force_debugger_active);
if (!shared->is_compiled()) {
@ -2234,6 +2259,13 @@ void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
}
const int Debug::FramePaddingLayout::kInitialSize = 1;
// Any even value bigger than kInitialSize as needed for stack scanning.
const int Debug::FramePaddingLayout::kPaddingValue = kInitialSize + 1;
bool Debug::IsDebugGlobal(GlobalObject* global) {
return IsLoaded() && global == debug_context()->global();
}

55
deps/v8/src/debug.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -232,6 +232,12 @@ class Debug {
void PreemptionWhileInDebugger();
void Iterate(ObjectVisitor* v);
NO_INLINE(void PutValuesOnStackAndDie(int start,
Address c_entry_fp,
Address last_fp,
Address larger_fp,
int count,
int end));
Object* Break(Arguments args);
void SetBreakPoint(Handle<SharedFunctionInfo> shared,
Handle<Object> break_point_object,
@ -245,6 +251,8 @@ class Debug {
bool IsBreakOnException(ExceptionBreakType type);
void PrepareStep(StepAction step_action, int step_count);
void ClearStepping();
void ClearStepOut();
bool IsStepping() { return thread_local_.step_count_ > 0; }
bool StepNextContinue(BreakLocationIterator* break_location_iterator,
JavaScriptFrame* frame);
static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared);
@ -455,6 +463,50 @@ class Debug {
// Architecture-specific constant.
static const bool kFrameDropperSupported;
/**
* Defines layout of a stack frame that supports padding. This is a regular
* internal frame that has a flexible stack structure. LiveEdit can shift
* its lower part up the stack, taking up the 'padding' space when additional
* stack memory is required.
* Such a frame is expected immediately above the topmost JavaScript frame.
*
* Stack Layout:
* --- Top
* LiveEdit routine frames
* ---
* C frames of debug handler
* ---
* ...
* ---
* An internal frame that has n padding words:
* - any number of words as needed by code -- upper part of frame
* - padding size: a Smi storing n -- current size of padding
* - padding: n words filled with kPaddingValue in form of Smi
* - 3 context/type words of a regular InternalFrame
* - fp
* ---
* Topmost JavaScript frame
* ---
* ...
* --- Bottom
*/
class FramePaddingLayout : public AllStatic {
public:
// Architecture-specific constant.
static const bool kIsSupported;
// A size of frame base including fp. Padding words start right above
// the base.
static const int kFrameBaseSize = 4;
// A number of words that should be reserved on stack for the LiveEdit use.
// Normally equals 1. Stored on stack in form of Smi.
static const int kInitialSize;
// A value that padding words are filled with (in form of Smi). Going
// bottom-top, the first word not having this value is a counter word.
static const int kPaddingValue;
};
private:
explicit Debug(Isolate* isolate);
~Debug();
@ -464,7 +516,6 @@ class Debug {
void ActivateStepIn(StackFrame* frame);
void ClearStepIn();
void ActivateStepOut(StackFrame* frame);
void ClearStepOut();
void ClearStepNext();
// Returns whether the compile succeeded.
void RemoveDebugInfo(Handle<DebugInfo> debug_info);

68
deps/v8/src/deoptimizer.cc

@ -354,6 +354,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
bailout_type_(type),
from_(from),
fp_to_sp_delta_(fp_to_sp_delta),
has_alignment_padding_(0),
input_(NULL),
output_count_(0),
jsframe_count_(0),
@ -378,6 +379,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
reinterpret_cast<intptr_t>(from),
fp_to_sp_delta - (2 * kPointerSize));
}
function->shared()->increment_deopt_count();
// Find the optimized code.
if (type == EAGER) {
ASSERT(from == NULL);
@ -593,12 +595,14 @@ void Deoptimizer::DoComputeOutputFrames() {
PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ",
reinterpret_cast<intptr_t>(function));
function->PrintName();
PrintF(" => node=%u, pc=0x%08" V8PRIxPTR ", state=%s, took %0.3f ms]\n",
PrintF(" => node=%u, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
" took %0.3f ms]\n",
node_id,
output_[index]->GetPc(),
FullCodeGenerator::State2String(
static_cast<FullCodeGenerator::State>(
output_[index]->GetState()->value())),
has_alignment_padding_ ? "with padding" : "no padding",
ms);
}
}
@ -769,7 +773,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] ",
PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
output_offset,
input_value,
input_offset);
@ -789,7 +793,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
PrintF("[top + %d] <- %" V8PRIdPTR " ; [esp + %d] (%s)\n",
PrintF("[top + %d] <- %" V8PRIdPTR " ; [sp + %d] (%s)\n",
output_offset,
value,
input_offset,
@ -815,7 +819,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
input_->GetOffsetFromSlotIndex(input_slot_index);
double value = input_->GetDoubleFrameSlot(input_offset);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [esp + %d]\n",
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [sp + %d]\n",
output_[frame_index]->GetTop() + output_offset,
output_offset,
value,
@ -1290,7 +1294,7 @@ Object* FrameDescription::GetExpression(int index) {
}
void TranslationBuffer::Add(int32_t value) {
void TranslationBuffer::Add(int32_t value, Zone* zone) {
// Encode the sign bit in the least significant bit.
bool is_negative = (value < 0);
uint32_t bits = ((is_negative ? -value : value) << 1) |
@ -1299,7 +1303,7 @@ void TranslationBuffer::Add(int32_t value) {
// each byte to indicate whether or not more bytes follow.
do {
uint32_t next = bits >> 7;
contents_.Add(((bits << 1) & 0xFF) | (next != 0));
contents_.Add(((bits << 1) & 0xFF) | (next != 0), zone);
bits = next;
} while (bits != 0);
}
@ -1332,76 +1336,76 @@ Handle<ByteArray> TranslationBuffer::CreateByteArray() {
void Translation::BeginConstructStubFrame(int literal_id, unsigned height) {
buffer_->Add(CONSTRUCT_STUB_FRAME);
buffer_->Add(literal_id);
buffer_->Add(height);
buffer_->Add(CONSTRUCT_STUB_FRAME, zone());
buffer_->Add(literal_id, zone());
buffer_->Add(height, zone());
}
void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
buffer_->Add(ARGUMENTS_ADAPTOR_FRAME);
buffer_->Add(literal_id);
buffer_->Add(height);
buffer_->Add(ARGUMENTS_ADAPTOR_FRAME, zone());
buffer_->Add(literal_id, zone());
buffer_->Add(height, zone());
}
void Translation::BeginJSFrame(int node_id, int literal_id, unsigned height) {
buffer_->Add(JS_FRAME);
buffer_->Add(node_id);
buffer_->Add(literal_id);
buffer_->Add(height);
buffer_->Add(JS_FRAME, zone());
buffer_->Add(node_id, zone());
buffer_->Add(literal_id, zone());
buffer_->Add(height, zone());
}
void Translation::StoreRegister(Register reg) {
buffer_->Add(REGISTER);
buffer_->Add(reg.code());
buffer_->Add(REGISTER, zone());
buffer_->Add(reg.code(), zone());
}
void Translation::StoreInt32Register(Register reg) {
buffer_->Add(INT32_REGISTER);
buffer_->Add(reg.code());
buffer_->Add(INT32_REGISTER, zone());
buffer_->Add(reg.code(), zone());
}
void Translation::StoreDoubleRegister(DoubleRegister reg) {
buffer_->Add(DOUBLE_REGISTER);
buffer_->Add(DoubleRegister::ToAllocationIndex(reg));
buffer_->Add(DOUBLE_REGISTER, zone());
buffer_->Add(DoubleRegister::ToAllocationIndex(reg), zone());
}
void Translation::StoreStackSlot(int index) {
buffer_->Add(STACK_SLOT);
buffer_->Add(index);
buffer_->Add(STACK_SLOT, zone());
buffer_->Add(index, zone());
}
void Translation::StoreInt32StackSlot(int index) {
buffer_->Add(INT32_STACK_SLOT);
buffer_->Add(index);
buffer_->Add(INT32_STACK_SLOT, zone());
buffer_->Add(index, zone());
}
void Translation::StoreDoubleStackSlot(int index) {
buffer_->Add(DOUBLE_STACK_SLOT);
buffer_->Add(index);
buffer_->Add(DOUBLE_STACK_SLOT, zone());
buffer_->Add(index, zone());
}
void Translation::StoreLiteral(int literal_id) {
buffer_->Add(LITERAL);
buffer_->Add(literal_id);
buffer_->Add(LITERAL, zone());
buffer_->Add(literal_id, zone());
}
void Translation::StoreArgumentsObject() {
buffer_->Add(ARGUMENTS_OBJECT);
buffer_->Add(ARGUMENTS_OBJECT, zone());
}
void Translation::MarkDuplicate() {
buffer_->Add(DUPLICATE);
buffer_->Add(DUPLICATE, zone());
}

24
deps/v8/src/deoptimizer.h

@ -221,6 +221,10 @@ class Deoptimizer : public Malloced {
}
static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
static int has_alignment_padding_offset() {
return OFFSET_OF(Deoptimizer, has_alignment_padding_);
}
static int GetDeoptimizedCodeCount(Isolate* isolate);
static const int kNotDeoptimizationEntry = -1;
@ -322,6 +326,7 @@ class Deoptimizer : public Malloced {
BailoutType bailout_type_;
Address from_;
int fp_to_sp_delta_;
int has_alignment_padding_;
// Input frame description.
FrameDescription* input_;
@ -515,10 +520,10 @@ class FrameDescription {
class TranslationBuffer BASE_EMBEDDED {
public:
TranslationBuffer() : contents_(256) { }
explicit TranslationBuffer(Zone* zone) : contents_(256, zone) { }
int CurrentIndex() const { return contents_.length(); }
void Add(int32_t value);
void Add(int32_t value, Zone* zone);
Handle<ByteArray> CreateByteArray();
@ -569,12 +574,14 @@ class Translation BASE_EMBEDDED {
DUPLICATE
};
Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count)
Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count,
Zone* zone)
: buffer_(buffer),
index_(buffer->CurrentIndex()) {
buffer_->Add(BEGIN);
buffer_->Add(frame_count);
buffer_->Add(jsframe_count);
index_(buffer->CurrentIndex()),
zone_(zone) {
buffer_->Add(BEGIN, zone);
buffer_->Add(frame_count, zone);
buffer_->Add(jsframe_count, zone);
}
int index() const { return index_; }
@ -593,6 +600,8 @@ class Translation BASE_EMBEDDED {
void StoreArgumentsObject();
void MarkDuplicate();
Zone* zone() const { return zone_; }
static int NumberOfOperandsFor(Opcode opcode);
#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
@ -602,6 +611,7 @@ class Translation BASE_EMBEDDED {
private:
TranslationBuffer* buffer_;
int index_;
Zone* zone_;
};
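All translation records are now allocated in a compilation Zone instead of on the C++ heap, which is why both the buffer constructor and every Add() call take a Zone*. A minimal sketch of the updated call sequence; zone, node_id, literal_id, height and slot_index stand in for values the caller already has:

TranslationBuffer buffer(zone);                       // contents_ grows in |zone|
Translation translation(&buffer, 1, 1, zone);         // frame_count, jsframe_count
translation.BeginJSFrame(node_id, literal_id, height);
translation.StoreStackSlot(slot_index);               // threads |zone| into Add()
Handle<ByteArray> packed = buffer.CreateByteArray();  // result is still a heap object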

6
deps/v8/src/double.h

@ -130,12 +130,6 @@ class Double {
return (d64 & kExponentMask) == kExponentMask;
}
bool IsNan() const {
uint64_t d64 = AsUint64();
return ((d64 & kExponentMask) == kExponentMask) &&
((d64 & kSignificandMask) != 0);
}
bool IsInfinite() const {
uint64_t d64 = AsUint64();
return ((d64 & kExponentMask) == kExponentMask) &&

134
deps/v8/src/elements-kind.cc

@ -0,0 +1,134 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "elements-kind.h"
#include "api.h"
#include "elements.h"
#include "objects.h"
namespace v8 {
namespace internal {
void PrintElementsKind(FILE* out, ElementsKind kind) {
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
PrintF(out, "%s", accessor->name());
}
ElementsKind GetInitialFastElementsKind() {
if (FLAG_packed_arrays) {
return FAST_SMI_ELEMENTS;
} else {
return FAST_HOLEY_SMI_ELEMENTS;
}
}
struct InitializeFastElementsKindSequence {
static void Construct(
ElementsKind** fast_elements_kind_sequence_ptr) {
ElementsKind* fast_elements_kind_sequence =
new ElementsKind[kFastElementsKindCount];
*fast_elements_kind_sequence_ptr = fast_elements_kind_sequence;
STATIC_ASSERT(FAST_SMI_ELEMENTS == FIRST_FAST_ELEMENTS_KIND);
fast_elements_kind_sequence[0] = FAST_SMI_ELEMENTS;
fast_elements_kind_sequence[1] = FAST_HOLEY_SMI_ELEMENTS;
fast_elements_kind_sequence[2] = FAST_DOUBLE_ELEMENTS;
fast_elements_kind_sequence[3] = FAST_HOLEY_DOUBLE_ELEMENTS;
fast_elements_kind_sequence[4] = FAST_ELEMENTS;
fast_elements_kind_sequence[5] = FAST_HOLEY_ELEMENTS;
}
};
static LazyInstance<ElementsKind*,
InitializeFastElementsKindSequence>::type
fast_elements_kind_sequence = LAZY_INSTANCE_INITIALIZER;
ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number) {
ASSERT(sequence_number >= 0 &&
sequence_number < kFastElementsKindCount);
return fast_elements_kind_sequence.Get()[sequence_number];
}
int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) {
for (int i = 0; i < kFastElementsKindCount; ++i) {
if (fast_elements_kind_sequence.Get()[i] == elements_kind) {
return i;
}
}
UNREACHABLE();
return 0;
}
ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
bool allow_only_packed) {
ASSERT(IsFastElementsKind(elements_kind));
ASSERT(elements_kind != TERMINAL_FAST_ELEMENTS_KIND);
while (true) {
int index =
GetSequenceIndexFromFastElementsKind(elements_kind) + 1;
elements_kind = GetFastElementsKindFromSequenceIndex(index);
if (!IsFastHoleyElementsKind(elements_kind) || !allow_only_packed) {
return elements_kind;
}
}
UNREACHABLE();
return TERMINAL_FAST_ELEMENTS_KIND;
}
bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
ElementsKind to_kind) {
switch (from_kind) {
case FAST_SMI_ELEMENTS:
return to_kind != FAST_SMI_ELEMENTS;
case FAST_HOLEY_SMI_ELEMENTS:
return to_kind != FAST_SMI_ELEMENTS &&
to_kind != FAST_HOLEY_SMI_ELEMENTS;
case FAST_DOUBLE_ELEMENTS:
return to_kind != FAST_SMI_ELEMENTS &&
to_kind != FAST_HOLEY_SMI_ELEMENTS &&
to_kind != FAST_DOUBLE_ELEMENTS;
case FAST_HOLEY_DOUBLE_ELEMENTS:
return to_kind == FAST_ELEMENTS ||
to_kind == FAST_HOLEY_ELEMENTS;
case FAST_ELEMENTS:
return to_kind == FAST_HOLEY_ELEMENTS;
case FAST_HOLEY_ELEMENTS:
return false;
default:
return false;
}
}
} } // namespace v8::internal
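A small driver loop (purely illustrative, not part of this file) shows how the lazily built sequence above orders the fast kinds from most to least specific; every hop satisfies IsMoreGeneralElementsKindTransition():

ElementsKind kind = GetInitialFastElementsKind();  // FAST_SMI_ELEMENTS under --packed-arrays
while (kind != TERMINAL_FAST_ELEMENTS_KIND) {
  ElementsKind next = GetNextMoreGeneralFastElementsKind(kind, false);
  ASSERT(IsMoreGeneralElementsKindTransition(kind, next));
  kind = next;  // SMI -> HOLEY_SMI -> DOUBLE -> HOLEY_DOUBLE -> OBJECT -> HOLEY_OBJECT
}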

221
deps/v8/src/elements-kind.h

@ -0,0 +1,221 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ELEMENTS_KIND_H_
#define V8_ELEMENTS_KIND_H_
#include "v8checks.h"
namespace v8 {
namespace internal {
enum ElementsKind {
// The "fast" kind for elements that only contain SMI values. Must be first
// to make it possible to efficiently check maps for this kind.
FAST_SMI_ELEMENTS,
FAST_HOLEY_SMI_ELEMENTS,
// The "fast" kind for tagged values. Must be second to make it possible to
// efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind
// together at once.
FAST_ELEMENTS,
FAST_HOLEY_ELEMENTS,
// The "fast" kind for unwrapped, non-tagged double values.
FAST_DOUBLE_ELEMENTS,
FAST_HOLEY_DOUBLE_ELEMENTS,
// The "slow" kind.
DICTIONARY_ELEMENTS,
NON_STRICT_ARGUMENTS_ELEMENTS,
// The "fast" kind for external arrays
EXTERNAL_BYTE_ELEMENTS,
EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
EXTERNAL_SHORT_ELEMENTS,
EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
EXTERNAL_INT_ELEMENTS,
EXTERNAL_UNSIGNED_INT_ELEMENTS,
EXTERNAL_FLOAT_ELEMENTS,
EXTERNAL_DOUBLE_ELEMENTS,
EXTERNAL_PIXEL_ELEMENTS,
// Derived constants from ElementsKind
FIRST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
FIRST_FAST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
LAST_FAST_ELEMENTS_KIND = FAST_HOLEY_DOUBLE_ELEMENTS,
FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
TERMINAL_FAST_ELEMENTS_KIND = FAST_HOLEY_ELEMENTS
};
const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND -
FIRST_FAST_ELEMENTS_KIND + 1;
void PrintElementsKind(FILE* out, ElementsKind kind);
ElementsKind GetInitialFastElementsKind();
ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_index);
int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind);
inline bool IsDictionaryElementsKind(ElementsKind kind) {
return kind == DICTIONARY_ELEMENTS;
}
inline bool IsExternalArrayElementsKind(ElementsKind kind) {
return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
kind <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND;
}
inline bool IsFastElementsKind(ElementsKind kind) {
ASSERT(FIRST_FAST_ELEMENTS_KIND == 0);
return kind <= FAST_HOLEY_DOUBLE_ELEMENTS;
}
inline bool IsFastDoubleElementsKind(ElementsKind kind) {
return kind == FAST_DOUBLE_ELEMENTS ||
kind == FAST_HOLEY_DOUBLE_ELEMENTS;
}
inline bool IsFastSmiOrObjectElementsKind(ElementsKind kind) {
return kind == FAST_SMI_ELEMENTS ||
kind == FAST_HOLEY_SMI_ELEMENTS ||
kind == FAST_ELEMENTS ||
kind == FAST_HOLEY_ELEMENTS;
}
inline bool IsFastSmiElementsKind(ElementsKind kind) {
return kind == FAST_SMI_ELEMENTS ||
kind == FAST_HOLEY_SMI_ELEMENTS;
}
inline bool IsFastObjectElementsKind(ElementsKind kind) {
return kind == FAST_ELEMENTS ||
kind == FAST_HOLEY_ELEMENTS;
}
inline bool IsFastHoleyElementsKind(ElementsKind kind) {
return kind == FAST_HOLEY_SMI_ELEMENTS ||
kind == FAST_HOLEY_DOUBLE_ELEMENTS ||
kind == FAST_HOLEY_ELEMENTS;
}
inline bool IsHoleyElementsKind(ElementsKind kind) {
return IsFastHoleyElementsKind(kind) ||
kind == DICTIONARY_ELEMENTS;
}
inline bool IsFastPackedElementsKind(ElementsKind kind) {
return kind == FAST_SMI_ELEMENTS ||
kind == FAST_DOUBLE_ELEMENTS ||
kind == FAST_ELEMENTS;
}
inline ElementsKind GetPackedElementsKind(ElementsKind holey_kind) {
if (holey_kind == FAST_HOLEY_SMI_ELEMENTS) {
return FAST_SMI_ELEMENTS;
}
if (holey_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
return FAST_DOUBLE_ELEMENTS;
}
if (holey_kind == FAST_HOLEY_ELEMENTS) {
return FAST_ELEMENTS;
}
return holey_kind;
}
inline ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) {
if (packed_kind == FAST_SMI_ELEMENTS) {
return FAST_HOLEY_SMI_ELEMENTS;
}
if (packed_kind == FAST_DOUBLE_ELEMENTS) {
return FAST_HOLEY_DOUBLE_ELEMENTS;
}
if (packed_kind == FAST_ELEMENTS) {
return FAST_HOLEY_ELEMENTS;
}
return packed_kind;
}
inline ElementsKind FastSmiToObjectElementsKind(ElementsKind from_kind) {
ASSERT(IsFastSmiElementsKind(from_kind));
return (from_kind == FAST_SMI_ELEMENTS)
? FAST_ELEMENTS
: FAST_HOLEY_ELEMENTS;
}
inline bool IsSimpleMapChangeTransition(ElementsKind from_kind,
ElementsKind to_kind) {
return (GetHoleyElementsKind(from_kind) == to_kind) ||
(IsFastSmiElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind));
}
bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
ElementsKind to_kind);
inline bool IsTransitionableFastElementsKind(ElementsKind from_kind) {
return IsFastElementsKind(from_kind) &&
from_kind != TERMINAL_FAST_ELEMENTS_KIND;
}
ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
bool allow_only_packed);
inline bool CanTransitionToMoreGeneralFastElementsKind(
ElementsKind elements_kind,
bool allow_only_packed) {
return IsFastElementsKind(elements_kind) &&
(elements_kind != TERMINAL_FAST_ELEMENTS_KIND &&
(!allow_only_packed || elements_kind != FAST_ELEMENTS));
}
} } // namespace v8::internal
#endif // V8_ELEMENTS_KIND_H_
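The packed/holey helpers make the cheap map-change cases easy to spot. A few spot checks, evaluated against the definitions in this header:

// Holey-ing a kind and smi->object widening are "simple" map changes;
// smi->double is not, since it needs a different backing store.
ASSERT(IsSimpleMapChangeTransition(FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS));
ASSERT(IsSimpleMapChangeTransition(FAST_SMI_ELEMENTS, FAST_ELEMENTS));
ASSERT(!IsSimpleMapChangeTransition(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS));
ASSERT(GetHoleyElementsKind(FAST_DOUBLE_ELEMENTS) == FAST_HOLEY_DOUBLE_ELEMENTS);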

537
deps/v8/src/elements.cc

@ -39,8 +39,14 @@
// Inheritance hierarchy:
// - ElementsAccessorBase (abstract)
// - FastElementsAccessor (abstract)
// - FastObjectElementsAccessor
// - FastSmiOrObjectElementsAccessor
// - FastPackedSmiElementsAccessor
// - FastHoleySmiElementsAccessor
// - FastPackedObjectElementsAccessor
// - FastHoleyObjectElementsAccessor
// - FastDoubleElementsAccessor
// - FastPackedDoubleElementsAccessor
// - FastHoleyDoubleElementsAccessor
// - ExternalElementsAccessor (abstract)
// - ExternalByteElementsAccessor
// - ExternalUnsignedByteElementsAccessor
@ -59,15 +65,24 @@ namespace v8 {
namespace internal {
static const int kPackedSizeNotKnown = -1;
// First argument in list is the accessor class, the second argument is the
// accessor ElementsKind, and the third is the backing store class. Use the
// fast element handler for smi-only arrays. The implementation is currently
// identical. Note that the order must match that of the ElementsKind enum for
// the |accessor_array[]| below to work.
#define ELEMENTS_LIST(V) \
V(FastObjectElementsAccessor, FAST_SMI_ONLY_ELEMENTS, FixedArray) \
V(FastObjectElementsAccessor, FAST_ELEMENTS, FixedArray) \
V(FastDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, FixedDoubleArray) \
V(FastPackedSmiElementsAccessor, FAST_SMI_ELEMENTS, FixedArray) \
V(FastHoleySmiElementsAccessor, FAST_HOLEY_SMI_ELEMENTS, \
FixedArray) \
V(FastPackedObjectElementsAccessor, FAST_ELEMENTS, FixedArray) \
V(FastHoleyObjectElementsAccessor, FAST_HOLEY_ELEMENTS, FixedArray) \
V(FastPackedDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, \
FixedDoubleArray) \
V(FastHoleyDoubleElementsAccessor, FAST_HOLEY_DOUBLE_ELEMENTS, \
FixedDoubleArray) \
V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, \
SeededNumberDictionary) \
V(NonStrictArgumentsElementsAccessor, NON_STRICT_ARGUMENTS_ELEMENTS, \
@ -139,8 +154,6 @@ void CopyObjectToObjectElements(FixedArray* from,
uint32_t to_start,
int raw_copy_size) {
ASSERT(to->map() != HEAP->fixed_cow_array_map());
ASSERT(from_kind == FAST_ELEMENTS || from_kind == FAST_SMI_ONLY_ELEMENTS);
ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@ -148,7 +161,7 @@ void CopyObjectToObjectElements(FixedArray* from,
copy_size = Min(from->length() - from_start,
to->length() - to_start);
#ifdef DEBUG
// FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
// FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already
// marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
@ -160,12 +173,15 @@ void CopyObjectToObjectElements(FixedArray* from,
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
ASSERT(IsFastSmiOrObjectElementsKind(from_kind));
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
Address to_address = to->address() + FixedArray::kHeaderSize;
Address from_address = from->address() + FixedArray::kHeaderSize;
CopyWords(reinterpret_cast<Object**>(to_address) + to_start,
reinterpret_cast<Object**>(from_address) + from_start,
copy_size);
if (from_kind == FAST_ELEMENTS && to_kind == FAST_ELEMENTS) {
if (IsFastObjectElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
Heap* heap = from->GetHeap();
if (!heap->InNewSpace(to)) {
heap->RecordWrites(to->address(),
@ -190,7 +206,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = from->max_number_key() + 1 - from_start;
#ifdef DEBUG
// FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
// Fast object arrays cannot be uninitialized. Ensure they are already
// marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
@ -200,7 +216,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
#endif
}
ASSERT(to != from);
ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
if (copy_size == 0) return;
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
@ -216,7 +232,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
to->set_the_hole(i + to_start);
}
}
if (to_kind == FAST_ELEMENTS) {
if (IsFastObjectElementsKind(to_kind)) {
if (!heap->InNewSpace(to)) {
heap->RecordWrites(to->address(),
to->OffsetOfElementAt(to_start),
@ -234,7 +250,7 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@ -242,7 +258,7 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
copy_size = Min(from->length() - from_start,
to->length() - to_start);
#ifdef DEBUG
// FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
// FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already
// marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
@ -255,14 +271,14 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return from;
for (int i = 0; i < copy_size; ++i) {
if (to_kind == FAST_SMI_ONLY_ELEMENTS) {
if (IsFastSmiElementsKind(to_kind)) {
UNIMPLEMENTED();
return Failure::Exception();
} else {
MaybeObject* maybe_value = from->get(i + from_start);
Object* value;
ASSERT(to_kind == FAST_ELEMENTS);
// Because FAST_DOUBLE_ELEMENTS -> FAST_ELEMENT allocate HeapObjects
ASSERT(IsFastObjectElementsKind(to_kind));
// Because Double -> Object elements transitions allocate HeapObjects
// iteratively, the allocation must succeed within a single GC cycle,
// otherwise the retry after the GC will also fail. In order to ensure
// that no GC is triggered, allocate HeapNumbers from old space if they
@ -313,6 +329,76 @@ static void CopyDoubleToDoubleElements(FixedDoubleArray* from,
}
static void CopySmiToDoubleElements(FixedArray* from,
uint32_t from_start,
FixedDoubleArray* to,
uint32_t to_start,
int raw_copy_size) {
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = from->length() - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
to->set_the_hole(i);
}
}
}
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
Object* the_hole = from->GetHeap()->the_hole_value();
for (uint32_t from_end = from_start + static_cast<uint32_t>(copy_size);
from_start < from_end; from_start++, to_start++) {
Object* hole_or_smi = from->get(from_start);
if (hole_or_smi == the_hole) {
to->set_the_hole(to_start);
} else {
to->set(to_start, Smi::cast(hole_or_smi)->value());
}
}
}
static void CopyPackedSmiToDoubleElements(FixedArray* from,
uint32_t from_start,
FixedDoubleArray* to,
uint32_t to_start,
int packed_size,
int raw_copy_size) {
int copy_size = raw_copy_size;
uint32_t to_end;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = from->length() - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
to_end = to->length();
} else {
to_end = to_start + static_cast<uint32_t>(copy_size);
}
} else {
to_end = to_start + static_cast<uint32_t>(copy_size);
}
ASSERT(static_cast<int>(to_end) <= to->length());
ASSERT(packed_size >= 0 && packed_size <= copy_size);
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
for (uint32_t from_end = from_start + static_cast<uint32_t>(packed_size);
from_start < from_end; from_start++, to_start++) {
Object* smi = from->get(from_start);
ASSERT(!smi->IsTheHole());
to->set(to_start, Smi::cast(smi)->value());
}
while (to_start < to_end) {
to->set_the_hole(to_start++);
}
}
static void CopyObjectToDoubleElements(FixedArray* from,
uint32_t from_start,
FixedDoubleArray* to,
@ -332,12 +418,14 @@ static void CopyObjectToDoubleElements(FixedArray* from,
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
for (int i = 0; i < copy_size; i++) {
Object* hole_or_object = from->get(i + from_start);
if (hole_or_object->IsTheHole()) {
to->set_the_hole(i + to_start);
Object* the_hole = from->GetHeap()->the_hole_value();
for (uint32_t from_end = from_start + copy_size;
from_start < from_end; from_start++, to_start++) {
Object* hole_or_object = from->get(from_start);
if (hole_or_object == the_hole) {
to->set_the_hole(to_start);
} else {
to->set(i + to_start, hole_or_object->Number());
to->set(to_start, hole_or_object->Number());
}
}
}
@ -404,6 +492,38 @@ class ElementsAccessorBase : public ElementsAccessor {
virtual ElementsKind kind() const { return ElementsTraits::Kind; }
static void ValidateContents(JSObject* holder, int length) {
}
static void ValidateImpl(JSObject* holder) {
FixedArrayBase* fixed_array_base = holder->elements();
// When objects are first allocated, their elements are Failures.
if (fixed_array_base->IsFailure()) return;
if (!fixed_array_base->IsHeapObject()) return;
Map* map = fixed_array_base->map();
// Arrays that have been shifted in place can't be verified.
Heap* heap = holder->GetHeap();
if (map == heap->raw_unchecked_one_pointer_filler_map() ||
map == heap->raw_unchecked_two_pointer_filler_map() ||
map == heap->free_space_map()) {
return;
}
int length = 0;
if (holder->IsJSArray()) {
Object* length_obj = JSArray::cast(holder)->length();
if (length_obj->IsSmi()) {
length = Smi::cast(length_obj)->value();
}
} else {
length = fixed_array_base->length();
}
ElementsAccessorSubclass::ValidateContents(holder, length);
}
virtual void Validate(JSObject* holder) {
ElementsAccessorSubclass::ValidateImpl(holder);
}
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
@ -424,7 +544,7 @@ class ElementsAccessorBase : public ElementsAccessor {
receiver, holder, key, BackingStore::cast(backing_store));
}
virtual MaybeObject* Get(Object* receiver,
MUST_USE_RESULT virtual MaybeObject* Get(Object* receiver,
JSObject* holder,
uint32_t key,
FixedArrayBase* backing_store) {
@ -435,7 +555,7 @@ class ElementsAccessorBase : public ElementsAccessor {
receiver, holder, key, BackingStore::cast(backing_store));
}
static MaybeObject* GetImpl(Object* receiver,
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
uint32_t key,
BackingStore* backing_store) {
@ -444,17 +564,19 @@ class ElementsAccessorBase : public ElementsAccessor {
: backing_store->GetHeap()->the_hole_value();
}
virtual MaybeObject* SetLength(JSArray* array,
MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* array,
Object* length) {
return ElementsAccessorSubclass::SetLengthImpl(
array, length, BackingStore::cast(array->elements()));
}
static MaybeObject* SetLengthImpl(JSObject* obj,
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
JSObject* obj,
Object* length,
BackingStore* backing_store);
virtual MaybeObject* SetCapacityAndLength(JSArray* array,
MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(
JSArray* array,
int capacity,
int length) {
return ElementsAccessorSubclass::SetFastElementsCapacityAndLength(
@ -463,45 +585,61 @@ class ElementsAccessorBase : public ElementsAccessor {
length);
}
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
MUST_USE_RESULT static MaybeObject* SetFastElementsCapacityAndLength(
JSObject* obj,
int capacity,
int length) {
UNIMPLEMENTED();
return obj;
}
virtual MaybeObject* Delete(JSObject* obj,
MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) = 0;
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
int packed_size,
int copy_size) {
UNREACHABLE();
return NULL;
}
virtual MaybeObject* CopyElements(JSObject* from_holder,
MUST_USE_RESULT virtual MaybeObject* CopyElements(JSObject* from_holder,
uint32_t from_start,
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
int copy_size,
FixedArrayBase* from) {
int packed_size = kPackedSizeNotKnown;
if (from == NULL) {
from = from_holder->elements();
}
if (from_holder) {
ElementsKind elements_kind = from_holder->GetElementsKind();
bool is_packed = IsFastPackedElementsKind(elements_kind) &&
from_holder->IsJSArray();
if (is_packed) {
packed_size = Smi::cast(JSArray::cast(from_holder)->length())->value();
if (copy_size >= 0 && packed_size > copy_size) {
packed_size = copy_size;
}
}
}
if (from->length() == 0) {
return from;
}
return ElementsAccessorSubclass::CopyElementsImpl(
from, from_start, to, to_kind, to_start, copy_size);
from, from_start, to, to_kind, to_start, packed_size, copy_size);
}
virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
Object* receiver,
JSObject* holder,
FixedArray* to,
FixedArrayBase* from) {
@ -620,6 +758,7 @@ class FastElementsAccessor
KindTraits>(name) {}
protected:
friend class ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits>;
friend class NonStrictArgumentsElementsAccessor;
typedef typename KindTraits::BackingStore BackingStore;
@ -630,10 +769,21 @@ class FastElementsAccessor
Object* length_object,
uint32_t length) {
uint32_t old_capacity = backing_store->length();
Object* old_length = array->length();
bool same_size = old_length->IsSmi() &&
static_cast<uint32_t>(Smi::cast(old_length)->value()) == length;
ElementsKind kind = array->GetElementsKind();
if (!same_size && IsFastElementsKind(kind) &&
!IsFastHoleyElementsKind(kind)) {
kind = GetHoleyElementsKind(kind);
MaybeObject* maybe_obj = array->TransitionElementsKind(kind);
if (maybe_obj->IsFailure()) return maybe_obj;
}
// Check whether the backing store should be shrunk.
if (length <= old_capacity) {
if (array->HasFastTypeElements()) {
if (array->HasFastSmiOrObjectElements()) {
MaybeObject* maybe_obj = array->EnsureWritableFastElements();
if (!maybe_obj->To(&backing_store)) return maybe_obj;
}
@ -665,39 +815,40 @@ class FastElementsAccessor
MaybeObject* result = FastElementsAccessorSubclass::
SetFastElementsCapacityAndLength(array, new_capacity, length);
if (result->IsFailure()) return result;
array->ValidateElements();
return length_object;
}
// Request conversion to slow elements.
return array->GetHeap()->undefined_value();
}
};
class FastObjectElementsAccessor
: public FastElementsAccessor<FastObjectElementsAccessor,
ElementsKindTraits<FAST_ELEMENTS>,
kPointerSize> {
public:
explicit FastObjectElementsAccessor(const char* name)
: FastElementsAccessor<FastObjectElementsAccessor,
ElementsKindTraits<FAST_ELEMENTS>,
kPointerSize>(name) {}
static MaybeObject* DeleteCommon(JSObject* obj,
uint32_t key) {
ASSERT(obj->HasFastElements() ||
obj->HasFastSmiOnlyElements() ||
uint32_t key,
JSReceiver::DeleteMode mode) {
ASSERT(obj->HasFastSmiOrObjectElements() ||
obj->HasFastDoubleElements() ||
obj->HasFastArgumentsElements());
typename KindTraits::BackingStore* backing_store =
KindTraits::BackingStore::cast(obj->elements());
Heap* heap = obj->GetHeap();
FixedArray* backing_store = FixedArray::cast(obj->elements());
if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
backing_store = FixedArray::cast(backing_store->get(1));
backing_store =
KindTraits::BackingStore::cast(
FixedArray::cast(backing_store)->get(1));
} else {
ElementsKind kind = KindTraits::Kind;
if (IsFastPackedElementsKind(kind)) {
MaybeObject* transitioned =
obj->TransitionElementsKind(GetHoleyElementsKind(kind));
if (transitioned->IsFailure()) return transitioned;
}
if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) {
Object* writable;
MaybeObject* maybe = obj->EnsureWritableFastElements();
if (!maybe->ToObject(&writable)) return maybe;
backing_store = FixedArray::cast(writable);
backing_store = KindTraits::BackingStore::cast(writable);
}
}
uint32_t length = static_cast<uint32_t>(
obj->IsJSArray()
@ -709,15 +860,14 @@ class FastObjectElementsAccessor
// has too few used values, normalize it.
// To avoid doing the check on every delete we require at least
// one adjacent hole to the value being deleted.
Object* hole = heap->the_hole_value();
const int kMinLengthForSparsenessCheck = 64;
if (backing_store->length() >= kMinLengthForSparsenessCheck &&
!heap->InNewSpace(backing_store) &&
((key > 0 && backing_store->get(key - 1) == hole) ||
(key + 1 < length && backing_store->get(key + 1) == hole))) {
((key > 0 && backing_store->is_the_hole(key - 1)) ||
(key + 1 < length && backing_store->is_the_hole(key + 1)))) {
int num_used = 0;
for (int i = 0; i < backing_store->length(); ++i) {
if (backing_store->get(i) != hole) ++num_used;
if (!backing_store->is_the_hole(i)) ++num_used;
// Bail out early if more than 1/4 is used.
if (4 * num_used > backing_store->length()) break;
}
@ -730,26 +880,89 @@ class FastObjectElementsAccessor
return heap->true_value();
}
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
return DeleteCommon(obj, key, mode);
}
static bool HasElementImpl(
Object* receiver,
JSObject* holder,
uint32_t key,
typename KindTraits::BackingStore* backing_store) {
if (key >= static_cast<uint32_t>(backing_store->length())) {
return false;
}
return !backing_store->is_the_hole(key);
}
static void ValidateContents(JSObject* holder, int length) {
#if DEBUG
FixedArrayBase* elements = holder->elements();
Heap* heap = elements->GetHeap();
Map* map = elements->map();
ASSERT((IsFastSmiOrObjectElementsKind(KindTraits::Kind) &&
(map == heap->fixed_array_map() ||
map == heap->fixed_cow_array_map())) ||
(IsFastDoubleElementsKind(KindTraits::Kind) ==
((map == heap->fixed_array_map() && length == 0) ||
map == heap->fixed_double_array_map())));
for (int i = 0; i < length; i++) {
typename KindTraits::BackingStore* backing_store =
KindTraits::BackingStore::cast(elements);
ASSERT((!IsFastSmiElementsKind(KindTraits::Kind) ||
static_cast<Object*>(backing_store->get(i))->IsSmi()) ||
(IsFastHoleyElementsKind(KindTraits::Kind) ==
backing_store->is_the_hole(i)));
}
#endif
}
};
template<typename FastElementsAccessorSubclass,
typename KindTraits>
class FastSmiOrObjectElementsAccessor
: public FastElementsAccessor<FastElementsAccessorSubclass,
KindTraits,
kPointerSize> {
public:
explicit FastSmiOrObjectElementsAccessor(const char* name)
: FastElementsAccessor<FastElementsAccessorSubclass,
KindTraits,
kPointerSize>(name) {}
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
int packed_size,
int copy_size) {
switch (to_kind) {
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
if (IsFastSmiOrObjectElementsKind(to_kind)) {
CopyObjectToObjectElements(
FixedArray::cast(from), ElementsTraits::Kind, from_start,
FixedArray::cast(from), KindTraits::Kind, from_start,
FixedArray::cast(to), to_kind, to_start, copy_size);
return from;
} else if (IsFastDoubleElementsKind(to_kind)) {
if (IsFastSmiElementsKind(KindTraits::Kind)) {
if (IsFastPackedElementsKind(KindTraits::Kind) &&
packed_size != kPackedSizeNotKnown) {
CopyPackedSmiToDoubleElements(
FixedArray::cast(from), from_start,
FixedDoubleArray::cast(to), to_start,
packed_size, copy_size);
} else {
CopySmiToDoubleElements(
FixedArray::cast(from), from_start,
FixedDoubleArray::cast(to), to_start, copy_size);
}
case FAST_DOUBLE_ELEMENTS:
} else {
CopyObjectToDoubleElements(
FixedArray::cast(from), from_start,
FixedDoubleArray::cast(to), to_start, copy_size);
return from;
default:
}
} else {
UNREACHABLE();
}
return to->GetHeap()->undefined_value();
@ -759,64 +972,102 @@ class FastObjectElementsAccessor
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
uint32_t capacity,
uint32_t length) {
JSObject::SetFastElementsCapacityMode set_capacity_mode =
obj->HasFastSmiOnlyElements()
? JSObject::kAllowSmiOnlyElements
: JSObject::kDontAllowSmiOnlyElements;
JSObject::SetFastElementsCapacitySmiMode set_capacity_mode =
obj->HasFastSmiElements()
? JSObject::kAllowSmiElements
: JSObject::kDontAllowSmiElements;
return obj->SetFastElementsCapacityAndLength(capacity,
length,
set_capacity_mode);
}
};
protected:
friend class FastElementsAccessor<FastObjectElementsAccessor,
ElementsKindTraits<FAST_ELEMENTS>,
kPointerSize>;
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
return DeleteCommon(obj, key);
}
class FastPackedSmiElementsAccessor
: public FastSmiOrObjectElementsAccessor<
FastPackedSmiElementsAccessor,
ElementsKindTraits<FAST_SMI_ELEMENTS> > {
public:
explicit FastPackedSmiElementsAccessor(const char* name)
: FastSmiOrObjectElementsAccessor<
FastPackedSmiElementsAccessor,
ElementsKindTraits<FAST_SMI_ELEMENTS> >(name) {}
};
class FastHoleySmiElementsAccessor
: public FastSmiOrObjectElementsAccessor<
FastHoleySmiElementsAccessor,
ElementsKindTraits<FAST_HOLEY_SMI_ELEMENTS> > {
public:
explicit FastHoleySmiElementsAccessor(const char* name)
: FastSmiOrObjectElementsAccessor<
FastHoleySmiElementsAccessor,
ElementsKindTraits<FAST_HOLEY_SMI_ELEMENTS> >(name) {}
};
class FastPackedObjectElementsAccessor
: public FastSmiOrObjectElementsAccessor<
FastPackedObjectElementsAccessor,
ElementsKindTraits<FAST_ELEMENTS> > {
public:
explicit FastPackedObjectElementsAccessor(const char* name)
: FastSmiOrObjectElementsAccessor<
FastPackedObjectElementsAccessor,
ElementsKindTraits<FAST_ELEMENTS> >(name) {}
};
class FastHoleyObjectElementsAccessor
: public FastSmiOrObjectElementsAccessor<
FastHoleyObjectElementsAccessor,
ElementsKindTraits<FAST_HOLEY_ELEMENTS> > {
public:
explicit FastHoleyObjectElementsAccessor(const char* name)
: FastSmiOrObjectElementsAccessor<
FastHoleyObjectElementsAccessor,
ElementsKindTraits<FAST_HOLEY_ELEMENTS> >(name) {}
};
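The four concrete accessors above cover the packed/holey and smi/object combinations introduced by this upgrade. A simplified sketch of the kind lattice they specialize over (illustrative only, not the real elements-kind.h; the +1 packed-to-holey step is an assumption about the enum layout):

// Each packed kind is paired with a holey variant; deleting from a packed
// array transitions it to the matching holey kind, as seen in DeleteCommon.
enum ElementsKind {
  FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS,
  FAST_ELEMENTS, FAST_HOLEY_ELEMENTS,
  FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS,
  DICTIONARY_ELEMENTS
};

inline bool IsFastPackedElementsKind(ElementsKind kind) {
  return kind == FAST_SMI_ELEMENTS || kind == FAST_ELEMENTS ||
         kind == FAST_DOUBLE_ELEMENTS;
}

inline ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) {
  return IsFastPackedElementsKind(packed_kind)
             ? static_cast<ElementsKind>(packed_kind + 1)
             : packed_kind;
}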
template<typename FastElementsAccessorSubclass,
typename KindTraits>
class FastDoubleElementsAccessor
: public FastElementsAccessor<FastDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
: public FastElementsAccessor<FastElementsAccessorSubclass,
KindTraits,
kDoubleSize> {
public:
explicit FastDoubleElementsAccessor(const char* name)
: FastElementsAccessor<FastDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
: FastElementsAccessor<FastElementsAccessorSubclass,
KindTraits,
kDoubleSize>(name) {}
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
uint32_t capacity,
uint32_t length) {
return obj->SetFastDoubleElementsCapacityAndLength(capacity, length);
return obj->SetFastDoubleElementsCapacityAndLength(capacity,
length);
}
protected:
friend class ElementsAccessorBase<FastDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >;
friend class FastElementsAccessor<FastDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
kDoubleSize>;
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
int packed_size,
int copy_size) {
switch (to_kind) {
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
return CopyDoubleToObjectElements(
FixedDoubleArray::cast(from), from_start, FixedArray::cast(to),
to_kind, to_start, copy_size);
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
CopyDoubleToDoubleElements(FixedDoubleArray::cast(from), from_start,
FixedDoubleArray::cast(to),
to_start, copy_size);
@ -826,26 +1077,35 @@ class FastDoubleElementsAccessor
}
return to->GetHeap()->undefined_value();
}
};
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
int length = obj->IsJSArray()
? Smi::cast(JSArray::cast(obj)->length())->value()
: FixedDoubleArray::cast(obj->elements())->length();
if (key < static_cast<uint32_t>(length)) {
FixedDoubleArray::cast(obj->elements())->set_the_hole(key);
}
return obj->GetHeap()->true_value();
}
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
FixedDoubleArray* backing_store) {
return key < static_cast<uint32_t>(backing_store->length()) &&
!backing_store->is_the_hole(key);
}
class FastPackedDoubleElementsAccessor
: public FastDoubleElementsAccessor<
FastPackedDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS> > {
public:
friend class ElementsAccessorBase<FastPackedDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >;
explicit FastPackedDoubleElementsAccessor(const char* name)
: FastDoubleElementsAccessor<
FastPackedDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >(name) {}
};
class FastHoleyDoubleElementsAccessor
: public FastDoubleElementsAccessor<
FastHoleyDoubleElementsAccessor,
ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> > {
public:
friend class ElementsAccessorBase<
FastHoleyDoubleElementsAccessor,
ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> >;
explicit FastHoleyDoubleElementsAccessor(const char* name)
: FastDoubleElementsAccessor<
FastHoleyDoubleElementsAccessor,
ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> >(name) {}
};
@ -866,7 +1126,7 @@ class ExternalElementsAccessor
friend class ElementsAccessorBase<ExternalElementsAccessorSubclass,
ElementsKindTraits<Kind> >;
static MaybeObject* GetImpl(Object* receiver,
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
uint32_t key,
BackingStore* backing_store) {
@ -876,7 +1136,8 @@ class ExternalElementsAccessor
: backing_store->GetHeap()->undefined_value();
}
static MaybeObject* SetLengthImpl(JSObject* obj,
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
JSObject* obj,
Object* length,
BackingStore* backing_store) {
// External arrays do not support changing their length.
@ -884,7 +1145,7 @@ class ExternalElementsAccessor
return obj;
}
virtual MaybeObject* Delete(JSObject* obj,
MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
// External arrays always ignore deletes.
@ -1002,7 +1263,8 @@ class DictionaryElementsAccessor
// Adjusts the length of the dictionary backing store and returns the new
// length according to ES5 section 15.4.5.2 behavior.
static MaybeObject* SetLengthWithoutNormalize(SeededNumberDictionary* dict,
MUST_USE_RESULT static MaybeObject* SetLengthWithoutNormalize(
SeededNumberDictionary* dict,
JSArray* array,
Object* length_object,
uint32_t length) {
@ -1057,7 +1319,8 @@ class DictionaryElementsAccessor
return length_object;
}
static MaybeObject* DeleteCommon(JSObject* obj,
MUST_USE_RESULT static MaybeObject* DeleteCommon(
JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
Isolate* isolate = obj->GetIsolate();
@ -1102,20 +1365,24 @@ class DictionaryElementsAccessor
return heap->true_value();
}
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
int packed_size,
int copy_size) {
switch (to_kind) {
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
CopyDictionaryToObjectElements(
SeededNumberDictionary::cast(from), from_start,
FixedArray::cast(to), to_kind, to_start, copy_size);
return from;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
CopyDictionaryToDoubleElements(
SeededNumberDictionary::cast(from), from_start,
FixedDoubleArray::cast(to), to_start, copy_size);
@ -1131,13 +1398,14 @@ class DictionaryElementsAccessor
friend class ElementsAccessorBase<DictionaryElementsAccessor,
ElementsKindTraits<DICTIONARY_ELEMENTS> >;
virtual MaybeObject* Delete(JSObject* obj,
MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
return DeleteCommon(obj, key, mode);
}
static MaybeObject* GetImpl(Object* receiver,
MUST_USE_RESULT static MaybeObject* GetImpl(
Object* receiver,
JSObject* obj,
uint32_t key,
SeededNumberDictionary* backing_store) {
@ -1186,7 +1454,7 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
NonStrictArgumentsElementsAccessor,
ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> >;
static MaybeObject* GetImpl(Object* receiver,
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
uint32_t key,
FixedArray* parameter_map) {
@ -1216,7 +1484,8 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
}
}
static MaybeObject* SetLengthImpl(JSObject* obj,
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
JSObject* obj,
Object* length,
FixedArray* parameter_map) {
// TODO(mstarzinger): This was never implemented but will be used once we
@ -1225,7 +1494,7 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
return obj;
}
virtual MaybeObject* Delete(JSObject* obj,
MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
FixedArray* parameter_map = FixedArray::cast(obj->elements());
@ -1240,17 +1509,21 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
if (arguments->IsDictionary()) {
return DictionaryElementsAccessor::DeleteCommon(obj, key, mode);
} else {
return FastObjectElementsAccessor::DeleteCommon(obj, key);
// It's difficult to access the version of DeleteCommon that is declared
// in the templatized super class, so call the concrete implementation in
// the class for the most generalized ElementsKind subclass.
return FastHoleyObjectElementsAccessor::DeleteCommon(obj, key, mode);
}
}
return obj->GetHeap()->true_value();
}
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
int packed_size,
int copy_size) {
FixedArray* parameter_map = FixedArray::cast(from);
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
@ -1304,7 +1577,7 @@ ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
if (array->IsDictionary()) {
return elements_accessors_[DICTIONARY_ELEMENTS];
} else {
return elements_accessors_[FAST_ELEMENTS];
return elements_accessors_[FAST_HOLEY_ELEMENTS];
}
case EXTERNAL_BYTE_ARRAY_TYPE:
return elements_accessors_[EXTERNAL_BYTE_ELEMENTS];
@ -1332,18 +1605,8 @@ ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
void ElementsAccessor::InitializeOncePerProcess() {
static struct ConcreteElementsAccessors {
#define ACCESSOR_STRUCT(Class, Kind, Store) Class* Kind##_handler;
ELEMENTS_LIST(ACCESSOR_STRUCT)
#undef ACCESSOR_STRUCT
} element_accessors = {
#define ACCESSOR_INIT(Class, Kind, Store) new Class(#Kind),
ELEMENTS_LIST(ACCESSOR_INIT)
#undef ACCESSOR_INIT
};
static ElementsAccessor* accessor_array[] = {
#define ACCESSOR_ARRAY(Class, Kind, Store) element_accessors.Kind##_handler,
#define ACCESSOR_ARRAY(Class, Kind, Store) new Class(#Kind),
ELEMENTS_LIST(ACCESSOR_ARRAY)
#undef ACCESSOR_ARRAY
};
@ -1355,8 +1618,16 @@ void ElementsAccessor::InitializeOncePerProcess() {
}
void ElementsAccessor::TearDown() {
#define ACCESSOR_DELETE(Class, Kind, Store) delete elements_accessors_[Kind];
ELEMENTS_LIST(ACCESSOR_DELETE)
#undef ACCESSOR_DELETE
elements_accessors_ = NULL;
}
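The rewritten InitializeOncePerProcess() now news one accessor per kind straight into the table, and the new TearDown() deletes them again. The same allocate-once/delete-on-teardown shape as a standalone sketch (names are illustrative, not V8's):

#include <cstddef>

struct Handler { virtual ~Handler() {} };
struct SmiHandler : Handler {};
struct ObjectHandler : Handler {};

static const size_t kHandlerCount = 2;
static Handler* handlers[kHandlerCount] = { NULL, NULL };

void InitializeOncePerProcess() {
  handlers[0] = new SmiHandler();
  handlers[1] = new ObjectHandler();
}

void TearDown() {
  for (size_t i = 0; i < kHandlerCount; ++i) {
    delete handlers[i];  // deleting NULL is a no-op
    handlers[i] = NULL;
  }
}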
template <typename ElementsAccessorSubclass, typename ElementsKindTraits>
MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
ElementsKindTraits>::
SetLengthImpl(JSObject* obj,
Object* length,

23
deps/v8/src/elements.h

@ -28,6 +28,7 @@
#ifndef V8_ELEMENTS_H_
#define V8_ELEMENTS_H_
#include "elements-kind.h"
#include "objects.h"
#include "heap.h"
#include "isolate.h"
@ -45,6 +46,10 @@ class ElementsAccessor {
virtual ElementsKind kind() const = 0;
const char* name() const { return name_; }
// Checks the elements of an object for consistency, asserting when a problem
// is found.
virtual void Validate(JSObject* obj) = 0;
// Returns true if a holder contains an element with the specified key
// without iterating up the prototype chain. The caller can optionally pass
// in the backing store to use for the check, which must be compatible with
@ -60,7 +65,8 @@ class ElementsAccessor {
// can optionally pass in the backing store to use for the check, which must
// be compatible with the ElementsKind of the ElementsAccessor. If
// backing_store is NULL, the holder->elements() is used as the backing store.
virtual MaybeObject* Get(Object* receiver,
MUST_USE_RESULT virtual MaybeObject* Get(
Object* receiver,
JSObject* holder,
uint32_t key,
FixedArrayBase* backing_store = NULL) = 0;
@ -70,7 +76,7 @@ class ElementsAccessor {
// changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. array that
// have non-deletable elements can only be shrunk to the size of highest
// element that is non-deletable.
virtual MaybeObject* SetLength(JSArray* holder,
MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* holder,
Object* new_length) = 0;
// Modifies both the length and capacity of a JSArray, resizing the underlying
@ -79,12 +85,12 @@ class ElementsAccessor {
// elements. This method should only be called for array expansion OR by
// runtime JavaScript code that use InternalArrays and don't care about
// EcmaScript 5.1 semantics.
virtual MaybeObject* SetCapacityAndLength(JSArray* array,
MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(JSArray* array,
int capacity,
int length) = 0;
// Deletes an element in an object, returning a new elements backing store.
virtual MaybeObject* Delete(JSObject* holder,
MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* holder,
uint32_t key,
JSReceiver::DeleteMode mode) = 0;
@ -101,7 +107,8 @@ class ElementsAccessor {
// the source JSObject or JSArray in source_holder. If the holder's backing
// store is available, it can be passed in source and source_holder is
// ignored.
virtual MaybeObject* CopyElements(JSObject* source_holder,
MUST_USE_RESULT virtual MaybeObject* CopyElements(
JSObject* source_holder,
uint32_t source_start,
FixedArrayBase* destination,
ElementsKind destination_kind,
@ -109,7 +116,7 @@ class ElementsAccessor {
int copy_size,
FixedArrayBase* source = NULL) = 0;
MaybeObject* CopyElements(JSObject* from_holder,
MUST_USE_RESULT MaybeObject* CopyElements(JSObject* from_holder,
FixedArrayBase* to,
ElementsKind to_kind,
FixedArrayBase* from = NULL) {
@ -117,7 +124,8 @@ class ElementsAccessor {
kCopyToEndAndInitializeToHole, from);
}
virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
Object* receiver,
JSObject* holder,
FixedArray* to,
FixedArrayBase* from = NULL) = 0;
@ -131,6 +139,7 @@ class ElementsAccessor {
static ElementsAccessor* ForArray(FixedArrayBase* array);
static void InitializeOncePerProcess();
static void TearDown();
protected:
friend class NonStrictArgumentsElementsAccessor;
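Most accessor entry points in this header gain MUST_USE_RESULT, so ignoring a MaybeObject* result (which may encode an allocation failure) becomes a compiler warning. As a hedged sketch, such a macro is typically built on the warn_unused_result attribute; V8 defines its own version elsewhere, so the exact definition below is an assumption:

#if defined(__GNUC__)
#define MUST_USE_RESULT __attribute__((warn_unused_result))
#else
#define MUST_USE_RESULT
#endif

MUST_USE_RESULT int Reserve(int bytes);  // callers must consume the result

int Reserve(int bytes) { return bytes > 0 ? bytes : -1; }

int UseIt() {
  int got = Reserve(16);  // fine: result is checked by the caller
  // Reserve(16);         // would trigger -Wunused-result with GCC/Clang
  return got;
}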

7
deps/v8/src/extensions/externalize-string-extension.cc

@ -133,11 +133,8 @@ v8::Handle<v8::Value> ExternalizeStringExtension::IsAscii(
void ExternalizeStringExtension::Register() {
static ExternalizeStringExtension* externalize_extension = NULL;
if (externalize_extension == NULL)
externalize_extension = new ExternalizeStringExtension;
static v8::DeclareExtension externalize_extension_declaration(
externalize_extension);
static ExternalizeStringExtension externalize_extension;
static v8::DeclareExtension declaration(&externalize_extension);
}
} } // namespace v8::internal

5
deps/v8/src/extensions/gc-extension.cc

@ -46,9 +46,8 @@ v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
void GCExtension::Register() {
static GCExtension* gc_extension = NULL;
if (gc_extension == NULL) gc_extension = new GCExtension();
static v8::DeclareExtension gc_extension_declaration(gc_extension);
static GCExtension gc_extension;
static v8::DeclareExtension declaration(&gc_extension);
}
} } // namespace v8::internal
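Both Register() functions above switch from a lazily new'd, never-freed singleton to a function-local static object. A standalone sketch of the before/after shapes (illustrative types, not V8's):

#include <cstddef>

struct Extension {};
struct DeclareExtension {
  explicit DeclareExtension(Extension* e) : extension(e) {}
  Extension* extension;
};

// Old shape: heap allocation on first call, the pointer is never deleted.
void RegisterOld() {
  static Extension* ext = NULL;
  if (ext == NULL) ext = new Extension();
  static DeclareExtension declaration(ext);
}

// New shape: the extension itself has static storage duration; no heap
// allocation, nothing left unfreed at exit.
void RegisterNew() {
  static Extension ext;
  static DeclareExtension declaration(&ext);
}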

76
deps/v8/src/factory.cc

@ -34,6 +34,7 @@
#include "macro-assembler.h"
#include "objects.h"
#include "objects-visiting.h"
#include "platform.h"
#include "scopeinfo.h"
namespace v8 {
@ -114,7 +115,8 @@ Handle<ObjectHashTable> Factory::NewObjectHashTable(int at_least_space_for) {
Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors) {
ASSERT(0 <= number_of_descriptors);
CALL_HEAP_FUNCTION(isolate(),
DescriptorArray::Allocate(number_of_descriptors),
DescriptorArray::Allocate(number_of_descriptors,
DescriptorArray::MAY_BE_SHARED),
DescriptorArray);
}
@ -291,6 +293,15 @@ Handle<Context> Factory::NewGlobalContext() {
}
Handle<Context> Factory::NewModuleContext(Handle<Context> previous,
Handle<ScopeInfo> scope_info) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateModuleContext(*previous, *scope_info),
Context);
}
Handle<Context> Factory::NewFunctionContext(int length,
Handle<JSFunction> function) {
CALL_HEAP_FUNCTION(
@ -324,8 +335,7 @@ Handle<Context> Factory::NewWithContext(Handle<JSFunction> function,
}
Handle<Context> Factory::NewBlockContext(
Handle<JSFunction> function,
Handle<Context> Factory::NewBlockContext(Handle<JSFunction> function,
Handle<Context> previous,
Handle<ScopeInfo> scope_info) {
CALL_HEAP_FUNCTION(
@ -487,7 +497,9 @@ Handle<Map> Factory::CopyMap(Handle<Map> src,
Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) {
CALL_HEAP_FUNCTION(isolate(), src->CopyDropTransitions(), Map);
CALL_HEAP_FUNCTION(isolate(),
src->CopyDropTransitions(DescriptorArray::MAY_BE_SHARED),
Map);
}
@ -667,6 +679,43 @@ Handle<Object> Factory::NewError(const char* type,
}
Handle<String> Factory::EmergencyNewError(const char* type,
Handle<JSArray> args) {
const int kBufferSize = 1000;
char buffer[kBufferSize];
size_t space = kBufferSize;
char* p = &buffer[0];
Vector<char> v(buffer, kBufferSize);
OS::StrNCpy(v, type, space);
space -= Min(space, strlen(type));
p = &buffer[kBufferSize] - space;
for (unsigned i = 0; i < ARRAY_SIZE(args); i++) {
if (space > 0) {
*p++ = ' ';
space--;
if (space > 0) {
MaybeObject* maybe_arg = args->GetElement(i);
Handle<String> arg_str(reinterpret_cast<String*>(maybe_arg));
const char* arg = *arg_str->ToCString();
Vector<char> v2(p, space);
OS::StrNCpy(v2, arg, space);
space -= Min(space, strlen(arg));
p = &buffer[kBufferSize] - space;
}
}
}
if (space > 0) {
*p = '\0';
} else {
buffer[kBufferSize - 1] = '\0';
}
Handle<String> error_string = NewStringFromUtf8(CStrVector(buffer), TENURED);
return error_string;
}
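EmergencyNewError() formats the message into a fixed on-stack buffer, tracking the remaining space after every bounded copy. The same bookkeeping with plain C library calls (a sketch only; strncpy stands in for the OS::StrNCpy used above):

#include <cstring>

void BuildMessage(char* buffer, size_t capacity,
                  const char* type, const char* const* args, size_t arg_count) {
  size_t space = capacity;
  char* p = buffer;
  strncpy(p, type, space);
  space -= (strlen(type) < space) ? strlen(type) : space;
  p = buffer + capacity - space;
  for (size_t i = 0; i < arg_count; ++i) {
    if (space > 0) {
      *p++ = ' ';
      space--;
      if (space > 0) {
        strncpy(p, args[i], space);
        size_t used = (strlen(args[i]) < space) ? strlen(args[i]) : space;
        space -= used;
        p = buffer + capacity - space;
      }
    }
  }
  if (space > 0) {
    *p = '\0';                    // room left: terminate in place
  } else {
    buffer[capacity - 1] = '\0';  // buffer is full: force termination
  }
}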
Handle<Object> Factory::NewError(const char* maker,
const char* type,
Handle<JSArray> args) {
@ -675,8 +724,9 @@ Handle<Object> Factory::NewError(const char* maker,
isolate()->js_builtins_object()->GetPropertyNoExceptionThrown(*make_str));
// If the builtins haven't been properly configured yet this error
// constructor may not have been defined. Bail out.
if (!fun_obj->IsJSFunction())
return undefined_value();
if (!fun_obj->IsJSFunction()) {
return EmergencyNewError(type, args);
}
Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
Handle<Object> type_obj = LookupAsciiSymbol(type);
Handle<Object> argv[] = { type_obj, args };
@ -767,7 +817,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
instance_size != JSObject::kHeaderSize) {
Handle<Map> initial_map = NewMap(type,
instance_size,
FAST_SMI_ONLY_ELEMENTS);
GetInitialFastElementsKind());
function->set_initial_map(*initial_map);
initial_map->set_constructor(*function);
}
@ -892,7 +942,7 @@ Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors(
Handle<String> key =
SymbolFromString(Handle<String>(String::cast(entry->name())));
// Check if a descriptor with this name already exists before writing.
if (result->LinearSearch(*key, descriptor_count) ==
if (result->LinearSearch(EXPECT_UNSORTED, *key, descriptor_count) ==
DescriptorArray::kNotFound) {
CallbacksDescriptor desc(*key, *entry, entry->property_attributes());
result->Set(descriptor_count, &desc, witness);
@ -928,6 +978,13 @@ Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
}
Handle<JSModule> Factory::NewJSModule() {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateJSModule(), JSModule);
}
Handle<GlobalObject> Factory::NewGlobalObject(
Handle<JSFunction> constructor) {
CALL_HEAP_FUNCTION(isolate(),
@ -998,10 +1055,11 @@ void Factory::EnsureCanContainHeapObjectElements(Handle<JSArray> array) {
void Factory::EnsureCanContainElements(Handle<JSArray> array,
Handle<FixedArrayBase> elements,
uint32_t length,
EnsureElementsMode mode) {
CALL_HEAP_FUNCTION_VOID(
isolate(),
array->EnsureCanContainElements(*elements, mode));
array->EnsureCanContainElements(*elements, length, mode));
}

26
deps/v8/src/factory.h

@ -162,9 +162,12 @@ class Factory {
// Create a global (but otherwise uninitialized) context.
Handle<Context> NewGlobalContext();
// Create a module context.
Handle<Context> NewModuleContext(Handle<Context> previous,
Handle<ScopeInfo> scope_info);
// Create a function context.
Handle<Context> NewFunctionContext(int length,
Handle<JSFunction> function);
Handle<Context> NewFunctionContext(int length, Handle<JSFunction> function);
// Create a catch context.
Handle<Context> NewCatchContext(Handle<JSFunction> function,
@ -177,7 +180,7 @@ class Factory {
Handle<Context> previous,
Handle<JSObject> extension);
// Create a 'block' context.
// Create a block context.
Handle<Context> NewBlockContext(Handle<JSFunction> function,
Handle<Context> previous,
Handle<ScopeInfo> scope_info);
@ -213,9 +216,10 @@ class Factory {
Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
Handle<Object> value);
Handle<Map> NewMap(InstanceType type,
Handle<Map> NewMap(
InstanceType type,
int instance_size,
ElementsKind elements_kind = FAST_ELEMENTS);
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
@ -262,14 +266,18 @@ class Factory {
// runtime.
Handle<JSObject> NewJSObjectFromMap(Handle<Map> map);
// JS modules are pretenured.
Handle<JSModule> NewJSModule();
// JS arrays are pretenured when allocated by the parser.
Handle<JSArray> NewJSArray(int capacity,
ElementsKind elements_kind = FAST_ELEMENTS,
Handle<JSArray> NewJSArray(
int capacity,
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
PretenureFlag pretenure = NOT_TENURED);
Handle<JSArray> NewJSArrayWithElements(
Handle<FixedArrayBase> elements,
ElementsKind elements_kind = FAST_ELEMENTS,
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
PretenureFlag pretenure = NOT_TENURED);
void SetElementsCapacityAndLength(Handle<JSArray> array,
@ -281,6 +289,7 @@ class Factory {
void EnsureCanContainHeapObjectElements(Handle<JSArray> array);
void EnsureCanContainElements(Handle<JSArray> array,
Handle<FixedArrayBase> elements,
uint32_t length,
EnsureElementsMode mode);
Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
@ -329,6 +338,7 @@ class Factory {
Handle<Object> NewError(const char* maker, const char* type,
Handle<JSArray> args);
Handle<String> EmergencyNewError(const char* type, Handle<JSArray> args);
Handle<Object> NewError(const char* maker, const char* type,
Vector< Handle<Object> > args);
Handle<Object> NewError(const char* type,

16
deps/v8/src/flag-definitions.h

@ -132,6 +132,10 @@ public:
// Flags for language modes and experimental language features.
DEFINE_bool(use_strict, false, "enforce strict mode")
DEFINE_bool(es5_readonly, false,
"activate correct semantics for inheriting readonliness")
DEFINE_bool(es52_globals, false,
"activate new semantics for global var declarations")
DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
DEFINE_bool(harmony_scoping, false, "enable harmony block scoping")
@ -148,6 +152,7 @@ DEFINE_implication(harmony, harmony_collections)
DEFINE_implication(harmony_modules, harmony_scoping)
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, false, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
DEFINE_bool(clever_optimizations,
true,
@ -165,7 +170,12 @@ DEFINE_bool(eliminate_dead_phis, true, "eliminate dead phis")
DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
DEFINE_bool(use_inlining, true, "use function inlining")
DEFINE_bool(limit_inlining, true, "limit code size growth from inlining")
DEFINE_int(max_inlined_source_size, 600,
"maximum source size in bytes considered for a single inlining")
DEFINE_int(max_inlined_nodes, 196,
"maximum number of AST nodes considered for a single inlining")
DEFINE_int(max_inlined_nodes_cumulative, 196,
"maximum cumulative number of AST nodes considered for inlining")
DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
DEFINE_bool(collect_megamorphic_maps_from_stub_cache,
true,
@ -188,6 +198,10 @@ DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_bool(use_osr, true, "use on-stack replacement")
DEFINE_bool(array_bounds_checks_elimination, false,
"perform array bounds checks elimination")
DEFINE_bool(array_index_dehoisting, false,
"perform array index dehoisting")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
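The new flags (es5_readonly, es52_globals, packed_arrays, the inlining budgets, and the bounds-check/dehoisting switches) are declared with the same DEFINE_bool/DEFINE_int macros as the rest of the file. As a rough illustration of how a macro-based flag file can work, the definition below is a simplification and an assumption, not V8's actual multi-pass flag machinery:

// DEFINE_BOOL here simply emits a global FLAG_<name> variable; the comment
// string is dropped in this sketch (the real system also feeds --help output).
#define DEFINE_BOOL(name, default_value, comment) \
  bool FLAG_##name = default_value;

DEFINE_BOOL(packed_arrays, false, "optimizes arrays that have no holes")
DEFINE_BOOL(smi_only_arrays, true, "tracks arrays with only smi values")

bool UsePackedArrays() { return FLAG_packed_arrays; }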

50
deps/v8/src/frames.cc

@ -469,6 +469,20 @@ StackFrame::Type StackFrame::GetCallerState(State* state) const {
}
Address StackFrame::UnpaddedFP() const {
#if defined(V8_TARGET_ARCH_IA32)
if (!is_optimized()) return fp();
int32_t alignment_state = Memory::int32_at(
fp() + JavaScriptFrameConstants::kDynamicAlignmentStateOffset);
return (alignment_state == kAlignmentPaddingPushed) ?
(fp() + kPointerSize) : fp();
#else
return fp();
#endif
}
Code* EntryFrame::unchecked_code() const {
return HEAP->raw_unchecked_js_entry_code();
}
@ -1359,34 +1373,28 @@ InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
// -------------------------------------------------------------------------
int NumRegs(RegList reglist) {
int n = 0;
while (reglist != 0) {
n++;
reglist &= reglist - 1; // clear one bit
}
return n;
return CompilerIntrinsics::CountSetBits(reglist);
}
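The hand-written popcount loop in NumRegs() is replaced by CompilerIntrinsics::CountSetBits(). Both compute the same value; the builtin used below is an assumption about how the intrinsic maps to GCC-like compilers, shown only to illustrate the equivalence:

#include <stdint.h>

int CountSetBitsLoop(uint32_t bits) {
  int n = 0;
  while (bits != 0) {
    n++;
    bits &= bits - 1;  // clears the lowest set bit (Kernighan's trick)
  }
  return n;
}

int CountSetBitsIntrinsic(uint32_t bits) {
  return __builtin_popcount(bits);  // GCC/Clang builtin popcount
}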
struct JSCallerSavedCodeData {
JSCallerSavedCodeData() {
int reg_code[kNumJSCallerSaved];
};
JSCallerSavedCodeData caller_saved_code_data;
void SetUpJSCallerSavedCodeData() {
int i = 0;
for (int r = 0; r < kNumRegs; r++)
if ((kJSCallerSaved & (1 << r)) != 0)
reg_code[i++] = r;
caller_saved_code_data.reg_code[i++] = r;
ASSERT(i == kNumJSCallerSaved);
}
int reg_code[kNumJSCallerSaved];
};
static LazyInstance<JSCallerSavedCodeData>::type caller_saved_code_data =
LAZY_INSTANCE_INITIALIZER;
int JSCallerSavedCode(int n) {
ASSERT(0 <= n && n < kNumJSCallerSaved);
return caller_saved_code_data.Get().reg_code[n];
return caller_saved_code_data.reg_code[n];
}
@ -1400,11 +1408,11 @@ class field##_Wrapper : public ZoneObject { \
STACK_FRAME_TYPE_LIST(DEFINE_WRAPPER)
#undef DEFINE_WRAPPER
static StackFrame* AllocateFrameCopy(StackFrame* frame) {
static StackFrame* AllocateFrameCopy(StackFrame* frame, Zone* zone) {
#define FRAME_TYPE_CASE(type, field) \
case StackFrame::type: { \
field##_Wrapper* wrapper = \
new field##_Wrapper(*(reinterpret_cast<field*>(frame))); \
new(zone) field##_Wrapper(*(reinterpret_cast<field*>(frame))); \
return &wrapper->frame_; \
}
@ -1416,11 +1424,11 @@ static StackFrame* AllocateFrameCopy(StackFrame* frame) {
return NULL;
}
Vector<StackFrame*> CreateStackMap() {
ZoneList<StackFrame*> list(10);
Vector<StackFrame*> CreateStackMap(Zone* zone) {
ZoneList<StackFrame*> list(10, zone);
for (StackFrameIterator it; !it.done(); it.Advance()) {
StackFrame* frame = AllocateFrameCopy(it.frame());
list.Add(frame);
StackFrame* frame = AllocateFrameCopy(it.frame(), zone);
list.Add(frame, zone);
}
return list.ToVector();
}

12
deps/v8/src/frames.h

@ -40,6 +40,8 @@ typedef uint32_t RegList;
// Get the number of registers in a given register list.
int NumRegs(RegList list);
void SetUpJSCallerSavedCodeData();
// Return the code of the n-th saved register available to JavaScript.
int JSCallerSavedCode(int n);
@ -204,11 +206,19 @@ class StackFrame BASE_EMBEDDED {
Address fp() const { return state_.fp; }
Address caller_sp() const { return GetCallerStackPointer(); }
// If this frame is optimized and was dynamically aligned return its old
// unaligned frame pointer. When the frame is deoptimized its FP will shift
// up one word and become unaligned.
Address UnpaddedFP() const;
Address pc() const { return *pc_address(); }
void set_pc(Address pc) { *pc_address() = pc; }
virtual void SetCallerFp(Address caller_fp) = 0;
// Manually changes value of fp in this object.
void UpdateFp(Address fp) { state_.fp = fp; }
Address* pc_address() const { return state_.pc_address; }
// Get the id of this stack frame.
@ -883,7 +893,7 @@ class StackFrameLocator BASE_EMBEDDED {
// Reads all frames on the current stack and copies them into the current
// zone memory.
Vector<StackFrame*> CreateStackMap();
Vector<StackFrame*> CreateStackMap(Zone* zone);
} } // namespace v8::internal

133
deps/v8/src/full-codegen.cc

@ -303,7 +303,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
masm.positions_recorder()->StartGDBJITLineInfoRecording();
#endif
FullCodeGenerator cgen(&masm, info);
FullCodeGenerator cgen(&masm, info, isolate->zone());
cgen.Generate();
if (cgen.HasStackOverflow()) {
ASSERT(!isolate->has_pending_exception());
@ -316,7 +316,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
code->set_optimizable(info->IsOptimizable() &&
!info->function()->flags()->Contains(kDontOptimize) &&
info->function()->scope()->AllowsLazyRecompilation());
code->set_self_optimization_header(cgen.has_self_optimization_header_);
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
cgen.PopulateTypeFeedbackCells(code);
@ -332,9 +331,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
code->set_stack_check_table_offset(table_offset);
CodeGenerator::PrintCode(code, info);
info->SetCode(code); // May be an empty handle.
if (!code.is_null()) {
isolate->runtime_profiler()->NotifyCodeGenerated(code->instruction_size());
}
#ifdef ENABLE_GDB_JIT_INTERFACE
if (FLAG_gdbjit && !code.is_null()) {
GDBJITLineInfo* lineinfo =
@ -444,14 +440,14 @@ void FullCodeGenerator::PrepareForBailoutForId(unsigned id, State state) {
}
}
#endif // DEBUG
bailout_entries_.Add(entry);
bailout_entries_.Add(entry, zone());
}
void FullCodeGenerator::RecordTypeFeedbackCell(
unsigned id, Handle<JSGlobalPropertyCell> cell) {
TypeFeedbackCellEntry entry = { id, cell };
type_feedback_cells_.Add(entry);
type_feedback_cells_.Add(entry, zone());
}
@ -460,7 +456,7 @@ void FullCodeGenerator::RecordStackCheck(unsigned ast_id) {
// state.
ASSERT(masm_->pc_offset() > 0);
BailoutEntry entry = { ast_id, static_cast<unsigned>(masm_->pc_offset()) };
stack_checks_.Add(entry);
stack_checks_.Add(entry, zone());
}
@ -573,88 +569,91 @@ void FullCodeGenerator::DoTest(const TestContext* context) {
void FullCodeGenerator::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
int save_global_count = global_count_;
global_count_ = 0;
ZoneList<Handle<Object> >* saved_globals = globals_;
ZoneList<Handle<Object> > inner_globals(10, zone());
globals_ = &inner_globals;
AstVisitor::VisitDeclarations(declarations);
// Batch declare global functions and variables.
if (global_count_ > 0) {
Handle<FixedArray> array =
isolate()->factory()->NewFixedArray(2 * global_count_, TENURED);
int length = declarations->length();
for (int j = 0, i = 0; i < length; i++) {
Declaration* decl = declarations->at(i);
Variable* var = decl->proxy()->var();
if (var->IsUnallocated()) {
array->set(j++, *(var->name()));
FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration();
if (fun_decl == NULL) {
if (var->binding_needs_init()) {
// In case this binding needs initialization use the hole.
array->set_the_hole(j++);
} else {
array->set_undefined(j++);
}
} else {
Handle<SharedFunctionInfo> function =
Compiler::BuildFunctionInfo(fun_decl->fun(), script());
// Check for stack-overflow exception.
if (function.is_null()) {
SetStackOverflow();
return;
}
array->set(j++, *function);
}
}
}
if (!globals_->is_empty()) {
// Invoke the platform-dependent code generator to do the actual
// declaration of the global functions and variables.
Handle<FixedArray> array =
isolate()->factory()->NewFixedArray(globals_->length(), TENURED);
for (int i = 0; i < globals_->length(); ++i)
array->set(i, *globals_->at(i));
DeclareGlobals(array);
}
global_count_ = save_global_count;
globals_ = saved_globals;
}
void FullCodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
EmitDeclaration(decl->proxy(), decl->mode(), NULL);
}
void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
Handle<JSModule> instance = module->interface()->Instance();
ASSERT(!instance.is_null());
void FullCodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
EmitDeclaration(decl->proxy(), decl->mode(), decl->fun());
}
// Allocate a module context statically.
Block* block = module->body();
Scope* saved_scope = scope();
scope_ = block->scope();
Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
// Generate code for module creation and linking.
Comment cmnt(masm_, "[ ModuleLiteral");
SetStatementPosition(block);
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* decl) {
EmitDeclaration(decl->proxy(), decl->mode(), NULL);
if (scope_info->HasContext()) {
// Set up module context.
__ Push(scope_info);
__ Push(instance);
__ CallRuntime(Runtime::kPushModuleContext, 2);
StoreToFrameField(
StandardFrameConstants::kContextOffset, context_register());
}
void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* decl) {
EmitDeclaration(decl->proxy(), decl->mode(), NULL);
{
Comment cmnt(masm_, "[ Declarations");
VisitDeclarations(scope_->declarations());
}
void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* decl) {
// TODO(rossberg)
scope_ = saved_scope;
if (scope_info->HasContext()) {
// Pop module context.
LoadContextField(context_register(), Context::PREVIOUS_INDEX);
// Update local stack frame context field.
StoreToFrameField(
StandardFrameConstants::kContextOffset, context_register());
}
// Populate module instance object.
const PropertyAttributes attr =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE | DONT_ENUM);
for (Interface::Iterator it = module->interface()->iterator();
!it.done(); it.Advance()) {
if (it.interface()->IsModule()) {
Handle<Object> value = it.interface()->Instance();
ASSERT(!value.is_null());
JSReceiver::SetProperty(instance, it.name(), value, attr, kStrictMode);
} else {
// TODO(rossberg): set proper getters instead of undefined...
// instance->DefineAccessor(*it.name(), ACCESSOR_GETTER, *getter, attr);
Handle<Object> value(isolate()->heap()->undefined_value());
JSReceiver::SetProperty(instance, it.name(), value, attr, kStrictMode);
}
void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
// TODO(rossberg)
}
USE(instance->PreventExtensions());
}
void FullCodeGenerator::VisitModuleVariable(ModuleVariable* module) {
// TODO(rossberg)
// Nothing to do.
// The instance object is resolved statically through the module's interface.
}
void FullCodeGenerator::VisitModulePath(ModulePath* module) {
// TODO(rossberg)
// Nothing to do.
// The instance object is resolved statically through the module's interface.
}
@ -916,9 +915,9 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
Scope* saved_scope = scope();
// Push a block context when entering a block with block scoped variables.
if (stmt->block_scope() != NULL) {
if (stmt->scope() != NULL) {
{ Comment cmnt(masm_, "[ Extend block context");
scope_ = stmt->block_scope();
scope_ = stmt->scope();
Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
__ Push(scope_info);
@ -945,7 +944,7 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
// Pop block context if necessary.
if (stmt->block_scope() != NULL) {
if (stmt->scope() != NULL) {
LoadContextField(context_register(), Context::PREVIOUS_INDEX);
// Update local stack frame context field.
StoreToFrameField(StandardFrameConstants::kContextOffset,

54
deps/v8/src/full-codegen.h

@ -77,28 +77,25 @@ class FullCodeGenerator: public AstVisitor {
TOS_REG
};
FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info)
FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info,
Zone* zone)
: masm_(masm),
info_(info),
scope_(info->scope()),
nesting_stack_(NULL),
loop_depth_(0),
global_count_(0),
globals_(NULL),
context_(NULL),
bailout_entries_(info->HasDeoptimizationSupport()
? info->function()->ast_node_count() : 0),
stack_checks_(2), // There's always at least one.
? info->function()->ast_node_count() : 0, zone),
stack_checks_(2, zone), // There's always at least one.
type_feedback_cells_(info->HasDeoptimizationSupport()
? info->function()->ast_node_count() : 0),
? info->function()->ast_node_count() : 0, zone),
ic_total_count_(0),
has_self_optimization_header_(false) { }
zone_(zone) { }
static bool MakeCode(CompilationInfo* info);
// Returns the platform-specific size in bytes of the self-optimization
// header.
static int self_optimization_header_size();
// Encode state and pc-offset as a BitField<type, start, size>.
// Only use 30 bits because we encode the result as a smi.
class StateField : public BitField<State, 0, 1> { };
@ -113,6 +110,8 @@ class FullCodeGenerator: public AstVisitor {
return NULL;
}
Zone* zone() const { return zone_; }
private:
class Breakable;
class Iteration;
@ -207,7 +206,7 @@ class FullCodeGenerator: public AstVisitor {
virtual ~NestedBlock() {}
virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
if (statement()->AsBlock()->block_scope() != NULL) {
if (statement()->AsBlock()->scope() != NULL) {
++(*context_length);
}
return previous_;
@ -241,7 +240,7 @@ class FullCodeGenerator: public AstVisitor {
// The finally block of a try/finally statement.
class Finally : public NestedStatement {
public:
static const int kElementCount = 2;
static const int kElementCount = 5;
explicit Finally(FullCodeGenerator* codegen) : NestedStatement(codegen) { }
virtual ~Finally() {}
@ -418,12 +417,9 @@ class FullCodeGenerator: public AstVisitor {
Label* if_true,
Label* if_false);
// Platform-specific code for a variable, constant, or function
// declaration. Functions have an initial value.
// Increments global_count_ for unallocated variables.
void EmitDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* function);
// If enabled, emit debug code for checking that the current context is
// neither a with nor a catch context.
void EmitDebugCheckDeclarationContext(Variable* variable);
// Platform-specific code for checking the stack limit at the back edge of
// a loop.
@ -553,12 +549,8 @@ class FullCodeGenerator: public AstVisitor {
Handle<Script> script() { return info_->script(); }
bool is_eval() { return info_->is_eval(); }
bool is_native() { return info_->is_native(); }
bool is_classic_mode() {
return language_mode() == CLASSIC_MODE;
}
LanguageMode language_mode() {
return function()->language_mode();
}
bool is_classic_mode() { return language_mode() == CLASSIC_MODE; }
LanguageMode language_mode() { return function()->language_mode(); }
FunctionLiteral* function() { return info_->function(); }
Scope* scope() { return scope_; }
@ -790,15 +782,15 @@ class FullCodeGenerator: public AstVisitor {
Label return_label_;
NestedStatement* nesting_stack_;
int loop_depth_;
int global_count_;
ZoneList<Handle<Object> >* globals_;
const ExpressionContext* context_;
ZoneList<BailoutEntry> bailout_entries_;
ZoneList<BailoutEntry> stack_checks_;
ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
int ic_total_count_;
bool has_self_optimization_header_;
Handle<FixedArray> handler_table_;
Handle<JSGlobalPropertyCell> profiling_counter_;
Zone* zone_;
friend class NestedStatement;
@ -809,16 +801,16 @@ class FullCodeGenerator: public AstVisitor {
// A map from property names to getter/setter pairs allocated in the zone.
class AccessorTable: public TemplateHashMap<Literal,
ObjectLiteral::Accessors,
ZoneListAllocationPolicy> {
ZoneAllocationPolicy> {
public:
explicit AccessorTable(Zone* zone) :
TemplateHashMap<Literal,
ObjectLiteral::Accessors,
ZoneListAllocationPolicy>(Literal::Match),
TemplateHashMap<Literal, ObjectLiteral::Accessors,
ZoneAllocationPolicy>(Literal::Match,
ZoneAllocationPolicy(zone)),
zone_(zone) { }
Iterator lookup(Literal* literal) {
Iterator it = find(literal, true);
Iterator it = find(literal, true, ZoneAllocationPolicy(zone_));
if (it->second == NULL) it->second = new(zone_) ObjectLiteral::Accessors();
return it;
}

15
deps/v8/src/func-name-inferrer.cc

@ -34,11 +34,12 @@
namespace v8 {
namespace internal {
FuncNameInferrer::FuncNameInferrer(Isolate* isolate)
FuncNameInferrer::FuncNameInferrer(Isolate* isolate, Zone* zone)
: isolate_(isolate),
entries_stack_(10),
names_stack_(5),
funcs_to_infer_(4) {
entries_stack_(10, zone),
names_stack_(5, zone),
funcs_to_infer_(4, zone),
zone_(zone) {
}
@ -48,21 +49,21 @@ void FuncNameInferrer::PushEnclosingName(Handle<String> name) {
// and starts with a capital letter.
if (name->length() > 0 && Runtime::IsUpperCaseChar(
isolate()->runtime_state(), name->Get(0))) {
names_stack_.Add(Name(name, kEnclosingConstructorName));
names_stack_.Add(Name(name, kEnclosingConstructorName), zone());
}
}
void FuncNameInferrer::PushLiteralName(Handle<String> name) {
if (IsOpen() && !isolate()->heap()->prototype_symbol()->Equals(*name)) {
names_stack_.Add(Name(name, kLiteralName));
names_stack_.Add(Name(name, kLiteralName), zone());
}
}
void FuncNameInferrer::PushVariableName(Handle<String> name) {
if (IsOpen() && !isolate()->heap()->result_symbol()->Equals(*name)) {
names_stack_.Add(Name(name, kVariableName));
names_stack_.Add(Name(name, kVariableName), zone());
}
}

10
deps/v8/src/func-name-inferrer.h

@ -45,7 +45,7 @@ class Isolate;
// a name.
class FuncNameInferrer : public ZoneObject {
public:
explicit FuncNameInferrer(Isolate* isolate);
FuncNameInferrer(Isolate* isolate, Zone* zone);
// Returns whether we have entered name collection state.
bool IsOpen() const { return !entries_stack_.is_empty(); }
@ -55,7 +55,7 @@ class FuncNameInferrer : public ZoneObject {
// Enters name collection state.
void Enter() {
entries_stack_.Add(names_stack_.length());
entries_stack_.Add(names_stack_.length(), zone());
}
// Pushes an encountered name onto names stack when in collection state.
@ -66,7 +66,7 @@ class FuncNameInferrer : public ZoneObject {
// Adds a function to infer name for.
void AddFunction(FunctionLiteral* func_to_infer) {
if (IsOpen()) {
funcs_to_infer_.Add(func_to_infer);
funcs_to_infer_.Add(func_to_infer, zone());
}
}
@ -88,6 +88,8 @@ class FuncNameInferrer : public ZoneObject {
void Leave() {
ASSERT(IsOpen());
names_stack_.Rewind(entries_stack_.RemoveLast());
if (entries_stack_.is_empty())
funcs_to_infer_.Clear();
}
private:
@ -103,6 +105,7 @@ class FuncNameInferrer : public ZoneObject {
};
Isolate* isolate() { return isolate_; }
Zone* zone() const { return zone_; }
// Constructs a full name in dotted notation from gathered names.
Handle<String> MakeNameFromStack();
@ -117,6 +120,7 @@ class FuncNameInferrer : public ZoneObject {
ZoneList<int> entries_stack_;
ZoneList<Name> names_stack_;
ZoneList<FunctionLiteral*> funcs_to_infer_;
Zone* zone_;
DISALLOW_COPY_AND_ASSIGN(FuncNameInferrer);
};
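As in full-codegen above, every ZoneList::Add() in FuncNameInferrer now receives the Zone explicitly instead of relying on an ambient allocator. A minimal standalone sketch of that pattern, with illustrative names rather than V8's Zone/ZoneList (alignment and overflow handling are omitted, and T is assumed trivially copyable):

#include <cstddef>
#include <cstring>

class Arena {
 public:
  Arena() : used_(0) {}
  void* Allocate(size_t bytes) {   // bump allocation; everything freed at once
    void* p = buffer_ + used_;
    used_ += bytes;
    return p;
  }
 private:
  char buffer_[1 << 16];
  size_t used_;
};

template <typename T>
class ArenaList {
 public:
  ArenaList() : data_(NULL), length_(0), capacity_(0) {}
  // The arena is threaded through every call rather than stored globally.
  void Add(const T& value, Arena* arena) {
    if (length_ == capacity_) {
      size_t new_capacity = capacity_ == 0 ? 4 : capacity_ * 2;
      T* new_data = static_cast<T*>(arena->Allocate(new_capacity * sizeof(T)));
      if (data_ != NULL) memcpy(new_data, data_, length_ * sizeof(T));
      data_ = new_data;
      capacity_ = new_capacity;
    }
    data_[length_++] = value;
  }
  size_t length() const { return length_; }
 private:
  T* data_;
  size_t length_;
  size_t capacity_;
};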

6
deps/v8/src/handles.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -729,9 +729,9 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
Handle<DescriptorArray>(object->map()->instance_descriptors(), isolate);
for (int i = 0; i < descs->number_of_descriptors(); i++) {
if (descs->IsProperty(i) && !descs->IsDontEnum(i)) {
if (descs->IsProperty(i) && !descs->GetDetails(i).IsDontEnum()) {
storage->set(index, descs->GetKey(i));
PropertyDetails details(descs->GetDetails(i));
PropertyDetails details = descs->GetDetails(i);
sort_array->set(index, Smi::FromInt(details.index()));
if (!indices.is_null()) {
if (details.type() != FIELD) {

102
deps/v8/src/hashmap.h

@ -40,9 +40,16 @@ class TemplateHashMapImpl {
public:
typedef bool (*MatchFun) (void* key1, void* key2);
// The default capacity. This is used by the call sites which want
// to pass in a non-default AllocationPolicy but want to use the
// default value of capacity specified by the implementation.
static const uint32_t kDefaultHashMapCapacity = 8;
// initial_capacity is the size of the initial hash map;
// it must be a power of 2 (and thus must not be 0).
TemplateHashMapImpl(MatchFun match, uint32_t initial_capacity = 8);
TemplateHashMapImpl(MatchFun match,
uint32_t capacity = kDefaultHashMapCapacity,
AllocationPolicy allocator = AllocationPolicy());
~TemplateHashMapImpl();
@ -60,10 +67,13 @@ class TemplateHashMapImpl {
// but insert is set, a new entry is inserted with
// corresponding key, key hash, and NULL value.
// Otherwise, NULL is returned.
Entry* Lookup(void* key, uint32_t hash, bool insert);
Entry* Lookup(void* key, uint32_t hash, bool insert,
AllocationPolicy allocator = AllocationPolicy());
// Removes the entry with matching key.
void Remove(void* key, uint32_t hash);
// Returns the value of the deleted entry,
// or NULL if there was no entry for the key.
void* Remove(void* key, uint32_t hash);
// Empties the hash map (occupancy() == 0).
void Clear();
@ -95,29 +105,30 @@ class TemplateHashMapImpl {
Entry* map_end() const { return map_ + capacity_; }
Entry* Probe(void* key, uint32_t hash);
void Initialize(uint32_t capacity);
void Resize();
void Initialize(uint32_t capacity, AllocationPolicy allocator);
void Resize(AllocationPolicy allocator);
};
typedef TemplateHashMapImpl<FreeStoreAllocationPolicy> HashMap;
template<class P>
TemplateHashMapImpl<P>::TemplateHashMapImpl(MatchFun match,
uint32_t initial_capacity) {
template<class AllocationPolicy>
TemplateHashMapImpl<AllocationPolicy>::TemplateHashMapImpl(
MatchFun match, uint32_t initial_capacity, AllocationPolicy allocator) {
match_ = match;
Initialize(initial_capacity);
Initialize(initial_capacity, allocator);
}
template<class P>
TemplateHashMapImpl<P>::~TemplateHashMapImpl() {
P::Delete(map_);
template<class AllocationPolicy>
TemplateHashMapImpl<AllocationPolicy>::~TemplateHashMapImpl() {
AllocationPolicy::Delete(map_);
}
template<class P>
typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Lookup(
void* key, uint32_t hash, bool insert) {
template<class AllocationPolicy>
typename TemplateHashMapImpl<AllocationPolicy>::Entry*
TemplateHashMapImpl<AllocationPolicy>::Lookup(
void* key, uint32_t hash, bool insert, AllocationPolicy allocator) {
// Find a matching entry.
Entry* p = Probe(key, hash);
if (p->key != NULL) {
@ -133,7 +144,7 @@ typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Lookup(
// Grow the map if we reached >= 80% occupancy.
if (occupancy_ + occupancy_/4 >= capacity_) {
Resize();
Resize(allocator);
p = Probe(key, hash);
}
@ -145,15 +156,16 @@ typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Lookup(
}
template<class P>
void TemplateHashMapImpl<P>::Remove(void* key, uint32_t hash) {
template<class AllocationPolicy>
void* TemplateHashMapImpl<AllocationPolicy>::Remove(void* key, uint32_t hash) {
// Lookup the entry for the key to remove.
Entry* p = Probe(key, hash);
if (p->key == NULL) {
// Key not found; nothing to remove.
return;
return NULL;
}
void* value = p->value;
// To remove an entry we need to ensure that it does not create an empty
// entry that will cause the search for another entry to stop too soon. If all
// the entries between the entry to remove and the next empty slot have their
@ -202,11 +214,12 @@ void TemplateHashMapImpl<P>::Remove(void* key, uint32_t hash) {
// Clear the entry, which is allowed to be emptied.
p->key = NULL;
occupancy_--;
return value;
}
template<class P>
void TemplateHashMapImpl<P>::Clear() {
template<class AllocationPolicy>
void TemplateHashMapImpl<AllocationPolicy>::Clear() {
// Mark all entries as empty.
const Entry* end = map_end();
for (Entry* p = map_; p < end; p++) {
@ -216,15 +229,16 @@ void TemplateHashMapImpl<P>::Clear() {
}
template<class P>
typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Start() const {
template<class AllocationPolicy>
typename TemplateHashMapImpl<AllocationPolicy>::Entry*
TemplateHashMapImpl<AllocationPolicy>::Start() const {
return Next(map_ - 1);
}
template<class P>
typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Next(Entry* p)
const {
template<class AllocationPolicy>
typename TemplateHashMapImpl<AllocationPolicy>::Entry*
TemplateHashMapImpl<AllocationPolicy>::Next(Entry* p) const {
const Entry* end = map_end();
ASSERT(map_ - 1 <= p && p < end);
for (p++; p < end; p++) {
@ -236,9 +250,9 @@ typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Next(Entry* p)
}
template<class P>
typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Probe(void* key,
uint32_t hash) {
template<class AllocationPolicy>
typename TemplateHashMapImpl<AllocationPolicy>::Entry*
TemplateHashMapImpl<AllocationPolicy>::Probe(void* key, uint32_t hash) {
ASSERT(key != NULL);
ASSERT(IsPowerOf2(capacity_));
@ -258,10 +272,11 @@ typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Probe(void* key,
}
template<class P>
void TemplateHashMapImpl<P>::Initialize(uint32_t capacity) {
template<class AllocationPolicy>
void TemplateHashMapImpl<AllocationPolicy>::Initialize(
uint32_t capacity, AllocationPolicy allocator) {
ASSERT(IsPowerOf2(capacity));
map_ = reinterpret_cast<Entry*>(P::New(capacity * sizeof(Entry)));
map_ = reinterpret_cast<Entry*>(allocator.New(capacity * sizeof(Entry)));
if (map_ == NULL) {
v8::internal::FatalProcessOutOfMemory("HashMap::Initialize");
return;
@ -271,24 +286,24 @@ void TemplateHashMapImpl<P>::Initialize(uint32_t capacity) {
}
template<class P>
void TemplateHashMapImpl<P>::Resize() {
template<class AllocationPolicy>
void TemplateHashMapImpl<AllocationPolicy>::Resize(AllocationPolicy allocator) {
Entry* map = map_;
uint32_t n = occupancy_;
// Allocate larger map.
Initialize(capacity_ * 2);
Initialize(capacity_ * 2, allocator);
// Rehash all current entries.
for (Entry* p = map; n > 0; p++) {
if (p->key != NULL) {
Lookup(p->key, p->hash, true)->value = p->value;
Lookup(p->key, p->hash, true, allocator)->value = p->value;
n--;
}
}
// Delete old map.
P::Delete(map);
AllocationPolicy::Delete(map);
}
@ -325,13 +340,18 @@ class TemplateHashMap: private TemplateHashMapImpl<AllocationPolicy> {
};
TemplateHashMap(
typename TemplateHashMapImpl<AllocationPolicy>::MatchFun match)
: TemplateHashMapImpl<AllocationPolicy>(match) { }
typename TemplateHashMapImpl<AllocationPolicy>::MatchFun match,
AllocationPolicy allocator = AllocationPolicy())
: TemplateHashMapImpl<AllocationPolicy>(
match,
TemplateHashMapImpl<AllocationPolicy>::kDefaultHashMapCapacity,
allocator) { }
Iterator begin() const { return Iterator(this, this->Start()); }
Iterator end() const { return Iterator(this, NULL); }
Iterator find(Key* key, bool insert = false) {
return Iterator(this, this->Lookup(key, key->Hash(), insert));
Iterator find(Key* key, bool insert = false,
AllocationPolicy allocator = AllocationPolicy()) {
return Iterator(this, this->Lookup(key, key->Hash(), insert, allocator));
}
};
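Lookup() grows the table once it is roughly 80% full; the integer test occupancy_ + occupancy_/4 >= capacity_ avoids floating point. A quick standalone check of that arithmetic:

#include <stdint.h>
#include <cassert>

static bool ShouldResize(uint32_t occupancy, uint32_t capacity) {
  return occupancy + occupancy / 4 >= capacity;  // ~ occupancy >= 0.8 * capacity
}

void Demo() {
  assert(!ShouldResize(6, 8));  // 75% full: keep probing in place
  assert(ShouldResize(7, 8));   // 87.5% full: grow (to 16) before inserting
}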

27
deps/v8/src/heap-inl.h

@ -460,15 +460,16 @@ MaybeObject* Heap::PrepareForCompare(String* str) {
}
int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
intptr_t change_in_bytes) {
ASSERT(HasBeenSetUp());
int amount = amount_of_external_allocated_memory_ + change_in_bytes;
intptr_t amount = amount_of_external_allocated_memory_ + change_in_bytes;
if (change_in_bytes >= 0) {
// Avoid overflow.
if (amount > amount_of_external_allocated_memory_) {
amount_of_external_allocated_memory_ = amount;
}
int amount_since_last_global_gc =
intptr_t amount_since_last_global_gc =
amount_of_external_allocated_memory_ -
amount_of_external_allocated_memory_at_last_global_gc_;
if (amount_since_last_global_gc > external_allocation_limit_) {
@ -594,12 +595,24 @@ void ExternalStringTable::Iterate(ObjectVisitor* v) {
void ExternalStringTable::Verify() {
#ifdef DEBUG
for (int i = 0; i < new_space_strings_.length(); ++i) {
ASSERT(heap_->InNewSpace(new_space_strings_[i]));
ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
Object* obj = Object::cast(new_space_strings_[i]);
// TODO(yangguo): check that the object is indeed an external string.
ASSERT(heap_->InNewSpace(obj));
ASSERT(obj != HEAP->raw_unchecked_the_hole_value());
if (obj->IsExternalAsciiString()) {
ExternalAsciiString* string = ExternalAsciiString::cast(obj);
ASSERT(String::IsAscii(string->GetChars(), string->length()));
}
}
for (int i = 0; i < old_space_strings_.length(); ++i) {
ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
Object* obj = Object::cast(old_space_strings_[i]);
// TODO(yangguo): check that the object is indeed an external string.
ASSERT(!heap_->InNewSpace(obj));
ASSERT(obj != HEAP->raw_unchecked_the_hole_value());
if (obj->IsExternalAsciiString()) {
ExternalAsciiString* string = ExternalAsciiString::cast(obj);
ASSERT(String::IsAscii(string->GetChars(), string->length()));
}
}
#endif
}
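AdjustAmountOfExternalAllocatedMemory() now takes and returns intptr_t instead of int, so embedders that report more external memory than a 32-bit int can represent no longer overflow the running total on 64-bit builds. A small sketch of the arithmetic (not V8 code):

#include <stdint.h>
#include <stdio.h>

int main() {
  int64_t total = 0;
  const int64_t chunk = 512LL * 1024 * 1024;   // embedder reports 512 MB per call
  for (int i = 0; i < 5; ++i) total += chunk;  // 2.5 GB accumulated
  printf("total bytes: %lld\n", (long long)total);
  printf("fits in a 32-bit int? %s\n", total <= 2147483647LL ? "yes" : "no");
  return 0;
}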

50
deps/v8/src/heap-profiler.cc

@ -33,7 +33,6 @@
namespace v8 {
namespace internal {
HeapProfiler::HeapProfiler()
: snapshots_(new HeapSnapshotsCollection()),
next_snapshot_uid_(1) {
@ -86,6 +85,24 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(String* name,
}
void HeapProfiler::StartHeapObjectsTracking() {
ASSERT(Isolate::Current()->heap_profiler() != NULL);
Isolate::Current()->heap_profiler()->StartHeapObjectsTrackingImpl();
}
void HeapProfiler::StopHeapObjectsTracking() {
ASSERT(Isolate::Current()->heap_profiler() != NULL);
Isolate::Current()->heap_profiler()->StopHeapObjectsTrackingImpl();
}
SnapshotObjectId HeapProfiler::PushHeapObjectsStats(v8::OutputStream* stream) {
ASSERT(Isolate::Current()->heap_profiler() != NULL);
return Isolate::Current()->heap_profiler()->PushHeapObjectsStatsImpl(stream);
}
void HeapProfiler::DefineWrapperClass(
uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback) {
ASSERT(class_id != v8::HeapProfiler::kPersistentHandleNoClassId);
@ -136,6 +153,28 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name,
return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control);
}
void HeapProfiler::StartHeapObjectsTrackingImpl() {
snapshots_->StartHeapObjectsTracking();
}
SnapshotObjectId HeapProfiler::PushHeapObjectsStatsImpl(OutputStream* stream) {
return snapshots_->PushHeapObjectsStats(stream);
}
void HeapProfiler::StopHeapObjectsTrackingImpl() {
snapshots_->StopHeapObjectsTracking();
}
size_t HeapProfiler::GetMemorySizeUsedByProfiler() {
HeapProfiler* profiler = Isolate::Current()->heap_profiler();
ASSERT(profiler != NULL);
size_t size = profiler->snapshots_->GetUsedMemorySize();
return size;
}
int HeapProfiler::GetSnapshotsCount() {
HeapProfiler* profiler = Isolate::Current()->heap_profiler();
@ -158,6 +197,15 @@ HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
}
SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
if (!obj->IsHeapObject())
return v8::HeapProfiler::kUnknownObjectId;
HeapProfiler* profiler = Isolate::Current()->heap_profiler();
ASSERT(profiler != NULL);
return profiler->snapshots_->FindObjectId(HeapObject::cast(*obj)->address());
}
void HeapProfiler::DeleteAllSnapshots() {
HeapProfiler* profiler = Isolate::Current()->heap_profiler();
ASSERT(profiler != NULL);

13
deps/v8/src/heap-profiler.h

@ -44,22 +44,27 @@ class HeapSnapshotsCollection;
} \
} while (false)
// The HeapProfiler writes data to the log files, which can be postprocessed
// to generate .hp files for use by the GHC/Valgrind tool hp2ps.
class HeapProfiler {
public:
static void SetUp();
static void TearDown();
static size_t GetMemorySizeUsedByProfiler();
static HeapSnapshot* TakeSnapshot(const char* name,
int type,
v8::ActivityControl* control);
static HeapSnapshot* TakeSnapshot(String* name,
int type,
v8::ActivityControl* control);
static void StartHeapObjectsTracking();
static void StopHeapObjectsTracking();
static SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
static int GetSnapshotsCount();
static HeapSnapshot* GetSnapshot(int index);
static HeapSnapshot* FindSnapshot(unsigned uid);
static SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
static void DeleteAllSnapshots();
void ObjectMoveEvent(Address from, Address to);
@ -84,6 +89,10 @@ class HeapProfiler {
v8::ActivityControl* control);
void ResetSnapshots();
void StartHeapObjectsTrackingImpl();
void StopHeapObjectsTrackingImpl();
SnapshotObjectId PushHeapObjectsStatsImpl(OutputStream* stream);
HeapSnapshotsCollection* snapshots_;
unsigned next_snapshot_uid_;
List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;

317
deps/v8/src/heap.cc

@ -42,6 +42,7 @@
#include "natives.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "once.h"
#include "runtime-profiler.h"
#include "scopeinfo.h"
#include "snapshot.h"
@ -60,8 +61,6 @@
namespace v8 {
namespace internal {
static LazyMutex gc_initializer_mutex = LAZY_MUTEX_INITIALIZER;
Heap::Heap()
: isolate_(NULL),
@ -177,6 +176,9 @@ Heap::Heap()
global_contexts_list_ = NULL;
mark_compact_collector_.heap_ = this;
external_string_table_.heap_ = this;
// Put a dummy entry in the remembered pages so we can find the list in the
// minidump even if there are no real unmapped pages.
RememberUnmappedPage(NULL, false);
}
@ -244,12 +246,17 @@ int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
const char** reason) {
// Is global GC requested?
if (space != NEW_SPACE || FLAG_gc_global) {
if (space != NEW_SPACE) {
isolate_->counters()->gc_compactor_caused_by_request()->Increment();
*reason = "GC in old space requested";
return MARK_COMPACTOR;
}
if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
*reason = "GC in old space forced by flags";
return MARK_COMPACTOR;
}
// Is enough data promoted to justify a global GC?
if (OldGenerationPromotionLimitReached()) {
isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
@ -806,7 +813,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
UpdateSurvivalRateTrend(start_new_space_size);
size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
if (high_survival_rate_during_scavenges &&
IsStableOrIncreasingSurvivalTrend()) {
@ -1130,6 +1137,27 @@ void PromotionQueue::RelocateQueueHead() {
}
class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
public:
explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
virtual Object* RetainAs(Object* object) {
if (!heap_->InFromSpace(object)) {
return object;
}
MapWord map_word = HeapObject::cast(object)->map_word();
if (map_word.IsForwardingAddress()) {
return map_word.ToForwardingAddress();
}
return NULL;
}
private:
Heap* heap_;
};
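ScavengeWeakObjectRetainer lets ProcessWeakReferences prune weak lists during a scavenge: each entry is either replaced by its forwarding address or dropped. A generic sketch of the retainer pattern over a singly linked list (illustrative names, not V8's types):

#include <cstdio>

struct Node {
  int value;
  Node* next;
};

class WeakRetainer {
 public:
  // Returns the node to keep (possibly a relocated copy) or nullptr to drop.
  virtual Node* RetainAs(Node* node) = 0;
  virtual ~WeakRetainer() = default;
};

// Keep only even values, standing in for "drop anything that did not survive".
class KeepEven : public WeakRetainer {
 public:
  Node* RetainAs(Node* node) override {
    return (node->value % 2 == 0) ? node : nullptr;
  }
};

static Node* ProcessWeakList(Node* head, WeakRetainer* retainer) {
  Node* new_head = nullptr;
  Node* tail = nullptr;
  for (Node* n = head; n != nullptr; n = n->next) {
    Node* kept = retainer->RetainAs(n);
    if (kept == nullptr) continue;  // dead entry, unlink it
    if (tail == nullptr) {
      new_head = kept;
    } else {
      tail->next = kept;  // analogous to tail->set_next_function_link(retain)
    }
    tail = kept;
  }
  if (tail != nullptr) tail->next = nullptr;
  return new_head;
}

int main() {
  Node c{3, nullptr}, b{2, &c}, a{1, &b};
  KeepEven keep;
  for (Node* n = ProcessWeakList(&a, &keep); n != nullptr; n = n->next) {
    std::printf("%d ", n->value);
  }
  std::printf("\n");
  return 0;
}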
void Heap::Scavenge() {
#ifdef DEBUG
if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
@ -1228,6 +1256,9 @@ void Heap::Scavenge() {
}
incremental_marking()->UpdateMarkingDequeAfterScavenge();
ScavengeWeakObjectRetainer weak_object_retainer(this);
ProcessWeakReferences(&weak_object_retainer);
ASSERT(new_space_front == new_space_.top());
// Set age mark.
@ -1314,7 +1345,8 @@ void Heap::UpdateReferencesInExternalStringTable(
static Object* ProcessFunctionWeakReferences(Heap* heap,
Object* function,
WeakObjectRetainer* retainer) {
WeakObjectRetainer* retainer,
bool record_slots) {
Object* undefined = heap->undefined_value();
Object* head = undefined;
JSFunction* tail = NULL;
@ -1331,6 +1363,12 @@ static Object* ProcessFunctionWeakReferences(Heap* heap,
// Subsequent elements in the list.
ASSERT(tail != NULL);
tail->set_next_function_link(retain);
if (record_slots) {
Object** next_function =
HeapObject::RawField(tail, JSFunction::kNextFunctionLinkOffset);
heap->mark_compact_collector()->RecordSlot(
next_function, next_function, retain);
}
}
// Retained function is new tail.
candidate_function = reinterpret_cast<JSFunction*>(retain);
@ -1359,6 +1397,15 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
Object* head = undefined;
Context* tail = NULL;
Object* candidate = global_contexts_list_;
// We don't record weak slots during marking or scavenges.
// Instead we do it once when we complete the mark-compact cycle.
// Note that the write barrier has no effect if we are already in the middle
// of a compacting mark-sweep cycle, so we have to record slots manually.
bool record_slots =
gc_state() == MARK_COMPACT &&
mark_compact_collector()->is_compacting();
while (candidate != undefined) {
// Check whether to keep the candidate in the list.
Context* candidate_context = reinterpret_cast<Context*>(candidate);
@ -1374,6 +1421,14 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
Context::NEXT_CONTEXT_LINK,
retain,
UPDATE_WRITE_BARRIER);
if (record_slots) {
Object** next_context =
HeapObject::RawField(
tail, FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK));
mark_compact_collector()->RecordSlot(
next_context, next_context, retain);
}
}
// Retained context is new tail.
candidate_context = reinterpret_cast<Context*>(retain);
@ -1386,11 +1441,19 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
ProcessFunctionWeakReferences(
this,
candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
retainer);
retainer,
record_slots);
candidate_context->set_unchecked(this,
Context::OPTIMIZED_FUNCTIONS_LIST,
function_list_head,
UPDATE_WRITE_BARRIER);
if (record_slots) {
Object** optimized_functions =
HeapObject::RawField(
tail, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
mark_compact_collector()->RecordSlot(
optimized_functions, optimized_functions, function_list_head);
}
}
// Move to next element in the list.
@ -1490,6 +1553,27 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
}
STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
HeapObject* object,
int size));
static HeapObject* EnsureDoubleAligned(Heap* heap,
HeapObject* object,
int size) {
if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
heap->CreateFillerObjectAt(object->address(), kPointerSize);
return HeapObject::FromAddress(object->address() + kPointerSize);
} else {
heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
kPointerSize);
return object;
}
}
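The new EnsureDoubleAligned helper over-allocates by one pointer and turns the extra word into a filler either before or after the object, so unboxed doubles land on an 8-byte boundary on 32-bit targets. A self-contained sketch of the same padding trick on an emulated 32-bit bump allocator (hypothetical names, not V8's allocator):

#include <cassert>
#include <cstdint>
#include <cstdio>

constexpr std::uintptr_t kTaggedSize = 4;  // pointer size on a 32-bit heap
constexpr std::uintptr_t kDoubleAlignmentMask = 8 - 1;

struct BumpAllocator {
  alignas(8) unsigned char buffer[256];
  std::size_t top = 0;

  // All requests are multiples of kTaggedSize, as on a real 32-bit heap.
  unsigned char* AllocateRaw(std::size_t size) {
    assert(size % kTaggedSize == 0);
    unsigned char* result = buffer + top;
    top += size;
    return result;
  }

  // Over-allocate by one word; if the raw address is only 4-byte aligned the
  // leading word becomes the filler, otherwise the trailing word does.
  unsigned char* AllocateDoubleAligned(std::size_t size) {
    unsigned char* raw = AllocateRaw(size + kTaggedSize);
    if ((reinterpret_cast<std::uintptr_t>(raw) & kDoubleAlignmentMask) != 0) {
      return raw + kTaggedSize;  // skip the misaligned leading word
    }
    return raw;                  // the unused word trails the object instead
  }
};

int main() {
  BumpAllocator heap;
  for (std::size_t size : {12u, 20u, 16u, 24u}) {
    unsigned char* p = heap.AllocateDoubleAligned(size);
    assert((reinterpret_cast<std::uintptr_t>(p) & kDoubleAlignmentMask) == 0);
    std::printf("object of %zu bytes at offset %td\n", size, p - heap.buffer);
  }
  return 0;
}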
enum LoggingAndProfiling {
LOGGING_AND_PROFILING_ENABLED,
LOGGING_AND_PROFILING_DISABLED
@ -1613,7 +1697,10 @@ class ScavengingVisitor : public StaticVisitorBase {
}
}
template<ObjectContents object_contents, SizeRestriction size_restriction>
template<ObjectContents object_contents,
SizeRestriction size_restriction,
int alignment>
static inline void EvacuateObject(Map* map,
HeapObject** slot,
HeapObject* object,
@ -1622,19 +1709,26 @@ class ScavengingVisitor : public StaticVisitorBase {
(object_size <= Page::kMaxNonCodeHeapObjectSize));
SLOW_ASSERT(object->Size() == object_size);
int allocation_size = object_size;
if (alignment != kObjectAlignment) {
ASSERT(alignment == kDoubleAlignment);
allocation_size += kPointerSize;
}
Heap* heap = map->GetHeap();
if (heap->ShouldBePromoted(object->address(), object_size)) {
MaybeObject* maybe_result;
if ((size_restriction != SMALL) &&
(object_size > Page::kMaxNonCodeHeapObjectSize)) {
maybe_result = heap->lo_space()->AllocateRaw(object_size,
(allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
NOT_EXECUTABLE);
} else {
if (object_contents == DATA_OBJECT) {
maybe_result = heap->old_data_space()->AllocateRaw(object_size);
maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
} else {
maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
maybe_result =
heap->old_pointer_space()->AllocateRaw(allocation_size);
}
}
@ -1642,6 +1736,10 @@ class ScavengingVisitor : public StaticVisitorBase {
if (maybe_result->ToObject(&result)) {
HeapObject* target = HeapObject::cast(result);
if (alignment != kObjectAlignment) {
target = EnsureDoubleAligned(heap, target, allocation_size);
}
// Order is important: slot might be inside of the target if target
// was allocated over a dead object and slot comes from the store
// buffer.
@ -1649,18 +1747,27 @@ class ScavengingVisitor : public StaticVisitorBase {
MigrateObject(heap, object, target, object_size);
if (object_contents == POINTER_OBJECT) {
if (map->instance_type() == JS_FUNCTION_TYPE) {
heap->promotion_queue()->insert(
target, JSFunction::kNonWeakFieldsEndOffset);
} else {
heap->promotion_queue()->insert(target, object_size);
}
}
heap->tracer()->increment_promoted_objects_size(object_size);
return;
}
}
MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
Object* result = allocation->ToObjectUnchecked();
HeapObject* target = HeapObject::cast(result);
if (alignment != kObjectAlignment) {
target = EnsureDoubleAligned(heap, target, allocation_size);
}
// Order is important: slot might be inside of the target if target
// was allocated over a dead object and slot comes from the store
// buffer.
@ -1696,7 +1803,7 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject** slot,
HeapObject* object) {
int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
slot,
object,
object_size);
@ -1708,7 +1815,8 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject* object) {
int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
int object_size = FixedDoubleArray::SizeFor(length);
EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map,
EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
map,
slot,
object,
object_size);
@ -1719,7 +1827,8 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject** slot,
HeapObject* object) {
int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
map, slot, object, object_size);
}
@ -1728,7 +1837,8 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject* object) {
int object_size = SeqAsciiString::cast(object)->
SeqAsciiStringSize(map->instance_type());
EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
map, slot, object, object_size);
}
@ -1737,7 +1847,8 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject* object) {
int object_size = SeqTwoByteString::cast(object)->
SeqTwoByteStringSize(map->instance_type());
EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
map, slot, object, object_size);
}
@ -1780,7 +1891,8 @@ class ScavengingVisitor : public StaticVisitorBase {
}
int object_size = ConsString::kSize;
EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
map, slot, object, object_size);
}
template<ObjectContents object_contents>
@ -1790,14 +1902,16 @@ class ScavengingVisitor : public StaticVisitorBase {
static inline void VisitSpecialized(Map* map,
HeapObject** slot,
HeapObject* object) {
EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
EvacuateObject<object_contents, SMALL, kObjectAlignment>(
map, slot, object, object_size);
}
static inline void Visit(Map* map,
HeapObject** slot,
HeapObject* object) {
int object_size = map->instance_size();
EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
EvacuateObject<object_contents, SMALL, kObjectAlignment>(
map, slot, object, object_size);
}
};
@ -1914,7 +2028,7 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
map->set_pre_allocated_property_fields(0);
map->init_instance_descriptors();
map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
map->set_prototype_transitions(empty_fixed_array(), SKIP_WRITE_BARRIER);
map->init_prototype_transitions(undefined_value());
map->set_unused_property_fields(0);
map->set_bit_field(0);
map->set_bit_field2(1 << Map::kIsExtensible);
@ -2053,15 +2167,15 @@ bool Heap::CreateInitialMaps() {
// Fix the instance_descriptors for the existing maps.
meta_map()->init_instance_descriptors();
meta_map()->set_code_cache(empty_fixed_array());
meta_map()->set_prototype_transitions(empty_fixed_array());
meta_map()->init_prototype_transitions(undefined_value());
fixed_array_map()->init_instance_descriptors();
fixed_array_map()->set_code_cache(empty_fixed_array());
fixed_array_map()->set_prototype_transitions(empty_fixed_array());
fixed_array_map()->init_prototype_transitions(undefined_value());
oddball_map()->init_instance_descriptors();
oddball_map()->set_code_cache(empty_fixed_array());
oddball_map()->set_prototype_transitions(empty_fixed_array());
oddball_map()->init_prototype_transitions(undefined_value());
// Fix prototype object for existing maps.
meta_map()->set_prototype(null_value());
@ -2360,7 +2474,7 @@ bool Heap::CreateApiObjects() {
// bottleneck to trap the Smi-only -> fast elements transition, and there
// appears to be no benefit in optimizing this case.
Map* new_neander_map = Map::cast(obj);
new_neander_map->set_elements_kind(FAST_ELEMENTS);
new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
set_neander_map(new_neander_map);
{ MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
@ -2908,8 +3022,8 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
share->set_ast_node_count(0);
share->set_deopt_counter(FLAG_deopt_every_n_times);
share->set_ic_age(0);
share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
share->set_counters(0);
// Set integer fields (smi or int, depending on the architecture).
share->set_length(0);
@ -2941,6 +3055,7 @@ MaybeObject* Heap::AllocateJSMessageObject(String* type,
}
JSMessageObject* message = JSMessageObject::cast(result);
message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
message->initialize_elements();
message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
message->set_type(type);
message->set_arguments(arguments);
@ -3217,6 +3332,8 @@ MaybeObject* Heap::AllocateExternalStringFromAscii(
return Failure::OutOfMemoryException();
}
ASSERT(String::IsAscii(resource->data(), static_cast<int>(length)));
Map* map = external_ascii_string_map();
Object* result;
{ MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
@ -3554,7 +3671,8 @@ MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
Map* new_map;
ASSERT(object_function->has_initial_map());
{ MaybeObject* maybe_map =
object_function->initial_map()->CopyDropTransitions();
object_function->initial_map()->CopyDropTransitions(
DescriptorArray::MAY_BE_SHARED);
if (!maybe_map->To<Map>(&new_map)) return maybe_map;
}
Object* prototype;
@ -3642,7 +3760,7 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
// Check the state of the object
ASSERT(JSObject::cast(result)->HasFastProperties());
ASSERT(JSObject::cast(result)->HasFastElements());
ASSERT(JSObject::cast(result)->HasFastObjectElements());
return result;
}
@ -3687,7 +3805,7 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
map->set_inobject_properties(in_object_properties);
map->set_unused_property_fields(in_object_properties);
map->set_prototype(prototype);
ASSERT(map->has_fast_elements());
ASSERT(map->has_fast_object_elements());
// If the function has only simple this property assignments add
// field descriptors for these to the initial map as the object
@ -3702,7 +3820,8 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
fun->shared()->ForbidInlineConstructor();
} else {
DescriptorArray* descriptors;
{ MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
{ MaybeObject* maybe_descriptors_obj =
DescriptorArray::Allocate(count, DescriptorArray::MAY_BE_SHARED);
if (!maybe_descriptors_obj->To<DescriptorArray>(&descriptors)) {
return maybe_descriptors_obj;
}
@ -3804,8 +3923,7 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
InitializeJSObjectFromMap(JSObject::cast(obj),
FixedArray::cast(properties),
map);
ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() ||
JSObject::cast(obj)->HasFastElements());
ASSERT(JSObject::cast(obj)->HasFastSmiOrObjectElements());
return obj;
}
@ -3833,6 +3951,16 @@ MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
}
MaybeObject* Heap::AllocateJSModule() {
// Allocate a fresh map. Modules do not have a prototype.
Map* map;
MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
if (!maybe_map->To(&map)) return maybe_map;
// Allocate the object based on the map.
return AllocateJSObjectFromMap(map, TENURED);
}
MaybeObject* Heap::AllocateJSArrayAndStorage(
ElementsKind elements_kind,
int length,
@ -3840,6 +3968,9 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
ArrayStorageAllocationMode mode,
PretenureFlag pretenure) {
ASSERT(capacity >= length);
if (length != 0 && mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE) {
elements_kind = GetHoleyElementsKind(elements_kind);
}
MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
JSArray* array;
if (!maybe_array->To(&array)) return maybe_array;
@ -3860,8 +3991,7 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
}
} else {
ASSERT(elements_kind == FAST_ELEMENTS ||
elements_kind == FAST_SMI_ONLY_ELEMENTS);
ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
maybe_elms = AllocateUninitializedFixedArray(capacity);
} else {
@ -3887,6 +4017,7 @@ MaybeObject* Heap::AllocateJSArrayWithElements(
array->set_elements(elements);
array->set_length(Smi::FromInt(elements->length()));
array->ValidateElements();
return array;
}
@ -3969,7 +4100,7 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
// Fill these accessors into the dictionary.
DescriptorArray* descs = map->instance_descriptors();
for (int i = 0; i < descs->number_of_descriptors(); i++) {
PropertyDetails details(descs->GetDetails(i));
PropertyDetails details = descs->GetDetails(i);
ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
PropertyDetails d =
PropertyDetails(details.attributes(), CALLBACKS, details.index());
@ -4371,6 +4502,16 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
String::cast(result)->set_length(length);
String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
#ifdef DEBUG
if (FLAG_verify_heap) {
// Initialize string's content to ensure ASCII-ness (character range 0-127)
// as required when verifying the heap.
char* dest = SeqAsciiString::cast(result)->GetChars();
memset(dest, 0x0F, length * kCharSize);
}
#endif // DEBUG
return result;
}
@ -4417,13 +4558,13 @@ MaybeObject* Heap::AllocateJSArray(
Context* global_context = isolate()->context()->global_context();
JSFunction* array_function = global_context->array_function();
Map* map = array_function->initial_map();
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
map = Map::cast(global_context->double_js_array_map());
} else if (elements_kind == FAST_ELEMENTS || !FLAG_smi_only_arrays) {
map = Map::cast(global_context->object_js_array_map());
} else {
ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
ASSERT(map == global_context->smi_js_array_map());
Object* maybe_map_array = global_context->js_array_maps();
if (!maybe_map_array->IsUndefined()) {
Object* maybe_transitioned_map =
FixedArray::cast(maybe_map_array)->get(elements_kind);
if (!maybe_transitioned_map->IsUndefined()) {
map = Map::cast(maybe_transitioned_map);
}
}
return AllocateJSObjectFromMap(map, pretenure);
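This hunk replaces the dedicated smi/double/object JSArray map fields with a per-context table (js_array_maps) indexed by ElementsKind, falling back to the initial map while the table or a slot is still undefined. A sketch of that lookup-with-fallback shape (illustrative types, not V8's):

#include <cstdio>

enum ElementsKind { FAST_SMI_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS,
                    kElementsKindCount };

struct Map { const char* name; };

struct Context {
  Map initial_array_map{"initial"};
  // Mirrors global_context->js_array_maps(): nullptr plays the role of the
  // undefined sentinel, both for the whole table and for individual slots.
  const Map* const* js_array_maps = nullptr;
};

const Map* SelectArrayMap(const Context& context, ElementsKind kind) {
  const Map* map = &context.initial_array_map;
  if (context.js_array_maps != nullptr) {
    const Map* transitioned = context.js_array_maps[kind];
    if (transitioned != nullptr) map = transitioned;  // use the per-kind map
  }
  return map;
}

int main() {
  Context context;
  Map smi_map{"smi"}, double_map{"double"};
  const Map* table[kElementsKindCount] = {&smi_map, nullptr, &double_map};

  std::printf("%s\n", SelectArrayMap(context, FAST_DOUBLE_ELEMENTS)->name);
  context.js_array_maps = table;  // installed lazily, as in the diff
  std::printf("%s\n", SelectArrayMap(context, FAST_DOUBLE_ELEMENTS)->name);
  std::printf("%s\n", SelectArrayMap(context, FAST_ELEMENTS)->name);
  return 0;
}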
@ -4662,6 +4803,11 @@ MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
AllocationSpace space =
(pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
int size = FixedDoubleArray::SizeFor(length);
#ifndef V8_HOST_ARCH_64_BIT
size += kPointerSize;
#endif
if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
// Too big for new space.
space = LO_SPACE;
@ -4674,7 +4820,12 @@ MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
AllocationSpace retry_space =
(size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
return AllocateRaw(size, space, retry_space);
HeapObject* object;
{ MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
}
return EnsureDoubleAligned(this, object, size);
}
@ -4698,15 +4849,29 @@ MaybeObject* Heap::AllocateGlobalContext() {
}
Context* context = reinterpret_cast<Context*>(result);
context->set_map_no_write_barrier(global_context_map());
context->set_smi_js_array_map(undefined_value());
context->set_double_js_array_map(undefined_value());
context->set_object_js_array_map(undefined_value());
context->set_js_array_maps(undefined_value());
ASSERT(context->IsGlobalContext());
ASSERT(result->IsContext());
return result;
}
MaybeObject* Heap::AllocateModuleContext(Context* previous,
ScopeInfo* scope_info) {
Object* result;
{ MaybeObject* maybe_result =
AllocateFixedArrayWithHoles(scope_info->ContextLength(), TENURED);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
context->set_map_no_write_barrier(module_context_map());
context->set_previous(previous);
context->set_extension(scope_info);
context->set_global(previous->global());
return context;
}
MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
Object* result;
@ -4849,8 +5014,10 @@ void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
bool Heap::IdleNotification(int hint) {
const int kMaxHint = 1000;
intptr_t size_factor = Min(Max(hint, 30), kMaxHint) / 10;
// The size factor is in range [3..100].
intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
// The size factor is in range [5..250]. The numbers here are chosen from
// experiments. If you change them, make sure to test with
// chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
if (contexts_disposed_ > 0) {
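Quick arithmetic check of the retuned idle step (constants copied from the hunk above, plain sketch): hints are now clamped to [20, 1000] and divided by 4, giving a size factor in [5, 250] instead of the previous [3, 100].

#include <algorithm>
#include <cstdio>

int main() {
  const int kMaxHint = 1000;
  for (int hint : {0, 20, 100, 1000, 5000}) {
    int size_factor = std::min(std::max(hint, 20), kMaxHint) / 4;
    std::printf("hint=%4d -> size_factor=%3d\n", hint, size_factor);
  }
  return 0;
}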
@ -4874,11 +5041,14 @@ bool Heap::IdleNotification(int hint) {
// Take into account that we might have decided to delay full collection
// because incremental marking is in progress.
ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
// After context disposal there is likely a lot of garbage remaining, reset
// the idle notification counters in order to trigger more incremental GCs
// on subsequent idle notifications.
StartIdleRound();
return false;
}
if (hint >= kMaxHint || !FLAG_incremental_marking ||
FLAG_expose_gc || Serializer::enabled()) {
if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
return IdleGlobalGC();
}
@ -4917,10 +5087,6 @@ bool Heap::IdleNotification(int hint) {
}
if (incremental_marking()->IsStopped()) {
if (!WorthStartingGCWhenIdle()) {
FinishIdleRound();
return true;
}
incremental_marking()->Start();
}
@ -5558,6 +5724,11 @@ bool Heap::ConfigureHeap(int max_semispace_size,
intptr_t max_executable_size) {
if (HasBeenSetUp()) return false;
if (FLAG_stress_compaction) {
// This will cause more frequent GCs when stressing.
max_semispace_size_ = Page::kPageSize;
}
if (max_semispace_size > 0) {
if (max_semispace_size < Page::kPageSize) {
max_semispace_size = Page::kPageSize;
@ -5662,16 +5833,6 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
}
intptr_t Heap::PromotedSpaceSize() {
return old_pointer_space_->Size()
+ old_data_space_->Size()
+ code_space_->Size()
+ map_space_->Size()
+ cell_space_->Size()
+ lo_space_->Size();
}
intptr_t Heap::PromotedSpaceSizeOfObjects() {
return old_pointer_space_->SizeOfObjects()
+ old_data_space_->SizeOfObjects()
@ -5682,7 +5843,7 @@ intptr_t Heap::PromotedSpaceSizeOfObjects() {
}
int Heap::PromotedExternalMemorySize() {
intptr_t Heap::PromotedExternalMemorySize() {
if (amount_of_external_allocated_memory_
<= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
return amount_of_external_allocated_memory_
@ -5855,6 +6016,15 @@ class HeapDebugUtils {
#endif
V8_DECLARE_ONCE(initialize_gc_once);
static void InitializeGCOnce() {
InitializeScavengingVisitorsTables();
NewSpaceScavenger::Initialize();
MarkCompactCollector::Initialize();
}
bool Heap::SetUp(bool create_heap_objects) {
#ifdef DEBUG
allocation_timeout_ = FLAG_gc_interval;
@ -5873,15 +6043,7 @@ bool Heap::SetUp(bool create_heap_objects) {
if (!ConfigureHeapDefault()) return false;
}
gc_initializer_mutex.Pointer()->Lock();
static bool initialized_gc = false;
if (!initialized_gc) {
initialized_gc = true;
InitializeScavengingVisitorsTables();
NewSpaceScavenger::Initialize();
MarkCompactCollector::Initialize();
}
gc_initializer_mutex.Pointer()->Unlock();
CallOnce(&initialize_gc_once, &InitializeGCOnce);
MarkMapPointersAsEncoded(false);
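The hunk above swaps the LazyMutex plus static-bool dance for V8_DECLARE_ONCE/CallOnce. The same once-only initialization pattern, sketched with the C++ standard library rather than V8's portable primitive (illustration only):

#include <mutex>
#include <cstdio>

static std::once_flag initialize_gc_once;

static void InitializeGCOnce() {
  // One-time global setup: visitor tables, scavenger, collector, etc.
  std::printf("global GC tables initialized\n");
}

static bool SetUpHeap() {
  // Every heap calls this, but InitializeGCOnce runs exactly once even if
  // SetUpHeap races on multiple threads.
  std::call_once(initialize_gc_once, InitializeGCOnce);
  return true;
}

int main() {
  SetUpHeap();
  SetUpHeap();  // second call is a no-op; no mutex or static bool needed
  return 0;
}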
@ -5993,6 +6155,11 @@ void Heap::SetStackLimits() {
void Heap::TearDown() {
#ifdef DEBUG
if (FLAG_verify_heap) {
Verify();
}
#endif
if (FLAG_print_cumulative_gc_stat) {
PrintF("\n\n");
PrintF("gc_count=%d ", gc_count_);

59
deps/v8/src/heap.h

@ -243,7 +243,8 @@ namespace internal {
V(compare_ic_symbol, ".compare_ic") \
V(infinity_symbol, "Infinity") \
V(minus_infinity_symbol, "-Infinity") \
V(hidden_stack_trace_symbol, "v8::hidden_stack_trace")
V(hidden_stack_trace_symbol, "v8::hidden_stack_trace") \
V(query_colon_symbol, "(?:)")
// Forward declarations.
class GCTracer;
@ -529,6 +530,8 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateJSObject(
JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT MaybeObject* AllocateJSModule();
// Allocate a JSArray with no elements
MUST_USE_RESULT MaybeObject* AllocateEmptyJSArray(
ElementsKind elements_kind,
@ -618,7 +621,7 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateMap(
InstanceType instance_type,
int instance_size,
ElementsKind elements_kind = FAST_ELEMENTS);
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
// Allocates a partial map for bootstrapping.
MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
@ -820,6 +823,10 @@ class Heap {
// Allocate a global (but otherwise uninitialized) context.
MUST_USE_RESULT MaybeObject* AllocateGlobalContext();
// Allocate a module context.
MUST_USE_RESULT MaybeObject* AllocateModuleContext(Context* previous,
ScopeInfo* scope_info);
// Allocate a function context.
MUST_USE_RESULT MaybeObject* AllocateFunctionContext(int length,
JSFunction* function);
@ -1326,7 +1333,8 @@ class Heap {
// Adjusts the amount of registered external memory.
// Returns the adjusted value.
inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
inline intptr_t AdjustAmountOfExternalAllocatedMemory(
intptr_t change_in_bytes);
// Allocate uninitialized fixed array.
MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length);
@ -1334,7 +1342,7 @@ class Heap {
PretenureFlag pretenure);
inline intptr_t PromotedTotalSize() {
return PromotedSpaceSize() + PromotedExternalMemorySize();
return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
}
// True if we have reached the allocation limit in the old generation that
@ -1355,19 +1363,6 @@ class Heap {
static const intptr_t kMinimumAllocationLimit =
8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
// When we sweep lazily we initially guess that there is no garbage on the
// heap and set the limits for the next GC accordingly. As we sweep we find
// out that some of the pages contained garbage and we have to adjust
// downwards the size of the heap. This means the limits that control the
// timing of the next GC also need to be adjusted downwards.
void LowerOldGenLimits(intptr_t adjustment) {
size_of_old_gen_at_last_old_space_gc_ -= adjustment;
old_gen_promotion_limit_ =
OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
old_gen_allocation_limit_ =
OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
}
intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
const int divisor = FLAG_stress_compaction ? 10 : 3;
intptr_t limit =
@ -1411,6 +1406,12 @@ class Heap {
kRootListLength
};
STATIC_CHECK(kUndefinedValueRootIndex == Internals::kUndefinedValueRootIndex);
STATIC_CHECK(kNullValueRootIndex == Internals::kNullValueRootIndex);
STATIC_CHECK(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
STATIC_CHECK(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
STATIC_CHECK(kempty_symbolRootIndex == Internals::kEmptySymbolRootIndex);
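The STATIC_CHECK lines added above pin the heap's root-list indices to the values hard-coded in the public API's Internals, so any drift becomes a compile error rather than a runtime crash. A hedged sketch using standard static_assert and made-up constants (not V8's actual indices):

#include <cstdio>

namespace heap_internal {
enum RootListIndex { kUndefinedValueRootIndex = 0, kNullValueRootIndex = 1 };
}

namespace public_api {
// The embedder-visible header bakes in the same numbers for fast access.
constexpr int kUndefinedValueRootIndex = 0;
constexpr int kNullValueRootIndex = 1;
}

static_assert(heap_internal::kUndefinedValueRootIndex ==
                  public_api::kUndefinedValueRootIndex,
              "root index mismatch");
static_assert(heap_internal::kNullValueRootIndex ==
                  public_api::kNullValueRootIndex,
              "root index mismatch");

int main() {
  std::printf("root indices are consistent at compile time\n");
  return 0;
}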
MUST_USE_RESULT MaybeObject* NumberToString(
Object* number, bool check_number_string_cache = true);
MUST_USE_RESULT MaybeObject* Uint32ToString(
@ -1442,6 +1443,8 @@ class Heap {
inline bool NextGCIsLikelyToBeFull() {
if (FLAG_gc_global) return true;
if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
intptr_t total_promoted = PromotedTotalSize();
intptr_t adjusted_promotion_limit =
@ -1452,7 +1455,7 @@ class Heap {
intptr_t adjusted_allocation_limit =
old_gen_allocation_limit_ - new_space_.Capacity() / 5;
if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
if (PromotedSpaceSizeOfObjects() >= adjusted_allocation_limit) return true;
return false;
}
@ -1490,7 +1493,6 @@ class Heap {
GCTracer* tracer() { return tracer_; }
// Returns the size of objects residing in non new spaces.
intptr_t PromotedSpaceSize();
intptr_t PromotedSpaceSizeOfObjects();
double total_regexp_code_generated() { return total_regexp_code_generated_; }
@ -1595,7 +1597,7 @@ class Heap {
}
void AgeInlineCaches() {
++global_ic_age_;
global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
}
private:
@ -1605,6 +1607,8 @@ class Heap {
// more expedient to get at the isolate directly from within Heap methods.
Isolate* isolate_;
Object* roots_[kRootListLength];
intptr_t code_range_size_;
int reserved_semispace_size_;
int max_semispace_size_;
@ -1646,7 +1650,7 @@ class Heap {
int gc_post_processing_depth_;
// Returns the amount of external memory registered since the last global GC.
int PromotedExternalMemorySize();
intptr_t PromotedExternalMemorySize();
int ms_count_; // how many mark-sweep collections happened
unsigned int gc_count_; // how many gc happened
@ -1711,17 +1715,15 @@ class Heap {
// The amount of external memory registered through the API kept alive
// by global handles
int amount_of_external_allocated_memory_;
intptr_t amount_of_external_allocated_memory_;
// Caches the amount of external memory registered at the last global gc.
int amount_of_external_allocated_memory_at_last_global_gc_;
intptr_t amount_of_external_allocated_memory_at_last_global_gc_;
// Indicates that an allocation has failed in the old generation since the
// last GC.
int old_gen_exhausted_;
Object* roots_[kRootListLength];
Object* global_contexts_list_;
StoreBufferRebuilder store_buffer_rebuilder_;
@ -1974,13 +1976,6 @@ class Heap {
return (scavenges_since_last_idle_round_ >= kIdleScavengeThreshold);
}
bool WorthStartingGCWhenIdle() {
if (contexts_disposed_ > 0) {
return true;
}
return incremental_marking()->WorthActivating();
}
// Estimates how many milliseconds a Mark-Sweep would take to complete.
// In idle notification handler we assume that this function will return:
// - a number less than 10 for small heaps, which are less than 8Mb.

247
deps/v8/src/hydrogen-instructions.cc

@ -336,7 +336,8 @@ HUseListNode* HValue::RemoveUse(HValue* value, int index) {
// Do not reuse use list nodes in debug mode, zap them.
if (current != NULL) {
HUseListNode* temp =
new HUseListNode(current->value(), current->index(), NULL);
new(block()->zone())
HUseListNode(current->value(), current->index(), NULL);
current->Zap();
current = temp;
}
@ -416,6 +417,7 @@ void HValue::Kill() {
SetFlag(kIsDead);
for (int i = 0; i < OperandCount(); ++i) {
HValue* operand = OperandAt(i);
if (operand == NULL) continue;
HUseListNode* first = operand->use_list_;
if (first != NULL && first->value() == this && first->index() == i) {
operand->use_list_ = first->tail();
@ -462,7 +464,8 @@ void HValue::PrintChangesTo(StringStream* stream) {
add_comma = true; \
stream->Add(#type); \
}
GVN_FLAG_LIST(PRINT_DO);
GVN_TRACKED_FLAG_LIST(PRINT_DO);
GVN_UNTRACKED_FLAG_LIST(PRINT_DO);
#undef PRINT_DO
}
stream->Add("]");
@ -493,8 +496,8 @@ void HValue::RegisterUse(int index, HValue* new_value) {
if (new_value != NULL) {
if (removed == NULL) {
new_value->use_list_ =
new HUseListNode(this, index, new_value->use_list_);
new_value->use_list_ = new(new_value->block()->zone()) HUseListNode(
this, index, new_value->use_list_);
} else {
removed->set_tail(new_value->use_list_);
new_value->use_list_ = removed;
@ -599,6 +602,9 @@ void HInstruction::InsertAfter(HInstruction* previous) {
SetBlock(block);
previous->next_ = this;
if (next != NULL) next->previous_ = this;
if (block->last() == previous) {
block->set_last(this);
}
}
@ -608,6 +614,7 @@ void HInstruction::Verify() {
HBasicBlock* cur_block = block();
for (int i = 0; i < OperandCount(); ++i) {
HValue* other_operand = OperandAt(i);
if (other_operand == NULL) continue;
HBasicBlock* other_block = other_operand->block();
if (cur_block == other_block) {
if (!other_operand->IsPhi()) {
@ -866,6 +873,17 @@ HValue* HBitwise::Canonicalize() {
}
HValue* HBitNot::Canonicalize() {
// Optimize ~~x, a common pattern used for ToInt32(x).
if (value()->IsBitNot()) {
HValue* result = HBitNot::cast(value())->value();
ASSERT(result->representation().IsInteger32());
return result;
}
return this;
}
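The new HBitNot::Canonicalize folds ~~x back to x: double bitwise NOT on an int32 is the identity, which is why JavaScript code uses ~~x as a cheap ToInt32 and why the second NOT can be dropped once the value is already an int32. A tiny check of that identity:

#include <cassert>
#include <cstdint>

int main() {
  for (int32_t x : {0, 1, -1, 42, -123456, INT32_MIN, INT32_MAX}) {
    assert(~~x == x);
  }
  return 0;
}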
HValue* HAdd::Canonicalize() {
if (!representation().IsInteger32()) return this;
if (CheckUsesForFlag(kTruncatingToInt32)) ClearFlag(kCanOverflow);
@ -916,6 +934,62 @@ void HJSArrayLength::PrintDataTo(StringStream* stream) {
}
HValue* HUnaryMathOperation::Canonicalize() {
if (op() == kMathFloor) {
// If the input is integer32 then we replace the floor instruction
// with its input. This happens before the representation changes are
// introduced.
if (value()->representation().IsInteger32()) return value();
#ifdef V8_TARGET_ARCH_ARM
if (value()->IsDiv() && (value()->UseCount() == 1)) {
// TODO(2038): Implement this optimization for non ARM architectures.
HDiv* hdiv = HDiv::cast(value());
HValue* left = hdiv->left();
HValue* right = hdiv->right();
// Try to simplify left and right values of the division.
HValue* new_left =
LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(left);
HValue* new_right =
LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(right);
// Return if left or right are not optimizable.
if ((new_left == NULL) || (new_right == NULL)) return this;
// Insert the new values in the graph.
if (new_left->IsInstruction() &&
!HInstruction::cast(new_left)->IsLinked()) {
HInstruction::cast(new_left)->InsertBefore(this);
}
if (new_right->IsInstruction() &&
!HInstruction::cast(new_right)->IsLinked()) {
HInstruction::cast(new_right)->InsertBefore(this);
}
HMathFloorOfDiv* instr = new(block()->zone()) HMathFloorOfDiv(context(),
new_left,
new_right);
// Replace this HMathFloor instruction by the new HMathFloorOfDiv.
instr->InsertBefore(this);
ReplaceAllUsesWith(instr);
Kill();
// We know the division had no other uses than this HMathFloor. Delete it.
// Also delete the arguments of the division if they are not used any
// more.
hdiv->DeleteAndReplaceWith(NULL);
ASSERT(left->IsChange() || left->IsConstant());
ASSERT(right->IsChange() || right->IsConstant());
if (left->HasNoUses()) left->DeleteAndReplaceWith(NULL);
if (right->HasNoUses()) right->DeleteAndReplaceWith(NULL);
// Return NULL to remove this instruction from the graph.
return NULL;
}
#endif // V8_TARGET_ARCH_ARM
}
return this;
}
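This canonicalization turns Math.floor(a / b) into a dedicated HMathFloorOfDiv node (ARM-only for now, per the TODO above). The semantics the node must preserve differ from C++'s '/' because Math.floor rounds toward negative infinity while integer division truncates toward zero; a sketch of the required adjustment (illustration only, divisor != 0 and no INT32_MIN / -1 overflow assumed):

#include <cassert>
#include <cmath>
#include <cstdint>

static int32_t FloorOfDiv(int32_t dividend, int32_t divisor) {
  int32_t quotient = dividend / divisor;
  int32_t remainder = dividend % divisor;
  // Adjust the truncated quotient down by one when the division is inexact
  // and the operands have opposite signs.
  if (remainder != 0 && ((dividend < 0) != (divisor < 0))) --quotient;
  return quotient;
}

int main() {
  assert(FloorOfDiv(7, 2) == 3);
  assert(FloorOfDiv(-7, 2) == -4);  // truncation alone would give -3
  assert(FloorOfDiv(7, -2) == -4);
  assert(FloorOfDiv(-7, -2) == 3);
  assert(FloorOfDiv(-7, 2) == static_cast<int32_t>(std::floor(-7.0 / 2.0)));
  return 0;
}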
HValue* HCheckInstanceType::Canonicalize() {
if (check_ == IS_STRING &&
!value()->type().IsUninitialized() &&
@ -965,16 +1039,13 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
}
void HCheckMap::PrintDataTo(StringStream* stream) {
void HCheckMaps::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" %p", *map());
if (mode() == REQUIRE_EXACT_MAP) {
stream->Add(" [EXACT]");
} else if (!has_element_transitions_) {
stream->Add(" [EXACT*]");
} else {
stream->Add(" [MATCH ELEMENTS]");
stream->Add(" [%p", *map_set()->first());
for (int i = 1; i < map_set()->length(); ++i) {
stream->Add(",%p", *map_set()->at(i));
}
stream->Add("]");
}
@ -1181,7 +1252,7 @@ void HPhi::PrintTo(StringStream* stream) {
void HPhi::AddInput(HValue* value) {
inputs_.Add(NULL);
inputs_.Add(NULL, value->block()->zone());
SetOperandAt(OperandCount() - 1, value);
// Mark phis that may have 'arguments' directly or indirectly as an operand.
if (!CheckFlag(kIsArguments) && value->CheckFlag(kIsArguments)) {
@ -1228,14 +1299,33 @@ void HPhi::InitRealUses(int phi_id) {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* value = it.value();
if (!value->IsPhi()) {
Representation rep = value->RequiredInputRepresentation(it.index());
Representation rep = value->ObservedInputRepresentation(it.index());
non_phi_uses_[rep.kind()] += value->LoopWeight();
if (FLAG_trace_representation) {
PrintF("%d %s is used by %d %s as %s\n",
this->id(),
this->Mnemonic(),
value->id(),
value->Mnemonic(),
rep.Mnemonic());
}
}
}
}
void HPhi::AddNonPhiUsesFrom(HPhi* other) {
if (FLAG_trace_representation) {
PrintF("adding to %d %s uses of %d %s: i%d d%d t%d\n",
this->id(),
this->Mnemonic(),
other->id(),
other->Mnemonic(),
other->non_phi_uses_[Representation::kInteger32],
other->non_phi_uses_[Representation::kDouble],
other->non_phi_uses_[Representation::kTagged]);
}
for (int i = 0; i < Representation::kNumRepresentations; i++) {
indirect_uses_[i] += other->non_phi_uses_[i];
}
@ -1249,6 +1339,12 @@ void HPhi::AddIndirectUsesTo(int* dest) {
}
void HPhi::ResetInteger32Uses() {
non_phi_uses_[Representation::kInteger32] = 0;
indirect_uses_[Representation::kInteger32] = 0;
}
void HSimulate::PrintDataTo(StringStream* stream) {
stream->Add("id=%d", ast_id());
if (pop_count_ > 0) stream->Add(" pop %d", pop_count_);
@ -1302,17 +1398,17 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
}
HConstant* HConstant::CopyToRepresentation(Representation r) const {
HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
if (r.IsInteger32() && !has_int32_value_) return NULL;
if (r.IsDouble() && !has_double_value_) return NULL;
return new HConstant(handle_, r);
return new(zone) HConstant(handle_, r);
}
HConstant* HConstant::CopyToTruncatedInt32() const {
HConstant* HConstant::CopyToTruncatedInt32(Zone* zone) const {
if (!has_double_value_) return NULL;
int32_t truncated = NumberToInt32(*handle_);
return new HConstant(FACTORY->NewNumberFromInt(truncated),
return new(zone) HConstant(FACTORY->NewNumberFromInt(truncated),
Representation::Integer32());
}
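These hunks thread a Zone through the HConstant copies and allocate with new(zone) instead of the global heap. A minimal arena sketch of that pattern (hypothetical Zone class, not V8's): objects are bump-allocated and freed all at once when the zone goes away, never individually.

#include <cassert>
#include <cstddef>
#include <cstdio>

class Zone {
 public:
  void* New(size_t size) {
    // Round up so every allocation stays pointer-aligned.
    size = (size + sizeof(void*) - 1) & ~(sizeof(void*) - 1);
    assert(used_ + size <= sizeof(buffer_));
    void* result = buffer_ + used_;
    used_ += size;
    return result;
  }

 private:
  alignas(alignof(std::max_align_t)) char buffer_[4096];
  size_t used_ = 0;
};

// The overload that makes the `new(zone) T(...)` spelling work.
void* operator new(size_t size, Zone* zone) { return zone->New(size); }
void operator delete(void*, Zone*) {}  // only used if a constructor throws

struct HConstantLike {
  explicit HConstantLike(int value) : value(value) {}
  int value;
};

int main() {
  Zone zone;
  HConstantLike* a = new(&zone) HConstantLike(1);
  HConstantLike* b = new(&zone) HConstantLike(2);
  std::printf("%d %d\n", a->value, b->value);
  // No delete: the whole zone's storage is released together at scope exit.
  return 0;
}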
@ -1522,17 +1618,51 @@ void HLoadNamedField::PrintDataTo(StringStream* stream) {
}
// Returns true if an instance of this map can never find a property with this
// name in its prototype chain. This means all prototypes up to the top are
// fast and don't have the name in them. It would be good if we could optimize
// polymorphic loads where the property is sometimes found in the prototype
// chain.
static bool PrototypeChainCanNeverResolve(
Handle<Map> map, Handle<String> name) {
Isolate* isolate = map->GetIsolate();
Object* current = map->prototype();
while (current != isolate->heap()->null_value()) {
if (current->IsJSGlobalProxy() ||
current->IsGlobalObject() ||
!current->IsJSObject() ||
JSObject::cast(current)->IsAccessCheckNeeded() ||
!JSObject::cast(current)->HasFastProperties()) {
return false;
}
LookupResult lookup(isolate);
JSObject::cast(current)->map()->LookupInDescriptors(NULL, *name, &lookup);
if (lookup.IsFound()) {
if (lookup.type() != MAP_TRANSITION) return false;
} else if (!lookup.IsCacheable()) {
return false;
}
current = JSObject::cast(current)->GetPrototype();
}
return true;
}
HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
HValue* object,
SmallMapList* types,
Handle<String> name)
: types_(Min(types->length(), kMaxLoadPolymorphism)),
Handle<String> name,
Zone* zone)
: types_(Min(types->length(), kMaxLoadPolymorphism), zone),
name_(name),
need_generic_(false) {
SetOperandAt(0, context);
SetOperandAt(1, object);
set_representation(Representation::Tagged());
SetGVNFlag(kDependsOnMaps);
SmallMapList negative_lookups;
for (int i = 0;
i < types->length() && types_.length() < kMaxLoadPolymorphism;
++i) {
@ -1548,21 +1678,39 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
} else {
SetGVNFlag(kDependsOnBackingStoreFields);
}
types_.Add(types->at(i));
types_.Add(types->at(i), zone);
break;
}
case CONSTANT_FUNCTION:
types_.Add(types->at(i));
types_.Add(types->at(i), zone);
break;
case MAP_TRANSITION:
if (PrototypeChainCanNeverResolve(map, name)) {
negative_lookups.Add(types->at(i), zone);
}
break;
default:
break;
}
} else if (lookup.IsCacheable()) {
if (PrototypeChainCanNeverResolve(map, name)) {
negative_lookups.Add(types->at(i), zone);
}
}
}
if (types_.length() == types->length() && FLAG_deoptimize_uncommon_cases) {
bool need_generic =
(types->length() != negative_lookups.length() + types_.length());
if (!need_generic && FLAG_deoptimize_uncommon_cases) {
SetFlag(kUseGVN);
for (int i = 0; i < negative_lookups.length(); i++) {
types_.Add(negative_lookups.at(i), zone);
}
} else {
// We don't have an easy way to handle both a call (to the generic stub) and
// a deopt in the same hydrogen instruction, so in this case we don't add
// the negative lookups which can deopt - just let the generic stub handle
// them.
SetAllSideEffects();
need_generic_ = true;
}
@ -1607,11 +1755,14 @@ void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("]");
if (RequiresHoleCheck()) {
stream->Add(" check_hole");
}
}
bool HLoadKeyedFastElement::RequiresHoleCheck() {
if (hole_check_mode_ == OMIT_HOLE_CHECK) {
if (IsFastPackedElementsKind(elements_kind())) {
return false;
}
@ -1657,12 +1808,11 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
new(block()->zone()) HCheckMapValue(object(), names_cache->map());
HInstruction* index = new(block()->zone()) HLoadKeyedFastElement(
index_cache,
key_load->key(),
HLoadKeyedFastElement::OMIT_HOLE_CHECK);
HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
object(), index);
key_load->key());
map_check->InsertBefore(this);
index->InsertBefore(this);
HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
object(), index);
load->InsertBefore(this);
return load;
}
@ -1706,8 +1856,11 @@ void HLoadKeyedSpecializedArrayElement::PrintDataTo(
stream->Add("pixel");
break;
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -1736,6 +1889,9 @@ void HStoreNamedField::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
value()->PrintNameTo(stream);
stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
if (NeedsWriteBarrier()) {
stream->Add(" (write-barrier)");
}
if (!transition().is_null()) {
stream->Add(" (transition map %p)", *transition());
}
@ -1801,9 +1957,12 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
case EXTERNAL_PIXEL_ELEMENTS:
stream->Add("pixel");
break;
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -1818,7 +1977,13 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(" %p -> %p", *original_map(), *transitioned_map());
ElementsKind from_kind = original_map()->elements_kind();
ElementsKind to_kind = transitioned_map()->elements_kind();
stream->Add(" %p [%s] -> %p [%s]",
*original_map(),
ElementsAccessor::ForKind(from_kind)->name(),
*transitioned_map(),
ElementsAccessor::ForKind(to_kind)->name());
}
@ -1879,7 +2044,7 @@ HType HValue::CalculateInferredType() {
}
HType HCheckMap::CalculateInferredType() {
HType HCheckMaps::CalculateInferredType() {
return value()->type();
}
@ -2089,6 +2254,17 @@ HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {
}
bool HStoreKeyedFastDoubleElement::NeedsCanonicalization() {
// If value was loaded from unboxed double backing store or
// converted from an integer then we don't have to canonicalize it.
if (value()->IsLoadKeyedFastDoubleElement() ||
(value()->IsChange() && HChange::cast(value())->from().IsInteger32())) {
return false;
}
return true;
}
#define H_CONSTANT_INT32(val) \
new(zone) HConstant(FACTORY->NewNumberFromInt(val, TENURED), \
Representation::Integer32())
@ -2257,6 +2433,13 @@ void HIn::PrintDataTo(StringStream* stream) {
}
void HBitwise::PrintDataTo(StringStream* stream) {
stream->Add(Token::Name(op_));
stream->Add(" ");
HBitwiseBinaryOperation::PrintDataTo(stream);
}
Representation HPhi::InferredRepresentation() {
bool double_occurred = false;
bool int32_occurred = false;

462
deps/v8/src/hydrogen-instructions.h

@ -85,7 +85,7 @@ class LChunkBuilder;
V(Change) \
V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMap) \
V(CheckMaps) \
V(CheckNonSmi) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
@ -140,6 +140,7 @@ class LChunkBuilder;
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MathFloorOfDiv) \
V(Mod) \
V(Mul) \
V(ObjectLiteral) \
@ -188,7 +189,10 @@ class LChunkBuilder;
V(DateField) \
V(WrapReceiver)
#define GVN_FLAG_LIST(V) \
#define GVN_TRACKED_FLAG_LIST(V) \
V(NewSpacePromotion)
#define GVN_UNTRACKED_FLAG_LIST(V) \
V(Calls) \
V(InobjectFields) \
V(BackingStoreFields) \
@ -506,14 +510,18 @@ class HUseIterator BASE_EMBEDDED {
// There must be one corresponding kDepends flag for every kChanges flag and
// the order of the kChanges flags must be exactly the same as of the kDepends
// flags.
// flags. All tracked flags should appear before untracked ones.
enum GVNFlag {
// Declare global value numbering flags.
#define DECLARE_FLAG(type) kChanges##type, kDependsOn##type,
GVN_FLAG_LIST(DECLARE_FLAG)
GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
#undef DECLARE_FLAG
kAfterLastFlag,
kLastFlag = kAfterLastFlag - 1
kLastFlag = kAfterLastFlag - 1,
#define COUNT_FLAG(type) + 1
kNumberOfTrackedSideEffects = 0 GVN_TRACKED_FLAG_LIST(COUNT_FLAG)
#undef COUNT_FLAG
};
typedef EnumSet<GVNFlag> GVNFlagSet;
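The GVN flags are now split into tracked and untracked lists, and kNumberOfTrackedSideEffects is derived with a COUNT_FLAG X-macro. A self-contained sketch of that counting trick (abridged, made-up flag lists, not V8's):

#include <cstdio>

#define TRACKED_FLAG_LIST(V) \
  V(NewSpacePromotion)

#define UNTRACKED_FLAG_LIST(V) \
  V(Calls)                     \
  V(Maps)                      \
  V(ArrayElements)

enum GVNFlag {
  // Every flag contributes a Changes/DependsOn pair; tracked flags come first.
#define DECLARE_FLAG(type) kChanges##type, kDependsOn##type,
  TRACKED_FLAG_LIST(DECLARE_FLAG)
  UNTRACKED_FLAG_LIST(DECLARE_FLAG)
#undef DECLARE_FLAG
  kAfterLastFlag,
  kLastFlag = kAfterLastFlag - 1,
#define COUNT_FLAG(type) + 1
  kNumberOfTrackedSideEffects = 0 TRACKED_FLAG_LIST(COUNT_FLAG)
#undef COUNT_FLAG
};

int main() {
  std::printf("tracked side effects: %d\n", kNumberOfTrackedSideEffects);  // 1
  std::printf("total flag entries: %d\n", kAfterLastFlag);                 // 8
  return 0;
}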
@ -530,6 +538,10 @@ class HValue: public ZoneObject {
// implement DataEquals(), which will be used to determine if other
// occurrences of the instruction are indeed the same.
kUseGVN,
// Track instructions that are dominating side effects. If an instruction
// sets this flag, it must implement SetSideEffectDominator() and should
// indicate which side effects to track by setting GVN flags.
kTrackSideEffectDominators,
kCanOverflow,
kBailoutOnMinusZero,
kCanBeDivByZero,
@ -544,6 +556,12 @@ class HValue: public ZoneObject {
static const int kChangesToDependsFlagsLeftShift = 1;
static GVNFlag ChangesFlagFromInt(int x) {
return static_cast<GVNFlag>(x * 2);
}
static GVNFlag DependsOnFlagFromInt(int x) {
return static_cast<GVNFlag>(x * 2 + 1);
}
static GVNFlagSet ConvertChangesToDependsFlags(GVNFlagSet flags) {
return GVNFlagSet(flags.ToIntegral() << kChangesToDependsFlagsLeftShift);
}
@ -702,6 +720,11 @@ class HValue: public ZoneObject {
return representation();
}
// Type feedback access.
virtual Representation ObservedInputRepresentation(int index) {
return RequiredInputRepresentation(index);
}
// This gives the instruction an opportunity to replace itself with an
// instruction that does the same in some better way. To replace an
// instruction with a new one, first add the new instruction to the graph,
@ -726,6 +749,13 @@ class HValue: public ZoneObject {
virtual HType CalculateInferredType();
// This function must be overridden for instructions which have the
// kTrackSideEffectDominators flag set, to track instructions that are
// dominating side effects.
virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
UNREACHABLE();
}
#ifdef DEBUG
virtual void Verify() = 0;
#endif
@ -756,7 +786,8 @@ class HValue: public ZoneObject {
GVNFlagSet result;
// Create changes mask.
#define ADD_FLAG(type) result.Add(kDependsOn##type);
GVN_FLAG_LIST(ADD_FLAG)
GVN_TRACKED_FLAG_LIST(ADD_FLAG)
GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
#undef ADD_FLAG
return result;
}
@ -765,7 +796,8 @@ class HValue: public ZoneObject {
GVNFlagSet result;
// Create changes mask.
#define ADD_FLAG(type) result.Add(kChanges##type);
GVN_FLAG_LIST(ADD_FLAG)
GVN_TRACKED_FLAG_LIST(ADD_FLAG)
GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
#undef ADD_FLAG
return result;
}
@ -781,6 +813,7 @@ class HValue: public ZoneObject {
// an executing program (i.e. are not safe to repeat, move or remove);
static GVNFlagSet AllObservableSideEffectsFlagSet() {
GVNFlagSet result = AllChangesFlagSet();
result.Remove(kChangesNewSpacePromotion);
result.Remove(kChangesElementsKind);
result.Remove(kChangesElementsPointer);
result.Remove(kChangesMaps);
@ -959,7 +992,8 @@ class HSoftDeoptimize: public HTemplateInstruction<0> {
class HDeoptimize: public HControlInstruction {
public:
explicit HDeoptimize(int environment_length) : values_(environment_length) { }
HDeoptimize(int environment_length, Zone* zone)
: values_(environment_length, zone) { }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
@ -978,8 +1012,8 @@ class HDeoptimize: public HControlInstruction {
UNREACHABLE();
}
void AddEnvironmentValue(HValue* value) {
values_.Add(NULL);
void AddEnvironmentValue(HValue* value, Zone* zone) {
values_.Add(NULL, zone);
SetOperandAt(values_.length() - 1, value);
}
@ -1196,6 +1230,7 @@ class HChange: public HUnaryOperation {
SetFlag(kUseGVN);
if (deoptimize_on_undefined) SetFlag(kDeoptimizeOnUndefined);
if (is_truncating) SetFlag(kTruncatingToInt32);
if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
}
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
@ -1246,11 +1281,12 @@ class HClampToUint8: public HUnaryOperation {
class HSimulate: public HInstruction {
public:
HSimulate(int ast_id, int pop_count)
HSimulate(int ast_id, int pop_count, Zone* zone)
: ast_id_(ast_id),
pop_count_(pop_count),
values_(2),
assigned_indexes_(2) {}
values_(2, zone),
assigned_indexes_(2, zone),
zone_(zone) {}
virtual ~HSimulate() {}
virtual void PrintDataTo(StringStream* stream);
@ -1298,9 +1334,9 @@ class HSimulate: public HInstruction {
private:
static const int kNoIndex = -1;
void AddValue(int index, HValue* value) {
assigned_indexes_.Add(index);
assigned_indexes_.Add(index, zone_);
// Resize the list of pushed values.
values_.Add(NULL);
values_.Add(NULL, zone_);
// Set the operand through the base method in HValue to make sure that the
// use lists are correctly updated.
SetOperandAt(values_.length() - 1, value);
@ -1309,6 +1345,7 @@ class HSimulate: public HInstruction {
int pop_count_;
ZoneList<HValue*> values_;
ZoneList<int> assigned_indexes_;
Zone* zone_;
};
@ -1321,6 +1358,7 @@ class HStackCheck: public HTemplateInstruction<1> {
HStackCheck(HValue* context, Type type) : type_(type) {
SetOperandAt(0, context);
SetGVNFlag(kChangesNewSpacePromotion);
}
HValue* context() { return OperandAt(0); }
@ -1354,13 +1392,15 @@ class HEnterInlined: public HTemplateInstruction<0> {
FunctionLiteral* function,
CallKind call_kind,
bool is_construct,
Variable* arguments)
Variable* arguments_var,
ZoneList<HValue*>* arguments_values)
: closure_(closure),
arguments_count_(arguments_count),
function_(function),
call_kind_(call_kind),
is_construct_(is_construct),
arguments_(arguments) {
arguments_var_(arguments_var),
arguments_values_(arguments_values) {
}
virtual void PrintDataTo(StringStream* stream);
@ -1375,7 +1415,8 @@ class HEnterInlined: public HTemplateInstruction<0> {
return Representation::None();
}
Variable* arguments() { return arguments_; }
Variable* arguments_var() { return arguments_var_; }
ZoneList<HValue*>* arguments_values() { return arguments_values_; }
DECLARE_CONCRETE_INSTRUCTION(EnterInlined)
@ -1385,19 +1426,28 @@ class HEnterInlined: public HTemplateInstruction<0> {
FunctionLiteral* function_;
CallKind call_kind_;
bool is_construct_;
Variable* arguments_;
Variable* arguments_var_;
ZoneList<HValue*>* arguments_values_;
};
class HLeaveInlined: public HTemplateInstruction<0> {
public:
HLeaveInlined() {}
explicit HLeaveInlined(bool arguments_pushed)
: arguments_pushed_(arguments_pushed) { }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
bool arguments_pushed() {
return arguments_pushed_;
}
DECLARE_CONCRETE_INSTRUCTION(LeaveInlined)
private:
bool arguments_pushed_;
};
@ -1605,14 +1655,26 @@ class HInvokeFunction: public HBinaryCall {
: HBinaryCall(context, function, argument_count) {
}
HInvokeFunction(HValue* context,
HValue* function,
Handle<JSFunction> known_function,
int argument_count)
: HBinaryCall(context, function, argument_count),
known_function_(known_function) {
}
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
HValue* context() { return first(); }
HValue* function() { return second(); }
Handle<JSFunction> known_function() { return known_function_; }
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
private:
Handle<JSFunction> known_function_;
};
@ -1786,7 +1848,9 @@ class HCallRuntime: public HCall<1> {
class HJSArrayLength: public HTemplateInstruction<2> {
public:
HJSArrayLength(HValue* value, HValue* typecheck) {
HJSArrayLength(HValue* value, HValue* typecheck,
HType type = HType::Tagged()) {
set_type(type);
// The length of an array is stored as a tagged value in the array
// object. It is guaranteed to be 32 bit integer, but it can be
// represented as either a smi or heap number.
@ -1810,7 +1874,7 @@ class HJSArrayLength: public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(JSArrayLength)
protected:
virtual bool DataEquals(HValue* other) { return true; }
virtual bool DataEquals(HValue* other_raw) { return true; }
};
@ -1865,6 +1929,8 @@ class HBitNot: public HUnaryOperation {
}
virtual HType CalculateInferredType();
virtual HValue* Canonicalize();
DECLARE_CONCRETE_INSTRUCTION(BitNot)
protected:
@ -1887,6 +1953,7 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
case kMathAbs:
set_representation(Representation::Tagged());
SetFlag(kFlexibleRepresentation);
SetGVNFlag(kChangesNewSpacePromotion);
break;
case kMathSqrt:
case kMathPowHalf:
@ -1895,6 +1962,7 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
case kMathCos:
case kMathTan:
set_representation(Representation::Double());
SetGVNFlag(kChangesNewSpacePromotion);
break;
default:
UNREACHABLE();
@ -1935,15 +2003,7 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
}
}
virtual HValue* Canonicalize() {
// If the input is integer32 then we replace the floor instruction
// with its inputs. This happens before the representation changes are
// introduced.
if (op() == kMathFloor) {
if (value()->representation().IsInteger32()) return value();
}
return this;
}
virtual HValue* Canonicalize();
BuiltinFunctionId op() const { return op_; }
const char* OpName() const;
@ -2003,14 +2063,10 @@ class HLoadExternalArrayPointer: public HUnaryOperation {
};
class HCheckMap: public HTemplateInstruction<2> {
class HCheckMaps: public HTemplateInstruction<2> {
public:
HCheckMap(HValue* value,
Handle<Map> map,
HValue* typecheck = NULL,
CompareMapMode mode = REQUIRE_EXACT_MAP)
: map_(map),
mode_(mode) {
HCheckMaps(HValue* value, Handle<Map> map, Zone* zone,
HValue* typecheck = NULL) {
SetOperandAt(0, value);
// If callers don't depend on a typecheck, they can pass in NULL. In that
// case we use a copy of the |value| argument as a dummy value.
@ -2018,14 +2074,43 @@ class HCheckMap: public HTemplateInstruction<2> {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
// If the map to check doesn't have the untransitioned elements, it must not
// be hoisted above TransitionElements instructions.
if (mode == REQUIRE_EXACT_MAP || !map->has_fast_smi_only_elements()) {
SetGVNFlag(kDependsOnElementsKind);
map_set()->Add(map, zone);
}
has_element_transitions_ =
map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL) != NULL ||
map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL) != NULL;
HCheckMaps(HValue* value, SmallMapList* maps, Zone* zone) {
SetOperandAt(0, value);
SetOperandAt(1, value);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
SetGVNFlag(kDependsOnElementsKind);
for (int i = 0; i < maps->length(); i++) {
map_set()->Add(maps->at(i), zone);
}
map_set()->Sort();
}
static HCheckMaps* NewWithTransitions(HValue* object, Handle<Map> map,
Zone* zone) {
HCheckMaps* check_map = new(zone) HCheckMaps(object, map, zone);
SmallMapList* map_set = check_map->map_set();
// Since transitioned elements maps of the initial map don't fail the map
// check, the CheckMaps instruction doesn't need to depend on ElementsKinds.
check_map->ClearGVNFlag(kDependsOnElementsKind);
ElementsKind kind = map->elements_kind();
bool packed = IsFastPackedElementsKind(kind);
while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
kind = GetNextMoreGeneralFastElementsKind(kind, packed);
Map* transitioned_map =
map->LookupElementsTransitionMap(kind);
if (transitioned_map) {
map_set->Add(Handle<Map>(transitioned_map), zone);
}
};
map_set->Sort();
return check_map;
}
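
    // Illustrative sketch only (not V8's implementation): the idea behind
    // NewWithTransitions above is to admit, besides the initial map, every map
    // the object could reach through a benign fast elements-kind transition,
    // so the map check still passes afterwards. The enum below is a simplified
    // stand-in for V8's fast kinds; the assumed order SMI -> DOUBLE -> OBJECT
    // mirrors what the loop above walks.
    #include <iostream>
    #include <vector>

    enum Kind { SMI, DOUBLE, OBJECT };

    static bool NextMoreGeneral(Kind kind, Kind* next) {
      switch (kind) {
        case SMI:    *next = DOUBLE; return true;
        case DOUBLE: *next = OBJECT; return true;
        default:     return false;  // OBJECT is already the most general kind
      }
    }

    int main() {
      const char* names[] = {"SMI", "DOUBLE", "OBJECT"};
      Kind kind = SMI;
      std::vector<Kind> admitted;
      admitted.push_back(kind);               // the initial map's elements kind
      Kind next;
      while (NextMoreGeneral(kind, &next)) {  // mirrors the while loop above
        kind = next;
        admitted.push_back(kind);             // corresponds to map_set->Add(...)
      }
      for (size_t i = 0; i < admitted.size(); ++i) {
        std::cout << names[admitted[i]] << "\n";  // prints SMI, DOUBLE, OBJECT
      }
      return 0;
    }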
virtual Representation RequiredInputRepresentation(int index) {
@ -2035,25 +2120,23 @@ class HCheckMap: public HTemplateInstruction<2> {
virtual HType CalculateInferredType();
HValue* value() { return OperandAt(0); }
Handle<Map> map() const { return map_; }
CompareMapMode mode() const { return mode_; }
SmallMapList* map_set() { return &map_set_; }
DECLARE_CONCRETE_INSTRUCTION(CheckMap)
DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
protected:
virtual bool DataEquals(HValue* other) {
HCheckMap* b = HCheckMap::cast(other);
// Two CheckMaps instructions are DataEqual if their maps are identical and
// they have the same mode. The mode comparison can be ignored if the map
// has no elements transitions.
return map_.is_identical_to(b->map()) &&
(b->mode() == mode() || !has_element_transitions_);
HCheckMaps* b = HCheckMaps::cast(other);
// Relies on the fact that map_set has been sorted before.
if (map_set()->length() != b->map_set()->length()) return false;
for (int i = 0; i < map_set()->length(); i++) {
if (!map_set()->at(i).is_identical_to(b->map_set()->at(i))) return false;
}
return true;
}
private:
bool has_element_transitions_;
Handle<Map> map_;
CompareMapMode mode_;
SmallMapList map_set_;
};
@ -2092,17 +2175,17 @@ class HCheckFunction: public HUnaryOperation {
class HCheckInstanceType: public HUnaryOperation {
public:
static HCheckInstanceType* NewIsSpecObject(HValue* value) {
return new HCheckInstanceType(value, IS_SPEC_OBJECT);
static HCheckInstanceType* NewIsSpecObject(HValue* value, Zone* zone) {
return new(zone) HCheckInstanceType(value, IS_SPEC_OBJECT);
}
static HCheckInstanceType* NewIsJSArray(HValue* value) {
return new HCheckInstanceType(value, IS_JS_ARRAY);
static HCheckInstanceType* NewIsJSArray(HValue* value, Zone* zone) {
return new(zone) HCheckInstanceType(value, IS_JS_ARRAY);
}
static HCheckInstanceType* NewIsString(HValue* value) {
return new HCheckInstanceType(value, IS_STRING);
static HCheckInstanceType* NewIsString(HValue* value, Zone* zone) {
return new(zone) HCheckInstanceType(value, IS_STRING);
}
static HCheckInstanceType* NewIsSymbol(HValue* value) {
return new HCheckInstanceType(value, IS_SYMBOL);
static HCheckInstanceType* NewIsSymbol(HValue* value, Zone* zone) {
return new(zone) HCheckInstanceType(value, IS_SYMBOL);
}
virtual void PrintDataTo(StringStream* stream);
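
    // The factory methods above now take a Zone* and allocate with new(zone).
    // A minimal sketch of that allocation pattern, under the assumption that it
    // works like a bump allocator whose objects all die when the zone does;
    // Zone, ZoneObject and HCheckLike here are simplified stand-ins, not V8 code.
    #include <cstddef>
    #include <new>

    class Zone {  // toy bump allocator, no bounds checking in this sketch
     public:
      Zone() : used_(0) {}
      void* New(size_t size) {
        void* p = buffer_ + used_;
        used_ += (size + 7) & ~static_cast<size_t>(7);  // keep 8-byte alignment
        return p;
      }
     private:
      alignas(8) char buffer_[1 << 16];
      size_t used_;
    };

    class ZoneObject {
     public:
      void* operator new(size_t size, Zone* zone) { return zone->New(size); }
      void operator delete(void*, Zone*) {}  // matching placement delete
      void operator delete(void*) {}         // zone memory is never freed singly
    };

    struct HCheckLike : public ZoneObject {  // hypothetical instruction type
      explicit HCheckLike(int value) : value_(value) {}
      int value_;
    };

    int main() {
      Zone zone;
      // Same shape as "new(zone) HCheckInstanceType(value, IS_STRING)" above.
      HCheckLike* check = new(&zone) HCheckLike(42);
      return check->value_ == 42 ? 0 : 1;
    }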
@ -2251,8 +2334,8 @@ class HCheckSmi: public HUnaryOperation {
class HPhi: public HValue {
public:
explicit HPhi(int merged_index)
: inputs_(2),
HPhi(int merged_index, Zone* zone)
: inputs_(2, zone),
merged_index_(merged_index),
phi_id_(-1),
is_live_(false),
@ -2331,11 +2414,15 @@ class HPhi: public HValue {
bool AllOperandsConvertibleToInteger() {
for (int i = 0; i < OperandCount(); ++i) {
if (!OperandAt(i)->IsConvertibleToInteger()) return false;
if (!OperandAt(i)->IsConvertibleToInteger()) {
return false;
}
}
return true;
}
void ResetInteger32Uses();
protected:
virtual void DeleteFromGraph();
virtual void InternalSetOperandAt(int index, HValue* value) {
@ -2407,8 +2494,8 @@ class HConstant: public HTemplateInstruction<0> {
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
bool IsInteger() const { return handle_->IsSmi(); }
HConstant* CopyToRepresentation(Representation r) const;
HConstant* CopyToTruncatedInt32() const;
HConstant* CopyToRepresentation(Representation r, Zone* zone) const;
HConstant* CopyToTruncatedInt32(Zone* zone) const;
bool HasInteger32Value() const { return has_int32_value_; }
int32_t Integer32Value() const {
ASSERT(HasInteger32Value());
@ -2485,6 +2572,7 @@ class HBinaryOperation: public HTemplateInstruction<3> {
if (IsCommutative() && left()->IsConstant()) return right();
return left();
}
HValue* MostConstantOperand() {
if (IsCommutative() && left()->IsConstant()) return left();
return right();
@ -2549,7 +2637,7 @@ class HApplyArguments: public HTemplateInstruction<4> {
class HArgumentsElements: public HTemplateInstruction<0> {
public:
HArgumentsElements() {
explicit HArgumentsElements(bool from_inlined) : from_inlined_(from_inlined) {
// The value produced by this instruction is a pointer into the stack
// that looks as if it was a smi because of alignment.
set_representation(Representation::Tagged());
@ -2562,8 +2650,12 @@ class HArgumentsElements: public HTemplateInstruction<0> {
return Representation::None();
}
bool from_inlined() const { return from_inlined_; }
protected:
virtual bool DataEquals(HValue* other) { return true; }
bool from_inlined_;
};
@ -2646,6 +2738,9 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
set_representation(Representation::Tagged());
SetFlag(kFlexibleRepresentation);
SetAllSideEffects();
observed_input_representation_[0] = Representation::Tagged();
observed_input_representation_[1] = Representation::None();
observed_input_representation_[2] = Representation::None();
}
virtual Representation RequiredInputRepresentation(int index) {
@ -2665,7 +2760,38 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
virtual HType CalculateInferredType();
virtual Representation ObservedInputRepresentation(int index) {
return observed_input_representation_[index];
}
void InitializeObservedInputRepresentation(Representation r) {
observed_input_representation_[1] = r;
observed_input_representation_[2] = r;
}
DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation)
private:
Representation observed_input_representation_[3];
};
class HMathFloorOfDiv: public HBinaryOperation {
public:
HMathFloorOfDiv(HValue* context, HValue* left, HValue* right)
: HBinaryOperation(context, left, right) {
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
}
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Integer32();
}
DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv)
protected:
virtual bool DataEquals(HValue* other) { return true; }
};
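
    // HMathFloorOfDiv captures Math.floor(a / b) on int32 operands, which is
    // not the same as C++'s truncating integer division when the quotient is
    // negative. An illustrative helper (not V8 code, assumes b != 0 and no
    // INT_MIN / -1 overflow) showing the semantics:
    #include <cassert>
    #include <cstdint>

    static int32_t FloorOfDiv(int32_t a, int32_t b) {
      int32_t q = a / b;                               // truncates toward zero
      if ((a % b) != 0 && ((a < 0) != (b < 0))) --q;   // adjust toward -infinity
      return q;
    }

    int main() {
      assert(FloorOfDiv(7, 2) == 3);
      assert(FloorOfDiv(-7, 2) == -4);  // plain C++ -7/2 would give -3
      assert(FloorOfDiv(7, -2) == -4);
      return 0;
    }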
@ -3083,6 +3209,7 @@ class HPower: public HTemplateInstruction<2> {
SetOperandAt(1, right);
set_representation(Representation::Double());
SetFlag(kUseGVN);
SetGVNFlag(kChangesNewSpacePromotion);
}
HValue* left() { return OperandAt(0); }
@ -3282,6 +3409,8 @@ class HBitwise: public HBitwiseBinaryOperation {
HValue* left,
HValue* right);
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(Bitwise)
protected:
@ -3529,6 +3658,12 @@ inline bool StoringValueNeedsWriteBarrier(HValue* value) {
}
inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
HValue* new_space_dominator) {
return !object->IsAllocateObject() || (object != new_space_dominator);
}
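
    // Rough sketch of the reasoning behind ReceiverObjectNeedsWriteBarrier
    // (illustrative, with a hypothetical Instr type; not V8 code): a store can
    // skip the write barrier only when the receiver is known to still sit in
    // new space, i.e. it is the dominating new-space allocation on this path
    // and no instruction since then could have caused promotion.
    #include <cassert>

    struct Instr {
      bool is_allocation;  // produced by an inlined new-space allocation
    };

    // 'dominator' is the last dominating instruction carrying the
    // "changes new-space promotion" side effect.
    static bool NeedsWriteBarrier(const Instr* object, const Instr* dominator) {
      return !object->is_allocation || object != dominator;
    }

    int main() {
      Instr alloc = {true};
      Instr other = {false};
      assert(NeedsWriteBarrier(&alloc, &alloc) == false);  // fresh allocation, nothing intervened
      assert(NeedsWriteBarrier(&alloc, &other) == true);   // something intervened
      assert(NeedsWriteBarrier(&other, &other) == true);   // not an inlined allocation
      return 0;
    }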
class HStoreGlobalCell: public HUnaryOperation {
public:
HStoreGlobalCell(HValue* value,
@ -3759,7 +3894,8 @@ class HLoadNamedFieldPolymorphic: public HTemplateInstruction<2> {
HLoadNamedFieldPolymorphic(HValue* context,
HValue* object,
SmallMapList* types,
Handle<String> name);
Handle<String> name,
Zone* zone);
HValue* context() { return OperandAt(0); }
HValue* object() { return OperandAt(1); }
@ -3836,15 +3972,29 @@ class HLoadFunctionPrototype: public HUnaryOperation {
virtual bool DataEquals(HValue* other) { return true; }
};
class HLoadKeyedFastElement: public HTemplateInstruction<2> {
class ArrayInstructionInterface {
public:
enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
virtual HValue* GetKey() = 0;
virtual void SetKey(HValue* key) = 0;
virtual void SetIndexOffset(uint32_t index_offset) = 0;
virtual bool IsDehoisted() = 0;
virtual void SetDehoisted(bool is_dehoisted) = 0;
virtual ~ArrayInstructionInterface() { };
};
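
    // Sketch of what "dehoisting" an array index means and why this interface
    // exposes SetKey/SetIndexOffset/SetDehoisted (illustrative only; the real
    // pass is HGraph::DehoistSimpleArrayIndexComputations in hydrogen.cc, and
    // KeyedLoad below is a hypothetical stand-in for an HLoadKeyed* node).
    // A keyed access whose key is `index + constant` can keep `index` as the
    // key and fold the constant into the instruction's index_offset.
    #include <cassert>
    #include <cstdint>

    struct KeyedLoad {
      int key_id;             // which SSA value is used as the key
      uint32_t index_offset;
      bool is_dehoisted;
    };

    // key_id currently computes base_key_id + constant; rewrite in place.
    static void Dehoist(KeyedLoad* load, int base_key_id, uint32_t constant) {
      load->key_id = base_key_id;     // SetKey(index)
      load->index_offset = constant;  // SetIndexOffset(constant)
      load->is_dehoisted = true;      // SetDehoisted(true)
    }

    int main() {
      KeyedLoad load = {/*key_id=*/7 /* value of i + 4 */, 0, false};
      Dehoist(&load, /*base_key_id=*/3 /* value of i */, /*constant=*/4);
      assert(load.key_id == 3 && load.index_offset == 4 && load.is_dehoisted);
      return 0;
    }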
class HLoadKeyedFastElement
: public HTemplateInstruction<2>, public ArrayInstructionInterface {
public:
HLoadKeyedFastElement(HValue* obj,
HValue* key,
HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
: hole_check_mode_(hole_check_mode) {
ElementsKind elements_kind = FAST_ELEMENTS)
: bit_field_(0) {
ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
bit_field_ = ElementsKindField::encode(elements_kind);
if (IsFastSmiElementsKind(elements_kind) &&
IsFastPackedElementsKind(elements_kind)) {
set_type(HType::Smi());
}
SetOperandAt(0, obj);
SetOperandAt(1, key);
set_representation(Representation::Tagged());
@ -3854,6 +4004,19 @@ class HLoadKeyedFastElement: public HTemplateInstruction<2> {
HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
uint32_t index_offset() { return IndexOffsetField::decode(bit_field_); }
void SetIndexOffset(uint32_t index_offset) {
bit_field_ = IndexOffsetField::update(bit_field_, index_offset);
}
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return IsDehoistedField::decode(bit_field_); }
void SetDehoisted(bool is_dehoisted) {
bit_field_ = IsDehoistedField::update(bit_field_, is_dehoisted);
}
ElementsKind elements_kind() const {
return ElementsKindField::decode(bit_field_);
}
virtual Representation RequiredInputRepresentation(int index) {
// The key is supposed to be Integer32.
@ -3872,17 +4035,32 @@ class HLoadKeyedFastElement: public HTemplateInstruction<2> {
virtual bool DataEquals(HValue* other) {
if (!other->IsLoadKeyedFastElement()) return false;
HLoadKeyedFastElement* other_load = HLoadKeyedFastElement::cast(other);
return hole_check_mode_ == other_load->hole_check_mode_;
if (IsDehoisted() && index_offset() != other_load->index_offset())
return false;
return elements_kind() == other_load->elements_kind();
}
private:
HoleCheckMode hole_check_mode_;
class ElementsKindField: public BitField<ElementsKind, 0, 4> {};
class IndexOffsetField: public BitField<uint32_t, 4, 27> {};
class IsDehoistedField: public BitField<bool, 31, 1> {};
uint32_t bit_field_;
};
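
    // Minimal sketch of the BitField packing used by bit_field_ above (the real
    // template lives elsewhere in V8; this is a simplified reimplementation for
    // illustration). The layout matches the declarations in the diff: elements
    // kind in bits 0-3, dehoisted index offset in bits 4-30, dehoisted flag in
    // bit 31.
    #include <cassert>
    #include <cstdint>

    template <class T, int shift, int size>
    struct BitField {
      static uint32_t mask() { return ((1u << size) - 1u) << shift; }
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static uint32_t update(uint32_t previous, T value) {
        return (previous & ~mask()) | encode(value);
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & mask()) >> shift);
      }
    };

    typedef BitField<int, 0, 4>       ElementsKindField;  // up to 16 kinds
    typedef BitField<uint32_t, 4, 27> IndexOffsetField;   // dehoisted offset
    typedef BitField<bool, 31, 1>     IsDehoistedField;

    int main() {
      uint32_t bits = ElementsKindField::encode(2);  // some fast kind
      bits = IndexOffsetField::update(bits, 4);
      bits = IsDehoistedField::update(bits, true);
      assert(ElementsKindField::decode(bits) == 2);
      assert(IndexOffsetField::decode(bits) == 4u);
      assert(IsDehoistedField::decode(bits) == true);
      return 0;
    }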
class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> {
enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
class HLoadKeyedFastDoubleElement
: public HTemplateInstruction<2>, public ArrayInstructionInterface {
public:
HLoadKeyedFastDoubleElement(HValue* elements, HValue* key) {
HLoadKeyedFastDoubleElement(
HValue* elements,
HValue* key,
HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
: index_offset_(0),
is_dehoisted_(false),
hole_check_mode_(hole_check_mode) {
SetOperandAt(0, elements);
SetOperandAt(1, key);
set_representation(Representation::Double());
@ -3892,6 +4070,12 @@ class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> {
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
virtual Representation RequiredInputRepresentation(int index) {
// The key is supposed to be Integer32.
@ -3900,21 +4084,38 @@ class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> {
: Representation::Integer32();
}
bool RequiresHoleCheck() {
return hole_check_mode_ == PERFORM_HOLE_CHECK;
}
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement)
protected:
virtual bool DataEquals(HValue* other) { return true; }
virtual bool DataEquals(HValue* other) {
if (!other->IsLoadKeyedFastDoubleElement()) return false;
HLoadKeyedFastDoubleElement* other_load =
HLoadKeyedFastDoubleElement::cast(other);
return hole_check_mode_ == other_load->hole_check_mode_;
}
private:
uint32_t index_offset_;
bool is_dehoisted_;
HoleCheckMode hole_check_mode_;
};
class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> {
class HLoadKeyedSpecializedArrayElement
: public HTemplateInstruction<2>, public ArrayInstructionInterface {
public:
HLoadKeyedSpecializedArrayElement(HValue* external_elements,
HValue* key,
ElementsKind elements_kind)
: elements_kind_(elements_kind) {
: elements_kind_(elements_kind),
index_offset_(0),
is_dehoisted_(false) {
SetOperandAt(0, external_elements);
SetOperandAt(1, key);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
@ -3942,6 +4143,12 @@ class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> {
HValue* external_pointer() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
ElementsKind elements_kind() const { return elements_kind_; }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
virtual Range* InferRange(Zone* zone);
@ -3957,6 +4164,8 @@ class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> {
private:
ElementsKind elements_kind_;
uint32_t index_offset_;
bool is_dehoisted_;
};
@ -3995,9 +4204,12 @@ class HStoreNamedField: public HTemplateInstruction<2> {
int offset)
: name_(name),
is_in_object_(in_object),
offset_(offset) {
offset_(offset),
new_space_dominator_(NULL) {
SetOperandAt(0, obj);
SetOperandAt(1, val);
SetFlag(kTrackSideEffectDominators);
SetGVNFlag(kDependsOnNewSpacePromotion);
if (is_in_object_) {
SetGVNFlag(kChangesInobjectFields);
} else {
@ -4010,6 +4222,10 @@ class HStoreNamedField: public HTemplateInstruction<2> {
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
ASSERT(side_effect == kChangesNewSpacePromotion);
new_space_dominator_ = dominator;
}
virtual void PrintDataTo(StringStream* stream);
HValue* object() { return OperandAt(0); }
@ -4020,9 +4236,15 @@ class HStoreNamedField: public HTemplateInstruction<2> {
int offset() const { return offset_; }
Handle<Map> transition() const { return transition_; }
void set_transition(Handle<Map> map) { transition_ = map; }
HValue* new_space_dominator() const { return new_space_dominator_; }
bool NeedsWriteBarrier() {
return StoringValueNeedsWriteBarrier(value());
return StoringValueNeedsWriteBarrier(value()) &&
ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
}
bool NeedsWriteBarrierForMap() {
return ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
}
private:
@ -4030,6 +4252,7 @@ class HStoreNamedField: public HTemplateInstruction<2> {
bool is_in_object_;
int offset_;
Handle<Map> transition_;
HValue* new_space_dominator_;
};
@ -4068,11 +4291,12 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
};
class HStoreKeyedFastElement: public HTemplateInstruction<3> {
class HStoreKeyedFastElement
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val,
ElementsKind elements_kind = FAST_ELEMENTS)
: elements_kind_(elements_kind) {
: elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, val);
@ -4090,8 +4314,14 @@ class HStoreKeyedFastElement: public HTemplateInstruction<3> {
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
bool value_is_smi() {
return elements_kind_ == FAST_SMI_ONLY_ELEMENTS;
return IsFastSmiElementsKind(elements_kind_);
}
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
bool NeedsWriteBarrier() {
if (value_is_smi()) {
@ -4107,14 +4337,18 @@ class HStoreKeyedFastElement: public HTemplateInstruction<3> {
private:
ElementsKind elements_kind_;
uint32_t index_offset_;
bool is_dehoisted_;
};
class HStoreKeyedFastDoubleElement: public HTemplateInstruction<3> {
class HStoreKeyedFastDoubleElement
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
HStoreKeyedFastDoubleElement(HValue* elements,
HValue* key,
HValue* val) {
HValue* val)
: index_offset_(0), is_dehoisted_(false) {
SetOperandAt(0, elements);
SetOperandAt(1, key);
SetOperandAt(2, val);
@ -4134,24 +4368,37 @@ class HStoreKeyedFastDoubleElement: public HTemplateInstruction<3> {
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
bool NeedsWriteBarrier() {
return StoringValueNeedsWriteBarrier(value());
}
bool NeedsCanonicalization();
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement)
private:
uint32_t index_offset_;
bool is_dehoisted_;
};
class HStoreKeyedSpecializedArrayElement: public HTemplateInstruction<3> {
class HStoreKeyedSpecializedArrayElement
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
HStoreKeyedSpecializedArrayElement(HValue* external_elements,
HValue* key,
HValue* val,
ElementsKind elements_kind)
: elements_kind_(elements_kind) {
: elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
SetGVNFlag(kChangesSpecializedArrayElements);
SetOperandAt(0, external_elements);
SetOperandAt(1, key);
@ -4179,11 +4426,19 @@ class HStoreKeyedSpecializedArrayElement: public HTemplateInstruction<3> {
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
ElementsKind elements_kind() const { return elements_kind_; }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement)
private:
ElementsKind elements_kind_;
uint32_t index_offset_;
bool is_dehoisted_;
};
@ -4230,8 +4485,19 @@ class HTransitionElementsKind: public HTemplateInstruction<1> {
transitioned_map_(transitioned_map) {
SetOperandAt(0, object);
SetFlag(kUseGVN);
// Don't set GVN DependOn flags here. That would defeat GVN's detection of
// congruent HTransitionElementsKind instructions. Instruction hoisting
// handles HTransitionElementsKind instruction specially, explicitly adding
// DependsOn flags during its dependency calculations.
SetGVNFlag(kChangesElementsKind);
if (original_map->has_fast_double_elements()) {
SetGVNFlag(kChangesElementsPointer);
SetGVNFlag(kChangesNewSpacePromotion);
}
if (transitioned_map->has_fast_double_elements()) {
SetGVNFlag(kChangesElementsPointer);
SetGVNFlag(kChangesNewSpacePromotion);
}
set_representation(Representation::Tagged());
}
@ -4293,6 +4559,7 @@ class HStringCharCodeAt: public HTemplateInstruction<3> {
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
SetGVNFlag(kChangesNewSpacePromotion);
}
virtual Representation RequiredInputRepresentation(int index) {
@ -4324,6 +4591,7 @@ class HStringCharFromCode: public HTemplateInstruction<2> {
SetOperandAt(1, char_code);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kChangesNewSpacePromotion);
}
virtual Representation RequiredInputRepresentation(int index) {
@ -4376,8 +4644,12 @@ class HAllocateObject: public HTemplateInstruction<1> {
: constructor_(constructor) {
SetOperandAt(0, context);
set_representation(Representation::Tagged());
SetGVNFlag(kChangesNewSpacePromotion);
}
// Maximum instance size for which allocations will be inlined.
static const int kMaxSize = 64 * kPointerSize;
HValue* context() { return OperandAt(0); }
Handle<JSFunction> constructor() { return constructor_; }
@ -4421,6 +4693,7 @@ class HFastLiteral: public HMaterializedLiteral<1> {
boilerplate_(boilerplate),
total_size_(total_size) {
SetOperandAt(0, context);
SetGVNFlag(kChangesNewSpacePromotion);
}
// Maximum depth and total number of elements and properties for literal
@ -4456,12 +4729,13 @@ class HArrayLiteral: public HMaterializedLiteral<1> {
length_(length),
boilerplate_object_(boilerplate_object) {
SetOperandAt(0, context);
SetGVNFlag(kChangesNewSpacePromotion);
}
HValue* context() { return OperandAt(0); }
ElementsKind boilerplate_elements_kind() const {
if (!boilerplate_object_->IsJSObject()) {
return FAST_ELEMENTS;
return TERMINAL_FAST_ELEMENTS_KIND;
}
return Handle<JSObject>::cast(boilerplate_object_)->GetElementsKind();
}
@ -4496,6 +4770,7 @@ class HObjectLiteral: public HMaterializedLiteral<1> {
fast_elements_(fast_elements),
has_function_(has_function) {
SetOperandAt(0, context);
SetGVNFlag(kChangesNewSpacePromotion);
}
HValue* context() { return OperandAt(0); }
@ -4557,6 +4832,7 @@ class HFunctionLiteral: public HTemplateInstruction<1> {
: shared_info_(shared), pretenure_(pretenure) {
SetOperandAt(0, context);
set_representation(Representation::Tagged());
SetGVNFlag(kChangesNewSpacePromotion);
}
HValue* context() { return OperandAt(0); }

1829  deps/v8/src/hydrogen.cc  (file diff suppressed because it is too large)
1829  deps/v8/src/hydrogen.cc  (file diff suppressed because it is too large)

147  deps/v8/src/hydrogen.h

@ -42,6 +42,7 @@ namespace internal {
// Forward declarations.
class BitVector;
class FunctionState;
class HEnvironment;
class HGraph;
class HLoopInformation;
@ -76,7 +77,7 @@ class HBasicBlock: public ZoneObject {
return &deleted_phis_;
}
void RecordDeletedPhi(int merge_index) {
deleted_phis_.Add(merge_index);
deleted_phis_.Add(merge_index, zone());
}
HBasicBlock* dominator() const { return dominator_; }
HEnvironment* last_environment() const { return last_environment_; }
@ -121,7 +122,7 @@ class HBasicBlock: public ZoneObject {
void Finish(HControlInstruction* last);
void FinishExit(HControlInstruction* instruction);
void Goto(HBasicBlock* block, bool drop_extra = false);
void Goto(HBasicBlock* block, FunctionState* state = NULL);
int PredecessorIndexOf(HBasicBlock* predecessor) const;
void AddSimulate(int ast_id) { AddInstruction(CreateSimulate(ast_id)); }
@ -136,7 +137,7 @@ class HBasicBlock: public ZoneObject {
// instruction and updating the bailout environment.
void AddLeaveInlined(HValue* return_value,
HBasicBlock* target,
bool drop_extra = false);
FunctionState* state = NULL);
// If a target block is tagged as an inline function return, all
// predecessors should contain the inlined exit sequence:
@ -157,7 +158,7 @@ class HBasicBlock: public ZoneObject {
dominates_loop_successors_ = true;
}
inline Zone* zone();
inline Zone* zone() const;
#ifdef DEBUG
void Verify();
@ -211,12 +212,12 @@ class HPredecessorIterator BASE_EMBEDDED {
class HLoopInformation: public ZoneObject {
public:
explicit HLoopInformation(HBasicBlock* loop_header)
: back_edges_(4),
HLoopInformation(HBasicBlock* loop_header, Zone* zone)
: back_edges_(4, zone),
loop_header_(loop_header),
blocks_(8),
blocks_(8, zone),
stack_check_(NULL) {
blocks_.Add(loop_header);
blocks_.Add(loop_header, zone);
}
virtual ~HLoopInformation() {}
@ -240,13 +241,13 @@ class HLoopInformation: public ZoneObject {
HStackCheck* stack_check_;
};
class BoundsCheckTable;
class HGraph: public ZoneObject {
public:
explicit HGraph(CompilationInfo* info);
HGraph(CompilationInfo* info, Zone* zone);
Isolate* isolate() { return isolate_; }
Zone* zone() { return isolate_->zone(); }
Zone* zone() const { return zone_; }
const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
const ZoneList<HPhi*>* phi_list() const { return phi_list_; }
@ -265,6 +266,8 @@ class HGraph: public ZoneObject {
void OrderBlocks();
void AssignDominators();
void ReplaceCheckedValues();
void EliminateRedundantBoundsChecks();
void DehoistSimpleArrayIndexComputations();
void PropagateDeoptimizingMark();
// Returns false if there are phi-uses of the arguments-object
@ -277,7 +280,7 @@ class HGraph: public ZoneObject {
void CollectPhis();
Handle<Code> Compile(CompilationInfo* info);
Handle<Code> Compile(CompilationInfo* info, Zone* zone);
void set_undefined_constant(HConstant* constant) {
undefined_constant_.set(constant);
@ -301,7 +304,7 @@ class HGraph: public ZoneObject {
int GetMaximumValueID() const { return values_.length(); }
int GetNextBlockID() { return next_block_id_++; }
int GetNextValueID(HValue* value) {
values_.Add(value);
values_.Add(value, zone());
return values_.length() - 1;
}
HValue* LookupValue(int id) const {
@ -333,6 +336,14 @@ class HGraph: public ZoneObject {
osr_values_.set(values);
}
void MarkRecursive() {
is_recursive_ = true;
}
bool is_recursive() const {
return is_recursive_;
}
private:
void Postorder(HBasicBlock* block,
BitVector* visited,
@ -357,6 +368,7 @@ class HGraph: public ZoneObject {
void InferTypes(ZoneList<HValue*>* worklist);
void InitializeInferredTypes(int from_inclusive, int to_inclusive);
void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor);
void EliminateRedundantBoundsChecks(HBasicBlock* bb, BoundsCheckTable* table);
Isolate* isolate_;
int next_block_id_;
@ -376,11 +388,15 @@ class HGraph: public ZoneObject {
SetOncePointer<HBasicBlock> osr_loop_entry_;
SetOncePointer<ZoneList<HUnknownOSRValue*> > osr_values_;
Zone* zone_;
bool is_recursive_;
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
Zone* HBasicBlock::zone() { return graph_->zone(); }
Zone* HBasicBlock::zone() const { return graph_->zone(); }
// Type of stack frame an environment might refer to.
@ -391,7 +407,8 @@ class HEnvironment: public ZoneObject {
public:
HEnvironment(HEnvironment* outer,
Scope* scope,
Handle<JSFunction> closure);
Handle<JSFunction> closure,
Zone* zone);
HEnvironment* DiscardInlined(bool drop_extra) {
HEnvironment* outer = outer_;
@ -458,7 +475,7 @@ class HEnvironment: public ZoneObject {
void Push(HValue* value) {
ASSERT(value != NULL);
++push_count_;
values_.Add(value);
values_.Add(value, zone());
}
HValue* Pop() {
@ -515,13 +532,16 @@ class HEnvironment: public ZoneObject {
void PrintTo(StringStream* stream);
void PrintToStd();
Zone* zone() const { return zone_; }
private:
explicit HEnvironment(const HEnvironment* other);
HEnvironment(const HEnvironment* other, Zone* zone);
HEnvironment(HEnvironment* outer,
Handle<JSFunction> closure,
FrameType frame_type,
int arguments);
int arguments,
Zone* zone);
// Create an artificial stub environment (e.g. for argument adaptor or
// constructor stub).
@ -559,6 +579,7 @@ class HEnvironment: public ZoneObject {
int pop_count_;
int push_count_;
int ast_id_;
Zone* zone_;
};
@ -603,7 +624,7 @@ class AstContext {
HGraphBuilder* owner() const { return owner_; }
inline Zone* zone();
inline Zone* zone() const;
// We want to be able to assert, in a context-specific way, that the stack
// height makes sense when the context is filled.
@ -715,6 +736,16 @@ class FunctionState {
FunctionState* outer() { return outer_; }
HEnterInlined* entry() { return entry_; }
void set_entry(HEnterInlined* entry) { entry_ = entry; }
HArgumentsElements* arguments_elements() { return arguments_elements_; }
void set_arguments_elements(HArgumentsElements* arguments_elements) {
arguments_elements_ = arguments_elements;
}
bool arguments_pushed() { return arguments_elements() != NULL; }
private:
HGraphBuilder* owner_;
@ -741,6 +772,12 @@ class FunctionState {
// return blocks. NULL in all other cases.
TestContext* test_context_;
// When inlining HEnterInlined instruction corresponding to the function
// entry.
HEnterInlined* entry_;
HArgumentsElements* arguments_elements_;
FunctionState* outer_;
};
@ -801,7 +838,7 @@ class HGraphBuilder: public AstVisitor {
BreakAndContinueScope* next_;
};
HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle, Zone* zone);
HGraph* CreateGraph();
@ -851,15 +888,11 @@ class HGraphBuilder: public AstVisitor {
static const int kMaxLoadPolymorphism = 4;
static const int kMaxStorePolymorphism = 4;
static const int kMaxInlinedNodes = 196;
static const int kMaxInlinedSize = 196;
static const int kMaxSourceSize = 600;
// Even in the 'unlimited' case we have to have some limit in order not to
// overflow the stack.
static const int kUnlimitedMaxInlinedNodes = 1000;
static const int kUnlimitedMaxInlinedSize = 1000;
static const int kUnlimitedMaxSourceSize = 600;
static const int kUnlimitedMaxInlinedSourceSize = 100000;
static const int kUnlimitedMaxInlinedNodes = 10000;
static const int kUnlimitedMaxInlinedNodesCumulative = 10000;
// Simple accessors.
void set_function_state(FunctionState* state) { function_state_ = state; }
@ -896,11 +929,6 @@ class HGraphBuilder: public AstVisitor {
INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
#undef INLINE_FUNCTION_GENERATOR_DECLARATION
void HandleDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* function,
int* global_count);
void VisitDelete(UnaryOperation* expr);
void VisitVoid(UnaryOperation* expr);
void VisitTypeof(UnaryOperation* expr);
@ -994,11 +1022,13 @@ class HGraphBuilder: public AstVisitor {
LookupResult* lookup,
bool is_store);
void EnsureArgumentsArePushedForAccess();
bool TryArgumentsAccess(Property* expr);
// Try to optimize fun.apply(receiver, arguments) pattern.
bool TryCallApply(Call* expr);
int InliningAstSize(Handle<JSFunction> target);
bool TryInline(CallKind call_kind,
Handle<JSFunction> target,
ZoneList<Expression*>* arguments,
@ -1029,6 +1059,10 @@ class HGraphBuilder: public AstVisitor {
void HandlePropertyAssignment(Assignment* expr);
void HandleCompoundAssignment(Assignment* expr);
void HandlePolymorphicLoadNamedField(Property* expr,
HValue* object,
SmallMapList* types,
Handle<String> name);
void HandlePolymorphicStoreNamedField(Assignment* expr,
HValue* object,
HValue* value,
@ -1076,6 +1110,7 @@ class HGraphBuilder: public AstVisitor {
HInstruction* BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
HValue* dependency,
Handle<Map> map,
bool is_store);
HValue* HandlePolymorphicElementAccess(HValue* object,
@ -1126,7 +1161,7 @@ class HGraphBuilder: public AstVisitor {
Handle<Map> receiver_map,
bool smi_and_map_check);
Zone* zone() { return zone_; }
Zone* zone() const { return zone_; }
// The translation state of the currently-being-translated function.
FunctionState* function_state_;
@ -1145,6 +1180,7 @@ class HGraphBuilder: public AstVisitor {
HBasicBlock* current_block_;
int inlined_count_;
ZoneList<Handle<Object> > globals_;
Zone* zone_;
@ -1157,12 +1193,12 @@ class HGraphBuilder: public AstVisitor {
};
Zone* AstContext::zone() { return owner_->zone(); }
Zone* AstContext::zone() const { return owner_->zone(); }
class HValueMap: public ZoneObject {
public:
HValueMap()
explicit HValueMap(Zone* zone)
: array_size_(0),
lists_size_(0),
count_(0),
@ -1170,15 +1206,15 @@ class HValueMap: public ZoneObject {
array_(NULL),
lists_(NULL),
free_list_head_(kNil) {
ResizeLists(kInitialSize);
Resize(kInitialSize);
ResizeLists(kInitialSize, zone);
Resize(kInitialSize, zone);
}
void Kill(GVNFlagSet flags);
void Add(HValue* value) {
void Add(HValue* value, Zone* zone) {
present_flags_.Add(value->gvn_flags());
Insert(value);
Insert(value, zone);
}
HValue* Lookup(HValue* value) const;
@ -1202,9 +1238,9 @@ class HValueMap: public ZoneObject {
HValueMap(Zone* zone, const HValueMap* other);
void Resize(int new_size);
void ResizeLists(int new_size);
void Insert(HValue* value);
void Resize(int new_size, Zone* zone);
void ResizeLists(int new_size, Zone* zone);
void Insert(HValue* value, Zone* zone);
uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
int array_size_;
@ -1219,6 +1255,31 @@ class HValueMap: public ZoneObject {
};
class HSideEffectMap BASE_EMBEDDED {
public:
HSideEffectMap();
explicit HSideEffectMap(HSideEffectMap* other);
HSideEffectMap& operator= (const HSideEffectMap& other);
void Kill(GVNFlagSet flags);
void Store(GVNFlagSet flags, HInstruction* instr);
bool IsEmpty() const { return count_ == 0; }
inline HInstruction* operator[](int i) const {
ASSERT(0 <= i);
ASSERT(i < kNumberOfTrackedSideEffects);
return data_[i];
}
inline HInstruction* at(int i) const { return operator[](i); }
private:
int count_;
HInstruction* data_[kNumberOfTrackedSideEffects];
};
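
    // Illustrative sketch of what the side-effect map added above is for
    // (hypothetical types, simplified API; not V8's implementation): during GVN
    // each tracked side effect remembers the last dominating instruction that
    // produced it, so later instructions (e.g. HStoreNamedField) can be handed
    // their new-space-promotion dominator via SetSideEffectDominator.
    #include <cstring>

    static const int kNumberOfTrackedSideEffects = 4;  // assumed small constant

    struct Instr { int id; };

    class SideEffectMap {
     public:
      SideEffectMap() : count_(0) { std::memset(data_, 0, sizeof(data_)); }
      void Store(int effect, Instr* instr) {  // effect in [0, kNumberOfTracked...)
        if (data_[effect] == 0) ++count_;
        data_[effect] = instr;
      }
      void Kill(int effect) {                 // e.g. a call clobbers the entry
        if (data_[effect] != 0) --count_;
        data_[effect] = 0;
      }
      Instr* at(int effect) const { return data_[effect]; }
      bool IsEmpty() const { return count_ == 0; }
     private:
      int count_;
      Instr* data_[kNumberOfTrackedSideEffects];
    };

    int main() {
      SideEffectMap map;
      Instr alloc = {1};
      map.Store(/*kChangesNewSpacePromotion=*/0, &alloc);
      return (map.at(0) == &alloc && !map.IsEmpty()) ? 0 : 1;
    }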
class HStatistics: public Malloced {
public:
void Initialize(CompilationInfo* info);
@ -1332,7 +1393,7 @@ class HTracer: public Malloced {
WriteChars(filename, "", 0, false);
}
void TraceLiveRange(LiveRange* range, const char* type);
void TraceLiveRange(LiveRange* range, const char* type, Zone* zone);
void Trace(const char* name, HGraph* graph, LChunk* chunk);
void FlushToFile();

3  deps/v8/src/ia32/assembler-ia32.h

@ -640,6 +640,9 @@ class Assembler : public AssemblerBase {
static const byte kJccShortPrefix = 0x70;
static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
static const byte kJcShortOpcode = kJccShortPrefix | carry;
static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
static const byte kJzShortOpcode = kJccShortPrefix | zero;
// ---------------------------------------------------------------------------
// Code generation

20  deps/v8/src/ia32/builtins-ia32.cc

@ -831,7 +831,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Copy all arguments from the array to the stack.
Label entry, loop;
__ mov(eax, Operand(ebp, kIndexOffset));
__ mov(ecx, Operand(ebp, kIndexOffset));
__ jmp(&entry);
__ bind(&loop);
__ mov(edx, Operand(ebp, kArgumentsOffset)); // load arguments
@ -848,16 +848,17 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ push(eax);
// Update the index on the stack and in register eax.
__ mov(eax, Operand(ebp, kIndexOffset));
__ add(eax, Immediate(1 << kSmiTagSize));
__ mov(Operand(ebp, kIndexOffset), eax);
__ mov(ecx, Operand(ebp, kIndexOffset));
__ add(ecx, Immediate(1 << kSmiTagSize));
__ mov(Operand(ebp, kIndexOffset), ecx);
__ bind(&entry);
__ cmp(eax, Operand(ebp, kLimitOffset));
__ cmp(ecx, Operand(ebp, kLimitOffset));
__ j(not_equal, &loop);
// Invoke the function.
Label call_proxy;
__ mov(eax, ecx);
ParameterCount actual(eax);
__ SmiUntag(eax);
__ mov(edi, Operand(ebp, kFunctionOffset));
@ -899,7 +900,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
__ LoadInitialArrayMap(array_function, scratch2, scratch1);
__ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@ -1002,7 +1003,8 @@ static void AllocateJSArray(MacroAssembler* masm,
ASSERT(!fill_with_hole || array_size.is(ecx)); // rep stos count
ASSERT(!fill_with_hole || !result.is(eax)); // result is never eax
__ LoadInitialArrayMap(array_function, scratch, elements_array);
__ LoadInitialArrayMap(array_function, scratch,
elements_array, fill_with_hole);
// Allocate the JSArray object together with space for a FixedArray with the
// requested elements.
@ -1273,11 +1275,11 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ jmp(&prepare_generic_code_call);
__ bind(&not_double);
// Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
// Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
__ mov(ebx, Operand(esp, 0));
__ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(
FAST_SMI_ONLY_ELEMENTS,
FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
edi,
eax,

90  deps/v8/src/ia32/code-stubs-ia32.cc

@ -1681,6 +1681,11 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
}
// Input:
// edx: left operand (tagged)
// eax: right operand (tagged)
// Output:
// eax: result (tagged)
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label call_runtime;
ASSERT(operands_type_ == BinaryOpIC::INT32);
@ -1690,13 +1695,18 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV: {
case Token::DIV:
case Token::MOD: {
Label not_floats;
Label not_int32;
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
if (op_ == Token::MOD) {
GenerateRegisterArgsPush(masm);
__ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
} else {
switch (op_) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
@ -1715,6 +1725,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
GenerateHeapResultAllocation(masm, &call_runtime);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
}
} else { // SSE2 not available, use FPU.
FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
FloatingPointHelper::LoadFloatOperands(
@ -1722,6 +1733,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
ecx,
FloatingPointHelper::ARGS_IN_REGISTERS);
FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32);
if (op_ == Token::MOD) {
// The operands are now on the FPU stack, but we don't need them.
__ fstp(0);
__ fstp(0);
GenerateRegisterArgsPush(masm);
__ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
} else {
switch (op_) {
case Token::ADD: __ faddp(1); break;
case Token::SUB: __ fsubp(1); break;
@ -1734,9 +1752,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
__ bind(&after_alloc_failure);
__ ffree();
__ fstp(0); // Pop FPU stack before calling runtime.
__ jmp(&call_runtime);
}
}
__ bind(&not_floats);
__ bind(&not_int32);
@ -1744,10 +1763,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
break;
}
case Token::MOD: {
// For MOD we go directly to runtime in the non-smi case.
break;
}
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR:
@ -1758,11 +1773,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label not_floats;
Label not_int32;
Label non_smi_result;
/* {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
}*/
FloatingPointHelper::LoadUnknownsAsIntegers(masm,
use_sse3_,
&not_floats);
@ -1833,8 +1843,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
default: UNREACHABLE(); break;
}
// If an allocation fails, or SHR or MOD hit a hard case,
// use the runtime system to get the correct result.
// If an allocation fails, or SHR hits a hard case, use the runtime system to
// get the correct result.
__ bind(&call_runtime);
switch (op_) {
@ -1855,8 +1865,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
case Token::MOD:
GenerateRegisterArgsPush(masm);
__ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
break;
case Token::BIT_OR:
__ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
@ -1957,7 +1965,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
__ bind(&after_alloc_failure);
__ ffree();
__ fstp(0); // Pop FPU stack before calling runtime.
__ jmp(&call_runtime);
}
@ -2161,7 +2169,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
__ bind(&after_alloc_failure);
__ ffree();
__ fstp(0); // Pop FPU stack before calling runtime.
__ jmp(&call_runtime);
}
__ bind(&not_floats);
@ -3814,20 +3822,24 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(counters->regexp_entry_native(), 1);
// Isolates: note we add an additional parameter here (isolate pointer).
static const int kRegExpExecuteArguments = 8;
static const int kRegExpExecuteArguments = 9;
__ EnterApiExitFrame(kRegExpExecuteArguments);
// Argument 8: Pass current isolate address.
__ mov(Operand(esp, 7 * kPointerSize),
// Argument 9: Pass current isolate address.
__ mov(Operand(esp, 8 * kPointerSize),
Immediate(ExternalReference::isolate_address()));
// Argument 7: Indicate that this is a direct call from JavaScript.
__ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
// Argument 8: Indicate that this is a direct call from JavaScript.
__ mov(Operand(esp, 7 * kPointerSize), Immediate(1));
// Argument 6: Start (high end) of backtracking stack memory area.
// Argument 7: Start (high end) of backtracking stack memory area.
__ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
__ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
__ mov(Operand(esp, 5 * kPointerSize), esi);
__ mov(Operand(esp, 6 * kPointerSize), esi);
// Argument 6: Set the number of capture registers to zero to force global
// regexps to behave as non-global. This does not affect non-global regexps.
__ mov(Operand(esp, 5 * kPointerSize), Immediate(0));
// Argument 5: static offsets vector buffer.
__ mov(Operand(esp, 4 * kPointerSize),
@ -3890,7 +3902,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
__ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
__ cmp(eax, 1);
// We expect exactly one result since we force the called regexp to behave
// as non-global.
__ j(equal, &success);
Label failure;
__ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
@ -5006,11 +5020,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ j(not_equal, &not_outermost_js, Label::kNear);
__ mov(Operand::StaticVariable(js_entry_sp), ebp);
__ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
Label cont;
__ jmp(&cont, Label::kNear);
__ jmp(&invoke, Label::kNear);
__ bind(&not_outermost_js);
__ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
__ bind(&cont);
// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.
@ -6162,7 +6174,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ sub(ecx, edx);
__ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
Label not_original_string;
__ j(not_equal, &not_original_string, Label::kNear);
// Shorter than original string's length: an actual substring.
__ j(below, &not_original_string, Label::kNear);
// Longer than original string's length or negative: unsafe arguments.
__ j(above, &runtime);
// Return original string.
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
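
    // Sketch of the bounds condition the SubStringStub change above enforces
    // (illustrative helper, not the stub itself): with the span to - from
    // compared unsigned against the string length, "below" means a genuine
    // substring, "equal" means the whole string, and "above" (including
    // negative spans, which wrap to huge unsigned values) must fall back to
    // the runtime.
    #include <cassert>
    #include <cstdint>

    enum Outcome { SUBSTRING, ORIGINAL, RUNTIME };

    static Outcome Classify(int32_t from, int32_t to, uint32_t length) {
      uint32_t span = static_cast<uint32_t>(to - from);  // negative spans become huge
      if (span < length) return SUBSTRING;
      if (span == length) return ORIGINAL;
      return RUNTIME;
    }

    int main() {
      assert(Classify(1, 3, 5) == SUBSTRING);
      assert(Classify(0, 5, 5) == ORIGINAL);
      assert(Classify(3, 1, 5) == RUNTIME);  // to < from wraps around
      assert(Classify(0, 9, 5) == RUNTIME);
      return 0;
    }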
@ -7047,8 +7063,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET},
{ REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// ElementsTransitionGenerator::GenerateMapChangeElementTransition
// and ElementsTransitionGenerator::GenerateSmiToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET},
{ REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET},
@ -7320,9 +7336,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ CheckFastElements(edi, &double_elements);
// FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
// Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
__ JumpIfSmi(eax, &smi_element);
__ CheckFastSmiOnlyElements(edi, &fast_elements, Label::kNear);
__ CheckFastSmiElements(edi, &fast_elements, Label::kNear);
// Store into the array literal requires a elements transition. Call into
// the runtime.
@ -7344,7 +7360,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ pop(edx);
__ jmp(&slow_elements);
// Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
// Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
__ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
@ -7357,15 +7373,15 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
OMIT_SMI_CHECK);
__ ret(0);
// Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
// FAST_ELEMENTS, and value is Smi.
// Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
// and value is Smi.
__ bind(&smi_element);
__ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
__ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
FixedArrayBase::kHeaderSize), eax);
__ ret(0);
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
// Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ push(edx);

22  deps/v8/src/ia32/codegen-ia32.cc

@ -351,7 +351,7 @@ OS::MemCopyFunction CreateMemCopyFunction() {
#define __ ACCESS_MASM(masm)
void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
@ -372,7 +372,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
}
void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, Label* fail) {
// ----------- S t a t e -------------
// -- eax : value
@ -397,9 +397,25 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
// Allocate new FixedDoubleArray.
// edx: receiver
// edi: length of source FixedArray (smi-tagged)
__ lea(esi, Operand(edi, times_4, FixedDoubleArray::kHeaderSize));
__ lea(esi, Operand(edi,
times_4,
FixedDoubleArray::kHeaderSize + kPointerSize));
__ AllocateInNewSpace(esi, eax, ebx, no_reg, &gc_required, TAG_OBJECT);
Label aligned, aligned_done;
__ test(eax, Immediate(kDoubleAlignmentMask - kHeapObjectTag));
__ j(zero, &aligned, Label::kNear);
__ mov(FieldOperand(eax, 0),
Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
__ add(eax, Immediate(kPointerSize));
__ jmp(&aligned_done);
__ bind(&aligned);
__ mov(Operand(eax, esi, times_1, -kPointerSize-1),
Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
__ bind(&aligned_done);
// eax: destination FixedDoubleArray
// edi: number of elements
// edx: receiver

39  deps/v8/src/ia32/debug-ia32.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -91,9 +91,11 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
}
// All debug break stubs support padding for LiveEdit.
const bool Debug::FramePaddingLayout::kIsSupported = true;
#define __ ACCESS_MASM(masm)
#define __ ACCESS_MASM(masm)
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
@ -103,6 +105,13 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Load padding words on stack.
for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) {
__ push(Immediate(Smi::FromInt(
Debug::FramePaddingLayout::kPaddingValue)));
}
__ push(Immediate(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize)));
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non object values
// are stored as a smi causing it to be untouched by GC.
@ -134,6 +143,10 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
CEntryStub ceb(1);
__ CallStub(&ceb);
// Automatically find register that could be used after register restore.
// We need one register for padding skip instructions.
Register unused_reg = { -1 };
// Restore the register values containing object pointers from the
// expression stack.
for (int i = kNumJSCallerSaved; --i >= 0;) {
@ -142,14 +155,28 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
if (FLAG_debug_code) {
__ Set(reg, Immediate(kDebugZapValue));
}
bool taken = reg.code() == esi.code();
if ((object_regs & (1 << r)) != 0) {
__ pop(reg);
taken = true;
}
if ((non_object_regs & (1 << r)) != 0) {
__ pop(reg);
__ SmiUntag(reg);
taken = true;
}
if (!taken) {
unused_reg = reg;
}
}
ASSERT(unused_reg.code() != -1);
// Read current padding counter and skip corresponding number of words.
__ pop(unused_reg);
// We divide stored value by 2 (untagging) and multiply it by word's size.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
__ lea(esp, Operand(esp, unused_reg, times_half_pointer_size, 0));
// Get rid of the internal frame.
}
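
    // Worked example of the padding-skip arithmetic in the comment above
    // (assumptions: ia32 with kPointerSize == 4 and smis tagged as value << 1).
    // The padding counter was pushed as a smi, so scaling it by half a pointer
    // size, as the lea does, advances esp by exactly count * 4 bytes.
    #include <cassert>
    #include <cstdint>

    int main() {
      const int kPointerSize = 4;
      const int kSmiTagSize = 1;
      int padding_words = 5;                              // hypothetical kInitialSize
      int32_t smi = padding_words << kSmiTagSize;         // value found on the stack
      int32_t esp_adjustment = smi * (kPointerSize / 2);  // lea esp, [esp + reg * 2]
      assert(esp_adjustment == padding_words * kPointerSize);
      return 0;
    }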
@ -172,10 +199,10 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Register state for IC load call (from ic-ia32.cc).
// ----------- S t a t e -------------
// -- eax : receiver
// -- ecx : name
// -- edx : receiver
// -----------------------------------
Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), 0, false);
Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
}
@ -194,10 +221,10 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC load call (from ic-ia32.cc).
// ----------- S t a t e -------------
// -- ecx : key
// -- edx : receiver
// -- eax : key
// -----------------------------------
Generate_DebugBreakCallHelper(masm, eax.bit() | edx.bit(), 0, false);
Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
}

98  deps/v8/src/ia32/deoptimizer-ia32.cc

@ -239,13 +239,13 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
// ok:
if (FLAG_count_based_interrupts) {
ASSERT_EQ(*(call_target_address - 3), kJnsInstruction);
ASSERT_EQ(*(call_target_address - 2), kJnsOffset);
ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
} else {
ASSERT_EQ(*(call_target_address - 3), kJaeInstruction);
ASSERT_EQ(*(call_target_address - 2), kJaeOffset);
ASSERT_EQ(kJaeInstruction, *(call_target_address - 3));
ASSERT_EQ(kJaeOffset, *(call_target_address - 2));
}
ASSERT_EQ(*(call_target_address - 1), kCallInstruction);
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
*(call_target_address - 3) = kNopByteOne;
*(call_target_address - 2) = kNopByteTwo;
Assembler::set_target_address_at(call_target_address,
@ -266,9 +266,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
// Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
// restore the conditional branch.
ASSERT_EQ(*(call_target_address - 3), kNopByteOne);
ASSERT_EQ(*(call_target_address - 2), kNopByteTwo);
ASSERT_EQ(*(call_target_address - 1), kCallInstruction);
ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
if (FLAG_count_based_interrupts) {
*(call_target_address - 3) = kJnsInstruction;
*(call_target_address - 2) = kJnsOffset;
@ -351,10 +351,12 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
reinterpret_cast<intptr_t>(function_));
function_->PrintName();
PrintF(" => node=%u, frame=%d->%d]\n",
PrintF(" => node=%u, frame=%d->%d, ebp:esp=0x%08x:0x%08x]\n",
ast_id,
input_frame_size,
output_frame_size);
output_frame_size,
input_->GetRegister(ebp.code()),
input_->GetRegister(esp.code()));
}
// There's only one output frame in the OSR case.
@ -404,7 +406,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
name = "function";
break;
}
PrintF(" [esp + %d] <- 0x%08x ; [esp + %d] (fixed part - %s)\n",
PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
output_offset,
input_value,
input_offset,
@ -415,6 +417,24 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_offset -= kPointerSize;
}
// All OSR stack frames are dynamically aligned to an 8-byte boundary.
int frame_pointer = input_->GetRegister(ebp.code());
if ((frame_pointer & kPointerSize) != 0) {
frame_pointer -= kPointerSize;
has_alignment_padding_ = 1;
}
int32_t alignment_state = (has_alignment_padding_ == 1) ?
kAlignmentPaddingPushed :
kNoAlignmentPadding;
if (FLAG_trace_osr) {
PrintF(" [sp + %d] <- 0x%08x ; (alignment state)\n",
output_offset,
alignment_state);
}
output_[0]->SetFrameSlot(output_offset, alignment_state);
output_offset -= kPointerSize;
// Translate the rest of the frame.
while (ok && input_offset >= 0) {
ok = DoOsrTranslateCommand(&iterator, &input_offset);
@ -427,7 +447,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
// Set up the frame pointer and the context pointer.
output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
output_[0]->SetRegister(ebp.code(), frame_pointer);
output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
unsigned pc_offset = data->OsrPcOffset()->value();
@ -688,24 +708,38 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
ASSERT(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
// Compute the incoming parameter translation.
int parameter_count = function->shared()->formal_parameter_count() + 1;
unsigned output_offset = output_frame_size;
unsigned input_offset = input_frame_size;
unsigned alignment_state_offset =
input_offset - parameter_count * kPointerSize -
StandardFrameConstants::kFixedFrameSize -
kPointerSize;
ASSERT(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
JavaScriptFrameConstants::kLocal0Offset);
// The top address for the bottommost output frame can be computed from
// the input frame pointer and the output frame's height. For all
// subsequent output frames, it can be computed from the previous one's
// top address and the current frame's size.
uint32_t top_address;
if (is_bottommost) {
int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
has_alignment_padding_ =
(alignment_state == kAlignmentPaddingPushed) ? 1 : 0;
// 2 = context and function in the frame.
top_address =
input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes;
// If the optimized frame had alignment padding, adjust the frame pointer
// to point to the new position of the old frame pointer after padding
// is removed. Subtract 2 * kPointerSize for the context and function slots.
top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
height_in_bytes + has_alignment_padding_ * kPointerSize;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
output_frame->SetTop(top_address);
// Compute the incoming parameter translation.
int parameter_count = function->shared()->formal_parameter_count() + 1;
unsigned output_offset = output_frame_size;
unsigned input_offset = input_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
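For the bottommost frame, the alignment state is read back from the slot just below the fixed frame part, i.e. local slot 0 (the ASSERT above ties kDynamicAlignmentStateOffset to kLocal0Offset). A rough worked example of the offset arithmetic, with an assumed four-word fixed frame and hypothetical sizes:

// Sketch (not V8 code) of the alignment_state_offset computation.
#include <cstdio>

int main() {
  const int kPointerSize = 4;                    // ia32
  const int kFixedFrameSize = 4 * kPointerSize;  // assumed: pc, fp, context, marker
  int parameter_count = 2 + 1;                   // two formals plus the receiver
  int input_frame_size = 64;                     // hypothetical frame size in bytes
  int alignment_state_offset =
      input_frame_size - parameter_count * kPointerSize - kFixedFrameSize -
      kPointerSize;
  std::printf("alignment state slot at frame offset %d\n",
              alignment_state_offset);
  return 0;
}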
@@ -747,13 +781,17 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
}
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value);
ASSERT(!is_bottommost ||
(input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize) ==
fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
fp_value, output_offset, value);
}
ASSERT(!is_bottommost || !has_alignment_padding_ ||
(fp_value & kPointerSize) != 0);
// For the bottommost output frame the context can be gotten from the input
// frame. For all subsequent output frames it can be gotten from the function
@@ -948,6 +986,28 @@ void Deoptimizer::EntryGenerator::Generate() {
}
__ pop(eax);
if (type() != OSR) {
// If frame was dynamically aligned, pop padding.
Label no_padding;
__ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
Immediate(0));
__ j(equal, &no_padding);
__ pop(ecx);
if (FLAG_debug_code) {
__ cmp(ecx, Immediate(kAlignmentZapValue));
__ Assert(equal, "alignment marker expected");
}
__ bind(&no_padding);
} else {
// If frame needs dynamic alignment push padding.
Label no_padding;
__ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
Immediate(0));
__ j(equal, &no_padding);
__ push(Immediate(kAlignmentZapValue));
__ bind(&no_padding);
}
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop;
// Outer loop state: eax = current FrameDescription**, edx = one past the
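The generated entry code above removes the padding word on a normal deopt (checking it against kAlignmentZapValue in debug builds) and inserts one on OSR entry when the new frame needs alignment. In plain C++ terms the control flow is roughly this (a sketch, not the generated code):

// Sketch of the padding pop/push done by the deoptimizer entry stub.
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  const uint32_t kAlignmentZapValue = 0x12345678;
  std::vector<uint32_t> stack = {kAlignmentZapValue, 0xdeadbeef};
  bool has_alignment_padding = true;  // read from the Deoptimizer instance
  bool is_osr = false;

  if (!is_osr) {
    if (has_alignment_padding) {
      uint32_t marker = stack.front();       // __ pop(ecx)
      stack.erase(stack.begin());
      assert(marker == kAlignmentZapValue);  // "alignment marker expected"
    }
  } else if (has_alignment_padding) {
    stack.insert(stack.begin(), kAlignmentZapValue);  // __ push(Immediate(...))
  }
  return 0;
}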

6  deps/v8/src/ia32/frames-ia32.h

@@ -53,6 +53,10 @@ typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
// Number of registers for which space is reserved in safepoints.
const int kNumSafepointRegisters = 8;
const int kNoAlignmentPadding = 0;
const int kAlignmentPaddingPushed = 2;
const int kAlignmentZapValue = 0x12345678; // Not heap object tagged.
// ----------------------------------------------------
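The "Not heap object tagged" comment refers to V8's pointer tagging: heap object pointers carry a low tag bit of 1, so a filler word with a clear low bit reads as a smi and is never followed by the GC. A one-line check of that property (a sketch; kHeapObjectTag assumed to be 1, as in V8's usual tagging scheme):

// Sketch: the zap value is safe to leave on the stack because it is smi-tagged.
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kAlignmentZapValue = 0x12345678;
  const uint32_t kHeapObjectTag = 1;  // assumed V8 tagging convention
  assert((kAlignmentZapValue & kHeapObjectTag) == 0);  // looks like a smi
  return 0;
}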
@@ -119,6 +123,8 @@ class JavaScriptFrameConstants : public AllStatic {
// Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
static const int kReceiverOffset = -1 * kPointerSize;
static const int kDynamicAlignmentStateOffset = kLocal0Offset;
};

307  deps/v8/src/ia32/full-codegen-ia32.cc

@@ -101,13 +101,6 @@ class JumpPatchSite BASE_EMBEDDED {
};
// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
int FullCodeGenerator::self_optimization_header_size() {
UNREACHABLE();
return 13;
}
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right, with the
// return address on top of them. The actual argument count matches the
@@ -269,11 +262,11 @@ void FullCodeGenerator::Generate() {
// For named function expressions, declare the function name as a
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableProxy* proxy = scope()->function();
ASSERT(proxy->var()->mode() == CONST ||
proxy->var()->mode() == CONST_HARMONY);
ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
EmitDeclaration(proxy, proxy->var()->mode(), NULL);
VariableDeclaration* function = scope()->function();
ASSERT(function->proxy()->var()->mode() == CONST ||
function->proxy()->var()->mode() == CONST_HARMONY);
ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
}
VisitDeclarations(scope()->declarations());
}
@@ -763,60 +756,51 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
}
void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* function) {
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
// Check that we're not inside a with or catch context.
__ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
__ cmp(ebx, isolate()->factory()->with_context_map());
__ Check(not_equal, "Declaration in with context.");
__ cmp(ebx, isolate()->factory()->catch_context_map());
__ Check(not_equal, "Declaration in catch context.");
}
}
void FullCodeGenerator::VisitVariableDeclaration(
VariableDeclaration* declaration) {
// If it was not possible to allocate the variable at compile time, we
// need to "declare" it at runtime to make sure it actually exists in the
// local context.
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
bool binding_needs_init = (function == NULL) &&
(mode == CONST || mode == CONST_HARMONY || mode == LET);
bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
switch (variable->location()) {
case Variable::UNALLOCATED:
++global_count_;
globals_->Add(variable->name(), zone());
globals_->Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
: isolate()->factory()->undefined_value(), zone());
break;
case Variable::PARAMETER:
case Variable::LOCAL:
if (function != NULL) {
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ mov(StackOperand(variable), result_register());
} else if (binding_needs_init) {
Comment cmnt(masm_, "[ Declaration");
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ mov(StackOperand(variable),
Immediate(isolate()->factory()->the_hole_value()));
}
break;
case Variable::CONTEXT:
// The variable in the decl always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
// Check that we're not inside a with or catch context.
__ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
__ cmp(ebx, isolate()->factory()->with_context_map());
__ Check(not_equal, "Declaration in with context.");
__ cmp(ebx, isolate()->factory()->catch_context_map());
__ Check(not_equal, "Declaration in catch context.");
}
if (function != NULL) {
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ mov(ContextOperand(esi, variable->index()), result_register());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(esi,
Context::SlotOffset(variable->index()),
result_register(),
ecx,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
} else if (binding_needs_init) {
Comment cmnt(masm_, "[ Declaration");
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ mov(ContextOperand(esi, variable->index()),
Immediate(isolate()->factory()->the_hole_value()));
// No write barrier since the hole value is in old space.
@@ -825,14 +809,12 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
break;
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ Declaration");
Comment cmnt(masm_, "[ VariableDeclaration");
__ push(esi);
__ push(Immediate(variable->name()));
// Declaration nodes are always introduced in one of four modes.
ASSERT(mode == VAR ||
mode == CONST ||
mode == CONST_HARMONY ||
mode == LET);
// VariableDeclaration nodes are always introduced in one of four modes.
ASSERT(mode == VAR || mode == LET ||
mode == CONST || mode == CONST_HARMONY);
PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
? READ_ONLY : NONE;
__ push(Immediate(Smi::FromInt(attr)));
@@ -840,9 +822,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (function != NULL) {
VisitForStackValue(function);
} else if (binding_needs_init) {
if (hole_init) {
__ push(Immediate(isolate()->factory()->the_hole_value()));
} else {
__ push(Immediate(Smi::FromInt(0))); // Indicates no initial value.
@@ -854,6 +834,118 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
}
void FullCodeGenerator::VisitFunctionDeclaration(
FunctionDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED: {
globals_->Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
Compiler::BuildFunctionInfo(declaration->fun(), script());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_->Add(function, zone());
break;
}
case Variable::PARAMETER:
case Variable::LOCAL: {
Comment cmnt(masm_, "[ FunctionDeclaration");
VisitForAccumulatorValue(declaration->fun());
__ mov(StackOperand(variable), result_register());
break;
}
case Variable::CONTEXT: {
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
__ mov(ContextOperand(esi, variable->index()), result_register());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(esi,
Context::SlotOffset(variable->index()),
result_register(),
ecx,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
break;
}
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
__ push(esi);
__ push(Immediate(variable->name()));
__ push(Immediate(Smi::FromInt(NONE)));
VisitForStackValue(declaration->fun());
__ CallRuntime(Runtime::kDeclareContextSlot, 4);
break;
}
}
}
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
Handle<JSModule> instance = declaration->module()->interface()->Instance();
ASSERT(!instance.is_null());
switch (variable->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ ModuleDeclaration");
globals_->Add(variable->name(), zone());
globals_->Add(instance, zone());
Visit(declaration->module());
break;
}
case Variable::CONTEXT: {
Comment cmnt(masm_, "[ ModuleDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ mov(ContextOperand(esi, variable->index()), Immediate(instance));
Visit(declaration->module());
break;
}
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::LOOKUP:
UNREACHABLE();
}
}
void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED:
// TODO(rossberg)
break;
case Variable::CONTEXT: {
Comment cmnt(masm_, "[ ImportDeclaration");
EmitDebugCheckDeclarationContext(variable);
// TODO(rossberg)
break;
}
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::LOOKUP:
UNREACHABLE();
}
}
void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
// TODO(rossberg)
}
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ push(esi); // The context is the first argument.
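In the UNALLOCATED cases of the new declaration visitors above, each declaration appends a (name, initial value) pair to globals_: the hole or undefined for plain variables, a SharedFunctionInfo for function declarations, and a module instance for module declarations; DeclareGlobals then hands the flat pair array to the runtime. A hypothetical illustration of that pairing (names and stand-in values made up):

// Sketch (not V8 code): the flat (name, value) pair list built by the visitors.
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<std::string, std::string>> globals = {
      {"x", "the_hole"},               // VariableDeclaration needing hole init
      {"y", "undefined"},              // plain var
      {"f", "<SharedFunctionInfo f>"}  // FunctionDeclaration
  };
  // DeclareGlobals-style walk: two logical entries per declaration.
  for (const auto& pair : globals)
    std::printf("%s -> %s\n", pair.first.c_str(), pair.second.c_str());
  return 0;
}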
@@ -1194,7 +1286,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
// All extension objects were empty and it is safe to use a global
// load IC call.
__ mov(eax, GlobalObjectOperand());
__ mov(edx, GlobalObjectOperand());
__ mov(ecx, var->name());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
@@ -1278,7 +1370,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object in eax.
__ mov(eax, GlobalObjectOperand());
__ mov(edx, GlobalObjectOperand());
__ mov(ecx, var->name());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
@@ -1465,7 +1557,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code is emitted.
expr->CalculateEmitStore();
expr->CalculateEmitStore(zone());
AccessorTable accessor_table(isolate()->zone());
for (int i = 0; i < expr->properties()->length(); i++) {
@@ -1557,7 +1649,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS;
bool has_constant_fast_elements =
IsFastObjectElementsKind(constant_elements_kind);
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
@@ -1568,7 +1661,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Heap* heap = isolate()->heap();
if (has_constant_fast_elements &&
constant_elements_values->map() == heap->fixed_cow_array_map()) {
// If the elements are already FAST_ELEMENTS, the boilerplate cannot
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
FastCloneShallowArrayStub stub(
@@ -1580,10 +1673,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
ASSERT(constant_elements_kind == FAST_ELEMENTS ||
constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
// If the elements are already FAST_ELEMENTS, the boilerplate cannot
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
? FastCloneShallowArrayStub::CLONE_ELEMENTS
@@ -1611,9 +1703,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
if (constant_elements_kind == FAST_ELEMENTS) {
// Fast-case array literal with ElementsKind of FAST_ELEMENTS, they cannot
// transition and don't need to call the runtime stub.
if (IsFastObjectElementsKind(constant_elements_kind)) {
// Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
// cannot transition and don't need to call the runtime stub.
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ mov(ebx, Operand(esp, 0)); // Copy of array literal.
__ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
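These array-literal hunks replace exact FAST_ELEMENTS comparisons with the ElementsKind predicates introduced by this upgrade's packed/holey split. Roughly, the predicates behave like the following sketch (enum names and ordering assumed here, not the actual elements-kind.h):

// Sketch of the widened element-kind checks used above.
#include <cassert>

enum ElementsKind {
  FAST_SMI_ELEMENTS,
  FAST_HOLEY_SMI_ELEMENTS,
  FAST_ELEMENTS,
  FAST_HOLEY_ELEMENTS,
  FAST_DOUBLE_ELEMENTS
};

static bool IsFastObjectElementsKind(ElementsKind kind) {
  return kind == FAST_ELEMENTS || kind == FAST_HOLEY_ELEMENTS;
}

static bool IsFastSmiOrObjectElementsKind(ElementsKind kind) {
  return kind == FAST_SMI_ELEMENTS || kind == FAST_HOLEY_SMI_ELEMENTS ||
         IsFastObjectElementsKind(kind);
}

int main() {
  assert(IsFastObjectElementsKind(FAST_HOLEY_ELEMENTS));
  assert(!IsFastObjectElementsKind(FAST_DOUBLE_ELEMENTS));
  assert(IsFastSmiOrObjectElementsKind(FAST_SMI_ELEMENTS));
  return 0;
}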
@@ -1672,9 +1764,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
break;
case NAMED_PROPERTY:
if (expr->is_compound()) {
// We need the receiver both on the stack and in the accumulator.
VisitForAccumulatorValue(property->obj());
__ push(result_register());
// We need the receiver both on the stack and in edx.
VisitForStackValue(property->obj());
__ mov(edx, Operand(esp, 0));
} else {
VisitForStackValue(property->obj());
}
@@ -1682,9 +1774,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case KEYED_PROPERTY: {
if (expr->is_compound()) {
VisitForStackValue(property->obj());
VisitForAccumulatorValue(property->key());
__ mov(edx, Operand(esp, 0));
__ push(eax);
VisitForStackValue(property->key());
__ mov(edx, Operand(esp, kPointerSize)); // Object.
__ mov(ecx, Operand(esp, 0)); // Key.
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1927,7 +2019,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(ecx, eax);
__ pop(edx);
__ pop(edx); // Receiver.
__ pop(eax); // Restore value.
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
@@ -2033,6 +2125,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a named store IC.
// eax : value
// esp[0] : receiver
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
ASSERT(prop->key()->AsLiteral() != NULL);
@@ -2075,6 +2170,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
// eax : value
// esp[0] : key
// esp[kPointerSize] : receiver
// If the assignment starts a block of assignments to the same object,
// change to slow case to avoid the quadratic behavior of repeatedly
@@ -2087,7 +2185,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ pop(result_register());
}
__ pop(ecx);
__ pop(ecx); // Key.
if (expr->ends_initialization_block()) {
__ mov(edx, Operand(esp, 0)); // Leave receiver on the stack for later.
} else {
@@ -2120,12 +2218,14 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
if (key->IsPropertyName()) {
VisitForAccumulatorValue(expr->obj());
__ mov(edx, result_register());
EmitNamedPropertyLoad(expr);
context()->Plug(eax);
} else {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
__ pop(edx);
__ pop(edx); // Object.
__ mov(ecx, result_register()); // Key.
EmitKeyedPropertyLoad(expr);
context()->Plug(eax);
}
@@ -3924,15 +4024,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ push(Immediate(Smi::FromInt(0)));
}
if (assign_type == NAMED_PROPERTY) {
// Put the object both on the stack and in the accumulator.
// Put the object both on the stack and in edx.
VisitForAccumulatorValue(prop->obj());
__ push(eax);
__ mov(edx, eax);
EmitNamedPropertyLoad(prop);
} else {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(edx, Operand(esp, 0));
__ push(eax);
VisitForStackValue(prop->key());
__ mov(edx, Operand(esp, kPointerSize)); // Object.
__ mov(ecx, Operand(esp, 0)); // Key.
EmitKeyedPropertyLoad(prop);
}
}
@@ -4079,7 +4180,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "Global variable");
__ mov(eax, GlobalObjectOperand());
__ mov(edx, GlobalObjectOperand());
__ mov(ecx, Immediate(proxy->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
@@ -4344,7 +4445,8 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
Scope* declaration_scope = scope()->DeclarationScope();
if (declaration_scope->is_global_scope()) {
if (declaration_scope->is_global_scope() ||
declaration_scope->is_module_scope()) {
// Contexts nested in the global context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
@@ -4374,14 +4476,49 @@ void FullCodeGenerator::EnterFinallyBlock() {
STATIC_ASSERT(kSmiTag == 0);
__ SmiTag(edx);
__ push(edx);
// Store result register while executing finally block.
__ push(result_register());
// Store pending message while executing finally block.
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(edx, Operand::StaticVariable(pending_message_obj));
__ push(edx);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ mov(edx, Operand::StaticVariable(has_pending_message));
__ push(edx);
ExternalReference pending_message_script =
ExternalReference::address_of_pending_message_script(isolate());
__ mov(edx, Operand::StaticVariable(pending_message_script));
__ push(edx);
}
void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(edx));
// Restore pending message from stack.
__ pop(edx);
ExternalReference pending_message_script =
ExternalReference::address_of_pending_message_script(isolate());
__ mov(Operand::StaticVariable(pending_message_script), edx);
__ pop(edx);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ mov(Operand::StaticVariable(has_pending_message), edx);
__ pop(edx);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(Operand::StaticVariable(pending_message_obj), edx);
// Restore result register from stack.
__ pop(result_register());
// Uncook return address.
__ pop(edx);
__ SmiUntag(edx);
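EnterFinallyBlock now also saves the pending message object, flag, and script around the finally body, and ExitFinallyBlock restores them in the reverse push order so each value returns to its own slot. A minimal sketch of that LIFO pairing (plain C++, not the generated code):

// Sketch of the pending-message save/restore ordering above.
#include <cassert>
#include <cstdint>
#include <stack>

int main() {
  uint32_t pending_message_obj = 1, has_pending_message = 2,
           pending_message_script = 3;

  std::stack<uint32_t> s;             // stands in for the machine stack
  s.push(pending_message_obj);        // EnterFinallyBlock push order
  s.push(has_pending_message);
  s.push(pending_message_script);

  uint32_t script = s.top(); s.pop();  // ExitFinallyBlock pop order
  uint32_t has = s.top(); s.pop();
  uint32_t obj = s.top(); s.pop();
  assert(script == 3 && has == 2 && obj == 1);
  return 0;
}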
