
V8: Upgrade to 3.13.7.1

Tag: v0.9.3-release
Authored by isaacs (12 years ago); committed by Bert Belder.
Commit 3411a03dd1
  1. deps/v8/.gitignore (5)
  2. deps/v8/AUTHORS (3)
  3. deps/v8/ChangeLog (327)
  4. deps/v8/Makefile (89)
  5. deps/v8/Makefile.android (92)
  6. deps/v8/build/android.gypi (84)
  7. deps/v8/build/common.gypi (62)
  8. deps/v8/build/standalone.gypi (11)
  9. deps/v8/include/v8-debug.h (10)
  10. deps/v8/include/v8-preparser.h (7)
  11. deps/v8/include/v8-profiler.h (27)
  12. deps/v8/include/v8-testing.h (7)
  13. deps/v8/include/v8.h (132)
  14. deps/v8/preparser/preparser-process.cc (6)
  15. deps/v8/samples/lineprocessor.cc (38)
  16. deps/v8/samples/process.cc (14)
  17. deps/v8/samples/shell.cc (2)
  18. deps/v8/src/SConscript (29)
  19. deps/v8/src/accessors.cc (69)
  20. deps/v8/src/accessors.h (4)
  21. deps/v8/src/api.cc (243)
  22. deps/v8/src/api.h (139)
  23. deps/v8/src/arm/assembler-arm-inl.h (5)
  24. deps/v8/src/arm/assembler-arm.cc (171)
  25. deps/v8/src/arm/assembler-arm.h (48)
  26. deps/v8/src/arm/builtins-arm.cc (65)
  27. deps/v8/src/arm/code-stubs-arm.cc (372)
  28. deps/v8/src/arm/code-stubs-arm.h (16)
  29. deps/v8/src/arm/codegen-arm.cc (37)
  30. deps/v8/src/arm/constants-arm.h (5)
  31. deps/v8/src/arm/deoptimizer-arm.cc (174)
  32. deps/v8/src/arm/full-codegen-arm.cc (235)
  33. deps/v8/src/arm/ic-arm.cc (2)
  34. deps/v8/src/arm/lithium-arm.cc (237)
  35. deps/v8/src/arm/lithium-arm.h (136)
  36. deps/v8/src/arm/lithium-codegen-arm.cc (503)
  37. deps/v8/src/arm/lithium-codegen-arm.h (51)
  38. deps/v8/src/arm/macro-assembler-arm.cc (207)
  39. deps/v8/src/arm/macro-assembler-arm.h (28)
  40. deps/v8/src/arm/regexp-macro-assembler-arm.h (9)
  41. deps/v8/src/arm/simulator-arm.cc (88)
  42. deps/v8/src/arm/simulator-arm.h (36)
  43. deps/v8/src/arm/stub-cache-arm.cc (312)
  44. deps/v8/src/array.js (151)
  45. deps/v8/src/assembler.cc (51)
  46. deps/v8/src/assembler.h (15)
  47. deps/v8/src/ast.cc (60)
  48. deps/v8/src/ast.h (401)
  49. deps/v8/src/bootstrapper.cc (793)
  50. deps/v8/src/bootstrapper.h (4)
  51. deps/v8/src/builtins.cc (43)
  52. deps/v8/src/builtins.h (10)
  53. deps/v8/src/checks.h (8)
  54. deps/v8/src/code-stubs.cc (24)
  55. deps/v8/src/code-stubs.h (45)
  56. deps/v8/src/collection.js (27)
  57. deps/v8/src/compilation-cache.cc (38)
  58. deps/v8/src/compilation-cache.h (21)
  59. deps/v8/src/compiler.cc (557)
  60. deps/v8/src/compiler.h (198)
  61. deps/v8/src/contexts.cc (53)
  62. deps/v8/src/contexts.h (64)
  63. deps/v8/src/conversions-inl.h (36)
  64. deps/v8/src/conversions.h (11)
  65. deps/v8/src/counters.cc (26)
  66. deps/v8/src/counters.h (60)
  67. deps/v8/src/cpu-profiler.h (2)
  68. deps/v8/src/d8.cc (495)
  69. deps/v8/src/d8.h (24)
  70. deps/v8/src/date.js (41)
  71. deps/v8/src/debug-debugger.js (38)
  72. deps/v8/src/debug.cc (335)
  73. deps/v8/src/debug.h (23)
  74. deps/v8/src/deoptimizer.cc (290)
  75. deps/v8/src/deoptimizer.h (33)
  76. deps/v8/src/disassembler.cc (4)
  77. deps/v8/src/elements.cc (2)
  78. deps/v8/src/execution.cc (59)
  79. deps/v8/src/execution.h (5)
  80. deps/v8/src/extensions/statistics-extension.cc (153)
  81. deps/v8/src/extensions/statistics-extension.h (49)
  82. deps/v8/src/factory.cc (164)
  83. deps/v8/src/factory.h (35)
  84. deps/v8/src/flag-definitions.h (51)
  85. deps/v8/src/flags.cc (14)
  86. deps/v8/src/frames.cc (24)
  87. deps/v8/src/frames.h (2)
  88. deps/v8/src/full-codegen.cc (132)
  89. deps/v8/src/full-codegen.h (46)
  90. deps/v8/src/globals.h (15)
  91. deps/v8/src/handles-inl.h (30)
  92. deps/v8/src/handles.cc (156)
  93. deps/v8/src/handles.h (36)
  94. deps/v8/src/hashmap.h (8)
  95. deps/v8/src/heap-inl.h (41)
  96. deps/v8/src/heap.cc (653)
  97. deps/v8/src/heap.h (198)
  98. deps/v8/src/hydrogen-instructions.cc (196)
  99. deps/v8/src/hydrogen-instructions.h (323)
  100. deps/v8/src/hydrogen.cc (1933)

deps/v8/.gitignore (5)

@@ -9,9 +9,11 @@
*.pdb
*.pyc
*.scons*
*.sln
*.so
*.suo
*.user
*.vcproj
*.xcodeproj
#*#
*~
@@ -20,13 +22,16 @@ d8
d8_g
shell
shell_g
/build/Debug
/build/gyp
/build/Release
/obj/
/out/
/test/es5conform/data
/test/mozilla/data
/test/sputnik/sputniktests
/test/test262/data
/third_party
/tools/oom_dump/oom_dump
/tools/oom_dump/oom_dump.o
/tools/visual_studio/Debug

deps/v8/AUTHORS (3)

@@ -24,9 +24,11 @@ Dineel D Sule <dsule@codeaurora.org>
Erich Ocean <erich.ocean@me.com>
Fedor Indutny <fedor@indutny.com>
Filipe David Manana <fdmanana@gmail.com>
Haitao Feng <haitao.feng@intel.com>
Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>
James Pike <g00gle@chilon.net>
Joel Stanley <joel.stan@gmail.com>
John Jozwiak <jjozwiak@codeaurora.org>
Jonathan Liu <net147@gmail.com>
@@ -46,6 +48,7 @@ Rene Rebe <rene@exactcode.de>
Robert Mustacchi <rm@fingolfin.org>
Rodolph Perfetta <rodolph.perfetta@arm.com>
Ryan Dahl <coldredlemur@gmail.com>
Sandro Santilli <strk@keybit.net>
Sanjoy Das <sanjoy@playingwithpointers.com>
Subrato K De <subratokde@codeaurora.org>
Tobias Burnus <burnus@net-b.de>

deps/v8/ChangeLog (327)

@@ -1,3 +1,330 @@
2012-09-11: Version 3.13.7

        Enable/disable LiveEdit using the (C++) debug API.
        Performance and stability improvements on all platforms.

2012-09-06: Version 3.13.6

        Added validity checking to API functions and calls.
        Disabled accessor inlining (Chromium issue 134609).
        Fixed bug in Math.min/max in optimized code (Chromium issue 145961).
        Directly use %ObjectKeys in json stringify (Chromium issue 2312).
        Fixed VS2005 build (issue 2313).
        Activated fixed ES5 readonly semantics by default.
        Added hardfp flag to the Makefile.
        Performance and stability improvements on all platforms.

2012-08-29: Version 3.13.5

        Release stack trace data after firing Error.stack accessor.
        (issue 2308)
        Added a new API V8::SetJitCodeEventHandler to push code name and
        location to users such as profilers.
        Allocate block-scoped global bindings to global context.
        Performance and stability improvements on all platforms.

2012-08-28: Version 3.13.4

        Print reason for disabling optimization. Kill --trace-bailout flag.
        Provided option to disable full DEBUG build on Android.
        Introduced global contexts to represent lexical global scope(s).
        Fixed rounding in Uint8ClampedArray setter. (issue 2294)
        Performance and stability improvements on all platforms.

2012-08-21: Version 3.13.3

        Performance and stability improvements on all platforms.

2012-08-20: Version 3.13.2

        Performance and stability improvements on all platforms.

2012-08-16: Version 3.13.1

        Performance and stability improvements on all platforms.

2012-08-10: Version 3.13.0

        Added histograms for total allocated/live heap size, as well as
        allocated size and percentage of total for map and cell space.
        Fixed parseInt's octal parsing behavior (ECMA-262 Annex E 15.1.2.2).
        (issue 1645)
        Added checks for interceptors to negative lookup code in Crankshaft.
        (Chromium issue 140473)
        Made incremental marking clear ICs and type feedback cells.
        Performance and stability improvements on all platforms.

2012-08-01: Version 3.12.19

        Performance and stability improvements on all platforms.

2012-07-30: Version 3.12.18

        Forced using bit-pattern for signed zero double. (issue 2239)
        Made sure double to int conversion is correct. (issue 2260)
        Performance and stability improvements on all platforms.

2012-07-27: Version 3.12.17

        Always set the callee's context when calling a function from optimized
        code.
        (Chromium issue 138887)
        Fixed building with GCC 3.x
        (issue 2016, 2017)
        Improved API calls that return empty handles.
        (issue 2245)
        Performance and stability improvements on all platforms.

2012-07-25: Version 3.12.16

        Performance and stability improvements on all platforms.

2012-07-24: Version 3.12.15

        Added PRESERVE_ASCII_NULL option to String::WriteAscii.
        (issue 2252)
        Added dependency to HLoadKeyed* instructions to prevent invalid
        hoisting. (Chromium issue 137768)
        Enabled building d8 for Android on Mac.
        Interpret negative hexadecimal literals as NaN.
        (issue 2240)
        Expose counters in javascript when using --track-gc-object-stats.
        Enabled building and testing V8 on Android IA.
        Added --trace-parse flag to parser.
        Performance and stability improvements on all platforms.

2012-07-18: Version 3.12.14

        Deactivated optimization of packed arrays.
        (Chromium issue 137768)
        Fixed broken accessor transition.
        (Chromium issue 137689)
        Performance and stability improvements on all platforms.

2012-07-17: Version 3.12.13

        Fixed missing tagging of stack value in finally block.
        (Chromium issue 137496)
        Added more support for heap analysis.
        Performance and stability improvements on all platforms.

2012-07-16: Version 3.12.12

        Added an option to the tickprocessor to specify the directory for lib
        lookup.
        Fixed ICs for slow objects with native accessor (Chromium issue 137002).
        Fixed transcendental cache on ARM in optimized code (issue 2234).
        New heap inspection tools: counters for object sizes and counts,
        histograms for external fragmentation.
        Incorporated constness into inferred interfaces (in preparation for
        handling imports) (issue 1569).
        Performance and stability improvements on all platforms.

2012-07-12: Version 3.12.11

        Renamed "mips" arch to "mipsel" in the GYP build.
        Fixed computation of call targets on prototypes in Crankshaft.
        (Chromium issue 125148)
        Removed use of __lookupGetter__ when generating stack trace.
        (issue 1591)
        Turned on ES 5.2 globals semantics by default.
        (issue 1991, Chromium issue 80591)
        Synced preparser and parser wrt syntax error in switch..case.
        (issue 2210)
        Fixed reporting of octal literals in strict mode when preparsing.
        (issue 2220)
        Fixed inline constructors for Harmony Proxy prototypes.
        (issue 2225)
        Performance and stability improvements on all platforms.

2012-07-10: Version 3.12.10

        Re-enabled and fixed issue with array bounds check elimination
        (Chromium issue 132114).
        Fixed Debug::Break crash. (Chromium issue 131642)
        Added optimizing compiler support for JavaScript getters.
        Performance and stability improvements on all platforms.

2012-07-06: Version 3.12.9

        Correctly advance the scanner when scanning unicode regexp flag.
        (Chromium issue 136084)
        Fixed unhandlified code calling Harmony Proxy traps.
        (issue 2219)
        Performance and stability improvements on all platforms.

2012-07-05: Version 3.12.8

        Implemented TypedArray.set and ArrayBuffer.slice in d8.
        Performance and stability improvements on all platforms.

2012-07-03: Version 3.12.7

        Fixed lazy compilation for strict eval scopes.
        (Chromium issue 135066)
        Made MACOSX_DEPLOYMENT_TARGET configurable in GYP.
        (issue 2151)
        Report "hidden properties" in heap profiler for properties case.
        (issue 2212)
        Activated optimization of packed arrays by default.
        Performance and stability improvements on all platforms.

2012-06-29: Version 3.12.6

        Cleaned up hardfp ABI detection for ARM (V8 issue 2140).
        Extended TypedArray support in d8.

2012-06-28: Version 3.12.5

        Fixed lazy parsing heuristics to respect outer scope.
        (Chromium issue 135008)
        Allow using test-wrapper-gypbuild.py on Windows when no python
        interpreter is registered.
        Performance and stability improvements on all platforms.

2012-06-27: Version 3.12.4

        Removed -fomit-frame-pointer flag from Release builds to make
        the stack walkable by TCMalloc (Chromium issue 133723).
        Ported r7868 (constant masking) to x64 (issue 1374).
        Expose more detailed memory statistics (issue 2201).
        Fixed Harmony Maps and WeakMaps for undefined values
        (Chromium issue 132744).
        Correctly throw reference error in strict mode with ICs disabled
        (issue 2119).
        Performance and stability improvements on all platforms.

2012-06-25: Version 3.12.3

        Reverted r11835 'Unify promotion and allocation limit computation' due
        to V8 Splay performance regression on Mac. (Chromium issue 134183)
        Fixed sharing of literal boilerplates for optimized code. (issue 2193)
        Performance and stability improvements on all platforms.

2012-06-22: Version 3.12.2

        Made near-jump check more strict in LoadNamedFieldPolymorphic on
        ia32/x64. (Chromium issue 134055)
        Fixed lazy sweeping heuristics to prevent old-space expansion.
        (issue 2194)
        Performance and stability improvements on all platforms.

2012-06-21: Version 3.12.1

        Performance and stability improvements on all platforms.

2012-06-20: Version 3.12.0

        Fixed Chromium issues:
        115100, 129628, 131994, 132727, 132741, 132742, 133211
        Fixed V8 issues:
        915, 1914, 2034, 2087, 2094, 2134, 2156, 2166, 2172, 2177, 2179, 2185
        Added --extra-code flag to mksnapshot to load JS code into the VM
        before creating the snapshot.
        Support 'restart call frame' command in the debugger.
        Performance and stability improvements on all platforms.

2012-06-13: Version 3.11.10

        Implemented heap profiler memory usage reporting.

deps/v8/Makefile (89)

@@ -34,7 +34,8 @@ TESTJOBS ?= -j16
GYPFLAGS ?=
TESTFLAGS ?=
ANDROID_NDK_ROOT ?=
ANDROID_TOOL_PREFIX = $(ANDROID_NDK_ROOT)/toolchain/bin/arm-linux-androideabi
ANDROID_TOOLCHAIN ?=
ANDROID_V8 ?= /data/local/v8
# Special build flags. Use them like this: "make library=shared"
@@ -61,6 +62,13 @@ endif
ifeq ($(snapshot), off)
GYPFLAGS += -Dv8_use_snapshot='false'
endif
# extrachecks=on/off
ifeq ($(extrachecks), on)
GYPFLAGS += -Dv8_enable_extra_checks=1
endif
ifeq ($(extrachecks), off)
GYPFLAGS += -Dv8_enable_extra_checks=0
endif
# gdbjit=on
ifeq ($(gdbjit), on)
GYPFLAGS += -Dv8_enable_gdbjit=1
@@ -95,6 +103,14 @@ endif
ifeq ($(strictaliasing), off)
GYPFLAGS += -Dv8_no_strict_aliasing=1
endif
# regexp=interpreted
ifeq ($(regexp), interpreted)
GYPFLAGS += -Dv8_interpreted_regexp=1
endif
# hardfp=on
ifeq ($(hardfp), on)
GYPFLAGS += -Dv8_use_arm_eabi_hardfloat=true
endif
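(Usage note, following the Makefile's own "make library=shared" convention above: these new switches go on the make command line, e.g. "make arm.release hardfp=on", "make ia32.release regexp=interpreted", or "make x64.debug extrachecks=off"; the arch.mode targets are illustrative, and each switch simply appends the corresponding GYP define shown in this hunk.)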
# ----------------- available targets: --------------------
# - "dependencies": pulls in external dependencies (currently: GYP)
@@ -103,7 +119,7 @@ endif
# - every combination <arch>.<mode>, e.g. "ia32.release"
# - "native": current host's architecture, release mode
# - any of the above with .check appended, e.g. "ia32.release.check"
# - "android": cross-compile for Android/ARM (release mode)
# - "android": cross-compile for Android/ARM
# - default (no target specified): build all DEFAULT_ARCHES and MODES
# - "check": build all targets and run all tests
# - "<arch>.clean" for any <arch> in ARCHES
@@ -113,9 +129,10 @@ endif
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
ARCHES = ia32 x64 arm mips
ARCHES = ia32 x64 arm mipsel
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug
ANDROID_ARCHES = android_ia32 android_arm
# List of files that trigger Makefile regeneration:
GYPFILES = build/all.gyp build/common.gypi build/standalone.gypi \
@@ -124,15 +141,19 @@ GYPFILES = build/all.gyp build/common.gypi build/standalone.gypi \
# Generates all combinations of ARCHES and MODES, e.g. "ia32.release".
BUILDS = $(foreach mode,$(MODES),$(addsuffix .$(mode),$(ARCHES)))
ANDROID_BUILDS = $(foreach mode,$(MODES), \
$(addsuffix .$(mode),$(ANDROID_ARCHES)))
# Generates corresponding test targets, e.g. "ia32.release.check".
CHECKS = $(addsuffix .check,$(BUILDS))
ANDROID_CHECKS = $(addsuffix .check,$(ANDROID_BUILDS))
# File where previously used GYPFLAGS are stored.
ENVFILE = $(OUTDIR)/environment
.PHONY: all check clean dependencies $(ENVFILE).new native \
$(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
$(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) \
must-set-ANDROID_NDK_ROOT
$(ANDROID_ARCHES) $(ANDROID_BUILDS) $(ANDROID_CHECKS) \
must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN
# Target definitions. "all" is the default.
all: $(MODES)
@@ -143,6 +164,10 @@ buildbot:
$(MAKE) -C "$(OUTDIR)" BUILDTYPE=$(BUILDTYPE) \
builddir="$(abspath $(OUTDIR))/$(BUILDTYPE)"
mips mips.release mips.debug:
@echo "V8 does not support big-endian MIPS builds at the moment," \
"please use little-endian builds (mipsel)."
# Compile targets. MODES and ARCHES are convenience targets.
.SECONDEXPANSION:
$(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
@@ -162,17 +187,15 @@ native: $(OUTDIR)/Makefile.native
CXX="$(CXX)" LINK="$(LINK)" BUILDTYPE=Release \
builddir="$(shell pwd)/$(OUTDIR)/$@"
# TODO(jkummerow): add "android.debug" when we need it.
android android.release: $(OUTDIR)/Makefile.android
@$(MAKE) -C "$(OUTDIR)" -f Makefile.android \
CXX="$(ANDROID_TOOL_PREFIX)-g++" \
AR="$(ANDROID_TOOL_PREFIX)-ar" \
RANLIB="$(ANDROID_TOOL_PREFIX)-ranlib" \
CC="$(ANDROID_TOOL_PREFIX)-gcc" \
LD="$(ANDROID_TOOL_PREFIX)-ld" \
LINK="$(ANDROID_TOOL_PREFIX)-g++" \
BUILDTYPE=Release \
builddir="$(shell pwd)/$(OUTDIR)/android.release"
$(ANDROID_ARCHES): $(addprefix $$@.,$(MODES))
$(ANDROID_BUILDS): $(GYPFILES) $(ENVFILE) build/android.gypi \
must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN Makefile.android
@$(MAKE) -f Makefile.android $@ \
ARCH="$(basename $@)" \
MODE="$(subst .,,$(suffix $@))" \
OUTDIR="$(OUTDIR)" \
GYPFLAGS="$(GYPFLAGS)"
# Test targets.
check: all
@@ -192,12 +215,25 @@ $(CHECKS): $$(basename $$@)
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@) $(TESTFLAGS)
$(addsuffix .sync, $(ANDROID_BUILDS)): $$(basename $$@)
@tools/android-sync.sh $(basename $@) $(OUTDIR) \
$(shell pwd) $(ANDROID_V8)
$(addsuffix .check, $(ANDROID_BUILDS)): $$(basename $$@).sync
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@) \
--timeout=600 \
--special-command="tools/android-run.py @"
$(addsuffix .check, $(ANDROID_ARCHES)): \
$(addprefix $$(basename $$@).,$(MODES)).check
native.check: native
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
--arch-and-mode=. $(TESTFLAGS)
# Clean targets. You can clean each architecture individually, or everything.
$(addsuffix .clean,$(ARCHES)) android.clean:
$(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES)):
rm -f $(OUTDIR)/Makefile.$(basename $@)
rm -rf $(OUTDIR)/$(basename $@).release
rm -rf $(OUTDIR)/$(basename $@).debug
@@ -208,11 +244,11 @@ native.clean:
rm -rf $(OUTDIR)/native
find $(OUTDIR) -regex '.*\(host\|target\).native\.mk' -delete
clean: $(addsuffix .clean,$(ARCHES)) native.clean android.clean
clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES)) native.clean
# GYP file generation targets.
MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ARCHES))
$(MAKEFILES): $(GYPFILES) $(ENVFILE)
OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ARCHES))
$(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE)
GYP_GENERATORS=make \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. \
@@ -224,18 +260,11 @@ $(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS)
$(OUTDIR)/Makefile.android: $(GYPFILES) $(ENVFILE) build/android.gypi \
must-set-ANDROID_NDK_ROOT
GYP_GENERATORS=make \
CC="${ANDROID_TOOL_PREFIX}-gcc" \
CXX="${ANDROID_TOOL_PREFIX}-g++" \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
-S.android $(GYPFLAGS)
must-set-ANDROID_NDK_ROOT:
must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN:
ifndef ANDROID_NDK_ROOT
$(error ANDROID_NDK_ROOT is not set)
ifndef ANDROID_TOOLCHAIN
$(error ANDROID_NDK_ROOT or ANDROID_TOOLCHAIN must be set))
endif
endif
# Replaces the old with the new environment file if they're different, which

deps/v8/Makefile.android (92)

@@ -0,0 +1,92 @@
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Those definitions should be consistent with the main Makefile
ANDROID_ARCHES = android_ia32 android_arm
MODES = release debug
# Generates all combinations of ANDROID ARCHES and MODES,
# e.g. "android_ia32.release" or "android_arm.release"
ANDROID_BUILDS = $(foreach mode,$(MODES), \
$(addsuffix .$(mode),$(ANDROID_ARCHES)))
HOST_OS = $(shell uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/')
ifeq ($(HOST_OS), linux)
TOOLCHAIN_DIR = linux-x86
else
ifeq ($(HOST_OS), mac)
TOOLCHAIN_DIR = darwin-x86
else
$(error Host platform "${HOST_OS}" is not supported)
endif
endif
ifeq ($(ARCH), android_arm)
DEFINES = target_arch=arm v8_target_arch=arm android_target_arch=arm
DEFINES += arm_neon=0 armv7=1
TOOLCHAIN_ARCH = arm-linux-androideabi-4.4.3
else
ifeq ($(ARCH), android_ia32)
DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86
TOOLCHAIN_ARCH = x86-4.4.3
else
$(error Target architecture "${ARCH}" is not supported)
endif
endif
TOOLCHAIN_PATH = ${ANDROID_NDK_ROOT}/toolchains/${TOOLCHAIN_ARCH}/prebuilt
ANDROID_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/${TOOLCHAIN_DIR}
ifeq ($(wildcard $(ANDROID_TOOLCHAIN)),)
$(error Cannot find Android toolchain in "${ANDROID_TOOLCHAIN}")
endif
# For mksnapshot host generation.
DEFINES += host_os=${HOST_OS}
.SECONDEXPANSION:
$(ANDROID_BUILDS): $(OUTDIR)/Makefile.$$(basename $$@)
@$(MAKE) -C "$(OUTDIR)" -f Makefile.$(basename $@) \
CXX="$(ANDROID_TOOLCHAIN)/bin/*-g++" \
AR="$(ANDROID_TOOLCHAIN)/bin/*-ar" \
RANLIB="$(ANDROID_TOOLCHAIN)/bin/*-ranlib" \
CC="$(ANDROID_TOOLCHAIN)/bin/*-gcc" \
LD="$(ANDROID_TOOLCHAIN)/bin/*-ld" \
LINK="$(ANDROID_TOOLCHAIN)/bin/*-g++" \
BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
python -c "print raw_input().capitalize()") \
builddir="$(shell pwd)/$(OUTDIR)/$@"
# Android GYP file generation targets.
ANDROID_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ANDROID_ARCHES))
$(ANDROID_MAKEFILES):
@GYP_GENERATORS=make-android \
GYP_DEFINES="${DEFINES}" \
CC="${ANDROID_TOOLCHAIN}/bin/*-gcc" \
CXX="${ANDROID_TOOLCHAIN}/bin/*-g++" \
build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
-S.${ARCH} ${GYPFLAGS}

deps/v8/build/android.gypi (84)

@@ -33,35 +33,40 @@
'variables': {
# Location of Android NDK.
'variables': {
'variables': {
'android_ndk_root%': '<!(/bin/echo -n $ANDROID_NDK_ROOT)',
'android_target_arch%': 'arm', # target_arch in android terms.
# Switch between different build types, currently only '0' is
# supported.
'android_build_type%': 0,
},
'android_ndk_root%': '<(android_ndk_root)',
'android_ndk_sysroot': '<(android_ndk_root)/platforms/android-9/arch-<(android_target_arch)',
'android_build_type%': '<(android_build_type)',
'android_ndk_root%': '<!(/bin/echo -n $ANDROID_NDK_ROOT)',
'android_toolchain%': '<!(/bin/echo -n $ANDROID_TOOLCHAIN)',
# Switch between different build types, currently only '0' is
# supported.
'android_build_type%': 0,
},
'android_ndk_root%': '<(android_ndk_root)',
'android_ndk_sysroot': '<(android_ndk_sysroot)',
'android_ndk_include': '<(android_ndk_sysroot)/usr/include',
'android_ndk_lib': '<(android_ndk_sysroot)/usr/lib',
'conditions': [
['android_ndk_root==""', {
'variables': {
'android_sysroot': '<(android_toolchain)/sysroot/',
'android_stlport': '<(android_toolchain)/sources/cxx-stl/stlport/',
},
'android_include': '<(android_sysroot)/usr/include',
'android_lib': '<(android_sysroot)/usr/lib',
'android_stlport_include': '<(android_stlport)/stlport',
'android_stlport_libs': '<(android_stlport)/libs',
}, {
'variables': {
'android_sysroot': '<(android_ndk_root)/platforms/android-9/arch-<(android_target_arch)',
'android_stlport': '<(android_ndk_root)/sources/cxx-stl/stlport/',
},
'android_include': '<(android_sysroot)/usr/include',
'android_lib': '<(android_sysroot)/usr/lib',
'android_stlport_include': '<(android_stlport)/stlport',
'android_stlport_libs': '<(android_stlport)/libs',
}],
],
# Enable to use the system stlport, otherwise statically
# link the NDK one?
'use_system_stlport%': '<(android_build_type)',
'android_stlport_library': 'stlport_static',
# Copy it out one scope.
'android_build_type%': '<(android_build_type)',
'OS': 'android',
'target_arch': 'arm',
'v8_target_arch': 'arm',
'armv7': 1,
'arm_neon': 0,
'arm_fpu': 'vfpv3',
}, # variables
'target_defaults': {
'defines': [
@@ -100,10 +105,7 @@
'-Wno-error=non-virtual-dtor', # TODO(michaelbai): Fix warnings.
# Note: This include is in cflags to ensure that it comes after
# all of the includes.
'-I<(android_ndk_include)',
'-march=armv7-a',
'-mtune=cortex-a8',
'-mfpu=vfp3',
'-I<(android_include)',
],
'defines': [
'ANDROID',
@@ -120,7 +122,6 @@
'ldflags': [
'-nostdlib',
'-Wl,--no-undefined',
'-Wl,--icf=safe', # Enable identical code folding to reduce size
# Don't export symbols from statically linked libraries.
'-Wl,--exclude-libs=ALL',
],
@@ -144,8 +145,21 @@
'conditions': [
['android_build_type==0', {
'ldflags': [
'-Wl,-rpath-link=<(android_ndk_lib)',
'-L<(android_ndk_lib)',
'-Wl,-rpath-link=<(android_lib)',
'-L<(android_lib)',
],
}],
['target_arch == "arm"', {
'ldflags': [
# Enable identical code folding to reduce size.
'-Wl,--icf=safe',
],
}],
['target_arch=="arm" and armv7==1', {
'cflags': [
'-march=armv7-a',
'-mtune=cortex-a8',
'-mfpu=vfp3',
],
}],
# NOTE: The stlport header include paths below are specified in
@@ -156,22 +170,22 @@
# The include ordering here is important; change with caution.
['use_system_stlport==0', {
'cflags': [
'-I<(android_ndk_root)/sources/cxx-stl/stlport/stlport',
'-I<(android_stlport_include)',
],
'conditions': [
['target_arch=="arm" and armv7==1', {
'ldflags': [
'-L<(android_ndk_root)/sources/cxx-stl/stlport/libs/armeabi-v7a',
'-L<(android_stlport_libs)/armeabi-v7a',
],
}],
['target_arch=="arm" and armv7==0', {
'ldflags': [
'-L<(android_ndk_root)/sources/cxx-stl/stlport/libs/armeabi',
'-L<(android_stlport_libs)/armeabi',
],
}],
['target_arch=="ia32"', {
'ldflags': [
'-L<(android_ndk_root)/sources/cxx-stl/stlport/libs/x86',
'-L<(android_stlport_libs)/x86',
],
}],
],
@@ -194,12 +208,12 @@
'-Wl,--gc-sections',
'-Wl,-z,nocopyreloc',
# crtbegin_dynamic.o should be the last item in ldflags.
'<(android_ndk_lib)/crtbegin_dynamic.o',
'<(android_lib)/crtbegin_dynamic.o',
],
'libraries': [
# crtend_android.o needs to be the last item in libraries.
# Do not add any libraries after this!
'<(android_ndk_lib)/crtend_android.o',
'<(android_lib)/crtend_android.o',
],
}],
['_type=="shared_library"', {
@@ -222,4 +236,4 @@
}],
], # target_conditions
}, # target_defaults
}
}

deps/v8/build/common.gypi (62)

@@ -48,7 +48,8 @@
# both for the snapshot and for the ARM target. Leaving the default value
# of 'false' will avoid VFP instructions in the snapshot and use CPU feature
# probing when running on the target.
'v8_can_use_vfp_instructions%': 'false',
'v8_can_use_vfp2_instructions%': 'false',
'v8_can_use_vfp3_instructions%': 'false',
# Similar to vfp but on MIPS.
'v8_can_use_fpu_instructions%': 'true',
@@ -69,6 +70,9 @@
'v8_enable_disassembler%': 0,
# Enable extra checks in API functions and other strategic places.
'v8_enable_extra_checks%': 1,
'v8_object_print%': 0,
'v8_enable_gdbjit%': 0,
@@ -95,6 +99,10 @@
# For a shared library build, results in "libv8-<(soname_version).so".
'soname_version%': '',
# Interpreted regexp engine exists as platform-independent alternative
# based where the regular expression is compiled to a bytecode.
'v8_interpreted_regexp%': 0,
},
'target_defaults': {
'conditions': [
@@ -104,12 +112,18 @@
['v8_enable_disassembler==1', {
'defines': ['ENABLE_DISASSEMBLER',],
}],
['v8_enable_extra_checks==1', {
'defines': ['ENABLE_EXTRA_CHECKS',],
}],
['v8_object_print==1', {
'defines': ['OBJECT_PRINT',],
}],
['v8_enable_gdbjit==1', {
'defines': ['ENABLE_GDB_JIT_INTERFACE',],
}],
['v8_interpreted_regexp==1', {
'defines': ['V8_INTERPRETED_REGEXP',],
}],
['v8_target_arch=="arm"', {
'defines': [
'V8_TARGET_ARCH_ARM',
@@ -125,9 +139,14 @@
'CAN_USE_UNALIGNED_ACCESSES=0',
],
}],
[ 'v8_can_use_vfp_instructions=="true"', {
[ 'v8_can_use_vfp2_instructions=="true"', {
'defines': [
'CAN_USE_VFP_INSTRUCTIONS',
'CAN_USE_VFP2_INSTRUCTIONS',
],
}],
[ 'v8_can_use_vfp3_instructions=="true"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
],
}],
[ 'v8_use_arm_eabi_hardfloat=="true"', {
@@ -152,12 +171,12 @@
'V8_TARGET_ARCH_IA32',
],
}], # v8_target_arch=="ia32"
['v8_target_arch=="mips"', {
['v8_target_arch=="mipsel"', {
'defines': [
'V8_TARGET_ARCH_MIPS',
],
'variables': {
'mipscompiler': '<!($(echo ${CXX:-$(which g++)}) -v 2>&1 | grep -q "^Target: mips-" && echo "yes" || echo "no")',
'mipscompiler': '<!($(echo ${CXX:-$(which g++)}) -v 2>&1 | grep -q "^Target: mips" && echo "yes" || echo "no")',
},
'conditions': [
['mipscompiler=="yes"', {
@@ -207,7 +226,7 @@
'defines': ['_MIPS_ARCH_LOONGSON',],
}],
],
}], # v8_target_arch=="mips"
}], # v8_target_arch=="mipsel"
['v8_target_arch=="x64"', {
'defines': [
'V8_TARGET_ARCH_X64',
@@ -220,6 +239,7 @@
'StackReserveSize': '2097152',
},
},
'msvs_configuration_platform': 'x64',
}], # v8_target_arch=="x64"
['v8_use_liveobjectlist=="true"', {
'defines': [
@@ -239,6 +259,7 @@
'WIN32',
],
'msvs_configuration_attributes': {
'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1',
},
@@ -264,7 +285,7 @@
['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="mac" or OS=="android") and \
(v8_target_arch=="arm" or v8_target_arch=="ia32" or \
v8_target_arch=="mips")', {
v8_target_arch=="mipsel")', {
# Check whether the host compiler and target compiler support the
# '-m32' option and set it if so.
'target_conditions': [
@@ -323,19 +344,36 @@
},
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags': [ '-Wno-unused-parameter',
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
}],
['OS=="android"', {
'variables': {
'android_full_debug%': 1,
},
'conditions': [
['android_full_debug==0', {
# Disable full debug if we want a faster v8 in a debug build.
# TODO(2304): pass DISABLE_DEBUG_ASSERT instead of hiding DEBUG.
'defines!': [
'DEBUG',
],
}],
],
}],
],
}, # Debug
'Release': {
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
or OS=="android"', {
'cflags!': [
'-O2',
'-Os',
],
'cflags': [
'-fdata-sections',
'-ffunction-sections',
'-fomit-frame-pointer',
'-O3',
],
'conditions': [
@@ -365,15 +403,17 @@
'InlineFunctionExpansion': '2',
'EnableIntrinsicFunctions': 'true',
'FavorSizeOrSpeed': '0',
'OmitFramePointers': 'true',
'StringPooling': 'true',
'conditions': [
['OS=="win" and component=="shared_library"', {
'RuntimeLibrary': '2', #/MD
}, {
'RuntimeLibrary': '0', #/MT
}],
['v8_target_arch=="x64"', {
# TODO(2207): remove this option once the bug is fixed.
'WholeProgramOptimization': 'true',
}],
],
},
'VCLinkerTool': {

deps/v8/build/standalone.gypi (11)

@@ -33,6 +33,7 @@
'component%': 'static_library',
'visibility%': 'hidden',
'msvs_multi_core_compile%': '1',
'mac_deployment_target%': '10.5',
'variables': {
'variables': {
'variables': {
@@ -45,7 +46,7 @@
# to gyp.
'host_arch%':
'<!(uname -m | sed -e "s/i.86/ia32/;\
s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mips/")',
s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mipsel/")',
}, {
# OS!="linux" and OS!="freebsd" and OS!="openbsd" and
# OS!="netbsd" and OS!="mac"
@@ -66,8 +67,9 @@
'werror%': '-Werror',
'conditions': [
['(v8_target_arch=="arm" and host_arch!="arm") or \
(v8_target_arch=="mips" and host_arch!="mips") or \
(v8_target_arch=="x64" and host_arch!="x64")', {
(v8_target_arch=="mipsel" and host_arch!="mipsel") or \
(v8_target_arch=="x64" and host_arch!="x64") or \
(OS=="android")', {
'want_separate_host_toolset': 1,
}, {
'want_separate_host_toolset': 0,
@@ -191,7 +193,8 @@
'GCC_TREAT_WARNINGS_AS_ERRORS': 'YES', # -Werror
'GCC_VERSION': '4.2',
'GCC_WARN_ABOUT_MISSING_NEWLINE': 'YES', # -Wnewline-eof
'MACOSX_DEPLOYMENT_TARGET': '10.4', # -mmacosx-version-min=10.4
# MACOSX_DEPLOYMENT_TARGET maps to -mmacosx-version-min
'MACOSX_DEPLOYMENT_TARGET': '<(mac_deployment_target)',
'PREBINDING': 'NO', # No -Wl,-prebind
'SYMROOT': '<(DEPTH)/xcodebuild',
'USE_HEADERMAP': 'NO',
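(The new mac_deployment_target variable means the deployment target can now be overridden per build. A plausible invocation through the top-level Makefile, with a hypothetical value, is "make x64.release GYPFLAGS=\"-Dmac_deployment_target=10.6\"", since GYPFLAGS is forwarded verbatim to gyp.)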

deps/v8/include/v8-debug.h (10)

@@ -321,7 +321,7 @@ class EXPORT Debug {
* \endcode
*/
static Local<Value> Call(v8::Handle<v8::Function> fun,
Handle<Value> data = Handle<Value>());
Handle<Value> data = Handle<Value>());
/**
* Returns a mirror object for the given object.
@@ -388,6 +388,14 @@ class EXPORT Debug {
* to change.
*/
static Local<Context> GetDebugContext();
/**
* Enable/disable LiveEdit functionality for the given Isolate
* (default Isolate if not provided). V8 will abort if LiveEdit is
* unexpectedly used. LiveEdit is enabled by default.
*/
static void SetLiveEditEnabled(bool enable, Isolate* isolate = NULL);
};
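A minimal usage sketch for the new hook (an editorial example, not part of the diff; assumes an embedder built with debugger support):

    #include <v8-debug.h>

    void DisableLiveEdit() {
      // LiveEdit is enabled by default; turn it off for the default
      // isolate. Per the comment above, V8 aborts if LiveEdit is then
      // unexpectedly used.
      v8::Debug::SetLiveEditEnabled(false);
    }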

deps/v8/include/v8-preparser.h (7)

@@ -55,11 +55,12 @@
// Setup for Linux shared library export. There is no need to distinguish
// between building or using the V8 shared library, but we should not
// export symbols when we are building a static library.
#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
#if defined(__GNUC__) && ((__GNUC__ >= 4) || \
(__GNUC__ == 3 && __GNUC_MINOR__ >= 3)) && defined(V8_SHARED)
#define V8EXPORT __attribute__ ((visibility("default")))
#else // defined(__GNUC__) && (__GNUC__ >= 4)
#else
#define V8EXPORT
#endif // defined(__GNUC__) && (__GNUC__ >= 4)
#endif
#endif // _WIN32

deps/v8/include/v8-profiler.h (27)

@@ -50,11 +50,12 @@
// Setup for Linux shared library export. See v8.h in this directory for
// information on how to build/use V8 as shared library.
#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
#if defined(__GNUC__) && ((__GNUC__ >= 4) || \
(__GNUC__ == 3 && __GNUC_MINOR__ >= 3)) && defined(V8_SHARED)
#define V8EXPORT __attribute__ ((visibility("default")))
#else // defined(__GNUC__) && (__GNUC__ >= 4)
#else
#define V8EXPORT
#endif // defined(__GNUC__) && (__GNUC__ >= 4)
#endif
#endif // _WIN32
@@ -280,32 +281,12 @@ class V8EXPORT HeapGraphNode {
/** Returns node's own size, in bytes. */
int GetSelfSize() const;
/**
* Returns node's retained size, in bytes. That is, self + sizes of
* the objects that are reachable only from this object. In other
* words, the size of memory that will be reclaimed having this node
* collected.
*/
int GetRetainedSize() const;
/** Returns child nodes count of the node. */
int GetChildrenCount() const;
/** Retrieves a child by index. */
const HeapGraphEdge* GetChild(int index) const;
/** Returns retainer nodes count of the node. */
int GetRetainersCount() const;
/** Returns a retainer by index. */
const HeapGraphEdge* GetRetainer(int index) const;
/**
* Returns a dominator node. This is the node that participates in every
* path from the snapshot root to the current node.
*/
const HeapGraphNode* GetDominatorNode() const;
/**
* Finds and returns a value from the heap corresponding to this node,
* if the value is still reachable.

deps/v8/include/v8-testing.h (7)

@@ -50,11 +50,12 @@
// Setup for Linux shared library export. See v8.h in this directory for
// information on how to build/use V8 as shared library.
#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
#if defined(__GNUC__) && ((__GNUC__ >= 4) || \
(__GNUC__ == 3 && __GNUC_MINOR__ >= 3)) && defined(V8_SHARED)
#define V8EXPORT __attribute__ ((visibility("default")))
#else // defined(__GNUC__) && (__GNUC__ >= 4)
#else
#define V8EXPORT
#endif // defined(__GNUC__) && (__GNUC__ >= 4)
#endif
#endif // _WIN32

deps/v8/include/v8.h (132)

@@ -63,15 +63,16 @@
#else // _WIN32
// Setup for Linux shared library export.
#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
#if defined(__GNUC__) && ((__GNUC__ >= 4) || \
(__GNUC__ == 3 && __GNUC_MINOR__ >= 3)) && defined(V8_SHARED)
#ifdef BUILDING_V8_SHARED
#define V8EXPORT __attribute__ ((visibility("default")))
#else
#define V8EXPORT
#endif
#else // defined(__GNUC__) && (__GNUC__ >= 4)
#else
#define V8EXPORT
#endif // defined(__GNUC__) && (__GNUC__ >= 4)
#endif
#endif // _WIN32
@@ -1064,7 +1065,8 @@ class String : public Primitive {
enum WriteOptions {
NO_OPTIONS = 0,
HINT_MANY_WRITES_EXPECTED = 1,
NO_NULL_TERMINATION = 2
NO_NULL_TERMINATION = 2,
PRESERVE_ASCII_NULL = 4
};
// 16-bit character codes.
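An illustrative use of the new flag (an editorial sketch; the (buffer, start, length, options) overload of String::WriteAscii is assumed from the v8.h of this era):

    #include <v8.h>

    // Copy at most `capacity` characters, keeping embedded NUL
    // characters in the output (issue 2252, per the ChangeLog above).
    void CopyAscii(v8::Handle<v8::String> s, char* buf, int capacity) {
      s->WriteAscii(buf, 0, capacity, v8::String::PRESERVE_ASCII_NULL);
    }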
@@ -1550,6 +1552,12 @@ class Object : public Value {
*/
V8EXPORT Local<String> ObjectProtoToString();
/**
* Returns the function invoked as a constructor for this object.
* May be the null value.
*/
V8EXPORT Local<Value> GetConstructor();
/**
* Returns the name of the function invoked as a constructor for this object.
*/
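A sketch of how an embedder might use the new accessor (an editorial example; `obj` is a hypothetical handle):

    #include <v8.h>

    void InspectConstructor(v8::Handle<v8::Object> obj) {
      v8::Local<v8::Value> ctor = obj->GetConstructor();
      // Per the comment above, the result may be the null value.
      if (!ctor.IsEmpty() && ctor->IsFunction()) {
        v8::Local<v8::Function> fn = v8::Local<v8::Function>::Cast(ctor);
        (void)fn;  // e.g. compare against a known constructor function.
      }
    }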
@@ -2909,15 +2917,84 @@ typedef bool (*EntropySource)(unsigned char* buffer, size_t length);
* resolving the location of a return address on the stack. Profilers that
* change the return address on the stack can use this to resolve the stack
* location to whereever the profiler stashed the original return address.
* When invoked, return_addr_location will point to a location on stack where
* a machine return address resides, this function should return either the
* same pointer, or a pointer to the profiler's copy of the original return
* address.
*
* \param return_addr_location points to a location on stack where a machine
* return address resides.
* \returns either return_addr_location, or else a pointer to the profiler's
* copy of the original return address.
*
* \note the resolver function must not cause garbage collection.
*/
typedef uintptr_t (*ReturnAddressLocationResolver)(
uintptr_t return_addr_location);
/**
* FunctionEntryHook is the type of the profile entry hook called at entry to
* any generated function when function-level profiling is enabled.
*
* \param function the address of the function that's being entered.
* \param return_addr_location points to a location on stack where the machine
* return address resides. This can be used to identify the caller of
* \p function, and/or modified to divert execution when \p function exits.
*
* \note the entry hook must not cause garbage collection.
*/
typedef void (*FunctionEntryHook)(uintptr_t function,
uintptr_t return_addr_location);
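A minimal sketch of an entry hook that respects the no-GC rule (an editorial example; registration via V8::SetFunctionEntryHook is declared further down in this header, and fails if a hook is already active):

    #include <stdint.h>

    static volatile unsigned long g_entry_count = 0;

    // Counts generated-function entries. It makes no V8 calls and
    // allocates nothing, so it cannot trigger garbage collection.
    static void CountingEntryHook(uintptr_t function,
                                  uintptr_t return_addr_location) {
      (void)function;
      (void)return_addr_location;
      ++g_entry_count;
    }

    // Later: bool ok = v8::V8::SetFunctionEntryHook(&CountingEntryHook);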
/**
* A JIT code event is issued each time code is added, moved or removed.
*
* \note removal events are not currently issued.
*/
struct JitCodeEvent {
enum EventType {
CODE_ADDED,
CODE_MOVED,
CODE_REMOVED
};
// Type of event.
EventType type;
// Start of the instructions.
void* code_start;
// Size of the instructions.
size_t code_len;
union {
// Only valid for CODE_ADDED.
struct {
// Name of the object associated with the code, note that the string is
// not zero-terminated.
const char* str;
// Number of chars in str.
size_t len;
} name;
// New location of instructions. Only valid for CODE_MOVED.
void* new_code_start;
};
};
/**
* Option flags passed to the SetJitCodeEventHandler function.
*/
enum JitCodeEventOptions {
kJitCodeEventDefault = 0,
// Generate callbacks for already existent code.
kJitCodeEventEnumExisting = 1
};
/**
* Callback function passed to SetJitCodeEventHandler.
*
* \param event code add, move or removal event.
*/
typedef void (*JitCodeEventHandler)(const JitCodeEvent* event);
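A hedged sketch of a complete handler and its registration (V8::SetJitCodeEventHandler is declared further down in this header; the logging destination is illustrative):

    #include <stdio.h>
    #include <v8.h>

    static void OnJitCodeEvent(const v8::JitCodeEvent* event) {
      switch (event->type) {
        case v8::JitCodeEvent::CODE_ADDED:
          // name.str is not zero-terminated; print with an explicit
          // length (a real profiler would copy it, per the notes below).
          fprintf(stderr, "code added at %p (%zu bytes): %.*s\n",
                  event->code_start, event->code_len,
                  static_cast<int>(event->name.len), event->name.str);
          break;
        case v8::JitCodeEvent::CODE_MOVED:
          fprintf(stderr, "code moved from %p to %p\n",
                  event->code_start, event->new_code_start);
          break;
        default:
          // CODE_REMOVED is declared but not issued yet, and new event
          // types may appear; unknown ones must be ignored.
          break;
      }
    }

    void InstallJitLogger() {
      // kJitCodeEventEnumExisting would additionally replay events for
      // code that already exists.
      v8::V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault,
                                     &OnJitCodeEvent);
    }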
/**
* Interface for iterating though all external resources in the heap.
*/
@@ -3178,6 +3255,43 @@ class V8EXPORT V8 {
static void SetReturnAddressLocationResolver(
ReturnAddressLocationResolver return_address_resolver);
/**
* Allows the host application to provide the address of a function that's
* invoked on entry to every V8-generated function.
* Note that \p entry_hook is invoked at the very start of each
* generated function.
*
* \param entry_hook a function that will be invoked on entry to every
* V8-generated function.
* \returns true on success on supported platforms, false on failure.
* \note Setting a new entry hook function when one is already active will
* fail.
*/
static bool SetFunctionEntryHook(FunctionEntryHook entry_hook);
/**
* Allows the host application to provide the address of a function that is
* notified each time code is added, moved or removed.
*
* \param options options for the JIT code event handler.
* \param event_handler the JIT code event handler, which will be invoked
* each time code is added, moved or removed.
* \note \p event_handler won't get notified of existent code.
* \note since code removal notifications are not currently issued, the
* \p event_handler may get notifications of code that overlaps earlier
* code notifications. This happens when code areas are reused, and the
* earlier overlapping code areas should therefore be discarded.
* \note the events passed to \p event_handler and the strings they point to
* are not guaranteed to live past each call. The \p event_handler must
* copy strings and other parameters it needs to keep around.
* \note the set of events declared in JitCodeEvent::EventType is expected to
* grow over time, and the JitCodeEvent structure is expected to accrue
* new members. The \p event_handler function must ignore event codes
* it does not recognize to maintain future compatibility.
*/
static void SetJitCodeEventHandler(JitCodeEventOptions options,
JitCodeEventHandler event_handler);
/**
* Adjusts the amount of registered external memory. Used to give
* V8 an indication of the amount of externally allocated memory
@@ -3928,7 +4042,7 @@ class Internals {
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
static const int kEmptySymbolRootIndex = 128;
static const int kEmptySymbolRootIndex = 116;
static const int kJSObjectType = 0xaa;
static const int kFirstNonstringType = 0x80;

deps/v8/preparser/preparser-process.cc (6)

@@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -202,7 +202,7 @@ void fail(v8::PreParserData* data, const char* message, ...) {
fflush(stderr);
if (data != NULL) {
// Print preparser data to stdout.
uint32_t size = data->size();
uint32_t size = static_cast<uint32_t>(data->size());
fprintf(stderr, "LOG: data size: %u\n", size);
if (!WriteBuffer(stdout, data->data(), size)) {
perror("ERROR: Writing data");
@@ -232,7 +232,7 @@ struct ExceptionExpectation {
void CheckException(v8::PreParserData* data,
ExceptionExpectation* expects) {
PreparseDataInterpreter reader(data->data(), data->size());
PreparseDataInterpreter reader(data->data(), static_cast<int>(data->size()));
if (expects->throws) {
if (!reader.throws()) {
if (expects->type == NULL) {

deps/v8/samples/lineprocessor.cc (38)

@@ -25,19 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This controls whether this sample is compiled with debugger support.
// You may trace its usages in source text to see what parts of program
// are responsible for debugging support.
// Note that V8 itself should be compiled with enabled debugger support
// to have it all working.
#define SUPPORT_DEBUGGING
#include <v8.h>
#ifdef SUPPORT_DEBUGGING
#ifdef ENABLE_DEBUGGER_SUPPORT
#include <v8-debug.h>
#endif
#endif // ENABLE_DEBUGGER_SUPPORT
#include <fcntl.h>
#include <string.h>
@@ -116,7 +108,7 @@ bool RunCppCycle(v8::Handle<v8::Script> script, v8::Local<v8::Context> context,
bool report_exceptions);
#ifdef SUPPORT_DEBUGGING
#ifdef ENABLE_DEBUGGER_SUPPORT
v8::Persistent<v8::Context> debug_message_context;
void DispatchDebugMessages() {
@@ -135,7 +127,7 @@ void DispatchDebugMessages() {
v8::Debug::ProcessDebugMessages();
}
#endif
#endif // ENABLE_DEBUGGER_SUPPORT
int RunMain(int argc, char* argv[]) {
@@ -146,11 +138,11 @@ int RunMain(int argc, char* argv[]) {
v8::Handle<v8::Value> script_name(NULL);
int script_param_counter = 0;
#ifdef SUPPORT_DEBUGGING
#ifdef ENABLE_DEBUGGER_SUPPORT
int port_number = -1;
bool wait_for_connection = false;
bool support_callback = false;
#endif
#endif // ENABLE_DEBUGGER_SUPPORT
MainCycleType cycle_type = CycleInCpp;
@@ -164,7 +156,7 @@ int RunMain(int argc, char* argv[]) {
cycle_type = CycleInCpp;
} else if (strcmp(str, "--main-cycle-in-js") == 0) {
cycle_type = CycleInJs;
#ifdef SUPPORT_DEBUGGING
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (strcmp(str, "--callback") == 0) {
support_callback = true;
} else if (strcmp(str, "--wait-for-connection") == 0) {
@@ -172,7 +164,7 @@ int RunMain(int argc, char* argv[]) {
} else if (strcmp(str, "-p") == 0 && i + 1 < argc) {
port_number = atoi(argv[i + 1]); // NOLINT
i++;
#endif
#endif // ENABLE_DEBUGGER_SUPPORT
} else if (strncmp(str, "--", 2) == 0) {
printf("Warning: unknown flag %s.\nTry --help for options\n", str);
} else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
@@ -219,7 +211,7 @@ int RunMain(int argc, char* argv[]) {
// Enter the newly created execution environment.
v8::Context::Scope context_scope(context);
#ifdef SUPPORT_DEBUGGING
#ifdef ENABLE_DEBUGGER_SUPPORT
debug_message_context = v8::Persistent<v8::Context>::New(context);
v8::Locker locker;
@@ -231,7 +223,7 @@ int RunMain(int argc, char* argv[]) {
if (port_number != -1) {
v8::Debug::EnableAgent("lineprocessor", port_number, wait_for_connection);
}
#endif
#endif // ENABLE_DEBUGGER_SUPPORT
bool report_exceptions = true;
@@ -272,9 +264,9 @@ int RunMain(int argc, char* argv[]) {
bool RunCppCycle(v8::Handle<v8::Script> script, v8::Local<v8::Context> context,
bool report_exceptions) {
#ifdef SUPPORT_DEBUGGING
#ifdef ENABLE_DEBUGGER_SUPPORT
v8::Locker lock;
#endif
#endif // ENABLE_DEBUGGER_SUPPORT
v8::Handle<v8::String> fun_name = v8::String::New("ProcessLine");
v8::Handle<v8::Value> process_val =
@@ -347,7 +339,7 @@ v8::Handle<v8::String> ReadFile(const char* name) {
char* chars = new char[size + 1];
chars[size] = '\0';
for (int i = 0; i < size;) {
int read = fread(&chars[i], 1, size - i, file);
int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
i += read;
}
fclose(file);
@@ -427,9 +419,9 @@ v8::Handle<v8::String> ReadLine() {
char* res;
{
#ifdef SUPPORT_DEBUGGING
#ifdef ENABLE_DEBUGGER_SUPPORT
v8::Unlocker unlocker;
#endif
#endif // ENABLE_DEBUGGER_SUPPORT
res = fgets(buffer, kBufferSize, stdin);
}
if (res == NULL) {

deps/v8/samples/process.cc (14)

@@ -1,4 +1,4 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -351,7 +351,7 @@ Handle<Value> JsHttpRequestProcessor::MapGet(Local<String> name,
// Otherwise fetch the value and wrap it in a JavaScript string
const string& value = (*iter).second;
return String::New(value.c_str(), value.length());
return String::New(value.c_str(), static_cast<int>(value.length()));
}
@@ -443,7 +443,7 @@ Handle<Value> JsHttpRequestProcessor::GetPath(Local<String> name,
const string& path = request->Path();
// Wrap the result in a JavaScript string and return it.
return String::New(path.c_str(), path.length());
return String::New(path.c_str(), static_cast<int>(path.length()));
}
@@ -451,7 +451,7 @@ Handle<Value> JsHttpRequestProcessor::GetReferrer(Local<String> name,
const AccessorInfo& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->Referrer();
return String::New(path.c_str(), path.length());
return String::New(path.c_str(), static_cast<int>(path.length()));
}
@@ -459,7 +459,7 @@ Handle<Value> JsHttpRequestProcessor::GetHost(Local<String> name,
const AccessorInfo& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->Host();
return String::New(path.c_str(), path.length());
return String::New(path.c_str(), static_cast<int>(path.length()));
}
@@ -467,7 +467,7 @@ Handle<Value> JsHttpRequestProcessor::GetUserAgent(Local<String> name,
const AccessorInfo& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->UserAgent();
return String::New(path.c_str(), path.length());
return String::New(path.c_str(), static_cast<int>(path.length()));
}
@@ -557,7 +557,7 @@ Handle<String> ReadFile(const string& name) {
char* chars = new char[size + 1];
chars[size] = '\0';
for (int i = 0; i < size;) {
int read = fread(&chars[i], 1, size - i, file);
int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
i += read;
}
fclose(file);

deps/v8/samples/shell.cc (2)

@@ -205,7 +205,7 @@ v8::Handle<v8::String> ReadFile(const char* name) {
char* chars = new char[size + 1];
chars[size] = '\0';
for (int i = 0; i < size;) {
int read = fread(&chars[i], 1, size - i, file);
int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
i += read;
}
fclose(file);

deps/v8/src/SConscript (29)

@@ -43,8 +43,8 @@ SOURCES = {
assembler.cc
ast.cc
atomicops_internals_x86_gcc.cc
bignum.cc
bignum-dtoa.cc
bignum.cc
bootstrapper.cc
builtins.cc
cached-powers.cc
@@ -67,27 +67,30 @@
disassembler.cc
diy-fp.cc
dtoa.cc
elements.cc
elements-kind.cc
elements.cc
execution.cc
extensions/externalize-string-extension.cc
extensions/gc-extension.cc
extensions/statistics-extension.cc
factory.cc
fast-dtoa.cc
fixed-dtoa.cc
flags.cc
frames.cc
full-codegen.cc
func-name-inferrer.cc
gdb-jit.cc
global-handles.cc
fast-dtoa.cc
fixed-dtoa.cc
handles.cc
heap-profiler.cc
heap.cc
hydrogen.cc
hydrogen-instructions.cc
hydrogen.cc
ic.cc
incremental-marking.cc
interface.cc
inspector.cc
interface.cc
interpreter-irregexp.cc
isolate.cc
jsregexp.cc
@@ -99,34 +102,37 @@
log.cc
mark-compact.cc
messages.cc
objects.cc
objects-printer.cc
objects-visiting.cc
objects.cc
once.cc
optimizing-compiler-thread.cc
parser.cc
preparser.cc
preparse-data.cc
preparser.cc
profile-generator.cc
property.cc
regexp-macro-assembler-irregexp.cc
regexp-macro-assembler.cc
regexp-stack.cc
rewriter.cc
runtime.cc
runtime-profiler.cc
runtime.cc
safepoint-table.cc
scanner.cc
scanner-character-streams.cc
scanner.cc
scopeinfo.cc
scopes.cc
serialize.cc
snapshot-common.cc
spaces.cc
store-buffer.cc
string-search.cc
string-stream.cc
strtod.cc
stub-cache.cc
token.cc
transitions.cc
type-info.cc
unicode.cc
utils.cc
@@ -137,10 +143,7 @@ SOURCES = {
v8utils.cc
variables.cc
version.cc
store-buffer.cc
zone.cc
extensions/gc-extension.cc
extensions/externalize-string-extension.cc
"""),
'arch:arm': Split("""
arm/builtins-arm.cc

deps/v8/src/accessors.cc (69)

@@ -92,9 +92,9 @@ MaybeObject* Accessors::ArrayGetLength(Object* object, void*) {
Object* Accessors::FlattenNumber(Object* value) {
if (value->IsNumber() || !value->IsJSValue()) return value;
JSValue* wrapper = JSValue::cast(value);
ASSERT(Isolate::Current()->context()->global_context()->number_function()->
ASSERT(Isolate::Current()->context()->native_context()->number_function()->
has_initial_map());
Map* number_map = Isolate::Current()->context()->global_context()->
Map* number_map = Isolate::Current()->context()->native_context()->
number_function()->initial_map();
if (wrapper->map() == number_map) return wrapper->value();
return value;
@@ -805,4 +805,69 @@ const AccessorDescriptor Accessors::ObjectPrototype = {
0
};
//
// Accessors::MakeModuleExport
//
static v8::Handle<v8::Value> ModuleGetExport(
v8::Local<v8::String> property,
const v8::AccessorInfo& info) {
JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
Context* context = Context::cast(instance->context());
ASSERT(context->IsModuleContext());
int slot = info.Data()->Int32Value();
Object* value = context->get(slot);
if (value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
Isolate* isolate = instance->GetIsolate();
isolate->ScheduleThrow(
*isolate->factory()->NewReferenceError("not_defined",
HandleVector(&name, 1)));
return v8::Handle<v8::Value>();
}
return v8::Utils::ToLocal(Handle<Object>(value));
}
static void ModuleSetExport(
v8::Local<v8::String> property,
v8::Local<v8::Value> value,
const v8::AccessorInfo& info) {
JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
Context* context = Context::cast(instance->context());
ASSERT(context->IsModuleContext());
int slot = info.Data()->Int32Value();
Object* old_value = context->get(slot);
if (old_value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
Isolate* isolate = instance->GetIsolate();
isolate->ScheduleThrow(
*isolate->factory()->NewReferenceError("not_defined",
HandleVector(&name, 1)));
return;
}
context->set(slot, *v8::Utils::OpenHandle(*value));
}
Handle<AccessorInfo> Accessors::MakeModuleExport(
Handle<String> name,
int index,
PropertyAttributes attributes) {
Factory* factory = name->GetIsolate()->factory();
Handle<AccessorInfo> info = factory->NewAccessorInfo();
info->set_property_attributes(attributes);
info->set_all_can_read(true);
info->set_all_can_write(true);
info->set_name(*name);
info->set_data(Smi::FromInt(index));
Handle<Object> getter = v8::FromCData(&ModuleGetExport);
Handle<Object> setter = v8::FromCData(&ModuleSetExport);
info->set_getter(*getter);
if (!(attributes & ReadOnly)) info->set_setter(*setter);
return info;
}
} } // namespace v8::internal

deps/v8/src/accessors.h (4)

@@ -85,6 +85,10 @@ class Accessors : public AllStatic {
void*);
static MaybeObject* FunctionGetArguments(Object* object, void*);
// Accessor infos.
static Handle<AccessorInfo> MakeModuleExport(
Handle<String> name, int index, PropertyAttributes attributes);
private:
// Accessor functions only used through the descriptor.
static MaybeObject* FunctionGetLength(Object* object, void*);

deps/v8/src/api.cc (243)

@@ -33,6 +33,7 @@
#include "../include/v8-profiler.h"
#include "../include/v8-testing.h"
#include "bootstrapper.h"
#include "code-stubs.h"
#include "compiler.h"
#include "conversions-inl.h"
#include "counters.h"
@@ -540,7 +541,9 @@ Extension::Extension(const char* name,
source_(source, source_length_),
dep_count_(dep_count),
deps_(deps),
auto_enable_(false) { }
auto_enable_(false) {
CHECK(source != NULL || source_length_ == 0);
}
v8::Handle<Primitive> Undefined() {
@@ -767,8 +770,8 @@ void Context::SetData(v8::Handle<String> data) {
i::Isolate* isolate = env->GetIsolate();
if (IsDeadCheck(isolate, "v8::Context::SetData()")) return;
i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
ASSERT(env->IsGlobalContext());
if (env->IsGlobalContext()) {
ASSERT(env->IsNativeContext());
if (env->IsNativeContext()) {
env->set_data(*raw_data);
}
}
@@ -781,8 +784,8 @@ v8::Local<v8::Value> Context::GetData() {
return v8::Local<Value>();
}
i::Object* raw_result = NULL;
ASSERT(env->IsGlobalContext());
if (env->IsGlobalContext()) {
ASSERT(env->IsNativeContext());
if (env->IsNativeContext()) {
raw_result = env->data();
} else {
return Local<Value>();
@@ -1066,7 +1069,6 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
v8::PropertyAttribute attributes,
v8::Handle<AccessorSignature> signature) {
i::Handle<i::AccessorInfo> obj = FACTORY->NewAccessorInfo();
ASSERT(getter != NULL);
SET_FIELD_WRAPPED(obj, set_getter, getter);
SET_FIELD_WRAPPED(obj, set_setter, setter);
if (data.IsEmpty()) data = v8::Undefined();
@@ -1537,9 +1539,10 @@ Local<Script> Script::New(v8::Handle<String> source,
name_obj,
line_offset,
column_offset,
isolate->global_context(),
NULL,
pre_data_impl,
Utils::OpenHandle(*script_data),
Utils::OpenHandle(*script_data, true),
i::NOT_NATIVES_CODE);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
@@ -3031,6 +3034,17 @@ Local<String> v8::Object::ObjectProtoToString() {
}
Local<Value> v8::Object::GetConstructor() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::GetConstructor()",
return Local<v8::Function>());
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> constructor(self->GetConstructor());
return Utils::ToLocal(constructor);
}
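// Illustrative usage, not part of the patch: for a plain object the result
// is the built-in Object function.
v8::HandleScope scope;
v8::Local<v8::Object> obj = v8::Object::New();
v8::Local<v8::Value> ctor = obj->GetConstructor();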
Local<String> v8::Object::GetConstructorName() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::GetConstructorName()",
@ -3223,7 +3237,7 @@ void v8::Object::TurnOnAccessCheck() {
i::Deoptimizer::DeoptimizeGlobalObject(*obj);
i::Handle<i::Map> new_map =
isolate->factory()->CopyMapDropTransitions(i::Handle<i::Map>(obj->map()));
isolate->factory()->CopyMap(i::Handle<i::Map>(obj->map()));
new_map->set_is_access_check_needed(true);
obj->set_map(*new_map);
}
@ -3258,7 +3272,7 @@ static i::Context* GetCreationContext(i::JSObject* object) {
} else {
function = i::JSFunction::cast(constructor);
}
return function->context()->global_context();
return function->context()->native_context();
}
@ -3287,13 +3301,15 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
v8::Handle<v8::Value> value) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::SetHiddenValue()", return false);
if (value.IsEmpty()) return DeleteHiddenValue(key);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
i::Handle<i::Object> result =
i::JSObject::SetHiddenProperty(self, key_obj, value_obj);
i::JSObject::SetHiddenProperty(self, key_symbol, value_obj);
return *result == *self;
}
@ -3305,7 +3321,8 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> result(self->GetHiddenProperty(*key_obj));
i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj);
i::Handle<i::Object> result(self->GetHiddenProperty(*key_symbol));
if (result->IsUndefined()) return v8::Local<v8::Value>();
return Utils::ToLocal(result);
}
@ -3318,7 +3335,8 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
self->DeleteHiddenProperty(*key_obj);
i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj);
self->DeleteHiddenProperty(*key_symbol);
return true;
}
@ -3386,7 +3404,7 @@ void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
ON_BAILOUT(isolate, "v8::SetElementsToPixelData()", return);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
if (!ApiCheck(length <= i::ExternalPixelArray::kMaxLength,
if (!ApiCheck(length >= 0 && length <= i::ExternalPixelArray::kMaxLength,
"v8::Object::SetIndexedPropertiesToPixelData()",
"length exceeds max acceptable value")) {
return;
@ -3442,7 +3460,7 @@ void v8::Object::SetIndexedPropertiesToExternalArrayData(
ON_BAILOUT(isolate, "v8::SetIndexedPropertiesToExternalArrayData()", return);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
if (!ApiCheck(length <= i::ExternalArray::kMaxLength,
if (!ApiCheck(length >= 0 && length <= i::ExternalArray::kMaxLength,
"v8::Object::SetIndexedPropertiesToExternalArrayData()",
"length exceeds max acceptable value")) {
return;
@ -3835,6 +3853,9 @@ int String::WriteUtf8(char* buffer,
LOG_API(isolate, "String::WriteUtf8");
ENTER_V8(isolate);
i::Handle<i::String> str = Utils::OpenHandle(this);
if (options & HINT_MANY_WRITES_EXPECTED) {
FlattenString(str); // Flatten the string for efficiency.
}
int string_length = str->length();
if (str->IsAsciiRepresentation()) {
int len;
@ -3891,11 +3912,7 @@ int String::WriteUtf8(char* buffer,
// Slow case.
i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
isolate->string_tracker()->RecordWrite(str);
if (options & HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
FlattenString(str);
}
write_input_buffer.Reset(0, *str);
int len = str->length();
// Encode the first K - 3 bytes directly into the buffer since we
@ -3937,8 +3954,9 @@ int String::WriteUtf8(char* buffer,
c,
unibrow::Utf16::kNoPreviousCharacter);
if (pos + written <= capacity) {
for (int j = 0; j < written; j++)
for (int j = 0; j < written; j++) {
buffer[pos + j] = intermediate[j];
}
pos += written;
nchars++;
} else {
@ -3951,8 +3969,9 @@ int String::WriteUtf8(char* buffer,
}
if (nchars_ref != NULL) *nchars_ref = nchars;
if (!(options & NO_NULL_TERMINATION) &&
(i == len && (capacity == -1 || pos < capacity)))
(i == len && (capacity == -1 || pos < capacity))) {
buffer[pos++] = '\0';
}
return pos;
}
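// Illustrative usage, not part of the patch: with the hint set, the string
// is now flattened once up front, on the fast (ASCII) path as well as the
// slow path.
v8::HandleScope scope;
v8::Handle<v8::String> s = v8::String::New("example");
char buf[64];
int nchars = 0;
int written = s->WriteUtf8(buf, sizeof(buf), &nchars,
                           v8::String::HINT_MANY_WRITES_EXPECTED);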
@ -3965,28 +3984,45 @@ int String::WriteAscii(char* buffer,
if (IsDeadCheck(isolate, "v8::String::WriteAscii()")) return 0;
LOG_API(isolate, "String::WriteAscii");
ENTER_V8(isolate);
i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
ASSERT(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(this);
isolate->string_tracker()->RecordWrite(str);
if (options & HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
str->TryFlatten();
FlattenString(str); // Flatten the string for efficiency.
}
if (str->IsAsciiRepresentation()) {
// WriteToFlat is faster than using the StringInputBuffer.
if (length == -1) length = str->length() + 1;
int len = i::Min(length, str->length() - start);
i::String::WriteToFlat(*str, buffer, start, start + len);
if (!(options & PRESERVE_ASCII_NULL)) {
for (int i = 0; i < len; i++) {
if (buffer[i] == '\0') buffer[i] = ' ';
}
}
if (!(options & NO_NULL_TERMINATION) && length > len) {
buffer[len] = '\0';
}
return len;
}
i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
int end = length;
if ( (length == -1) || (length > str->length() - start) )
if ((length == -1) || (length > str->length() - start)) {
end = str->length() - start;
}
if (end < 0) return 0;
write_input_buffer.Reset(start, *str);
int i;
for (i = 0; i < end; i++) {
char c = static_cast<char>(write_input_buffer.GetNext());
if (c == '\0') c = ' ';
if (c == '\0' && !(options & PRESERVE_ASCII_NULL)) c = ' ';
buffer[i] = c;
}
if (!(options & NO_NULL_TERMINATION) && (length == -1 || i < length))
if (!(options & NO_NULL_TERMINATION) && (length == -1 || i < length)) {
buffer[i] = '\0';
}
return i;
}
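// Illustrative usage, not part of the patch: PRESERVE_ASCII_NULL copies
// embedded '\0' bytes through instead of rewriting them to spaces.
v8::Handle<v8::String> s = v8::String::New("a\0b", 3);
char out[16];
int copied = s->WriteAscii(out, 0, -1, v8::String::PRESERVE_ASCII_NULL);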
@ -4005,7 +4041,7 @@ int String::Write(uint16_t* buffer,
if (options & HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
str->TryFlatten();
FlattenString(str);
}
int end = start + length;
if ((length == -1) || (length > str->length() - start) )
@ -4191,8 +4227,9 @@ void v8::Object::SetPointerInInternalField(int index, void* value) {
i::Handle<i::Foreign> foreign =
isolate->factory()->NewForeign(
reinterpret_cast<i::Address>(value), i::TENURED);
if (!foreign.is_null())
Utils::OpenHandle(this)->SetInternalField(index, *foreign);
if (!foreign.is_null()) {
Utils::OpenHandle(this)->SetInternalField(index, *foreign);
}
}
ASSERT_EQ(value, GetPointerFromInternalField(index));
}
@ -4221,6 +4258,20 @@ void v8::V8::SetReturnAddressLocationResolver(
}
bool v8::V8::SetFunctionEntryHook(FunctionEntryHook entry_hook) {
return i::ProfileEntryHookStub::SetFunctionEntryHook(entry_hook);
}
void v8::V8::SetJitCodeEventHandler(
JitCodeEventOptions options, JitCodeEventHandler event_handler) {
i::Isolate* isolate = i::Isolate::Current();
// Ensure that logging is initialized for our isolate.
isolate->InitializeLoggingAndCounters();
isolate->logger()->SetCodeEventHandler(options, event_handler);
}
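// Illustrative usage, not part of the patch: enumerate code that already
// exists and listen for code generated from now on.
static void JitListener(const v8::JitCodeEvent* event) {
  if (event->type == v8::JitCodeEvent::CODE_ADDED) {
    // event->name.str/len and event->code_start/code_len describe the
    // freshly generated code object.
  }
}
// ... during embedder startup:
v8::V8::SetJitCodeEventHandler(v8::kJitCodeEventEnumExisting, JitListener);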
bool v8::V8::Dispose() {
i::Isolate* isolate = i::Isolate::Current();
if (!ApiCheck(isolate != NULL && isolate->IsDefaultIsolate(),
@ -4355,7 +4406,7 @@ Persistent<Context> v8::Context::New(
// Create the environment.
env = isolate->bootstrapper()->CreateEnvironment(
isolate,
Utils::OpenHandle(*global_object),
Utils::OpenHandle(*global_object, true),
proxy_template,
extensions);
@ -4399,7 +4450,7 @@ void v8::Context::UseDefaultSecurityToken() {
}
ENTER_V8(isolate);
i::Handle<i::Context> env = Utils::OpenHandle(this);
env->set_security_token(env->global());
env->set_security_token(env->global_object());
}
@ -4444,7 +4495,7 @@ v8::Local<v8::Context> Context::GetCurrent() {
if (IsDeadCheck(isolate, "v8::Context::GetCurrent()")) {
return Local<Context>();
}
i::Handle<i::Object> current = isolate->global_context();
i::Handle<i::Object> current = isolate->native_context();
if (current.is_null()) return Local<Context>();
i::Handle<i::Context> context = i::Handle<i::Context>::cast(current);
return Utils::ToLocal(context);
@ -4457,7 +4508,7 @@ v8::Local<v8::Context> Context::GetCalling() {
return Local<Context>();
}
i::Handle<i::Object> calling =
isolate->GetCallingGlobalContext();
isolate->GetCallingNativeContext();
if (calling.is_null()) return Local<Context>();
i::Handle<i::Context> context = i::Handle<i::Context>::cast(calling);
return Utils::ToLocal(context);
@ -4494,9 +4545,9 @@ void Context::ReattachGlobal(Handle<Object> global_object) {
i::Object** ctx = reinterpret_cast<i::Object**>(this);
i::Handle<i::Context> context =
i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
isolate->bootstrapper()->ReattachGlobal(
context,
Utils::OpenHandle(*global_object));
i::Handle<i::JSGlobalProxy> global_proxy =
i::Handle<i::JSGlobalProxy>::cast(Utils::OpenHandle(*global_object));
isolate->bootstrapper()->ReattachGlobal(context, global_proxy);
}
@ -4750,6 +4801,7 @@ Local<String> v8::String::NewExternal(
EnsureInitializedForIsolate(isolate, "v8::String::NewExternal()");
LOG_API(isolate, "String::NewExternal");
ENTER_V8(isolate);
CHECK(resource && resource->data());
i::Handle<i::String> result = NewExternalStringHandle(isolate, resource);
isolate->heap()->external_string_table()->AddString(*result);
return Utils::ToLocal(result);
@ -4770,6 +4822,7 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
if (isolate->heap()->IsInGCPostProcessing()) {
return false;
}
CHECK(resource && resource->data());
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
isolate->heap()->external_string_table()->AddString(*obj);
@ -4784,6 +4837,7 @@ Local<String> v8::String::NewExternal(
EnsureInitializedForIsolate(isolate, "v8::String::NewExternal()");
LOG_API(isolate, "String::NewExternal");
ENTER_V8(isolate);
CHECK(resource && resource->data());
i::Handle<i::String> result = NewExternalAsciiStringHandle(isolate, resource);
isolate->heap()->external_string_table()->AddString(*result);
return Utils::ToLocal(result);
@ -4805,6 +4859,7 @@ bool v8::String::MakeExternal(
if (isolate->heap()->IsInGCPostProcessing()) {
return false;
}
CHECK(resource && resource->data());
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
isolate->heap()->external_string_table()->AddString(*obj);
@ -5184,6 +5239,8 @@ void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
i::Isolate* isolate = EnterIsolateIfNeeded();
if (IsDeadCheck(isolate, "v8::V8::SetCreateHistogramFunction()")) return;
isolate->stats_table()->SetCreateHistogramFunction(callback);
isolate->InitializeLoggingAndCounters();
isolate->counters()->ResetHistograms();
}
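// Illustrative sketch, not part of the patch: EmbedderHistogram is an
// assumed type standing in for the embedder's own histogram object. The
// added ResetHistograms() call makes V8 recreate its histograms through
// this callback even if some were created before it was registered.
static void* CreateHistogram(const char* name, int min, int max,
                             size_t buckets) {
  return new EmbedderHistogram(name, min, max, buckets);
}
v8::V8::SetCreateHistogramFunction(CreateHistogram);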
void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
@ -5233,8 +5290,9 @@ void V8::AddImplicitReferences(Persistent<Object> parent,
intptr_t V8::AdjustAmountOfExternalAllocatedMemory(intptr_t change_in_bytes) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::V8::AdjustAmountOfExternalAllocatedMemory()")) {
i::Isolate* isolate = i::Isolate::UncheckedCurrent();
if (isolate == NULL || !isolate->IsInitialized() ||
IsDeadCheck(isolate, "v8::V8::AdjustAmountOfExternalAllocatedMemory()")) {
return 0;
}
return isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
@ -5586,7 +5644,8 @@ bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
foreign =
isolate->factory()->NewForeign(FUNCTION_ADDR(EventCallbackWrapper));
}
isolate->debugger()->SetEventListener(foreign, Utils::OpenHandle(*data));
isolate->debugger()->SetEventListener(foreign,
Utils::OpenHandle(*data, true));
return true;
}
@ -5601,7 +5660,8 @@ bool Debug::SetDebugEventListener2(EventCallback2 that, Handle<Value> data) {
if (that != NULL) {
foreign = isolate->factory()->NewForeign(FUNCTION_ADDR(that));
}
isolate->debugger()->SetEventListener(foreign, Utils::OpenHandle(*data));
isolate->debugger()->SetEventListener(foreign,
Utils::OpenHandle(*data, true));
return true;
}
@ -5612,7 +5672,7 @@ bool Debug::SetDebugEventListener(v8::Handle<v8::Object> that,
ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener()", return false);
ENTER_V8(isolate);
isolate->debugger()->SetEventListener(Utils::OpenHandle(*that),
Utils::OpenHandle(*data));
Utils::OpenHandle(*data, true));
return true;
}
@ -5751,7 +5811,7 @@ Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
v8::HandleScope scope;
i::Debug* isolate_debug = isolate->debug();
isolate_debug->Load();
i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global());
i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global_object());
i::Handle<i::String> name =
isolate->factory()->LookupAsciiSymbol("MakeMirror");
i::Handle<i::Object> fun_obj = i::GetProperty(debug, name);
@ -5783,6 +5843,7 @@ void Debug::ProcessDebugMessages() {
i::Execution::ProcessDebugMessages(true);
}
Local<Context> Debug::GetDebugContext() {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Debug::GetDebugContext()");
@ -5790,6 +5851,20 @@ Local<Context> Debug::GetDebugContext() {
return Utils::ToLocal(i::Isolate::Current()->debugger()->GetDebugContext());
}
void Debug::SetLiveEditEnabled(bool enable, Isolate* isolate) {
// If no isolate is supplied, use the default isolate.
i::Debugger* debugger;
if (isolate != NULL) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
debugger = internal_isolate->debugger();
} else {
debugger = i::Isolate::GetDefaultIsolateDebugger();
}
debugger->set_live_edit_enabled(enable);
}
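// Illustrative usage, not part of the patch: passing NULL selects the
// default isolate, per the comment above.
v8::Debug::SetLiveEditEnabled(false, NULL);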
#endif // ENABLE_DEBUGGER_SUPPORT
@ -6374,12 +6449,28 @@ char* HandleScopeImplementer::RestoreThread(char* storage) {
void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
#ifdef DEBUG
bool found_block_before_deferred = false;
#endif
// Iterate over all handles in the blocks except for the last.
for (int i = blocks()->length() - 2; i >= 0; --i) {
Object** block = blocks()->at(i);
v->VisitPointers(block, &block[kHandleBlockSize]);
if (last_handle_before_deferred_block_ != NULL &&
(last_handle_before_deferred_block_ < &block[kHandleBlockSize]) &&
(last_handle_before_deferred_block_ >= block)) {
v->VisitPointers(block, last_handle_before_deferred_block_);
ASSERT(!found_block_before_deferred);
#ifdef DEBUG
found_block_before_deferred = true;
#endif
} else {
v->VisitPointers(block, &block[kHandleBlockSize]);
}
}
ASSERT(last_handle_before_deferred_block_ == NULL ||
found_block_before_deferred);
// Iterate over live handles in the last block (if any).
if (!blocks()->is_empty()) {
v->VisitPointers(blocks()->last(), handle_scope_data_.next);
@ -6407,4 +6498,66 @@ char* HandleScopeImplementer::Iterate(ObjectVisitor* v, char* storage) {
return storage + ArchiveSpacePerThread();
}
DeferredHandles* HandleScopeImplementer::Detach(Object** prev_limit) {
DeferredHandles* deferred =
new DeferredHandles(isolate()->handle_scope_data()->next, isolate());
while (!blocks_.is_empty()) {
Object** block_start = blocks_.last();
Object** block_limit = &block_start[kHandleBlockSize];
// We should not need to check for NoHandleAllocation here. Assert
// this.
ASSERT(prev_limit == block_limit ||
!(block_start <= prev_limit && prev_limit <= block_limit));
if (prev_limit == block_limit) break;
deferred->blocks_.Add(blocks_.last());
blocks_.RemoveLast();
}
// deferred->blocks_ now contains the blocks installed on the
// HandleScope stack since BeginDeferredScope was called, but in
// reverse order.
ASSERT(prev_limit == NULL || !blocks_.is_empty());
ASSERT(!blocks_.is_empty() && prev_limit != NULL);
ASSERT(last_handle_before_deferred_block_ != NULL);
last_handle_before_deferred_block_ = NULL;
return deferred;
}
void HandleScopeImplementer::BeginDeferredScope() {
ASSERT(last_handle_before_deferred_block_ == NULL);
last_handle_before_deferred_block_ = isolate()->handle_scope_data()->next;
}
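// Simplified sketch of the intended lifecycle, not part of the patch
// (Detach() is private and is reached through DeferredHandleScope):
//   impl->BeginDeferredScope();      // remember the next free handle slot
//   ... allocate handles that must outlive the current HandleScope ...
//   DeferredHandles* deferred = impl->Detach(prev_limit);  // steal blocks
//   ...
//   delete deferred;  // unlinks from the isolate and returns the blocks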
DeferredHandles::~DeferredHandles() {
isolate_->UnlinkDeferredHandles(this);
for (int i = 0; i < blocks_.length(); i++) {
#ifdef DEBUG
HandleScope::ZapRange(blocks_[i], &blocks_[i][kHandleBlockSize]);
#endif
isolate_->handle_scope_implementer()->ReturnBlock(blocks_[i]);
}
}
void DeferredHandles::Iterate(ObjectVisitor* v) {
ASSERT(!blocks_.is_empty());
ASSERT((first_block_limit_ >= blocks_.first()) &&
(first_block_limit_ <= &(blocks_.first())[kHandleBlockSize]));
v->VisitPointers(blocks_.first(), first_block_limit_);
for (int i = 1; i < blocks_.length(); i++) {
v->VisitPointers(blocks_[i], &blocks_[i][kHandleBlockSize]);
}
}
} } // namespace v8::internal

139
deps/v8/src/api.h

@ -159,6 +159,27 @@ class RegisteredExtension {
};
#define OPEN_HANDLE_LIST(V) \
V(Template, TemplateInfo) \
V(FunctionTemplate, FunctionTemplateInfo) \
V(ObjectTemplate, ObjectTemplateInfo) \
V(Signature, SignatureInfo) \
V(AccessorSignature, FunctionTemplateInfo) \
V(TypeSwitch, TypeSwitchInfo) \
V(Data, Object) \
V(RegExp, JSRegExp) \
V(Object, JSObject) \
V(Array, JSArray) \
V(String, String) \
V(Script, Object) \
V(Function, JSFunction) \
V(Message, JSObject) \
V(Context, Context) \
V(External, Foreign) \
V(StackTrace, JSArray) \
V(StackFrame, JSObject)
class Utils {
public:
static bool ReportApiFailure(const char* location, const char* message);
@ -205,42 +226,13 @@ class Utils {
static inline Local<TypeSwitch> ToLocal(
v8::internal::Handle<v8::internal::TypeSwitchInfo> obj);
static inline v8::internal::Handle<v8::internal::TemplateInfo>
OpenHandle(const Template* that);
static inline v8::internal::Handle<v8::internal::FunctionTemplateInfo>
OpenHandle(const FunctionTemplate* that);
static inline v8::internal::Handle<v8::internal::ObjectTemplateInfo>
OpenHandle(const ObjectTemplate* that);
static inline v8::internal::Handle<v8::internal::Object>
OpenHandle(const Data* data);
static inline v8::internal::Handle<v8::internal::JSRegExp>
OpenHandle(const RegExp* data);
static inline v8::internal::Handle<v8::internal::JSObject>
OpenHandle(const v8::Object* data);
static inline v8::internal::Handle<v8::internal::JSArray>
OpenHandle(const v8::Array* data);
static inline v8::internal::Handle<v8::internal::String>
OpenHandle(const String* data);
static inline v8::internal::Handle<v8::internal::Object>
OpenHandle(const Script* data);
static inline v8::internal::Handle<v8::internal::JSFunction>
OpenHandle(const Function* data);
static inline v8::internal::Handle<v8::internal::JSObject>
OpenHandle(const Message* message);
static inline v8::internal::Handle<v8::internal::JSArray>
OpenHandle(const StackTrace* stack_trace);
static inline v8::internal::Handle<v8::internal::JSObject>
OpenHandle(const StackFrame* stack_frame);
static inline v8::internal::Handle<v8::internal::Context>
OpenHandle(const v8::Context* context);
static inline v8::internal::Handle<v8::internal::SignatureInfo>
OpenHandle(const v8::Signature* sig);
static inline v8::internal::Handle<v8::internal::FunctionTemplateInfo>
OpenHandle(const v8::AccessorSignature* sig);
static inline v8::internal::Handle<v8::internal::TypeSwitchInfo>
OpenHandle(const v8::TypeSwitch* that);
static inline v8::internal::Handle<v8::internal::Foreign>
OpenHandle(const v8::External* that);
#define DECLARE_OPEN_HANDLE(From, To) \
static inline v8::internal::Handle<v8::internal::To> \
OpenHandle(const From* that, bool allow_empty_handle = false);
OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
#undef DECLARE_OPEN_HANDLE
};
@ -257,7 +249,7 @@ v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
if (!is_null()) {
handle = *this;
}
return Utils::OpenHandle(*scope->Close(Utils::ToLocal(handle)));
return Utils::OpenHandle(*scope->Close(Utils::ToLocal(handle)), true);
}
@ -294,33 +286,18 @@ MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
// Implementations of OpenHandle
#define MAKE_OPEN_HANDLE(From, To) \
v8::internal::Handle<v8::internal::To> Utils::OpenHandle(\
const v8::From* that) { \
return v8::internal::Handle<v8::internal::To>( \
#define MAKE_OPEN_HANDLE(From, To) \
v8::internal::Handle<v8::internal::To> Utils::OpenHandle( \
const v8::From* that, bool allow_empty_handle) { \
EXTRA_CHECK(allow_empty_handle || that != NULL); \
return v8::internal::Handle<v8::internal::To>( \
reinterpret_cast<v8::internal::To**>(const_cast<v8::From*>(that))); \
}
MAKE_OPEN_HANDLE(Template, TemplateInfo)
MAKE_OPEN_HANDLE(FunctionTemplate, FunctionTemplateInfo)
MAKE_OPEN_HANDLE(ObjectTemplate, ObjectTemplateInfo)
MAKE_OPEN_HANDLE(Signature, SignatureInfo)
MAKE_OPEN_HANDLE(AccessorSignature, FunctionTemplateInfo)
MAKE_OPEN_HANDLE(TypeSwitch, TypeSwitchInfo)
MAKE_OPEN_HANDLE(Data, Object)
MAKE_OPEN_HANDLE(RegExp, JSRegExp)
MAKE_OPEN_HANDLE(Object, JSObject)
MAKE_OPEN_HANDLE(Array, JSArray)
MAKE_OPEN_HANDLE(String, String)
MAKE_OPEN_HANDLE(Script, Object)
MAKE_OPEN_HANDLE(Function, JSFunction)
MAKE_OPEN_HANDLE(Message, JSObject)
MAKE_OPEN_HANDLE(Context, Context)
MAKE_OPEN_HANDLE(External, Foreign)
MAKE_OPEN_HANDLE(StackTrace, JSArray)
MAKE_OPEN_HANDLE(StackFrame, JSObject)
OPEN_HANDLE_LIST(MAKE_OPEN_HANDLE)
#undef MAKE_OPEN_HANDLE
#undef OPEN_HANDLE_LIST
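// For illustration (not part of the patch), the (Object, JSObject) row of
// OPEN_HANDLE_LIST expands MAKE_OPEN_HANDLE into:
//   v8::internal::Handle<v8::internal::JSObject> Utils::OpenHandle(
//       const v8::Object* that, bool allow_empty_handle) {
//     EXTRA_CHECK(allow_empty_handle || that != NULL);
//     return v8::internal::Handle<v8::internal::JSObject>(
//         reinterpret_cast<v8::internal::JSObject**>(
//             const_cast<v8::Object*>(that)));
//   }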
namespace internal {
@ -392,6 +369,32 @@ class StringTracker {
};
class DeferredHandles {
public:
~DeferredHandles();
private:
DeferredHandles(Object** first_block_limit, Isolate* isolate)
: next_(NULL),
previous_(NULL),
first_block_limit_(first_block_limit),
isolate_(isolate) {
isolate->LinkDeferredHandles(this);
}
void Iterate(ObjectVisitor* v);
List<Object**> blocks_;
DeferredHandles* next_;
DeferredHandles* previous_;
Object** first_block_limit_;
Isolate* isolate_;
friend class HandleScopeImplementer;
friend class Isolate;
};
// This class is here in order to be able to declare it a friend of
// HandleScope. Moving these methods to be members of HandleScope would be
// neat in some ways, but it would expose internal implementation details in
@ -409,7 +412,8 @@ class HandleScopeImplementer {
entered_contexts_(0),
saved_contexts_(0),
spare_(NULL),
call_depth_(0) { }
call_depth_(0),
last_handle_before_deferred_block_(NULL) { }
~HandleScopeImplementer() {
DeleteArray(spare_);
@ -445,6 +449,13 @@ class HandleScopeImplementer {
inline bool HasSavedContexts();
inline List<internal::Object**>* blocks() { return &blocks_; }
Isolate* isolate() const { return isolate_; }
void ReturnBlock(Object** block) {
ASSERT(block != NULL);
if (spare_ != NULL) DeleteArray(spare_);
spare_ = block;
}
private:
void ResetAfterArchive() {
@ -452,6 +463,7 @@ class HandleScopeImplementer {
entered_contexts_.Initialize(0);
saved_contexts_.Initialize(0);
spare_ = NULL;
last_handle_before_deferred_block_ = NULL;
call_depth_ = 0;
}
@ -469,6 +481,9 @@ class HandleScopeImplementer {
ASSERT(call_depth_ == 0);
}
void BeginDeferredScope();
DeferredHandles* Detach(Object** prev_limit);
Isolate* isolate_;
List<internal::Object**> blocks_;
// Used as a stack to keep track of entered contexts.
@ -477,6 +492,7 @@ class HandleScopeImplementer {
List<Context*> saved_contexts_;
Object** spare_;
int call_depth_;
Object** last_handle_before_deferred_block_;
// This is only used for threading support.
v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
@ -484,6 +500,9 @@ class HandleScopeImplementer {
char* RestoreThreadHelper(char* from);
char* ArchiveThreadHelper(char* to);
friend class DeferredHandles;
friend class DeferredHandleScope;
DISALLOW_COPY_AND_ASSIGN(HandleScopeImplementer);
};

5
deps/v8/src/arm/assembler-arm-inl.h

@ -141,10 +141,7 @@ Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
JSGlobalPropertyCell* RelocInfo::target_cell() {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = Memory::Address_at(pc_);
Object* object = HeapObject::FromAddress(
address - JSGlobalPropertyCell::kValueOffset);
return reinterpret_cast<JSGlobalPropertyCell*>(object);
return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
}

171
deps/v8/src/arm/assembler-arm.cc

@ -32,7 +32,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
#include "v8.h"
@ -52,17 +52,20 @@ unsigned CpuFeatures::found_by_runtime_probing_ = 0;
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP_INSTRUCTIONS
// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
// can be defined to enable ARMv7 and VFPv3 instructions when building the
// snapshot.
static uint64_t CpuFeaturesImpliedByCompiler() {
uint64_t answer = 0;
static unsigned CpuFeaturesImpliedByCompiler() {
unsigned answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
answer |= 1u << ARMv7;
#endif // def CAN_USE_ARMV7_INSTRUCTIONS
#ifdef CAN_USE_VFP_INSTRUCTIONS
answer |= 1u << VFP3 | 1u << ARMv7;
#endif // def CAN_USE_VFP_INSTRUCTIONS
#endif // CAN_USE_ARMV7_INSTRUCTIONS
#ifdef CAN_USE_VFP3_INSTRUCTIONS
answer |= 1u << VFP3 | 1u << VFP2 | 1u << ARMv7;
#endif // CAN_USE_VFP3_INSTRUCTIONS
#ifdef CAN_USE_VFP2_INSTRUCTIONS
answer |= 1u << VFP2;
#endif // CAN_USE_VFP2_INSTRUCTIONS
#ifdef __arm__
// If the compiler is allowed to use VFP then we can use VFP too in our code
@ -70,18 +73,18 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
// point support implies VFPv3, see ARM DDI 0406B, page A1-6.
#if defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__) \
&& !defined(__SOFTFP__)
answer |= 1u << VFP3 | 1u << ARMv7;
answer |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
#endif // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__)
// && !defined(__SOFTFP__)
#endif // def __arm__
#endif // __arm__
return answer;
}
void CpuFeatures::Probe() {
unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
CpuFeaturesImpliedByCompiler());
unsigned standard_features = static_cast<unsigned>(
OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
ASSERT(supported_ == 0 || supported_ == standard_features);
#ifdef DEBUG
initialized_ = true;
@ -101,27 +104,32 @@ void CpuFeatures::Probe() {
// For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
// enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
if (FLAG_enable_vfp3) {
supported_ |= 1u << VFP3 | 1u << ARMv7;
supported_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
}
// For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
if (FLAG_enable_armv7) {
supported_ |= 1u << ARMv7;
}
#else // def __arm__
#else // __arm__
// Probe for additional features not already known to be available.
if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
// This implementation also sets the VFP flags if runtime
// detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
// detection of VFP returns true. VFPv3 implies ARMv7 and VFP2, see ARM DDI
// 0406B, page A1-6.
supported_ |= 1u << VFP3 | 1u << ARMv7;
found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7;
found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
} else if (!IsSupported(VFP2) && OS::ArmCpuHasFeature(VFP2)) {
found_by_runtime_probing_ |= 1u << VFP2;
}
if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
supported_ |= 1u << ARMv7;
found_by_runtime_probing_ |= 1u << ARMv7;
}
supported_ |= found_by_runtime_probing_;
#endif
// Assert that VFP3 implies VFP2 and ARMv7.
ASSERT(!IsSupported(VFP3) || (IsSupported(VFP2) && IsSupported(ARMv7)));
}
@ -292,8 +300,10 @@ static const int kMinimalBufferSize = 4*KB;
Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
: AssemblerBase(arg_isolate),
recorded_ast_id_(TypeFeedbackId::None()),
positions_recorder_(this),
emit_debug_code_(FLAG_debug_code) {
emit_debug_code_(FLAG_debug_code),
predictable_code_size_(false) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
@ -746,7 +756,7 @@ static bool fits_shifter(uint32_t imm32,
}
}
} else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
*instr ^= kCmpCmnFlip;
return true;
}
@ -754,7 +764,7 @@ static bool fits_shifter(uint32_t imm32,
Instr alu_insn = (*instr & kALUMask);
if (alu_insn == ADD ||
alu_insn == SUB) {
if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
*instr ^= kAddSubFlip;
return true;
}
@ -775,13 +785,14 @@ static bool fits_shifter(uint32_t imm32,
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Operand::must_use_constant_pool() const {
bool Operand::must_use_constant_pool(const Assembler* assembler) const {
if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
if (!Serializer::enabled()) {
Serializer::TooLateToEnableNow();
}
#endif // def DEBUG
if (assembler != NULL && assembler->predictable_code_size()) return true;
return Serializer::enabled();
} else if (rmode_ == RelocInfo::NONE) {
return false;
@ -790,16 +801,17 @@ bool Operand::must_use_constant_pool() const {
}
bool Operand::is_single_instruction(Instr instr) const {
bool Operand::is_single_instruction(const Assembler* assembler,
Instr instr) const {
if (rm_.is_valid()) return true;
uint32_t dummy1, dummy2;
if (must_use_constant_pool() ||
if (must_use_constant_pool(assembler) ||
!fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, or use of
// constant pool is required. For a mov instruction not setting the
// condition code additional instruction conventions can be used.
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
if (must_use_constant_pool() ||
if (must_use_constant_pool(assembler) ||
!CpuFeatures::IsSupported(ARMv7)) {
// mov instruction will be an ldr from constant pool (one instruction).
return true;
@ -833,7 +845,7 @@ void Assembler::addrmod1(Instr instr,
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
if (x.must_use_constant_pool() ||
if (x.must_use_constant_pool(this) ||
!fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to register ip and change the original instruction to use ip.
@ -842,7 +854,7 @@ void Assembler::addrmod1(Instr instr,
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = Instruction::ConditionField(instr);
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
if (x.must_use_constant_pool() ||
if (x.must_use_constant_pool(this) ||
!CpuFeatures::IsSupported(ARMv7)) {
RecordRelocInfo(x.rmode_, x.imm32_);
ldr(rd, MemOperand(pc, 0), cond);
@ -854,7 +866,7 @@ void Assembler::addrmod1(Instr instr,
} else {
// If this is not a mov or mvn instruction we may still be able to avoid
// a constant pool entry by using mvn or movw.
if (!x.must_use_constant_pool() &&
if (!x.must_use_constant_pool(this) &&
(instr & kMovMvnMask) != kMovMvnPattern) {
mov(ip, x, LeaveCC, cond);
} else {
@ -1379,7 +1391,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
if (src.must_use_constant_pool() ||
if (src.must_use_constant_pool(this) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
// Immediate operand cannot be encoded, load it first to register ip.
RecordRelocInfo(src.rmode_, src.imm32_);
@ -1656,7 +1668,7 @@ void Assembler::vldr(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-628.
// cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1011(11-8) | offset
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@ -1698,7 +1710,7 @@ void Assembler::vldr(const SwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-628.
// cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | offset
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@ -1742,7 +1754,7 @@ void Assembler::vstr(const DwVfpRegister src,
// Instruction details available in ARM DDI 0406A, A8-786.
// cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
// Vsrc(15-12) | 1011(11-8) | (offset/4)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@ -1783,7 +1795,7 @@ void Assembler::vstr(const SwVfpRegister src,
// Instruction details available in ARM DDI 0406A, A8-786.
// cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | (offset/4)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@ -1826,7 +1838,7 @@ void Assembler::vldm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406A, A8-626.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
// first(15-12) | 1010(11-8) | (count * 2)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@ -1834,6 +1846,7 @@ void Assembler::vldm(BlockAddrMode am,
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
ASSERT(count <= 16);
emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
0xB*B8 | count*2);
}
@ -1847,7 +1860,7 @@ void Assembler::vstm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count * 2)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@ -1855,6 +1868,7 @@ void Assembler::vstm(BlockAddrMode am,
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
ASSERT(count <= 16);
emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
0xB*B8 | count*2);
}
@ -1867,7 +1881,7 @@ void Assembler::vldm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406A, A8-626.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
// first(15-12) | 1010(11-8) | (count/2)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@ -1888,7 +1902,7 @@ void Assembler::vstm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count/2)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@ -1911,7 +1925,7 @@ static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsSupported(VFP3));
// VMOV can accept an immediate of the form:
//
@ -1964,10 +1978,10 @@ void Assembler::vmov(const DwVfpRegister dst,
const Condition cond) {
// Dd = immediate
// Instruction details available in ARM DDI 0406B, A8-640.
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
uint32_t enc;
if (FitsVMOVDoubleImmediate(imm, &enc)) {
if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
// The double can be encoded in the instruction.
emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc);
} else {
@ -2001,7 +2015,7 @@ void Assembler::vmov(const SwVfpRegister dst,
const Condition cond) {
// Sd = Sm
// Instruction details available in ARM DDI 0406B, A8-642.
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
int sd, d, sm, m;
dst.split_code(&sd, &d);
src.split_code(&sm, &m);
@ -2014,7 +2028,7 @@ void Assembler::vmov(const DwVfpRegister dst,
const Condition cond) {
// Dd = Dm
// Instruction details available in ARM DDI 0406B, A8-642.
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xB*B20 |
dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
}
@ -2028,7 +2042,7 @@ void Assembler::vmov(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(!src1.is(pc) && !src2.is(pc));
emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
src1.code()*B12 | 0xB*B8 | B4 | dst.code());
@ -2043,7 +2057,7 @@ void Assembler::vmov(const Register dst1,
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(!dst1.is(pc) && !dst2.is(pc));
emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
dst1.code()*B12 | 0xB*B8 | B4 | src.code());
@ -2057,7 +2071,7 @@ void Assembler::vmov(const SwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(!src.is(pc));
int sn, n;
dst.split_code(&sn, &n);
@ -2072,7 +2086,7 @@ void Assembler::vmov(const Register dst,
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(!dst.is(pc));
int sn, n;
src.split_code(&sn, &n);
@ -2197,7 +2211,7 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}
@ -2206,7 +2220,7 @@ void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}
@ -2215,7 +2229,7 @@ void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}
@ -2224,7 +2238,7 @@ void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}
@ -2233,7 +2247,7 @@ void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}
@ -2242,7 +2256,7 @@ void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}
@ -2251,7 +2265,7 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}
@ -2259,6 +2273,7 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
void Assembler::vneg(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 |
0x5*B9 | B8 | B6 | src.code());
}
@ -2267,6 +2282,7 @@ void Assembler::vneg(const DwVfpRegister dst,
void Assembler::vabs(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 |
0x5*B9 | B8 | 0x3*B6 | src.code());
}
@ -2281,7 +2297,7 @@ void Assembler::vadd(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-536.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
@ -2296,7 +2312,7 @@ void Assembler::vsub(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
@ -2311,7 +2327,7 @@ void Assembler::vmul(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
@ -2326,7 +2342,7 @@ void Assembler::vdiv(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-584.
// cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
@ -2339,7 +2355,7 @@ void Assembler::vcmp(const DwVfpRegister src1,
// Instruction details available in ARM DDI 0406A, A8-570.
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
@ -2352,7 +2368,7 @@ void Assembler::vcmp(const DwVfpRegister src1,
// Instruction details available in ARM DDI 0406A, A8-570.
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(src2 == 0.0);
emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
src1.code()*B12 | 0x5*B9 | B8 | B6);
@ -2363,7 +2379,7 @@ void Assembler::vmsr(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xE*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
@ -2373,7 +2389,7 @@ void Assembler::vmrs(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xF*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
@ -2384,7 +2400,7 @@ void Assembler::vsqrt(const DwVfpRegister dst,
const Condition cond) {
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
}
@ -2435,6 +2451,14 @@ void Assembler::RecordComment(const char* msg) {
}
void Assembler::RecordConstPool(int size) {
// We only need this for debugger support, to correctly compute offsets in the
// code.
#ifdef ENABLE_DEBUGGER_SUPPORT
RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
#endif
}
void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
@ -2511,12 +2535,15 @@ void Assembler::dd(uint32_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(pc_, rmode, data, NULL);
if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
if (((rmode >= RelocInfo::JS_RETURN) &&
(rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
(rmode == RelocInfo::CONST_POOL)) {
// Adjust code for new modes.
ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
|| RelocInfo::IsJSReturn(rmode)
|| RelocInfo::IsComment(rmode)
|| RelocInfo::IsPosition(rmode));
|| RelocInfo::IsPosition(rmode)
|| RelocInfo::IsConstPool(rmode));
// These modes do not need an entry in the constant pool.
} else {
ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
@ -2542,7 +2569,10 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
RelocInfo reloc_info_with_ast_id(pc_,
rmode,
RecordedAstId().ToInt(),
NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
@ -2602,13 +2632,15 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// pool (include the jump over the pool and the constant pool marker and
// the gap to the relocation information).
int jump_instr = require_jump ? kInstrSize : 0;
int needed_space = jump_instr + kInstrSize +
num_pending_reloc_info_ * kInstrSize + kGap;
int size = jump_instr + kInstrSize + num_pending_reloc_info_ * kPointerSize;
int needed_space = size + kGap;
while (buffer_space() <= needed_space) GrowBuffer();
{
// Block recursive calls to CheckConstPool.
BlockConstPoolScope block_const_pool(this);
RecordComment("[ Constant Pool");
RecordConstPool(size);
// Emit jump over constant pool if necessary.
Label after_pool;
@ -2616,8 +2648,6 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
b(&after_pool);
}
RecordComment("[ Constant Pool");
// Put down constant pool marker "Undefined instruction" as specified by
// A5.6 (ARMv7) Instruction set encoding.
emit(kConstantPoolMarker | num_pending_reloc_info_);
@ -2627,7 +2657,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
RelocInfo& rinfo = pending_reloc_info_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
rinfo.rmode() != RelocInfo::POSITION &&
rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
rinfo.rmode() != RelocInfo::CONST_POOL);
Instr instr = instr_at(rinfo.pc());
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.

48
deps/v8/src/arm/assembler-arm.h

@ -424,8 +424,8 @@ class Operand BASE_EMBEDDED {
// the instruction this operand is used for is a MOV or MVN instruction the
// actual instruction to use is required for this calculation. For other
// instructions instr is ignored.
bool is_single_instruction(Instr instr = 0) const;
bool must_use_constant_pool() const;
bool is_single_instruction(const Assembler* assembler, Instr instr = 0) const;
bool must_use_constant_pool(const Assembler* assembler) const;
inline int32_t immediate() const {
ASSERT(!rm_.is_valid());
@ -510,6 +510,7 @@ class CpuFeatures : public AllStatic {
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
if (f == VFP3 && !FLAG_enable_vfp3) return false;
if (f == VFP2 && !FLAG_enable_vfp2) return false;
return (supported_ & (1u << f)) != 0;
}
@ -535,6 +536,8 @@ class CpuFeatures : public AllStatic {
public:
explicit Scope(CpuFeature f) {
unsigned mask = 1u << f;
// VFP2 and ARMv7 are implied by VFP3.
if (f == VFP3) mask |= 1u << VFP2 | 1u << ARMv7;
ASSERT(CpuFeatures::IsSupported(f));
ASSERT(!Serializer::enabled() ||
(CpuFeatures::found_by_runtime_probing_ & mask) == 0);
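// Illustrative usage, not part of the patch: VFP2-only code paths follow
// the same pattern as the existing VFP3 call sites.
if (CpuFeatures::IsSupported(VFP2)) {
  CpuFeatures::Scope scope(VFP2);
  __ vadd(d0, d1, d2);  // any VFP2 instruction is legal inside the scope
}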
@ -645,6 +648,11 @@ class Assembler : public AssemblerBase {
// Overrides the default provided by FLAG_debug_code.
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
// Avoids using instructions that vary in size in unpredictable ways between
// the snapshot and the running VM. This is needed by the full compiler so
// that it can recompile code with debug support and fix the PC.
void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@ -1164,6 +1172,8 @@ class Assembler : public AssemblerBase {
// Jump unconditionally to given label.
void jmp(Label* L) { b(L, al); }
bool predictable_code_size() const { return predictable_code_size_; }
// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
@ -1203,22 +1213,41 @@ class Assembler : public AssemblerBase {
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
void SetRecordedAstId(unsigned ast_id) {
ASSERT(recorded_ast_id_ == kNoASTId);
void SetRecordedAstId(TypeFeedbackId ast_id) {
ASSERT(recorded_ast_id_.IsNone());
recorded_ast_id_ = ast_id;
}
unsigned RecordedAstId() {
ASSERT(recorded_ast_id_ != kNoASTId);
TypeFeedbackId RecordedAstId() {
ASSERT(!recorded_ast_id_.IsNone());
return recorded_ast_id_;
}
void ClearRecordedAstId() { recorded_ast_id_ = kNoASTId; }
void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);
// Record the emission of a constant pool.
//
// The emission of a constant pool depends on the size of the code generated
// and on the number of RelocInfos recorded.
// The Debug mechanism needs to map code offsets between two versions of a
// function, compiled with and without debugger support (see for example
// Debug::PrepareForBreakPoints()).
// Compiling functions with debugger support generates additional code
// (Debug::GenerateSlot()). This may affect the emission of the constant
// pools and cause the version of the code with debugger support to have
// constant pools generated in different places.
// Recording the position and size of emitted constant pools makes it
// possible to compute the offset mappings between the different versions
// of a function correctly in all situations.
//
// The parameter indicates the size of the constant pool (in bytes), including
// the marker and branch over the data.
void RecordConstPool(int size);
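// Worked example, not from the patch (assuming ARM's kInstrSize == 4 and
// kPointerSize == 4): flushing a pool of 6 pending entries with a required
// jump records size = 4 (branch) + 4 (marker) + 6 * 4 (entries) = 32 bytes.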
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables. The constant pool should be
// emitted before any use of db and dd to ensure that constant pools
@ -1283,7 +1312,7 @@ class Assembler : public AssemblerBase {
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
// the relocation info.
unsigned recorded_ast_id_;
TypeFeedbackId recorded_ast_id_;
bool emit_debug_code() const { return emit_debug_code_; }
@ -1425,7 +1454,10 @@ class Assembler : public AssemblerBase {
friend class BlockConstPoolScope;
PositionsRecorder positions_recorder_;
bool emit_debug_code_;
bool predictable_code_size_;
friend class PositionsRecorder;
friend class EnsureSpace;
};

65
deps/v8/src/arm/builtins-arm.cc

@ -75,12 +75,13 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
Register result) {
// Load the global context.
// Load the native context.
__ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ ldr(result,
FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
// Load the InternalArray function from the global context.
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ldr(result,
FieldMemOperand(result, GlobalObject::kNativeContextOffset));
// Load the InternalArray function from the native context.
__ ldr(result,
MemOperand(result,
Context::SlotOffset(
@ -90,12 +91,13 @@ static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
// Load the global context.
// Load the native context.
__ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ ldr(result,
FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
// Load the Array function from the global context.
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ldr(result,
FieldMemOperand(result, GlobalObject::kNativeContextOffset));
// Load the Array function from the native context.
__ ldr(result,
MemOperand(result,
Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
@ -697,6 +699,43 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
__ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ mov(pc, r2);
}
void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
__ push(r1);
// Push call kind information.
__ push(r5);
__ push(r1); // Function is also the parameter to the runtime call.
__ CallRuntime(Runtime::kParallelRecompile, 1);
// Restore call kind information.
__ pop(r5);
// Restore receiver.
__ pop(r1);
// Tear down internal frame.
}
GenerateTailCallToSharedCode(masm);
}
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool count_constructions) {
@ -1246,7 +1285,7 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
CpuFeatures::TryForceFeatureScope scope(VFP3);
if (!CpuFeatures::IsSupported(VFP3)) {
if (!CPU::SupportsCrankshaft()) {
__ Abort("Unreachable code: Cannot optimize without VFP3 support.");
return;
}
@ -1366,9 +1405,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// receiver.
__ bind(&use_global_receiver);
const int kGlobalIndex =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
__ ldr(r2, FieldMemOperand(cp, kGlobalIndex));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
__ ldr(r2, FieldMemOperand(r2, kGlobalIndex));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
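The hand-computed kGlobalIndex above is the tag-inclusive form of the offset that Context::SlotOffset() produces; the heap-object tag is then folded away by FieldMemOperand at the use site. For reference, the definition from src/contexts.h:
// src/contexts.h: slot offsets are header-relative, minus the heap tag.
static int SlotOffset(int index) {
  return kHeaderSize + index * kPointerSize - kHeapObjectTag;
}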
@ -1561,9 +1600,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Use the current global receiver object as the receiver.
__ bind(&use_global_receiver);
const int kGlobalOffset =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
__ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
__ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));

372
deps/v8/src/arm/code-stubs-arm.cc

@ -85,6 +85,8 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in cp.
Counters* counters = masm->isolate()->counters();
Label gc;
// Pop the function info from the stack.
@ -98,32 +100,44 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
&gc,
TAG_OBJECT);
__ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7);
int map_index = (language_mode_ == CLASSIC_MODE)
? Context::FUNCTION_MAP_INDEX
: Context::STRICT_MODE_FUNCTION_MAP_INDEX;
// Compute the function map in the current global context and set that
// Compute the function map in the current native context and set that
// as the map of the allocated object.
__ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
__ ldr(r2, MemOperand(r2, Context::SlotOffset(map_index)));
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
__ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index)));
__ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset));
// Initialize the rest of the function. We don't have to update the
// write barrier because the allocated object is in new space.
__ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
__ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
__ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
__ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
__ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
__ str(r5, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
__ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
__ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
__ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
__ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
// But first check if there is an optimized version for our context.
Label check_optimized;
Label install_unoptimized;
if (FLAG_cache_optimized_code) {
__ ldr(r1,
FieldMemOperand(r3, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ tst(r1, r1);
__ b(ne, &check_optimized);
}
__ bind(&install_unoptimized);
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
__ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
__ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
@ -131,6 +145,72 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Return result. The argument function info has been popped already.
__ Ret();
__ bind(&check_optimized);
__ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r7);
// r2 holds native context, r1 points to fixed array of 3-element entries
// (native context, optimized code, literals).
// The optimized code map must never be empty, so check the first entry.
Label install_optimized;
// Speculatively move code object into r4.
__ ldr(r4, FieldMemOperand(r1, FixedArray::kHeaderSize + kPointerSize));
__ ldr(r5, FieldMemOperand(r1, FixedArray::kHeaderSize));
__ cmp(r2, r5);
__ b(eq, &install_optimized);
// Iterate through the rest of the map backwards. r4 holds an index as a Smi.
Label loop;
__ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset));
__ bind(&loop);
// Do not double-check the first entry.
__ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
__ b(eq, &install_unoptimized);
__ sub(r4, r4, Operand(
Smi::FromInt(SharedFunctionInfo::kEntryLength))); // Skip an entry.
__ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
__ ldr(r5, MemOperand(r5));
__ cmp(r2, r5);
__ b(ne, &loop);
// Hit: fetch the optimized code.
__ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
__ add(r5, r5, Operand(kPointerSize));
__ ldr(r4, MemOperand(r5));
__ bind(&install_optimized);
__ IncrementCounter(counters->fast_new_closure_install_optimized(),
1, r6, r7);
// TODO(fschneider): Idea: store proper code pointers in the map and either
// unmangle them on marking or do nothing as the whole map is discarded on
// major GC anyway.
__ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ str(r4, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
// Now link a function into a list of optimized functions.
__ ldr(r4, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
__ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
// No need for a write barrier as the JSFunction (r0) is in new space.
__ str(r0, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
// Copy the JSFunction (r0) into r4 before issuing the write barrier,
// as the barrier clobbers all the registers passed to it.
__ mov(r4, r0);
__ RecordWriteContextSlot(
r2,
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
r4,
r1,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
// Return result. The argument function info has been popped already.
__ Ret();
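The search loop above is easier to read in C++. A minimal sketch of the same lookup, assuming the entry layout the comments describe (a FixedArray of [native context, optimized code, literals] triples that is never empty):
// Sketch of the assembly's backwards search through the optimized code map.
static Code* LookupOptimizedCode(FixedArray* map, Context* native_context) {
  for (int i = map->length() - SharedFunctionInfo::kEntryLength;
       i >= 0;
       i -= SharedFunctionInfo::kEntryLength) {
    if (map->get(i) == native_context) {
      return Code::cast(map->get(i + 1));  // the code object follows the context
    }
  }
  return NULL;  // no cached code for this context; install the shared code
}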
// Create a new closure through the slower runtime call.
__ bind(&gc);
__ LoadRoot(r4, Heap::kFalseValueRootIndex);
@ -162,12 +242,12 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
// Set up the fixed slots, copy the global object from the previous context.
__ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ mov(r1, Operand(Smi::FromInt(0)));
__ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
__ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
__ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
__ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Initialize the rest of the slots to undefined.
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
@ -210,9 +290,9 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ mov(r2, Operand(Smi::FromInt(length)));
__ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
// If this block context is nested in the global context we get a smi
// If this block context is nested in the native context we get a smi
// sentinel instead of a function. The block context should get the
// canonical empty function of the global context as its closure which
// canonical empty function of the native context as its closure which
// we still have to look up.
Label after_sentinel;
__ JumpIfNotSmi(r3, &after_sentinel);
@ -222,16 +302,16 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ Assert(eq, message);
}
__ ldr(r3, GlobalObjectOperand());
__ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
__ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
__ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
// Set up the fixed slots, copy the global object from the previous context.
__ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
__ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
__ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
__ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
__ str(r2, ContextOperand(r0, Context::GLOBAL_INDEX));
__ str(r2, ContextOperand(r0, Context::GLOBAL_OBJECT_INDEX));
// Initialize the rest of the slots to the hole value.
__ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
@ -519,8 +599,8 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register scratch1,
Register scratch2) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
__ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
__ vmov(d7.high(), scratch1);
__ vcvt_f64_s32(d7, d7.high());
@ -589,9 +669,9 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
// Handle loading a double from a heap number.
if (CpuFeatures::IsSupported(VFP3) &&
if (CpuFeatures::IsSupported(VFP2) &&
destination == kVFPRegisters) {
CpuFeatures::Scope scope(VFP3);
CpuFeatures::Scope scope(VFP2);
// Load the double from tagged HeapNumber to double register.
__ sub(scratch1, object, Operand(kHeapObjectTag));
__ vldr(dst, scratch1, HeapNumber::kValueOffset);
@ -604,8 +684,8 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
// Handle loading a double from a smi.
__ bind(&is_smi);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
// Convert smi to double using VFP instructions.
__ vmov(dst.high(), scratch1);
__ vcvt_f64_s32(dst, dst.high());
@ -682,8 +762,8 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
Label done;
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
__ vmov(single_scratch, int_scratch);
__ vcvt_f64_s32(double_dst, single_scratch);
if (destination == kCoreRegisters) {
@ -776,8 +856,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
// Load the number.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
// Load the double value.
__ sub(scratch1, object, Operand(kHeapObjectTag));
__ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
@ -847,8 +927,8 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
// Object is a heap number.
// Convert the floating point value to a 32-bit integer.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
SwVfpRegister single_scratch = double_scratch.low();
// Load the double value.
__ sub(scratch1, object, Operand(kHeapObjectTag));
@ -978,7 +1058,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
__ push(lr);
__ PrepareCallCFunction(0, 2, scratch);
if (masm->use_eabi_hardfloat()) {
CpuFeatures::Scope scope(VFP3);
CpuFeatures::Scope scope(VFP2);
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
@ -990,7 +1070,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
// Store answer in the overwritable heap number. Double returned in
// registers r0 and r1 or in d0.
if (masm->use_eabi_hardfloat()) {
CpuFeatures::Scope scope(VFP3);
CpuFeatures::Scope scope(VFP2);
__ vstr(d0,
FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
} else {
@ -1209,9 +1289,9 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
}
// Lhs is a smi, rhs is a number.
if (CpuFeatures::IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP2)) {
// Convert lhs to a double in d7.
CpuFeatures::Scope scope(VFP3);
CpuFeatures::Scope scope(VFP2);
__ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
// Load the double from rhs, tagged HeapNumber r0, to d6.
__ sub(r7, rhs, Operand(kHeapObjectTag));
@ -1249,8 +1329,8 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
}
// Rhs is a smi, lhs is a heap number.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
// Load the double from lhs, tagged HeapNumber r1, to d7.
__ sub(r7, lhs, Operand(kHeapObjectTag));
__ vldr(d7, r7, HeapNumber::kValueOffset);
@ -1362,7 +1442,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
__ push(lr);
__ PrepareCallCFunction(0, 2, r5);
if (masm->use_eabi_hardfloat()) {
CpuFeatures::Scope scope(VFP3);
CpuFeatures::Scope scope(VFP2);
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
@ -1437,8 +1517,8 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// Both are heap numbers. Load them up then jump to the code we have
// for that.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
__ sub(r7, rhs, Operand(kHeapObjectTag));
__ vldr(d6, r7, HeapNumber::kValueOffset);
__ sub(r7, lhs, Operand(kHeapObjectTag));
@ -1527,8 +1607,8 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Label load_result_from_cache;
if (!object_is_smi) {
__ JumpIfSmi(object, &is_smi);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
__ CheckMap(object,
scratch1,
Heap::kHeapNumberMapRootIndex,
@ -1659,9 +1739,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
// The arguments have been converted to doubles and stored in d6 and d7, if
// VFP2 is supported, or in r0, r1, r2, and r3.
Isolate* isolate = masm->isolate();
if (CpuFeatures::IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP2)) {
__ bind(&lhs_not_nan);
CpuFeatures::Scope scope(VFP3);
CpuFeatures::Scope scope(VFP2);
Label no_nan;
// ARMv7 VFP3 instructions to implement double precision comparison.
__ VFPCompareAndSetFlags(d7, d6);
@ -1779,11 +1859,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
void ToBooleanStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
// This stub uses VFP3 instructions.
CpuFeatures::Scope scope(VFP3);
Label patch;
const Register map = r9.is(tos_) ? r7 : r9;
const Register temp = map;
// undefined -> false.
CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
@ -1836,13 +1914,56 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
__ b(ne, &not_heap_number);
__ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
__ VFPCompareAndSetFlags(d1, 0.0);
// "tos_" is a register, and contains a non zero value by default.
// Hence we only need to overwrite "tos_" with zero to return false for
// FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
__ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO
__ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
__ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
__ VFPCompareAndSetFlags(d1, 0.0);
// "tos_" is a register, and contains a non zero value by default.
// Hence we only need to overwrite "tos_" with zero to return false for
// FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
__ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO
__ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN
} else {
Label done, not_nan, not_zero;
__ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
// -0 maps to false:
__ bic(
temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE), SetCC);
__ b(ne, &not_zero);
// If exponent word is zero then the answer depends on the mantissa word.
__ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
__ jmp(&done);
// Check for NaN.
__ bind(&not_zero);
// We already zeroed the sign bit; now shift out the mantissa so that only
// the exponent is left.
__ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord));
unsigned int shifted_exponent_mask =
HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord;
__ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE));
__ b(ne, &not_nan); // If exponent is not 0x7ff then it can't be a NaN.
// Reload exponent word.
__ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
__ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE));
// If mantissa is not zero then we have a NaN, so return 0.
__ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
__ b(ne, &done);
// Load mantissa word.
__ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
__ cmp(temp, Operand(0, RelocInfo::NONE));
// If mantissa is not zero then we have a NaN, so return 0.
__ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
__ b(ne, &done);
__ bind(&not_nan);
__ mov(tos_, Operand(1, RelocInfo::NONE));
__ bind(&done);
}
__ Ret();
__ bind(&not_heap_number);
}
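The new fallback branch classifies the heap number from its two raw words instead of using VFP. The same decision in plain C++, with the constants taken from src/objects.h (the sign, the exponent, and the top 20 mantissa bits live in one word, the low 32 mantissa bits in the other):
// Truth value of a heap number from its raw IEEE-754 words, mirroring the
// non-VFP2 branch above: +/-0 and NaN are false, everything else is true.
static bool HeapNumberToBoolean(uint32_t exponent_word,
                                uint32_t mantissa_word) {
  exponent_word &= ~HeapNumber::kSignMask;  // -0 maps to false, like +0
  if (exponent_word == 0) return mantissa_word != 0;  // zero or denormal
  uint32_t exponent = exponent_word >> HeapNumber::kMantissaBitsInTopWord;
  uint32_t all_ones =
      HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord;
  bool mantissa_nonzero =
      (exponent_word & HeapNumber::kMantissaMask) != 0 || mantissa_word != 0;
  if (exponent == all_ones && mantissa_nonzero) return false;  // NaN
  return true;  // any other non-zero double, including infinities
}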
@ -1892,7 +2013,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// restore them.
__ stm(db_w, sp, kCallerSaved | lr.bit());
if (save_doubles_ == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP3);
CpuFeatures::Scope scope(VFP2);
__ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
@ -1910,7 +2031,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
ExternalReference::store_buffer_overflow_function(masm->isolate()),
argument_count);
if (save_doubles_ == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP3);
CpuFeatures::Scope scope(VFP2);
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
__ vldr(reg, MemOperand(sp, i * kDoubleSize));
@ -2140,9 +2261,9 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
__ mov(r0, r2); // Move newly allocated heap number to r0.
}
if (CpuFeatures::IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP2)) {
// Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
CpuFeatures::Scope scope(VFP3);
CpuFeatures::Scope scope(VFP2);
__ vmov(s0, r1);
__ vcvt_f64_s32(d0, s0);
__ sub(r2, r0, Operand(kHeapObjectTag));
@ -2442,7 +2563,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// Load left and right operands into d6 and d7 or r0/r1 and r2/r3
// depending on whether VFP2 is available or not.
FloatingPointHelper::Destination destination =
CpuFeatures::IsSupported(VFP3) &&
CpuFeatures::IsSupported(VFP2) &&
op_ != Token::MOD ?
FloatingPointHelper::kVFPRegisters :
FloatingPointHelper::kCoreRegisters;
@ -2469,7 +2590,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// Using VFP registers:
// d6: Left value
// d7: Right value
CpuFeatures::Scope scope(VFP3);
CpuFeatures::Scope scope(VFP2);
switch (op_) {
case Token::ADD:
__ vadd(d5, d6, d7);
@ -2558,7 +2679,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// The code below for writing into heap numbers isn't capable of
// writing the register as an unsigned int so we go to slow case if we
// hit this case.
if (CpuFeatures::IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP2)) {
__ b(mi, &result_not_a_smi);
} else {
__ b(mi, not_numbers);
@ -2597,10 +2718,10 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// result.
__ mov(r0, Operand(r5));
if (CpuFeatures::IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP2)) {
// Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
// mentioned above SHR needs to always produce a positive result.
CpuFeatures::Scope scope(VFP3);
CpuFeatures::Scope scope(VFP2);
__ vmov(s0, r2);
if (op_ == Token::SHR) {
__ vcvt_f64_u32(d0, s0);
@ -2759,7 +2880,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Jump to type transition if they are not. The registers r0 and r1 (right
// and left) are preserved for the runtime call.
FloatingPointHelper::Destination destination =
(CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD)
(CpuFeatures::IsSupported(VFP2) && op_ != Token::MOD)
? FloatingPointHelper::kVFPRegisters
: FloatingPointHelper::kCoreRegisters;
@ -2787,7 +2908,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
&transition);
if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatures::Scope scope(VFP3);
CpuFeatures::Scope scope(VFP2);
Label return_heap_number;
switch (op_) {
case Token::ADD:
@ -2954,9 +3075,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// We only get a negative result if the shift value (r2) is 0.
// This result cannot be represented as a signed 32-bit integer, try
// to return a heap number if we can.
// The non vfp3 code does not support this special case, so jump to
// The non vfp2 code does not support this special case, so jump to
// runtime if we don't support it.
if (CpuFeatures::IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP2)) {
__ b(mi, (result_type_ <= BinaryOpIC::INT32)
? &transition
: &return_heap_number);
@ -2991,8 +3112,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
scratch2,
&call_runtime);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
if (op_ != Token::SHR) {
// Convert the result to a floating point value.
__ vmov(double_scratch.low(), r2);
@ -3221,8 +3342,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
const Register cache_entry = r0;
const bool tagged = (argument_type_ == TAGGED);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
if (tagged) {
// Argument is a number and is on stack and in r0.
// Load argument and check if it is a smi.
@ -3323,23 +3444,23 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
ExternalReference(RuntimeFunction(), masm->isolate());
__ TailCallExternalReference(runtime_function, 1, 1);
} else {
if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE();
CpuFeatures::Scope scope(VFP3);
ASSERT(CpuFeatures::IsSupported(VFP2));
CpuFeatures::Scope scope(VFP2);
Label no_update;
Label skip_cache;
// Call C function to calculate the result and update the cache.
// Register r0 holds precalculated cache entry address; preserve
// it on the stack and pop it into register cache_entry after the
// call.
__ push(cache_entry);
// r0: precalculated cache entry address.
// r2 and r3: parts of the double value.
// Store r0, r2 and r3 on stack for later before calling C function.
__ Push(r3, r2, cache_entry);
GenerateCallCFunction(masm, scratch0);
__ GetCFunctionDoubleResult(d2);
// Try to update the cache. If we cannot allocate a
// heap number, we return the result without updating.
__ pop(cache_entry);
__ Pop(r3, r2, cache_entry);
__ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
__ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
@ -3385,6 +3506,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
Register scratch) {
ASSERT(CpuFeatures::IsEnabled(VFP2));
Isolate* isolate = masm->isolate();
__ push(lr);
@ -3445,7 +3567,7 @@ void InterruptStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
CpuFeatures::Scope vfp3_scope(VFP3);
CpuFeatures::Scope vfp2_scope(VFP2);
const Register base = r1;
const Register exponent = r2;
const Register heapnumbermap = r5;
@ -3544,7 +3666,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Add +0 to convert -0 to +0.
__ vadd(double_scratch, double_base, kDoubleRegZero);
__ vmov(double_result, 1);
__ vmov(double_result, 1.0);
__ vsqrt(double_scratch, double_scratch);
__ vdiv(double_result, double_result, double_scratch);
__ jmp(&done);
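The vadd with kDoubleRegZero in this -0.5 exponent path is what makes Math.pow(-0, -0.5) come out as +Infinity: adding +0 normalizes a -0 base to +0 before the square root, so the reciprocal does not end up -Infinity. The same trick in scalar C++:
#include <cmath>
// pow(x, -0.5) computed as 1 / sqrt(x + 0.0).
static double PowNegHalf(double x) {
  double base = x + 0.0;         // IEEE-754: (-0.0) + (+0.0) == +0.0
  return 1.0 / std::sqrt(base);  // PowNegHalf(-0.0) == +Infinity
}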
@ -3901,8 +4023,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Save callee-saved registers (incl. cp and fp), sp, and lr
__ stm(db_w, sp, kCalleeSaved | lr.bit());
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
// Save callee-saved vfp registers.
__ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
// Set up the reserved register for 0.0.
@ -3917,7 +4039,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Set up argv in r4.
int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
if (CpuFeatures::IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP2)) {
offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
}
__ ldr(r4, MemOperand(sp, offset_to_argv));
@ -4055,8 +4177,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
}
#endif
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
// Restore callee-saved vfp registers.
__ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
}
@ -4385,14 +4507,14 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// r0 = address of new object(s) (tagged)
// r2 = argument count (tagged)
// Get the arguments boilerplate from the current (global) context into r4.
// Get the arguments boilerplate from the current native context into r4.
const int kNormalOffset =
Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
const int kAliasedOffset =
Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
__ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
__ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
__ cmp(r1, Operand::Zero());
__ ldr(r4, MemOperand(r4, kNormalOffset), eq);
__ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
@ -4565,9 +4687,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
static_cast<AllocationFlags>(TAG_OBJECT |
SIZE_IN_WORDS));
// Get the arguments boilerplate from the current (global) context.
__ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
// Get the arguments boilerplate from the current native context.
__ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
__ ldr(r4, MemOperand(r4, Context::SlotOffset(
Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
@ -4696,7 +4818,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(r2, r2, Operand(2)); // r2 was a smi.
// Check that the static offsets vector buffer is large enough.
__ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
__ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize));
__ b(hi, &runtime);
// r2: Number of capture registers
@ -5082,10 +5204,10 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set empty properties FixedArray.
// Set elements to point to FixedArray allocated right after the JSArray.
// Interleave operations for better latency.
__ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
__ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ add(r3, r0, Operand(JSRegExpResult::kSize));
__ mov(r4, Operand(factory->empty_fixed_array()));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
__ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
__ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
__ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
@ -5191,7 +5313,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
__ b(ne, &call);
// Patch the receiver on the stack with the global receiver object.
__ ldr(r3, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ ldr(r3,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset));
__ str(r3, MemOperand(sp, argc_ * kPointerSize));
__ bind(&call);
@ -6583,8 +6706,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or VFP2 is unsupported.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
// Load left and right operand
__ sub(r2, r1, Operand(kHeapObjectTag));
@ -7131,6 +7254,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
// StoreArrayLiteralElementStub::Generate
{ REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
// FastNewClosureStub::Generate
{ REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
@ -7431,6 +7556,65 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ Ret();
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (entry_hook_ != NULL) {
ProfileEntryHookStub stub;
__ push(lr);
__ CallStub(&stub);
__ pop(lr);
}
}
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// The entry hook is a "push lr" instruction, followed by a call.
const int32_t kReturnAddressDistanceFromFunctionStart =
Assembler::kCallTargetAddressOffset + Assembler::kInstrSize;
// Save live volatile registers.
__ Push(lr, r5, r1);
const int32_t kNumSavedRegs = 3;
// Compute the function's address for the first argument.
__ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
// The caller's return address is above the saved temporaries.
// Grab that for the second argument to the hook.
__ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));
// Align the stack if necessary.
int frame_alignment = masm->ActivationFrameAlignment();
if (frame_alignment > kPointerSize) {
__ mov(r5, sp);
ASSERT(IsPowerOf2(frame_alignment));
__ and_(sp, sp, Operand(-frame_alignment));
}
#if defined(V8_HOST_ARCH_ARM)
__ mov(ip, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
__ ldr(ip, MemOperand(ip));
#else
// Under the simulator we need to indirect the entry hook through a
// trampoline function at a known address.
Address trampoline_address = reinterpret_cast<Address>(
reinterpret_cast<intptr_t>(EntryHookTrampoline));
ApiFunction dispatcher(trampoline_address);
__ mov(ip, Operand(ExternalReference(&dispatcher,
ExternalReference::BUILTIN_CALL,
masm->isolate())));
#endif
__ Call(ip);
// Restore the stack pointer if needed.
if (frame_alignment > kPointerSize) {
__ mov(sp, r5);
}
__ Pop(lr, r5, r1);
__ Ret();
}
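The stub passes the hook two arguments: the entry address of the function being entered and the address of the stack slot holding the caller's return address. A hook compatible with this convention would look roughly like the following (the FunctionEntryHook typedef comes from include/v8.h; the body is illustrative):
#include <stdint.h>
// Matches v8::FunctionEntryHook. Runs on every generated-code entry, so it
// must be cheap and must not call back into V8.
void MyEntryHook(uintptr_t function, uintptr_t return_addr_location) {
  // 'function' is the code entry point computed from lr above;
  // 'return_addr_location' points at the caller's return address slot.
}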
#undef __
} } // namespace v8::internal

16
deps/v8/src/arm/code-stubs-arm.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -149,7 +149,7 @@ class BinaryOpStub: public CodeStub {
mode_(mode),
operands_type_(BinaryOpIC::UNINITIALIZED),
result_type_(BinaryOpIC::UNINITIALIZED) {
use_vfp3_ = CpuFeatures::IsSupported(VFP3);
use_vfp2_ = CpuFeatures::IsSupported(VFP2);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@ -159,7 +159,7 @@ class BinaryOpStub: public CodeStub {
BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
use_vfp3_(VFP3Bits::decode(key)),
use_vfp2_(VFP2Bits::decode(key)),
operands_type_(operands_type),
result_type_(result_type) { }
@ -171,7 +171,7 @@ class BinaryOpStub: public CodeStub {
Token::Value op_;
OverwriteMode mode_;
bool use_vfp3_;
bool use_vfp2_;
// Operand type information determined at runtime.
BinaryOpIC::TypeInfo operands_type_;
@ -182,7 +182,7 @@ class BinaryOpStub: public CodeStub {
// Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
class VFP3Bits: public BitField<bool, 9, 1> {};
class VFP2Bits: public BitField<bool, 9, 1> {};
class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
@ -190,7 +190,7 @@ class BinaryOpStub: public CodeStub {
int MinorKey() {
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| VFP3Bits::encode(use_vfp3_)
| VFP2Bits::encode(use_vfp2_)
| OperandTypeInfoBits::encode(operands_type_)
| ResultTypeInfoBits::encode(result_type_);
}
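The renamed VFP2Bits field keeps the same slot, bit 9 of the 16-bit minor key (RRRTTTVOOOOOOOMM, reading from bit 15 down). The BitField helpers are a thin shift-and-mask template, roughly as below (the real one lives in src/utils.h):
// Approximate shape of v8::internal::BitField.
template <class T, int shift, int size>
class BitField {
 public:
  static const uint32_t kMask = ((1U << size) - 1U) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value & kMask) >> shift);
  }
};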
@ -571,7 +571,7 @@ class RecordWriteStub: public CodeStub {
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP3);
CpuFeatures::Scope scope(VFP2);
masm->sub(sp,
sp,
Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
@ -586,7 +586,7 @@ class RecordWriteStub: public CodeStub {
inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP3);
CpuFeatures::Scope scope(VFP2);
// Restore all VFP registers except d0.
for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
DwVfpRegister reg = DwVfpRegister::from_code(i);

37
deps/v8/src/arm/codegen-arm.cc

@ -107,7 +107,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// -- r4 : scratch (elements)
// -----------------------------------
Label loop, entry, convert_hole, gc_required, only_change_map, done;
bool vfp3_supported = CpuFeatures::IsSupported(VFP3);
bool vfp2_supported = CpuFeatures::IsSupported(VFP2);
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
@ -121,15 +121,34 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// r5: number of elements (smi-tagged)
// Allocate new FixedDoubleArray.
__ mov(lr, Operand(FixedDoubleArray::kHeaderSize));
__ add(lr, lr, Operand(r5, LSL, 2));
// Use lr as a temporary register.
__ mov(lr, Operand(r5, LSL, 2));
__ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize + kPointerSize));
__ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
// r6: destination FixedDoubleArray, not tagged as heap object
// r6: destination FixedDoubleArray, not tagged as heap object.
// Align the array conveniently for doubles.
// Store a filler value in the unused memory.
Label aligned, aligned_done;
__ tst(r6, Operand(kDoubleAlignmentMask));
__ mov(ip, Operand(masm->isolate()->factory()->one_pointer_filler_map()));
__ b(eq, &aligned);
// Store at the beginning of the allocated memory and update the base pointer.
__ str(ip, MemOperand(r6, kPointerSize, PostIndex));
__ b(&aligned_done);
__ bind(&aligned);
// Store the filler at the end of the allocated memory.
__ sub(lr, lr, Operand(kPointerSize));
__ str(ip, MemOperand(r6, lr));
__ bind(&aligned_done);
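Together with the extra kPointerSize of slack requested above, this guarantees the double fields land on an 8-byte boundary whichever way the allocation top fell; the skipped word is stamped with the one-pointer filler map so heap iteration stays valid. The same decision in C++:
// Sketch: align a freshly allocated FixedDoubleArray for 8-byte doubles.
// kDoubleAlignmentMask is kDoubleSize - 1; 'filler' is the one-pointer
// filler map that marks the dead word for the heap iterator.
static Address AlignForDoubles(Address base, int size_with_slack,
                               Object* filler) {
  if (reinterpret_cast<uintptr_t>(base) & kDoubleAlignmentMask) {
    // Misaligned: put the filler first and bump the base pointer.
    *reinterpret_cast<Object**>(base) = filler;
    return base + kPointerSize;
  }
  // Already aligned: the unused word sits at the end of the allocation.
  *reinterpret_cast<Object**>(base + size_with_slack - kPointerSize) = filler;
  return base;
}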
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
__ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
__ str(r9, MemOperand(r6, HeapObject::kMapOffset));
// Update receiver's map.
__ str(r9, MemOperand(r6, HeapObject::kMapOffset));
__ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
__ RecordWriteField(r2,
@ -163,7 +182,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// r5: kHoleNanUpper32
// r6: end of destination FixedDoubleArray, not tagged
// r7: begin of FixedDoubleArray element fields, not tagged
if (!vfp3_supported) __ Push(r1, r0);
if (!vfp2_supported) __ Push(r1, r0);
__ b(&entry);
@ -191,8 +210,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
// Normal smi, convert to double and store.
if (vfp3_supported) {
CpuFeatures::Scope scope(VFP3);
if (vfp2_supported) {
CpuFeatures::Scope scope(VFP2);
__ vmov(s0, r9);
__ vcvt_f64_s32(d0, s0);
__ vstr(d0, r7, 0);
@ -225,7 +244,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ cmp(r7, r6);
__ b(lt, &loop);
if (!vfp3_supported) __ Pop(r1, r0);
if (!vfp2_supported) __ Pop(r1, r0);
__ pop(lr);
__ bind(&done);
}

5
deps/v8/src/arm/constants-arm.h

@ -56,8 +56,9 @@
# define CAN_USE_ARMV6_INSTRUCTIONS 1
#endif
#if defined(__ARM_ARCH_5T__) || \
defined(__ARM_ARCH_5TE__) || \
#if defined(__ARM_ARCH_5T__) || \
defined(__ARM_ARCH_5TE__) || \
defined(__ARM_ARCH_5TEJ__) || \
defined(CAN_USE_ARMV6_INSTRUCTIONS)
# define CAN_USE_ARMV5_INSTRUCTIONS 1
# define CAN_USE_THUMB_INSTRUCTIONS 1

174
deps/v8/src/arm/deoptimizer-arm.cc

@ -50,6 +50,10 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
if (!function->IsOptimized()) return;
// The optimized code is going to be patched, so we cannot use it
// any more. Play safe and reset the whole cache.
function->shared()->ClearOptimizedCodeMap();
// Get the optimized code.
Code* code = function->code();
Address code_start_address = code->instruction_start();
@ -69,8 +73,11 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
RelocInfo::NONE);
// We need calls to have a predictable size in the unoptimized code, but
// this is optimized code, so the call size need not be predictable.
int call_size_in_bytes =
MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry,
RelocInfo::NONE);
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
ASSERT(call_size_in_bytes <= patch_size());
@ -97,8 +104,19 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
// Iterate over all the functions which share the same code object
// and make them use unoptimized version.
Context* context = function->context()->native_context();
Object* element = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
SharedFunctionInfo* shared = function->shared();
while (!element->IsUndefined()) {
JSFunction* func = JSFunction::cast(element);
// Grab element before code replacement as ReplaceCode alters the list.
element = func->next_function_link();
if (func->code() == code) {
func->ReplaceCode(shared->code());
}
}
if (FLAG_trace_deopt) {
PrintF("[forced deoptimization: ");
@ -196,11 +214,11 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
}
static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
ByteArray* translations = data->TranslationByteArray();
int length = data->DeoptCount();
for (int i = 0; i < length; i++) {
if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
if (data->AstId(i) == ast_id) {
TranslationIterator it(translations, data->TranslationIndex(i)->value());
int value = it.Next();
ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
@ -219,7 +237,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
optimized_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
int bailout_id = LookupBailoutId(data, ast_id);
int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
unsigned translation_index = data->TranslationIndex(bailout_id)->value();
ByteArray* translations = data->TranslationByteArray();
@ -239,9 +257,9 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
USE(function);
ASSERT(function == function_);
int closure_id = iterator.Next();
USE(closure_id);
ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
unsigned height = iterator.Next();
unsigned height_in_bytes = height * kPointerSize;
USE(height_in_bytes);
@ -352,8 +370,8 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
if (FLAG_trace_osr) {
PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
ok ? "finished" : "aborted",
reinterpret_cast<intptr_t>(function));
function->PrintName();
reinterpret_cast<intptr_t>(function_));
function_->PrintName();
PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
}
}
@ -577,19 +595,145 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
}
void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
int frame_index,
bool is_setter_stub_frame) {
JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
// The receiver (and the implicit return value, if any) are expected in
// registers by the LoadIC/StoreIC, so they don't belong to the output stack
// frame. This means that we have to use a height of 0.
unsigned height = 0;
unsigned height_in_bytes = height * kPointerSize;
const char* kind = is_setter_stub_frame ? "setter" : "getter";
if (FLAG_trace_deopt) {
PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
}
// We need 5 stack entries from StackFrame::INTERNAL (lr, fp, cp, frame type,
// code object, see MacroAssembler::EnterFrame). For setter stub frames we
// need one additional entry for the implicit return value, see
// StoreStubCompiler::CompileStoreViaSetter.
unsigned fixed_frame_entries = 5 + (is_setter_stub_frame ? 1 : 0);
unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, accessor);
output_frame->SetFrameType(StackFrame::INTERNAL);
// A frame for an accessor stub cannot be the topmost or bottommost one.
ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
ASSERT(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous frame's top and
// this frame's size.
uint32_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
output_frame->SetTop(top_address);
unsigned output_offset = output_frame_size;
// Read caller's PC from the previous frame.
output_offset -= kPointerSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetFrameSlot(output_offset, callers_pc);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
" ; caller's pc\n",
top_address + output_offset, output_offset, callers_pc);
}
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kPointerSize;
intptr_t value = output_[frame_index - 1]->GetFp();
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
" ; caller's fp\n",
fp_value, output_offset, value);
}
// The context is read from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
" ; context\n",
top_address + output_offset, output_offset, value);
}
// A marker value is used in place of the function.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
" ; function (%s sentinel)\n",
top_address + output_offset, output_offset, value, kind);
}
// Get Code object from accessor stub.
output_offset -= kPointerSize;
Builtins::Name name = is_setter_stub_frame ?
Builtins::kStoreIC_Setter_ForDeopt :
Builtins::kLoadIC_Getter_ForDeopt;
Code* accessor_stub = isolate_->builtins()->builtin(name);
value = reinterpret_cast<intptr_t>(accessor_stub);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
" ; code object\n",
top_address + output_offset, output_offset, value);
}
// Skip receiver.
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
iterator->Skip(Translation::NumberOfOperandsFor(opcode));
if (is_setter_stub_frame) {
// The implicit return value was part of the artificial setter stub
// environment.
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
ASSERT(0 == output_offset);
Smi* offset = is_setter_stub_frame ?
isolate_->heap()->setter_stub_deopt_pc_offset() :
isolate_->heap()->getter_stub_deopt_pc_offset();
intptr_t pc = reinterpret_cast<intptr_t>(
accessor_stub->instruction_start() + offset->value());
output_frame->SetPc(pc);
}
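For reference, the INTERNAL frame this function materializes, from higher to lower addresses:
// Frame layout written above (five fixed slots; setter frames add one):
//   [ caller's pc               ]
//   [ caller's fp               ]  <-- this frame's fp
//   [ context                   ]
//   [ Smi(StackFrame::INTERNAL) ]  marker in place of a function
//   [ accessor stub code object ]
//   [ implicit return value     ]  setter frames only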
// This code is very similar to ia32 code, but relies on register names (fp, sp)
// and how the frame is laid out.
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
int frame_index) {
// Read the ast node id, function, and frame height for this output frame.
int node_id = iterator->Next();
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
BailoutId node_id = BailoutId(iterator->Next());
JSFunction* function;
if (frame_index != 0) {
function = JSFunction::cast(ComputeLiteral(iterator->Next()));
} else {
int closure_id = iterator->Next();
USE(closure_id);
ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
function = function_;
}
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" translating ");
function->PrintName();
PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
}
// The 'fixed' part of the frame consists of the incoming parameters and

235
deps/v8/src/arm/full-codegen-arm.cc

@ -134,6 +134,8 @@ void FullCodeGenerator::Generate() {
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@ -182,10 +184,13 @@ void FullCodeGenerator::Generate() {
// Possibly allocate a local context.
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is in r1.
// Argument to NewContext is the function, which is still in r1.
Comment cmnt(masm_, "[ Allocate context");
__ push(r1);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
__ Push(info->scope()->GetScopeInfo());
__ CallRuntime(Runtime::kNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
@ -262,7 +267,7 @@ void FullCodeGenerator::Generate() {
scope()->VisitIllegalRedeclaration(this);
} else {
PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
{ Comment cmnt(masm_, "[ Declarations");
// For named function expressions, declare the function name as a
// constant.
@ -277,7 +282,7 @@ void FullCodeGenerator::Generate() {
}
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(AstNode::kDeclarationsId, NO_REGISTERS);
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
@ -328,7 +333,7 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
if (isolate()->IsDebuggerActive()) {
// Detect debug break requests as soon as possible.
reset_value = 10;
reset_value = FLAG_interrupt_budget >> 4;
}
__ mov(r2, Operand(profiling_counter_));
__ mov(r3, Operand(Smi::FromInt(reset_value)));
@ -336,10 +341,6 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
static const int kMaxBackEdgeWeight = 127;
static const int kBackEdgeDistanceDivisor = 142;
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
@ -353,7 +354,7 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(kMaxBackEdgeWeight,
Max(1, distance / kBackEdgeDistanceDivisor));
Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
__ b(pl, &ok);
@ -405,7 +406,7 @@ void FullCodeGenerator::EmitReturnSequence() {
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(kMaxBackEdgeWeight,
Max(1, distance / kBackEdgeDistanceDivisor));
Max(1, distance / kBackEdgeDistanceUnit));
}
EmitProfilingCounterDecrement(weight);
Label ok;
@ -675,18 +676,9 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
if (CpuFeatures::IsSupported(VFP3)) {
ToBooleanStub stub(result_register());
__ CallStub(&stub);
__ tst(result_register(), result_register());
} else {
// Call the runtime to find the boolean value of the source and then
// translate it into control flow to the pair of labels.
__ push(result_register());
__ CallRuntime(Runtime::kToBool, 1);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(r0, ip);
}
ToBooleanStub stub(result_register());
__ CallStub(&stub);
__ tst(result_register(), result_register());
Split(ne, if_true, if_false, fall_through);
}
@ -787,7 +779,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
__ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
__ CompareRoot(r1, Heap::kWithContextMapRootIndex);
@ -840,10 +832,9 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
__ mov(r2, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
ASSERT(mode == VAR || mode == LET ||
mode == CONST || mode == CONST_HARMONY);
PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
? READ_ONLY : NONE;
ASSERT(IsDeclaredVariableMode(mode));
PropertyAttributes attr =
IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
__ mov(r1, Operand(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@ -1133,26 +1124,34 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// modification check. Otherwise, we got a fixed array, and we have
// to do a slow check.
Label fixed_array;
__ mov(r2, r0);
__ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
__ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
__ cmp(r1, ip);
__ cmp(r2, ip);
__ b(ne, &fixed_array);
// We got a map in register r0. Get the enumeration cache from it.
Label no_descriptors;
__ bind(&use_cache);
__ LoadInstanceDescriptors(r0, r1);
__ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
__ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
__ EnumLength(r1, r0);
__ cmp(r1, Operand(Smi::FromInt(0)));
__ b(eq, &no_descriptors);
__ LoadInstanceDescriptors(r0, r2, r4);
__ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheOffset));
__ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
__ push(r0); // Map.
__ ldr(r1, FieldMemOperand(r2, FixedArray::kLengthOffset));
__ mov(r0, Operand(Smi::FromInt(0)));
// Push enumeration cache, enumeration cache length (as smi) and zero.
__ Push(r2, r1, r0);
__ jmp(&loop);
__ bind(&no_descriptors);
__ Drop(1);
__ jmp(&exit);
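The new no_descriptors path lets for-in bail out before building any loop state when the map reports zero enumerable properties. Roughly, in C++ (EnumLength and the enum-cache accessor are named after the fields the assembly reads; treat them as illustrative):
// Shape of the fast-path decision above.
static void ForInPrologue(Map* map) {
  if (map->EnumLength() == 0) return;  // nothing enumerable: exit the loop
  FixedArray* keys = map->instance_descriptors()->GetEnumCache();
  // The generated code then pushes map, keys, Smi(keys->length()) and
  // Smi(0) as the four for-in state slots before entering the loop.
}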
// We got a fixed array in register r0. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
@ -1161,7 +1160,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Object>(
Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
RecordTypeFeedbackCell(stmt->PrepareId(), cell);
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(r1, cell);
__ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
__ str(r2, FieldMemOperand(r1, JSGlobalPropertyCell::kValueOffset));
@ -1317,9 +1316,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ Move(next, current);
}
__ bind(&loop);
// Terminate at global context.
// Terminate at native context.
__ ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
__ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
__ cmp(temp, ip);
__ b(eq, &fast);
// Check that extension is NULL.
@ -1607,7 +1606,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// marked expressions, no store code is emitted.
expr->CalculateEmitStore(zone());
AccessorTable accessor_table(isolate()->zone());
AccessorTable accessor_table(zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@ -1633,7 +1632,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
CallIC(ic, RelocInfo::CODE_TARGET, key->id());
CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@ -1839,11 +1838,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
}
}
@ -1900,7 +1899,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(r2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name r0 and r2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
@ -1908,7 +1907,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
}
@ -1935,7 +1934,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@ -2018,7 +2018,8 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(r1);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(r0);
}
@ -2149,7 +2150,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// in harmony mode.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, r1);
if (FLAG_debug_code && op == Token::INIT_LET) {
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
__ ldr(r2, location);
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
@ -2207,7 +2208,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@ -2253,7 +2254,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@ -2277,6 +2278,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
if (key->IsPropertyName()) {
VisitForAccumulatorValue(expr->obj());
EmitNamedPropertyLoad(expr);
PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(r0);
} else {
VisitForStackValue(expr->obj());
@ -2290,7 +2292,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
RelocInfo::Mode rmode,
unsigned ast_id) {
TypeFeedbackId ast_id) {
ic_total_count_++;
__ Call(code, rmode, ast_id);
}
@ -2312,7 +2314,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
// Call the IC initialization code.
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
CallIC(ic, mode, expr->id());
CallIC(ic, mode, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@ -2345,7 +2347,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
__ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@ -2365,16 +2367,14 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
// Record source position for debugger.
SetSourcePosition(expr->position());
// Record call targets in unoptimized code, but not in the snapshot.
if (!Serializer::enabled()) {
flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
RecordTypeFeedbackCell(expr->id(), cell);
__ mov(r2, Operand(cell));
}
// Record call targets in unoptimized code.
flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
__ mov(r2, Operand(cell));
CallFunctionStub stub(arg_count, flags);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
@ -2564,21 +2564,15 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ mov(r0, Operand(arg_count));
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
// Record call targets in unoptimized code, but not in the snapshot.
CallFunctionFlags flags;
if (!Serializer::enabled()) {
flags = RECORD_CALL_TARGET;
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
RecordTypeFeedbackCell(expr->id(), cell);
__ mov(r2, Operand(cell));
} else {
flags = NO_CALL_FUNCTION_FLAGS;
}
// Record call targets in unoptimized code.
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
__ mov(r2, Operand(cell));
CallConstructStub stub(flags);
CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(r0);
@ -2720,7 +2714,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
if (FLAG_debug_code) __ AbortIfSmi(r0);
if (generate_debug_code_) __ AbortIfSmi(r0);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset));
@ -2737,7 +2731,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Look for valueOf symbol in the descriptor array, and indicate false if
// found. The type is not checked, so if it is a transition it is a false
// negative.
__ LoadInstanceDescriptors(r1, r4);
__ LoadInstanceDescriptors(r1, r4, r3);
__ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
// r4: descriptor array
// r3: length of descriptor array
@ -2751,8 +2745,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Calculate location of the first key name.
__ add(r4,
r4,
Operand(FixedArray::kHeaderSize - kHeapObjectTag +
DescriptorArray::kFirstIndex * kPointerSize));
Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
// Loop through all the keys in the descriptor array. If one of these is the
// symbol valueOf the result is false.
Label entry, loop;
@ -2764,7 +2757,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ ldr(r3, MemOperand(r4, 0));
__ cmp(r3, ip);
__ b(eq, if_false);
__ add(r4, r4, Operand(kPointerSize));
__ add(r4, r4, Operand(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
__ cmp(r4, Operand(r2));
__ b(ne, &loop);
@ -2774,8 +2767,8 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
__ JumpIfSmi(r2, if_false);
__ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
__ ldr(r3, ContextOperand(cp, Context::GLOBAL_INDEX));
__ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
__ ldr(r3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
__ ldr(r3, ContextOperand(r3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
__ cmp(r2, r3);
__ b(ne, if_false);
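The loop above walks the descriptor array with a stride of kDescriptorSize words per entry rather than one word per key, since each descriptor now spans several slots. A minimal standalone sketch of the same scan, outside the V8 tree; the kDescriptorSize value of 3 (key, details, value) is an assumption for illustration:

#include <cstdio>
#include <cstring>

// Scan a flattened descriptor array whose entries are kDescriptorSize
// words wide, key in the first word, and report whether any key is
// "valueOf" -- the check the assembly loop performs with r4 as the
// cursor and r2 as the end pointer.
static const int kDescriptorSize = 3;  // assumption: key, details, value

bool HasValueOf(const char* keys[], int num_descriptors) {
  const char** cursor = keys;  // first key slot
  const char** end = keys + num_descriptors * kDescriptorSize;
  for (; cursor < end; cursor += kDescriptorSize) {  // stride, not ++
    if (std::strcmp(*cursor, "valueOf") == 0) return true;  // -> if_false
  }
  return false;
}

int main() {
  const char* descs[] = {"toString", "", "", "valueOf", "", ""};
  std::printf("%d\n", HasValueOf(descs, 2));  // prints 1
}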
@ -3052,13 +3045,14 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
// Convert 32 random bits in r0 to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
if (CpuFeatures::IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP2)) {
__ PrepareCallCFunction(1, r0);
__ ldr(r0, ContextOperand(context_register(), Context::GLOBAL_INDEX));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
__ ldr(r0,
ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
CpuFeatures::Scope scope(VFP3);
CpuFeatures::Scope scope(VFP2);
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
// Create this constant using mov/orr to avoid PC relative load.
__ mov(r1, Operand(0x41000000));
@ -3075,9 +3069,10 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
__ mov(r0, r4);
} else {
__ PrepareCallCFunction(2, r0);
__ ldr(r1, ContextOperand(context_register(), Context::GLOBAL_INDEX));
__ ldr(r1,
ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
__ mov(r0, Operand(r4));
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kNativeContextOffset));
__ CallCFunction(
ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
}
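The bit trick described in the comment above can be checked in isolation: place 32 random bits in the low word of a double whose high word is 0x41300000 (exponent 2^20, mantissa otherwise zero), then subtract 1.0 x 2^20, leaving 0.(32 random bits) in [0, 1). A minimal standalone sketch:

#include <cstdint>
#include <cstring>
#include <cstdio>

// High word 0x41300000 encodes 1.<20 zeros><32 random bits> x 2^20;
// subtracting 1.0 x 2^20 leaves the fraction random_bits / 2^32.
double RandomBitsToDouble(uint32_t random_bits) {
  uint64_t bits = (uint64_t{0x41300000} << 32) | random_bits;
  double d;
  std::memcpy(&d, &bits, sizeof(d));  // bit-pattern copy, same layout
  return d - 1048576.0;               // 1.0 x 2^20
}

int main() {
  std::printf("%f\n", RandomBitsToDouble(0x80000000u));  // 0.500000
}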
@ -3139,20 +3134,19 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0)); // Load the object.
Label runtime, done;
Label runtime, done, not_date_object;
Register object = r0;
Register result = r0;
Register scratch0 = r9;
Register scratch1 = r1;
#ifdef DEBUG
__ AbortIfSmi(object);
__ JumpIfSmi(object, &not_date_object);
__ CompareObjectType(object, scratch1, scratch1, JS_DATE_TYPE);
__ Assert(eq, "Trying to get date field from non-date.");
#endif
__ b(ne, &not_date_object);
if (index->value() == 0) {
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
__ jmp(&done);
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
@ -3169,8 +3163,12 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
__ PrepareCallCFunction(2, scratch1);
__ mov(r1, Operand(index));
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
__ bind(&done);
__ jmp(&done);
}
__ bind(&not_date_object);
__ CallRuntime(Runtime::kThrowNotDateError, 0);
__ bind(&done);
context()->Plug(r0);
}
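For reference, the shape of the date-field fast path above: field 0 is the raw time value and is always read directly; fields below kFirstUncachedField are cached on the JSDate object and stay valid only while the object's stamp matches the isolate-wide date_cache_stamp, which is bumped when timezone or DST data may have changed. A minimal sketch outside the V8 tree; all names and the recompute step here are illustrative:

#include <cstdio>

const int kFirstUncachedField = 8;  // assumption for the sketch
int g_date_cache_stamp = 1;         // stands in for date_cache_stamp

struct Date {
  double value;  // field 0: raw time value, read directly
  int stamp;     // stamp captured when the cache was filled
  int cached[kFirstUncachedField];
};

int ComputeDateField(const Date& d, int index) {
  return index * 100;  // placeholder for the runtime C call
}

int GetDateField(Date& d, int index) {
  if (index == 0) return static_cast<int>(d.value);
  if (index < kFirstUncachedField && d.stamp == g_date_cache_stamp)
    return d.cached[index];           // fast path: cache still valid
  return ComputeDateField(d, index);  // slow path, as in the C call above
}

int main() {
  Date d = {42.0, 1, {0, 7, 0, 0, 0, 0, 0, 0}};
  std::printf("%d %d\n", GetDateField(d, 0), GetDateField(d, 1));  // 42 7
}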
@ -3181,7 +3179,7 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
if (CpuFeatures::IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP2)) {
MathPowStub stub(MathPowStub::ON_STACK);
__ CallStub(&stub);
} else {
@ -3433,10 +3431,11 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
}
VisitForAccumulatorValue(args->last()); // Function.
// Check for proxy.
Label proxy, done;
__ CompareObjectType(r0, r1, r1, JS_FUNCTION_PROXY_TYPE);
__ b(eq, &proxy);
Label runtime, done;
// Check for non-function argument (including proxy).
__ JumpIfSmi(r0, &runtime);
__ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
__ b(ne, &runtime);
// InvokeFunction requires the function in r1. Move it in there.
__ mov(r1, result_register());
@ -3446,7 +3445,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
__ bind(&proxy);
__ bind(&runtime);
__ push(r0);
__ CallRuntime(Runtime::kCall, args->length());
__ bind(&done);
@ -3474,7 +3473,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
Handle<FixedArray> jsfunction_result_caches(
isolate()->global_context()->jsfunction_result_caches());
isolate()->native_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
@ -3486,8 +3485,8 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
Register key = r0;
Register cache = r1;
__ ldr(cache, ContextOperand(cp, Context::GLOBAL_INDEX));
__ ldr(cache, FieldMemOperand(cache, GlobalObject::kGlobalContextOffset));
__ ldr(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ ldr(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
__ ldr(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
__ ldr(cache,
FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
@ -3584,9 +3583,8 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
if (FLAG_debug_code) {
__ AbortIfNotString(r0);
}
__ AbortIfNotString(r0);
__ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
__ IndexFromHash(r0, r0);
@ -3658,7 +3656,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// string_length: Accumulated sum of string lengths (smi).
// element: Current array element.
// elements_end: Array end.
if (FLAG_debug_code) {
if (generate_debug_code_) {
__ cmp(array_length, Operand(0));
__ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin");
}
@ -3856,7 +3854,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
CallIC(ic, mode, expr->id());
CallIC(ic, mode, expr->CallRuntimeFeedbackId());
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
@ -4011,7 +4009,8 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register r0.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
expr->UnaryOperationFeedbackId());
context()->Plug(r0);
}
@ -4069,7 +4068,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == VARIABLE) {
PrepareForBailout(expr->expression(), TOS_REG);
} else {
PrepareForBailoutForId(expr->CountId(), TOS_REG);
PrepareForBailoutForId(prop->LoadId(), TOS_REG);
}
// Call ToNumber only if operand is not a smi.
@ -4122,7 +4121,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@ -4154,7 +4153,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@ -4171,7 +4170,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@ -4380,7 +4379,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ cmp(r0, Operand(0));
@ -4464,7 +4463,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
Scope* declaration_scope = scope()->DeclarationScope();
if (declaration_scope->is_global_scope() ||
declaration_scope->is_module_scope()) {
// Contexts nested in the global context have a canonical empty function
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
@ -4509,6 +4508,7 @@ void FullCodeGenerator::EnterFinallyBlock() {
ExternalReference::address_of_has_pending_message(isolate());
__ mov(ip, Operand(has_pending_message));
__ ldr(r1, MemOperand(ip));
__ SmiTag(r1);
__ push(r1);
ExternalReference pending_message_script =
@ -4529,6 +4529,7 @@ void FullCodeGenerator::ExitFinallyBlock() {
__ str(r1, MemOperand(ip));
__ pop(r1);
__ SmiUntag(r1);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ mov(ip, Operand(has_pending_message));
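The SmiTag/SmiUntag pair added in these two hunks keeps the pushed pending-message flag GC-safe: V8 stores small integers ("smis") as the value shifted left by one, low bit clear, while heap pointers carry a set low bit, so a raw 0/1 word on the stack could be misread by the collector. A minimal sketch of the tagging scheme as used on 32-bit ARM:

#include <cstdint>
#include <cassert>

int32_t SmiTag(int32_t value) { return value << 1; }
int32_t SmiUntag(int32_t smi) { return smi >> 1; }
bool IsSmi(int32_t word) { return (word & 1) == 0; }  // kSmiTagMask

int main() {
  int32_t flag = 1;               // has_pending_message
  int32_t pushed = SmiTag(flag);  // what EnterFinallyBlock pushes
  assert(IsSmi(pushed));
  assert(SmiUntag(pushed) == flag);  // what ExitFinallyBlock recovers
}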

2
deps/v8/src/arm/ic-arm.cc

@ -396,7 +396,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(kind,
MONOMORPHIC,
extra_state,
NORMAL,
Code::NORMAL,
argc);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);

237
deps/v8/src/arm/lithium-arm.cc

@ -407,24 +407,14 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
}
LChunk::LChunk(CompilationInfo* info, HGraph* graph)
: spill_slot_count_(0),
info_(info),
graph_(graph),
instructions_(32, graph->zone()),
pointer_maps_(8, graph->zone()),
inlined_closures_(1, graph->zone()) {
}
int LChunk::GetNextSpillIndex(bool is_double) {
int LPlatformChunk::GetNextSpillIndex(bool is_double) {
// Skip a slot for a double-width slot.
if (is_double) spill_slot_count_++;
return spill_slot_count_++;
}
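The function above burns one extra index for doubles because a double value needs two consecutive word-sized stack slots. A minimal sketch of the same allocation, with the slot counter as a free function rather than a chunk member:

#include <cstdio>

int spill_slot_count = 0;

int GetNextSpillIndex(bool is_double) {
  if (is_double) spill_slot_count++;  // skip a slot for the double pair
  return spill_slot_count++;
}

int main() {
  int a = GetNextSpillIndex(false);  // 0
  int b = GetNextSpillIndex(true);   // 2 (index 1 skipped for the pair)
  int c = GetNextSpillIndex(false);  // 3
  std::printf("%d %d %d\n", a, b, c);
}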
LOperand* LChunk::GetNextSpillSlot(bool is_double) {
LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
int index = GetNextSpillIndex(is_double);
if (is_double) {
return LDoubleStackSlot::Create(index, zone());
@ -434,120 +424,9 @@ LOperand* LChunk::GetNextSpillSlot(bool is_double) {
}
void LChunk::MarkEmptyBlocks() {
HPhase phase("L_Mark empty blocks", this);
for (int i = 0; i < graph()->blocks()->length(); ++i) {
HBasicBlock* block = graph()->blocks()->at(i);
int first = block->first_instruction_index();
int last = block->last_instruction_index();
LInstruction* first_instr = instructions()->at(first);
LInstruction* last_instr = instructions()->at(last);
LLabel* label = LLabel::cast(first_instr);
if (last_instr->IsGoto()) {
LGoto* goto_instr = LGoto::cast(last_instr);
if (label->IsRedundant() &&
!label->is_loop_header()) {
bool can_eliminate = true;
for (int i = first + 1; i < last && can_eliminate; ++i) {
LInstruction* cur = instructions()->at(i);
if (cur->IsGap()) {
LGap* gap = LGap::cast(cur);
if (!gap->IsRedundant()) {
can_eliminate = false;
}
} else {
can_eliminate = false;
}
}
if (can_eliminate) {
label->set_replacement(GetLabel(goto_instr->block_id()));
}
}
}
}
}
void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
int index = -1;
if (instr->IsControl()) {
instructions_.Add(gap, zone());
index = instructions_.length();
instructions_.Add(instr, zone());
} else {
index = instructions_.length();
instructions_.Add(instr, zone());
instructions_.Add(gap, zone());
}
if (instr->HasPointerMap()) {
pointer_maps_.Add(instr->pointer_map(), zone());
instr->pointer_map()->set_lithium_position(index);
}
}
LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
return LConstantOperand::Create(constant->id(), zone());
}
int LChunk::GetParameterStackSlot(int index) const {
// The receiver is at index 0, the first parameter at index 1, so we
// shift all parameter indexes down by the number of parameters, and
// make sure they end up negative so they are distinguishable from
// spill slots.
int result = index - info()->scope()->num_parameters() - 1;
ASSERT(result < 0);
return result;
}
// A parameter relative to ebp in the arguments stub.
int LChunk::ParameterAt(int index) {
ASSERT(-1 <= index); // -1 is the receiver.
return (1 + info()->scope()->num_parameters() - index) *
kPointerSize;
}
LGap* LChunk::GetGapAt(int index) const {
return LGap::cast(instructions_[index]);
}
bool LChunk::IsGapAt(int index) const {
return instructions_[index]->IsGap();
}
int LChunk::NearestGapPos(int index) const {
while (!IsGapAt(index)) index--;
return index;
}
void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
GetGapAt(index)->GetOrCreateParallelMove(
LGap::START, zone())->AddMove(from, to, zone());
}
Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
}
Representation LChunk::LookupLiteralRepresentation(
LConstantOperand* operand) const {
return graph_->LookupValue(operand->index())->representation();
}
LChunk* LChunkBuilder::Build() {
LPlatformChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
chunk_ = new(zone()) LChunk(info(), graph());
chunk_ = new(zone()) LPlatformChunk(info(), graph());
HPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
@ -562,17 +441,8 @@ LChunk* LChunkBuilder::Build() {
}
void LChunkBuilder::Abort(const char* format, ...) {
if (FLAG_trace_bailout) {
SmartArrayPointer<char> name(
info()->shared_info()->DebugName()->ToCString());
PrintF("Aborting LChunk building in @\"%s\": ", *name);
va_list arguments;
va_start(arguments, format);
OS::VPrint(format, arguments);
va_end(arguments);
PrintF("\n");
}
void LChunkBuilder::Abort(const char* reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@ -741,7 +611,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
ASSERT(instruction_pending_deoptimization_environment_ == NULL);
ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
ASSERT(pending_deoptimization_ast_id_.IsNone());
instruction_pending_deoptimization_environment_ = instr;
pending_deoptimization_ast_id_ = sim->ast_id();
}
@ -836,13 +706,16 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
// Shift operations can only deoptimize if we do a logical shift
// by 0 and the result cannot be truncated to int32.
bool may_deopt = (op == Token::SHR && constant_value == 0);
bool does_deopt = false;
if (may_deopt) {
for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
does_deopt = true;
break;
if (op == Token::SHR && constant_value == 0) {
if (FLAG_opt_safe_uint32_operations) {
does_deopt = !instr->CheckFlag(HInstruction::kUint32);
} else {
for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
does_deopt = true;
break;
}
}
}
}
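The comment above is worth unpacking: JavaScript's x >>> 0 reinterprets an int32 as uint32, and for negative x the result exceeds INT32_MAX and no longer fits in int32. If every use truncates back to int32, or, with the new --opt-safe-uint32-operations path, the value carries the kUint32 flag, the reinterpretation is harmless; otherwise the compiled code must deoptimize. A minimal sketch of the arithmetic:

#include <cstdint>
#include <cstdio>

int main() {
  int32_t x = -1;
  uint32_t shr0 = static_cast<uint32_t>(x) >> 0;  // x >>> 0 in JS
  std::printf("%u\n", shr0);                      // 4294967295 > INT32_MAX
  int32_t truncated = static_cast<int32_t>(shr0); // a truncating use: -1
  std::printf("%d\n", truncated);
}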
@ -975,8 +848,8 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber ||
BailoutId ast_id = hydrogen_env->ast_id();
ASSERT(!ast_id.IsNone() ||
hydrogen_env->frame_type() != JS_FUNCTION);
int value_count = hydrogen_env->length();
LEnvironment* result = new(zone()) LEnvironment(
@ -1001,7 +874,9 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
} else {
op = UseAny(value);
}
result->AddValue(op, value->representation());
result->AddValue(op,
value->representation(),
value->CheckFlag(HInstruction::kUint32));
}
if (hydrogen_env->frame_type() == JS_FUNCTION) {
@ -1480,6 +1355,25 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
LOperand* left = NULL;
LOperand* right = NULL;
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
left = UseRegisterAtStart(instr->LeastConstantOperand());
right = UseOrConstantAtStart(instr->MostConstantOperand());
} else {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
left = UseRegisterAtStart(instr->left());
right = UseRegisterAtStart(instr->right());
}
return DefineAsRegister(new(zone()) LMathMinMax(left, right));
}
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
ASSERT(instr->representation().IsDouble());
// We call a C function for double power. It can't trigger a GC.
@ -1645,6 +1539,12 @@ LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
}
LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
LOperand* map = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LMapEnumLength(map));
}
LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
LOperand* object = UseRegisterAtStart(instr->value());
return DefineAsRegister(new(zone()) LElementsKind(object));
@ -1662,12 +1562,12 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), r0);
LDateField* result =
new(zone()) LDateField(object, FixedTemp(r1), instr->index());
return MarkAsCall(DefineFixed(result, r0), instr);
return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
}
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterAtStart(instr->index());
LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = UseRegister(instr->length());
return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
}
@ -1751,7 +1651,10 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
if (val->HasRange() && val->range()->IsInSmiRange()) {
if (val->CheckFlag(HInstruction::kUint32)) {
LNumberTagU* result = new(zone()) LNumberTagU(value);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
} else if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineAsRegister(new(zone()) LSmiTag(value));
} else {
LNumberTagI* result = new(zone()) LNumberTagI(value);
@ -1759,8 +1662,13 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
} else {
ASSERT(to.IsDouble());
LOperand* value = Use(instr->value());
return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
if (instr->value()->CheckFlag(HInstruction::kUint32)) {
return DefineAsRegister(
new(zone()) LUint32ToDouble(UseRegister(instr->value())));
} else {
return DefineAsRegister(
new(zone()) LInteger32ToDouble(Use(instr->value())));
}
}
}
UNREACHABLE();
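The two kUint32 branches above exist because tagging and conversion both treat a word as signed by default: any uint32 with the top bit set cannot be a smi, so LNumberTagU must box it as a heap number, and converting it to double needs an unsigned conversion (LUint32ToDouble) rather than the signed one LInteger32ToDouble performs. A minimal sketch of the difference:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t v = 0x80000000u;                     // 2147483648
  std::printf("%f\n", static_cast<double>(v));  // 2147483648.000000 (right)
  std::printf("%f\n", static_cast<double>(static_cast<int32_t>(v)));
                                                // -2147483648.000000 (wrong)
}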
@ -1956,9 +1864,10 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
HLoadKeyedFastElement* instr) {
ASSERT(instr->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
if (instr->RequiresHoleCheck()) AssignEnvironment(result);
return DefineAsRegister(result);
@ -1968,7 +1877,8 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
HLoadKeyedFastDoubleElement* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->key()->representation().IsInteger32());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
LOperand* elements = UseTempRegister(instr->elements());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastDoubleElement* result =
@ -1987,7 +1897,8 @@ LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
LLoadKeyedSpecializedArrayElement* result =
@ -2015,7 +1926,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
bool needs_write_barrier = instr->NeedsWriteBarrier();
ASSERT(instr->value()->representation().IsTagged());
ASSERT(instr->object()->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
LOperand* obj = UseTempRegister(instr->object());
LOperand* val = needs_write_barrier
@ -2032,7 +1944,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
HStoreKeyedFastDoubleElement* instr) {
ASSERT(instr->value()->representation().IsDouble());
ASSERT(instr->elements()->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
LOperand* elements = UseRegisterAtStart(instr->elements());
LOperand* val = UseTempRegister(instr->value());
@ -2053,7 +1966,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
ASSERT(instr->key()->representation().IsInteger32());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
bool val_is_temp_register =
@ -2309,7 +2223,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instruction_pending_deoptimization_environment_->
SetDeferredLazyDeoptimizationEnvironment(result->environment());
instruction_pending_deoptimization_environment_ = NULL;
pending_deoptimization_ast_id_ = AstNode::kNoNumber;
pending_deoptimization_ast_id_ = BailoutId::None();
return result;
}
@ -2335,7 +2249,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->function(),
undefined,
instr->call_kind(),
instr->is_construct());
instr->inlining_kind());
if (instr->arguments_var() != NULL) {
inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
}
@ -2381,8 +2295,9 @@ LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
LOperand* map = UseRegister(instr->map());
LOperand* scratch = TempRegister();
return AssignEnvironment(DefineAsRegister(
new(zone()) LForInCacheArray(map)));
new(zone()) LForInCacheArray(map, scratch)));
}

136
deps/v8/src/arm/lithium-arm.h

@ -108,6 +108,7 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsNilAndBranch) \
@ -115,7 +116,6 @@ class LCodeGen;
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
V(StringCompareAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
@ -132,11 +132,14 @@ class LCodeGen;
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathFloorOfDiv) \
V(MathMinMax) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
V(NumberTagI) \
V(NumberTagU) \
V(NumberUntagD) \
V(ObjectLiteral) \
V(OsrEntry) \
@ -163,6 +166,7 @@ class LCodeGen;
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
V(StringCompareAndBranch) \
V(StringLength) \
V(SubI) \
V(TaggedToI) \
@ -257,8 +261,6 @@ class LInstruction: public ZoneObject {
virtual bool HasResult() const = 0;
virtual LOperand* result() = 0;
virtual int InputCount() = 0;
virtual LOperand* InputAt(int i) = 0;
virtual int TempCount() = 0;
virtual LOperand* TempAt(int i) = 0;
@ -270,6 +272,11 @@ class LInstruction: public ZoneObject {
#endif
private:
// Iterator support.
friend class InputIterator;
virtual int InputCount() = 0;
virtual LOperand* InputAt(int i) = 0;
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
@ -289,7 +296,6 @@ class LTemplateInstruction: public LInstruction {
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() { return results_[0]; }
int InputCount() { return I; }
LOperand* InputAt(int i) { return inputs_[i]; }
int TempCount() { return T; }
@ -299,6 +305,9 @@ class LTemplateInstruction: public LInstruction {
EmbeddedContainer<LOperand*, R> results_;
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
private:
virtual int InputCount() { return I; }
};
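The hunks above move InputCount()/InputAt() out of the public interface and into private virtuals that only the input iterator may call, so external code can no longer walk an instruction's inputs directly. A minimal sketch of that C++ pattern, private virtuals plus a friend iterator, with all names illustrative:

#include <cstdio>

class Instruction {
 public:
  virtual ~Instruction() {}
 private:
  friend class InputIterator;  // the only sanctioned accessor
  virtual int InputCount() = 0;
  virtual int InputAt(int i) = 0;
};

class InputIterator {
 public:
  explicit InputIterator(Instruction* instr) : instr_(instr), i_(0) {}
  bool Done() const { return i_ >= instr_->InputCount(); }
  int Current() const { return instr_->InputAt(i_); }
  void Advance() { ++i_; }
 private:
  Instruction* instr_;
  int i_;
};

class AddInstruction : public Instruction {
 private:
  virtual int InputCount() { return 2; }          // overrides stay private
  virtual int InputAt(int i) { return i == 0 ? 10 : 32; }
};

int main() {
  AddInstruction add;
  for (InputIterator it(&add); !it.Done(); it.Advance())
    std::printf("%d\n", it.Current());  // 10, then 32
}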
@ -859,6 +868,7 @@ class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
LOperand* length() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
};
@ -993,6 +1003,16 @@ class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
};
class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LMapEnumLength(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
};
class LElementsKind: public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
@ -1083,6 +1103,18 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
};
class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
public:
LMathMinMax(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
};
class LPower: public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
@ -1591,6 +1623,16 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
};
class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
@ -1601,6 +1643,16 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
};
class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagU(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
};
class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
@ -2177,13 +2229,15 @@ class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
};
class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
class LForInCacheArray: public LTemplateInstruction<1, 1, 1> {
public:
explicit LForInCacheArray(LOperand* map) {
explicit LForInCacheArray(LOperand* map, LOperand* scratch) {
inputs_[0] = map;
temps_[0] = scratch;
}
LOperand* map() { return inputs_[0]; }
LOperand* scratch() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
@ -2222,65 +2276,13 @@ class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
class LChunkBuilder;
class LChunk: public ZoneObject {
class LPlatformChunk: public LChunk {
public:
explicit LChunk(CompilationInfo* info, HGraph* graph);
void AddInstruction(LInstruction* instruction, HBasicBlock* block);
LConstantOperand* DefineConstantOperand(HConstant* constant);
Handle<Object> LookupLiteral(LConstantOperand* operand) const;
Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
LPlatformChunk(CompilationInfo* info, HGraph* graph)
: LChunk(info, graph) { }
int GetNextSpillIndex(bool is_double);
LOperand* GetNextSpillSlot(bool is_double);
int ParameterAt(int index);
int GetParameterStackSlot(int index) const;
int spill_slot_count() const { return spill_slot_count_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
void AddGapMove(int index, LOperand* from, LOperand* to);
LGap* GetGapAt(int index) const;
bool IsGapAt(int index) const;
int NearestGapPos(int index) const;
void MarkEmptyBlocks();
const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
LLabel* GetLabel(int block_id) const {
HBasicBlock* block = graph_->blocks()->at(block_id);
int first_instruction = block->first_instruction_index();
return LLabel::cast(instructions_[first_instruction]);
}
int LookupDestination(int block_id) const {
LLabel* cur = GetLabel(block_id);
while (cur->replacement() != NULL) {
cur = cur->replacement();
}
return cur->block_id();
}
Label* GetAssemblyLabel(int block_id) const {
LLabel* label = GetLabel(block_id);
ASSERT(!label->HasReplacement());
return label->label();
}
const ZoneList<Handle<JSFunction> >* inlined_closures() const {
return &inlined_closures_;
}
void AddInlinedClosure(Handle<JSFunction> closure) {
inlined_closures_.Add(closure, zone());
}
Zone* zone() const { return graph_->zone(); }
private:
int spill_slot_count_;
CompilationInfo* info_;
HGraph* const graph_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
ZoneList<Handle<JSFunction> > inlined_closures_;
};
@ -2299,10 +2301,10 @@ class LChunkBuilder BASE_EMBEDDED {
allocator_(allocator),
position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
pending_deoptimization_ast_id_(BailoutId::None()) { }
// Build the sequence for the graph.
LChunk* Build();
LPlatformChunk* Build();
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
@ -2321,7 +2323,7 @@ class LChunkBuilder BASE_EMBEDDED {
ABORTED
};
LChunk* chunk() const { return chunk_; }
LPlatformChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
Zone* zone() const { return zone_; }
@ -2331,7 +2333,7 @@ class LChunkBuilder BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
void Abort(const char* format, ...);
void Abort(const char* reason);
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
@ -2421,7 +2423,7 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* DoArithmeticT(Token::Value op,
HArithmeticBinaryOperation* instr);
LChunk* chunk_;
LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
Zone* zone_;
@ -2433,7 +2435,7 @@ class LChunkBuilder BASE_EMBEDDED {
LAllocator* allocator_;
int position_;
LInstruction* instruction_pending_deoptimization_environment_;
int pending_deoptimization_ast_id_;
BailoutId pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};

503
deps/v8/src/arm/lithium-codegen-arm.cc

@ -91,17 +91,8 @@ void LCodeGen::FinishCode(Handle<Code> code) {
}
void LCodeGen::Abort(const char* format, ...) {
if (FLAG_trace_bailout) {
SmartArrayPointer<char> name(
info()->shared_info()->DebugName()->ToCString());
PrintF("Aborting LCodeGen in @\"%s\": ", *name);
va_list arguments;
va_start(arguments, format);
OS::VPrint(format, arguments);
va_end(arguments);
PrintF("\n");
}
void LCodeGen::Abort(const char* reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
@ -127,6 +118,8 @@ void LCodeGen::Comment(const char* format, ...) {
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@ -322,7 +315,8 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
return ToRegister(op->index());
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
Handle<Object> literal = chunk_->LookupLiteral(const_op);
HConstant* constant = chunk_->LookupConstant(const_op);
Handle<Object> literal = constant->handle();
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
@ -360,7 +354,8 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
return ToDoubleRegister(op->index());
} else if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
Handle<Object> literal = chunk_->LookupLiteral(const_op);
HConstant* constant = chunk_->LookupConstant(const_op);
Handle<Object> literal = constant->handle();
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
@ -386,9 +381,9 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
Handle<Object> literal = chunk_->LookupLiteral(op);
HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
return literal;
return constant->handle();
}
@ -398,33 +393,33 @@ bool LCodeGen::IsInteger32(LConstantOperand* op) const {
int LCodeGen::ToInteger32(LConstantOperand* op) const {
Handle<Object> value = chunk_->LookupLiteral(op);
HConstant* constant = chunk_->LookupConstant(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
value->Number());
return static_cast<int32_t>(value->Number());
ASSERT(constant->HasInteger32Value());
return constant->Integer32Value();
}
double LCodeGen::ToDouble(LConstantOperand* op) const {
Handle<Object> value = chunk_->LookupLiteral(op);
return value->Number();
HConstant* constant = chunk_->LookupConstant(op);
ASSERT(constant->HasDoubleValue());
return constant->DoubleValue();
}
Operand LCodeGen::ToOperand(LOperand* op) {
if (op->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(op);
Handle<Object> literal = chunk_->LookupLiteral(const_op);
HConstant* constant = chunk()->LookupConstant(const_op);
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
return Operand(static_cast<int32_t>(literal->Number()));
ASSERT(constant->HasInteger32Value());
return Operand(constant->Integer32Value());
} else if (r.IsDouble()) {
Abort("ToOperand Unsupported double immediate.");
}
ASSERT(r.IsTagged());
return Operand(literal);
return Operand(constant->handle());
} else if (op->IsRegister()) {
return Operand(ToRegister(op));
} else if (op->IsDoubleRegister()) {
@ -478,7 +473,10 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
int height = translation_size - environment->parameter_count();
WriteTranslation(environment->outer(), translation);
int closure_id = DefineDeoptimizationLiteral(environment->closure());
int closure_id = *info()->closure() != *environment->closure()
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
switch (environment->frame_type()) {
case JS_FUNCTION:
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
@ -486,11 +484,19 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
case JS_CONSTRUCT:
translation->BeginConstructStubFrame(closure_id, translation_size);
break;
case JS_GETTER:
ASSERT(translation_size == 1);
ASSERT(height == 0);
translation->BeginGetterStubFrame(closure_id);
break;
case JS_SETTER:
ASSERT(translation_size == 2);
ASSERT(height == 0);
translation->BeginSetterStubFrame(closure_id);
break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
default:
UNREACHABLE();
}
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
@ -502,7 +508,8 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
translation->MarkDuplicate();
AddToTranslation(translation,
environment->spilled_registers()[value->index()],
environment->HasTaggedValueAt(i));
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i));
} else if (
value->IsDoubleRegister() &&
environment->spilled_double_registers()[value->index()] != NULL) {
@ -510,18 +517,23 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
AddToTranslation(
translation,
environment->spilled_double_registers()[value->index()],
false,
false);
}
}
AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
AddToTranslation(translation,
value,
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i));
}
}
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged) {
bool is_tagged,
bool is_uint32) {
if (op == NULL) {
// TODO(twuerthinger): Introduce marker operands to indicate that this value
// is not present and must be reconstructed from the deoptimizer. Currently
@ -530,6 +542,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
} else if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
} else if (is_uint32) {
translation->StoreUint32StackSlot(op->index());
} else {
translation->StoreInt32StackSlot(op->index());
}
@ -543,6 +557,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
Register reg = ToRegister(op);
if (is_tagged) {
translation->StoreRegister(reg);
} else if (is_uint32) {
translation->StoreUint32Register(reg);
} else {
translation->StoreInt32Register(reg);
}
@ -550,8 +566,8 @@ void LCodeGen::AddToTranslation(Translation* translation,
DoubleRegister reg = ToDoubleRegister(op);
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
int src_index = DefineDeoptimizationLiteral(literal);
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
int src_index = DefineDeoptimizationLiteral(constant->handle());
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
@ -698,13 +714,13 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
}
data->SetLiteralArray(*literals);
data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
// Populate the deoptimization entries.
for (int i = 0; i < length; i++) {
LEnvironment* env = deoptimizations_[i];
data->SetAstId(i, Smi::FromInt(env->ast_id()));
data->SetAstId(i, env->ast_id());
data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
data->SetArgumentsStackHeight(i,
Smi::FromInt(env->arguments_stack_height()));
@ -1528,6 +1544,13 @@ void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
}
void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
Register result = ToRegister(instr->result());
Register map = ToRegister(instr->InputAt(0));
__ EnumLength(result, map);
}
void LCodeGen::DoElementsKind(LElementsKind* instr) {
Register result = ToRegister(instr->result());
Register input = ToRegister(instr->InputAt(0));
@ -1574,11 +1597,10 @@ void LCodeGen::DoDateField(LDateField* instr) {
ASSERT(!scratch.is(scratch0()));
ASSERT(!scratch.is(object));
#ifdef DEBUG
__ AbortIfSmi(object);
__ tst(object, Operand(kSmiTagMask));
DeoptimizeIf(eq, instr->environment());
__ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
__ Assert(eq, "Trying to get date field from non-date.");
#endif
DeoptimizeIf(ne, instr->environment());
if (index->value() == 0) {
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
@ -1642,6 +1664,68 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
HMathMinMax::Operation operation = instr->hydrogen()->operation();
Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
if (instr->hydrogen()->representation().IsInteger32()) {
Register left_reg = ToRegister(left);
Operand right_op = (right->IsRegister() || right->IsConstantOperand())
? ToOperand(right)
: Operand(EmitLoadRegister(right, ip));
Register result_reg = ToRegister(instr->result());
__ cmp(left_reg, right_op);
if (!result_reg.is(left_reg)) {
__ mov(result_reg, left_reg, LeaveCC, condition);
}
__ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
} else {
ASSERT(instr->hydrogen()->representation().IsDouble());
DoubleRegister left_reg = ToDoubleRegister(left);
DoubleRegister right_reg = ToDoubleRegister(right);
DoubleRegister result_reg = ToDoubleRegister(instr->result());
Label check_nan_left, check_zero, return_left, return_right, done;
__ VFPCompareAndSetFlags(left_reg, right_reg);
__ b(vs, &check_nan_left);
__ b(eq, &check_zero);
__ b(condition, &return_left);
__ b(al, &return_right);
__ bind(&check_zero);
__ VFPCompareAndSetFlags(left_reg, 0.0);
__ b(ne, &return_left); // left == right != 0.
// At this point, both left and right are either 0 or -0.
if (operation == HMathMinMax::kMathMin) {
// We could use a single 'vorr' instruction here if we had NEON support.
__ vneg(left_reg, left_reg);
__ vsub(result_reg, left_reg, right_reg);
__ vneg(result_reg, result_reg);
} else {
// Since we operate on +0 and/or -0, vadd and vand have the same effect;
// the decision for vadd is easy because vand is a NEON instruction.
__ vadd(result_reg, left_reg, right_reg);
}
__ b(al, &done);
__ bind(&check_nan_left);
__ VFPCompareAndSetFlags(left_reg, left_reg);
__ b(vs, &return_left); // left == NaN.
__ bind(&return_right);
if (!right_reg.is(result_reg)) {
__ vmov(result_reg, right_reg);
}
__ b(al, &done);
__ bind(&return_left);
if (!left_reg.is(result_reg)) {
__ vmov(result_reg, left_reg);
}
__ bind(&done);
}
}
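The signed-zero handling above deserves a note: when left == right == 0 a plain compare cannot distinguish +0 from -0, yet min(+0, -0) must be -0 and max(+0, -0) must be +0. IEEE addition and subtraction give exactly that, max is left + right (the vadd), and min is -((-left) - right) (the vneg/vsub/vneg sequence, standing in for a sign-bit OR). A minimal sketch of the arithmetic on the zero-only case:

#include <cstdio>
#include <cmath>

double MinZero(double l, double r) { return -((-l) - r); }  // sign-bit OR
double MaxZero(double l, double r) { return l + r; }        // sign-bit AND

int main() {
  std::printf("%d %d\n", std::signbit(MinZero(+0.0, -0.0)),
                         std::signbit(MaxZero(+0.0, -0.0)));  // 1 0
}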
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
@ -2152,9 +2236,7 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
if (FLAG_debug_code) {
__ AbortIfNotString(input);
}
__ AbortIfNotString(input);
__ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
__ IndexFromHash(result, result);
@ -2390,12 +2472,18 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Register temp = ToRegister(instr->TempAt(0));
ASSERT(temp.is(r4));
__ LoadHeapObject(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 4;
static const int kAdditionalDelta = 5;
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
__ bind(&before_push_delta);
__ BlockConstPoolFor(kAdditionalDelta);
__ mov(temp, Operand(delta * kPointerSize));
// The mov above can generate one or two instructions. The delta was computed
// for two instructions, so we need to pad here in case of one instruction.
if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
__ nop();
}
__ StoreToSafepointRegisterSlot(temp, temp);
CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
@ -2574,9 +2662,9 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Handle<String> name,
LEnvironment* env) {
LookupResult lookup(isolate());
type->LookupInDescriptors(NULL, *name, &lookup);
type->LookupDescriptor(NULL, *name, &lookup);
ASSERT(lookup.IsFound() || lookup.IsCacheable());
if (lookup.IsFound() && lookup.type() == FIELD) {
if (lookup.IsField()) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
int offset = index * kPointerSize;
if (index < 0) {
@ -2588,7 +2676,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
__ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
__ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
}
} else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
} else if (lookup.IsConstantFunction()) {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
__ LoadHeapObject(result, function);
} else {
@ -2772,15 +2860,31 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
Register elements = ToRegister(instr->elements());
Register key = EmitLoadRegister(instr->key(), scratch0());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
Register store_base = scratch;
int offset = 0;
// Load the result.
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
uint32_t offset = FixedArray::kHeaderSize +
(instr->additional_index() << kPointerSizeLog2);
__ ldr(result, FieldMemOperand(scratch, offset));
if (instr->key()->IsConstantOperand()) {
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
instr->additional_index());
store_base = elements;
} else {
Register key = EmitLoadRegister(instr->key(), scratch0());
// Even though the HLoadKeyedFastElement instruction forces the input
// representation for the key to be an integer, the input gets replaced
// during bound check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
if (instr->hydrogen()->key()->representation().IsTagged()) {
__ add(scratch, elements,
Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
}
offset = FixedArray::OffsetOfElementAt(instr->additional_index());
}
__ ldr(result, FieldMemOperand(store_base, offset));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
@ -2804,8 +2908,9 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
DwVfpRegister result = ToDoubleRegister(instr->result());
Register scratch = scratch0();
int shift_size =
ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
? (element_size_shift - kSmiTagSize) : element_size_shift;
int constant_key = 0;
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
@ -2817,14 +2922,15 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
}
Operand operand = key_is_constant
? Operand(((constant_key + instr->additional_index()) << shift_size) +
? Operand(((constant_key + instr->additional_index()) <<
element_size_shift) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(elements, elements, operand);
if (!key_is_constant) {
__ add(elements, elements,
Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
(instr->additional_index() << shift_size)));
(instr->additional_index() << element_size_shift)));
}
if (instr->hydrogen()->RequiresHoleCheck()) {
@ -2837,6 +2943,42 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
}
MemOperand LCodeGen::PrepareKeyedOperand(Register key,
Register base,
bool key_is_constant,
int constant_key,
int element_size,
int shift_size,
int additional_index,
int additional_offset) {
if (additional_index != 0 && !key_is_constant) {
additional_index *= 1 << (element_size - shift_size);
__ add(scratch0(), key, Operand(additional_index));
}
if (key_is_constant) {
return MemOperand(base,
(constant_key << element_size) + additional_offset);
}
if (additional_index == 0) {
if (shift_size >= 0) {
return MemOperand(base, key, LSL, shift_size);
} else {
ASSERT_EQ(-1, shift_size);
return MemOperand(base, key, LSR, 1);
}
}
if (shift_size >= 0) {
return MemOperand(base, scratch0(), LSL, shift_size);
} else {
ASSERT_EQ(-1, shift_size);
return MemOperand(base, scratch0(), LSR, 1);
}
}
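PrepareKeyedOperand centralizes the offset arithmetic used throughout the keyed loads and stores: a constant key folds into an immediate offset, a register key is scaled by the element size, and if the key is still a tagged smi (value << 1, which can happen after bounds-check elimination) the scale shift is reduced by kSmiTagSize so the tag doubles as part of the scaling. A minimal sketch of that computation, with illustrative names:

#include <cstdio>

const int kSmiTagSize = 1;

int ElementOffset(int key, bool key_is_tagged, int element_size_shift,
                  int additional_index, int header_offset) {
  int shift = key_is_tagged ? element_size_shift - kSmiTagSize
                            : element_size_shift;
  return (key << shift) + (additional_index << element_size_shift) +
         header_offset;
}

int main() {
  // Element 5 of an array of 4-byte slots behind an 8-byte header:
  std::printf("%d\n", ElementOffset(5, false, 2, 0, 8));      // 28
  std::printf("%d\n", ElementOffset(5 << 1, true, 2, 0, 8));  // 28, smi key
}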
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
LLoadKeyedSpecializedArrayElement* instr) {
Register external_pointer = ToRegister(instr->external_pointer());
@ -2852,15 +2994,17 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
} else {
key = ToRegister(instr->key());
}
int shift_size = ElementsKindToShiftSize(elements_kind);
int additional_offset = instr->additional_index() << shift_size;
int element_size_shift = ElementsKindToShiftSize(elements_kind);
int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
? (element_size_shift - kSmiTagSize) : element_size_shift;
int additional_offset = instr->additional_index() << element_size_shift;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
? Operand(constant_key << shift_size)
? Operand(constant_key << element_size_shift)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
@ -2871,15 +3015,10 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
}
} else {
Register result = ToRegister(instr->result());
if (instr->additional_index() != 0 && !key_is_constant) {
__ add(scratch0(), key, Operand(instr->additional_index()));
}
MemOperand mem_operand(key_is_constant
? MemOperand(external_pointer,
(constant_key << shift_size) + additional_offset)
: (instr->additional_index() == 0
? MemOperand(external_pointer, key, LSL, shift_size)
: MemOperand(external_pointer, scratch0(), LSL, shift_size)));
MemOperand mem_operand = PrepareKeyedOperand(
key, external_pointer, key_is_constant, constant_key,
element_size_shift, shift_size,
instr->additional_index(), additional_offset);
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
__ ldrsb(result, mem_operand);
@ -2899,11 +3038,10 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break;
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ ldr(result, mem_operand);
__ cmp(result, Operand(0x80000000));
// TODO(danno): we could be more clever here, perhaps having a special
// version of the stub that detects if the overflow case actually
// happens, and generate code that returns a double rather than int.
DeoptimizeIf(cs, instr->environment());
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ cmp(result, Operand(0x80000000));
DeoptimizeIf(cs, instr->environment());
}
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
@ -3089,7 +3227,7 @@ void LCodeGen::DoDrop(LDrop* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
__ LoadHeapObject(result, instr->hydrogen()->closure());
__ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
@ -3119,7 +3257,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register result = ToRegister(instr->result());
__ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
__ ldr(result, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
}
@ -3146,14 +3284,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ LoadHeapObject(r1, function);
}
// Change context if needed.
bool change_context =
(info()->closure()->context() != function->context()) ||
scope()->contains_with() ||
(scope()->num_heap_slots() > 0);
if (change_context) {
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
}
// Change context.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Set r0 to arguments count if adaption is not needed. Assumes that r0
// is available to write to at this point.
@ -3481,11 +3613,11 @@ void LCodeGen::DoRandom(LRandom* instr) {
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
__ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset));
// r2: FixedArray of the global context's random seeds
// r2: FixedArray of the native context's random seeds
// Load state[0].
__ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
@ -3775,8 +3907,40 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
HValue* value,
LOperand* operand) {
if (value->representation().IsTagged() && !value->type().IsSmi()) {
if (operand->IsRegister()) {
__ tst(ToRegister(operand), Operand(kSmiTagMask));
} else {
__ mov(ip, ToOperand(operand));
__ tst(ip, Operand(kSmiTagMask));
}
DeoptimizeIf(ne, environment);
}
}
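
For reference, a hedged standalone sketch of the smi test this helper emits via tst/ne; kSmiTagMask is 1 on 32-bit V8, so bit 0 distinguishes smis (0) from heap object pointers (1).

#include <cassert>
#include <cstdint>

const intptr_t kSmiTagMask = 1;  // bit 0: 0 = smi, 1 = heap object

bool IsSmi(intptr_t word) { return (word & kSmiTagMask) == 0; }

int main() {
  intptr_t tagged_42 = 42 << 1;   // smi encoding
  assert(IsSmi(tagged_42));
  assert(!IsSmi(tagged_42 | 1));  // heap-object bit pattern
  return 0;
}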
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
DeoptIfTaggedButNotSmi(instr->environment(),
instr->hydrogen()->length(),
instr->length());
DeoptIfTaggedButNotSmi(instr->environment(),
instr->hydrogen()->index(),
instr->index());
if (instr->index()->IsConstantOperand()) {
int constant_index =
ToInteger32(LConstantOperand::cast(instr->index()));
if (instr->hydrogen()->length()->representation().IsTagged()) {
__ mov(ip, Operand(Smi::FromInt(constant_index)));
} else {
__ mov(ip, Operand(constant_index));
}
__ cmp(ip, ToRegister(instr->length()));
} else {
__ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
}
DeoptimizeIf(hs, instr->environment());
}
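
The single cmp/DeoptimizeIf(hs, ...) pair suffices because the comparison is unsigned; an illustrative sketch (editorial, not from the patch):

#include <cassert>
#include <cstdint>

// "hs" after cmp(index, length) is exactly this unsigned test:
bool OutOfBounds(int32_t index, int32_t length) {
  return static_cast<uint32_t>(index) >= static_cast<uint32_t>(length);
}

int main() {
  assert(!OutOfBounds(0, 4) && !OutOfBounds(3, 4));
  assert(OutOfBounds(4, 4));
  assert(OutOfBounds(-1, 4));  // -1 reads as 0xFFFFFFFF unsigned
  return 0;
}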
@ -3786,31 +3950,37 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register elements = ToRegister(instr->object());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
Register scratch = scratch0();
Register store_base = scratch;
int offset = 0;
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
int offset =
(ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
+ FixedArray::kHeaderSize;
__ str(value, FieldMemOperand(elements, offset));
offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
instr->additional_index());
store_base = elements;
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
if (instr->additional_index() != 0) {
__ add(scratch,
scratch,
Operand(instr->additional_index() << kPointerSizeLog2));
// Even though the HLoadKeyedFastElement instruction forces the input
// representation for the key to be an integer, the input gets replaced
// during bound check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
if (instr->hydrogen()->key()->representation().IsTagged()) {
__ add(scratch, elements,
Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
}
__ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
offset = FixedArray::OffsetOfElementAt(instr->additional_index());
}
__ str(value, FieldMemOperand(store_base, offset));
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
__ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(key, store_base, Operand(offset - kHeapObjectTag));
__ RecordWrite(elements,
key,
value,
@ -3841,9 +4011,11 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
} else {
key = ToRegister(instr->key());
}
int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
? (element_size_shift - kSmiTagSize) : element_size_shift;
Operand operand = key_is_constant
? Operand((constant_key << shift_size) +
? Operand((constant_key << element_size_shift) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(scratch, elements, operand);
@ -3861,7 +4033,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
vs);
}
__ vstr(value, scratch, instr->additional_index() << shift_size);
__ vstr(value, scratch, instr->additional_index() << element_size_shift);
}
@ -3881,15 +4053,18 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
} else {
key = ToRegister(instr->key());
}
int shift_size = ElementsKindToShiftSize(elements_kind);
int additional_offset = instr->additional_index() << shift_size;
int element_size_shift = ElementsKindToShiftSize(elements_kind);
int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
? (element_size_shift - kSmiTagSize) : element_size_shift;
int additional_offset = instr->additional_index() << element_size_shift;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister value(ToDoubleRegister(instr->value()));
Operand operand(key_is_constant ? Operand(constant_key << shift_size)
: Operand(key, LSL, shift_size));
Operand operand(key_is_constant
? Operand(constant_key << element_size_shift)
: Operand(key, LSL, shift_size));
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
@ -3899,16 +4074,10 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
}
} else {
Register value(ToRegister(instr->value()));
if (instr->additional_index() != 0 && !key_is_constant) {
__ add(scratch0(), key, Operand(instr->additional_index()));
}
MemOperand mem_operand(key_is_constant
? MemOperand(external_pointer,
((constant_key + instr->additional_index())
<< shift_size))
: (instr->additional_index() == 0
? MemOperand(external_pointer, key, LSL, shift_size)
: MemOperand(external_pointer, scratch0(), LSL, shift_size)));
MemOperand mem_operand = PrepareKeyedOperand(
key, external_pointer, key_is_constant, constant_key,
element_size_shift, shift_size,
instr->additional_index(), additional_offset);
switch (elements_kind) {
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
@ -4131,12 +4300,26 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->InputAt(0);
LOperand* output = instr->result();
SwVfpRegister flt_scratch = double_scratch0().low();
__ vmov(flt_scratch, ToRegister(input));
__ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
}
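
vcvt_f64_u32 differs from its signed sibling only in how it reads the 32 input bits; an editorial sketch of the distinction that motivates the new LUint32ToDouble instruction:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t bits = 0x80000000u;
  // Unsigned view (what vcvt_f64_u32 computes):
  assert(static_cast<double>(bits) == 2147483648.0);
  // Signed view (what vcvt_f64_s32 would compute instead):
  assert(static_cast<double>(static_cast<int32_t>(bits)) == -2147483648.0);
  return 0;
}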
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI: public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
virtual void Generate() {
codegen()->DoDeferredNumberTagI(instr_,
instr_->InputAt(0),
SIGNED_INT32);
}
virtual LInstruction* instr() { return instr_; }
private:
LNumberTagI* instr_;
@ -4152,9 +4335,38 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
}
void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
class DeferredNumberTagU: public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
codegen()->DoDeferredNumberTagI(instr_,
instr_->InputAt(0),
UNSIGNED_INT32);
}
virtual LInstruction* instr() { return instr_; }
private:
LNumberTagU* instr_;
};
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
__ cmp(reg, Operand(Smi::kMaxValue));
__ b(hi, deferred->entry());
__ SmiTag(reg, reg);
__ bind(deferred->exit());
}
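
A hedged sketch of the threshold logic above: Smi::kMaxValue is 2^30 - 1 on 32-bit targets, and because the branch condition is unsigned (hi), values with the sign bit set also fall through to the deferred heap-number path.

#include <cassert>
#include <cstdint>

const uint32_t kSmiMaxValue = 0x3FFFFFFFu;  // 2^30 - 1 (32-bit V8)

bool NeedsHeapNumber(uint32_t value) { return value > kSmiMaxValue; }

int main() {
  assert(!NeedsHeapNumber(kSmiMaxValue));     // SmiTag suffices
  assert(NeedsHeapNumber(kSmiMaxValue + 1));  // deferred path
  assert(NeedsHeapNumber(0x80000000u));       // caught by the unsigned compare
  return 0;
}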
void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
LOperand* value,
IntegerSignedness signedness) {
Label slow;
Register src = ToRegister(instr->InputAt(0));
Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
DoubleRegister dbl_scratch = double_scratch0();
SwVfpRegister flt_scratch = dbl_scratch.low();
@ -4162,16 +4374,22 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
// There was overflow, so bits 30 and 31 of the original integer
// disagree. Try to allocate a heap number in new space and store
// the value in there. If that fails, call the runtime system.
Label done;
if (dst.is(src)) {
__ SmiUntag(src, dst);
__ eor(src, src, Operand(0x80000000));
if (signedness == SIGNED_INT32) {
// There was overflow, so bits 30 and 31 of the original integer
// disagree. Try to allocate a heap number in new space and store
// the value in there. If that fails, call the runtime system.
if (dst.is(src)) {
__ SmiUntag(src, dst);
__ eor(src, src, Operand(0x80000000));
}
__ vmov(flt_scratch, src);
__ vcvt_f64_s32(dbl_scratch, flt_scratch);
} else {
__ vmov(flt_scratch, src);
__ vcvt_f64_u32(dbl_scratch, flt_scratch);
}
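// Editorial note: in the SIGNED_INT32 case the input is the smi-tagged
// word whose tag shift overflowed, i.e. bits 30 and 31 of the original
// integer disagreed. SmiUntag (asr #1) recovers bits 30..0 but copies
// bit 30 into the sign position, so the eor with 0x80000000 restores
// the true sign bit. Example: 0x40000000 tags to 0x80000000, untags to
// 0xC0000000, and the eor yields 0x40000000 again.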
__ vmov(flt_scratch, src);
__ vcvt_f64_s32(dbl_scratch, flt_scratch);
if (FLAG_inline_new) {
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r5, r3, r4, r6, &slow);
@ -4751,7 +4969,7 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
Heap* heap = isolate()->heap();
Handle<FixedArray> literals(instr->environment()->closure()->literals());
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate_elements_kind();
@ -4771,12 +4989,12 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
DeoptimizeIf(ne, instr->environment());
}
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
// Set up the parameters to the stub/runtime call.
__ LoadHeapObject(r3, literals);
__ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
// Boilerplate already exists, constant elements are never accessed.
// Pass an empty fixed array.
__ mov(r1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
__ mov(r1, Operand(isolate()->factory()->empty_fixed_array()));
__ Push(r3, r2, r1);
// Pick the right runtime function or stub to call.
@ -4870,8 +5088,8 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
for (int i = 0; i < elements_length; i++) {
int64_t value = double_array->get_representation(i);
// We only support little endian mode...
int32_t value_low = value & 0xFFFFFFFF;
int32_t value_high = value >> 32;
int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
int32_t value_high = static_cast<int32_t>(value >> 32);
int total_offset =
elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
__ mov(r2, Operand(value_low));
@ -4984,15 +5202,13 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
Label materialized;
// Registers will be used as follows:
// r3 = JS function.
// r7 = literals array.
// r1 = regexp literal.
// r0 = regexp literal clone.
// r2 and r4-r6 are used as temporaries.
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r7, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
int literal_offset = FixedArray::kHeaderSize +
instr->hydrogen()->literal_index() * kPointerSize;
int literal_offset =
FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
__ LoadHeapObject(r7, instr->hydrogen()->literals());
__ ldr(r1, FieldMemOperand(r7, literal_offset));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r1, ip);
@ -5356,13 +5572,24 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register map = ToRegister(instr->map());
Register result = ToRegister(instr->result());
__ LoadInstanceDescriptors(map, result);
Register scratch = ToRegister(instr->scratch());
Label load_cache, done;
__ EnumLength(result, map);
__ cmp(result, Operand(Smi::FromInt(0)));
__ b(ne, &load_cache);
__ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
__ jmp(&done);
__ bind(&load_cache);
__ LoadInstanceDescriptors(map, result, scratch);
__ ldr(result,
FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ ldr(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmp(result, Operand(0));
DeoptimizeIf(eq, instr->environment());
__ bind(&done);
}
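// Editorial note: the new EnumLength(map) == 0 fast path hands back the
// canonical empty_fixed_array without touching the descriptor array;
// only maps with a non-zero enum length pay for LoadInstanceDescriptors
// plus the two loads through the enum cache.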

51
deps/v8/src/arm/lithium-codegen-arm.h

@ -43,26 +43,25 @@ class SafepointGenerator;
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info,
Zone* zone)
: chunk_(chunk),
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: zone_(info->zone()),
chunk_(static_cast<LPlatformChunk*>(chunk)),
masm_(assembler),
info_(info),
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
deoptimizations_(4, zone),
deopt_jump_table_(4, zone),
deoptimization_literals_(8, zone),
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
translations_(zone),
deferred_(8, zone),
translations_(info->zone()),
deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
safepoints_(zone),
zone_(zone),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
@ -115,7 +114,12 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
Token::Value op);
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredNumberTagI(LNumberTagI* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
void DoDeferredNumberTagI(LInstruction* instr,
LOperand* value,
IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LStackCheck* instr);
@ -133,6 +137,15 @@ class LCodeGen BASE_EMBEDDED {
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
MemOperand PrepareKeyedOperand(Register key,
Register base,
bool key_is_constant,
int constant_key,
int element_size,
int shift_size,
int additional_index,
int additional_offset);
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
@ -158,7 +171,7 @@ class LCodeGen BASE_EMBEDDED {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
LChunk* chunk() const { return chunk_; }
LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
@ -178,7 +191,7 @@ class LCodeGen BASE_EMBEDDED {
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
int GetParameterCount() const { return scope()->num_parameters(); }
void Abort(const char* format, ...);
void Abort(const char* reason);
void Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
@ -244,7 +257,8 @@ class LCodeGen BASE_EMBEDDED {
void AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged);
bool is_tagged,
bool is_uint32);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@ -289,6 +303,10 @@ class LCodeGen BASE_EMBEDDED {
bool deoptimize_on_minus_zero,
LEnvironment* env);
void DeoptIfTaggedButNotSmi(LEnvironment* environment,
HValue* value,
LOperand* operand);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
@ -350,7 +368,8 @@ class LCodeGen BASE_EMBEDDED {
void EnsureSpaceForLazyDeopt();
LChunk* const chunk_;
Zone* zone_;
LPlatformChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
@ -372,8 +391,6 @@ class LCodeGen BASE_EMBEDDED {
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
Zone* zone_;
// Compiles a set of parallel moves into a sequential list of moves.
LGapResolver resolver_;

207
deps/v8/src/arm/macro-assembler-arm.cc

@ -137,7 +137,19 @@ int MacroAssembler::CallSize(
int size = 2 * kInstrSize;
Instr mov_instr = cond | MOV | LeaveCC;
intptr_t immediate = reinterpret_cast<intptr_t>(target);
if (!Operand(immediate, rmode).is_single_instruction(mov_instr)) {
if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
size += kInstrSize;
}
return size;
}
int MacroAssembler::CallSizeNotPredictableCodeSize(
Address target, RelocInfo::Mode rmode, Condition cond) {
int size = 2 * kInstrSize;
Instr mov_instr = cond | MOV | LeaveCC;
intptr_t immediate = reinterpret_cast<intptr_t>(target);
if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
size += kInstrSize;
}
return size;
@ -179,7 +191,7 @@ void MacroAssembler::Call(Address target,
int MacroAssembler::CallSize(Handle<Code> code,
RelocInfo::Mode rmode,
unsigned ast_id,
TypeFeedbackId ast_id,
Condition cond) {
return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}
@ -187,12 +199,12 @@ int MacroAssembler::CallSize(Handle<Code> code,
void MacroAssembler::Call(Handle<Code> code,
RelocInfo::Mode rmode,
unsigned ast_id,
TypeFeedbackId ast_id,
Condition cond) {
Label start;
bind(&start);
ASSERT(RelocInfo::IsCodeTarget(rmode));
if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
SetRecordedAstId(ast_id);
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
@ -265,8 +277,8 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) {
void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
ASSERT(CpuFeatures::IsSupported(VFP3));
CpuFeatures::Scope scope(VFP3);
ASSERT(CpuFeatures::IsSupported(VFP2));
CpuFeatures::Scope scope(VFP2);
if (!dst.is(src)) {
vmov(dst, src);
}
@ -276,12 +288,12 @@ void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
Condition cond) {
if (!src2.is_reg() &&
!src2.must_use_constant_pool() &&
!src2.must_use_constant_pool(this) &&
src2.immediate() == 0) {
mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
} else if (!src2.is_single_instruction() &&
!src2.must_use_constant_pool() &&
} else if (!src2.is_single_instruction(this) &&
!src2.must_use_constant_pool(this) &&
CpuFeatures::IsSupported(ARMv7) &&
IsPowerOf2(src2.immediate() + 1)) {
ubfx(dst, src1, 0,
@ -296,7 +308,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
if (!CpuFeatures::IsSupported(ARMv7)) {
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
if (lsb != 0) {
@ -311,7 +323,7 @@ void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
if (!CpuFeatures::IsSupported(ARMv7)) {
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
int shift_up = 32 - lsb - width;
@ -339,7 +351,7 @@ void MacroAssembler::Bfi(Register dst,
ASSERT(lsb + width < 32);
ASSERT(!scratch.is(dst));
if (width == 0) return;
if (!CpuFeatures::IsSupported(ARMv7)) {
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, dst, Operand(mask));
and_(scratch, src, Operand((1 << width) - 1));
@ -353,7 +365,7 @@ void MacroAssembler::Bfi(Register dst,
void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
ASSERT(lsb < 32);
if (!CpuFeatures::IsSupported(ARMv7)) {
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, dst, Operand(mask));
} else {
@ -364,7 +376,7 @@ void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
Condition cond) {
if (!CpuFeatures::IsSupported(ARMv7)) {
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
ASSERT(!dst.is(pc) && !src.rm().is(pc));
ASSERT((satpos >= 0) && (satpos <= 31));
@ -672,7 +684,7 @@ void MacroAssembler::Ldrd(Register dst1, Register dst2,
ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
// Generate two ldr instructions if ldrd is not available.
if (CpuFeatures::IsSupported(ARMv7)) {
if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
CpuFeatures::Scope scope(ARMv7);
ldrd(dst1, dst2, src, cond);
} else {
@ -714,7 +726,7 @@ void MacroAssembler::Strd(Register src1, Register src2,
ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
// Generate two str instructions if strd is not available.
if (CpuFeatures::IsSupported(ARMv7)) {
if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
CpuFeatures::Scope scope(ARMv7);
strd(src1, src2, dst, cond);
} else {
@ -778,7 +790,7 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
void MacroAssembler::Vmov(const DwVfpRegister dst,
const double imm,
const Condition cond) {
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(CpuFeatures::IsEnabled(VFP2));
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
DoubleRepresentation value(imm);
@ -930,6 +942,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
}
void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(dst, d0);
} else {
@ -1338,31 +1351,32 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Check(ne, "we should not have an empty lexical context");
#endif
// Load the global context of the current context.
int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
// Load the native context of the current context.
int offset =
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
ldr(scratch, FieldMemOperand(scratch, offset));
ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
// Check the context is a global context.
// Check the context is a native context.
if (emit_debug_code()) {
// TODO(119): avoid push(holder_reg)/pop(holder_reg)
// Cannot use ip as a temporary in this verification code, because ip is
// clobbered as part of cmp with an object Operand.
push(holder_reg); // Temporarily save holder on the stack.
// Read the first word and compare to the global_context_map.
// Read the first word and compare to the native_context_map.
ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
LoadRoot(ip, Heap::kNativeContextMapRootIndex);
cmp(holder_reg, ip);
Check(eq, "JSGlobalObject::global_context should be a global context.");
Check(eq, "JSGlobalObject::native_context should be a native context.");
pop(holder_reg); // Restore holder.
}
// Check if both contexts are the same.
ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
cmp(scratch, Operand(ip));
b(eq, &same_contexts);
// Check the context is a global context.
// Check the context is a native context.
if (emit_debug_code()) {
// TODO(119): avoid push(holder_reg)/pop(holder_reg)
// Cannot use ip as a temporary in this verification code. Due to the fact
@ -1374,13 +1388,13 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Check(ne, "JSGlobalProxy::context() should not be null.");
ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
LoadRoot(ip, Heap::kNativeContextMapRootIndex);
cmp(holder_reg, ip);
Check(eq, "JSGlobalObject::global_context should be a global context.");
Check(eq, "JSGlobalObject::native_context should be a native context.");
// Restore ip is not needed. ip is reloaded below.
pop(holder_reg); // Restore holder.
// Restore ip to holder's context.
ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
}
// Check that the security token in the calling global object is
@ -1967,7 +1981,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
// scratch1 is now effective address of the double element
FloatingPointHelper::Destination destination;
if (CpuFeatures::IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP2)) {
destination = FloatingPointHelper::kVFPRegisters;
} else {
destination = FloatingPointHelper::kCoreRegisters;
@ -1984,7 +1998,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
scratch4,
s2);
if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatures::Scope scope(VFP3);
CpuFeatures::Scope scope(VFP2);
vstr(d0, scratch1, 0);
} else {
str(mantissa_reg, MemOperand(scratch1, 0));
@ -2135,7 +2149,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
Call(stub->GetCode(), RelocInfo::CODE_TARGET, TypeFeedbackId::None(), cond);
}
@ -2331,8 +2345,8 @@ void MacroAssembler::ConvertToInt32(Register source,
Register scratch2,
DwVfpRegister double_scratch,
Label *not_int32) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
sub(scratch, source, Operand(kHeapObjectTag));
vldr(double_scratch, scratch, HeapNumber::kValueOffset);
vcvt_s32_f64(double_scratch.low(), double_scratch);
@ -2427,8 +2441,8 @@ void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
Register scratch1,
Register scratch2,
CheckForInexactConversion check_inexact) {
ASSERT(CpuFeatures::IsSupported(VFP3));
CpuFeatures::Scope scope(VFP3);
ASSERT(CpuFeatures::IsSupported(VFP2));
CpuFeatures::Scope scope(VFP2);
Register prev_fpscr = scratch1;
Register scratch = scratch2;
@ -2546,7 +2560,7 @@ void MacroAssembler::EmitECMATruncate(Register result,
Register scratch,
Register input_high,
Register input_low) {
CpuFeatures::Scope scope(VFP3);
CpuFeatures::Scope scope(VFP2);
ASSERT(!input_high.is(result));
ASSERT(!input_low.is(result));
ASSERT(!input_low.is(input_high));
@ -2585,7 +2599,7 @@ void MacroAssembler::EmitECMATruncate(Register result,
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
if (CpuFeatures::IsSupported(ARMv7)) {
if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
ubfx(dst, src, kSmiTagSize, num_least_bits);
} else {
mov(dst, Operand(src, ASR, kSmiTagSize));
@ -2703,7 +2717,8 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
void MacroAssembler::GetBuiltinFunction(Register target,
Builtins::JavaScript id) {
// Load the builtins object into target register.
ldr(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
ldr(target,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
// Load the JavaScript builtin function from the builtins object.
ldr(target, FieldMemOperand(target,
@ -2869,8 +2884,9 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
ldr(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
ldr(scratch,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
// Check that the function's map is the same as the expected cached map.
ldr(scratch,
@ -2916,11 +2932,12 @@ void MacroAssembler::LoadInitialArrayMap(
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
ldr(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
// Load the global context from the global or builtins object.
ldr(function,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
ldr(function, FieldMemOperand(function,
GlobalObject::kGlobalContextOffset));
// Load the function from the global context.
GlobalObject::kNativeContextOffset));
// Load the function from the native context.
ldr(function, MemOperand(function, Context::SlotOffset(index)));
}
@ -3332,6 +3349,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(d0, dreg);
} else {
@ -3342,6 +3360,7 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
DoubleRegister dreg2) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
if (dreg2.is(d0)) {
ASSERT(!dreg1.is(d1));
@ -3360,6 +3379,7 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
Register reg) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(d0, dreg);
Move(r0, reg);
@ -3664,67 +3684,80 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
// In 0-255 range, round and truncate.
bind(&in_bounds);
Vmov(temp_double_reg, 0.5);
vadd(temp_double_reg, input_reg, temp_double_reg);
vcvt_u32_f64(temp_double_reg.low(), temp_double_reg);
vmov(result_reg, temp_double_reg.low());
// Save FPSCR.
vmrs(ip);
// Set rounding mode to round to the nearest integer by clearing bits[23:22].
bic(result_reg, ip, Operand(kVFPRoundingModeMask));
vmsr(result_reg);
vcvt_s32_f64(input_reg.low(), input_reg, kFPSCRRounding);
vmov(result_reg, input_reg.low());
// Restore FPSCR.
vmsr(ip);
bind(&done);
}
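
The replacement sequence changes the rounding rule, not just the instruction mix; a hedged sketch, using the C library's default round-to-nearest-even mode to stand in for FPSCR bits [23:22] == 00:

#include <cassert>
#include <cmath>

int main() {
  // Old sequence: add 0.5, then truncate -- ties always round up.
  assert(static_cast<int>(2.5 + 0.5) == 3);
  // New sequence: convert under round-to-nearest(-even), as set via FPSCR.
  assert(std::nearbyint(2.5) == 2.0);
  assert(std::nearbyint(3.5) == 4.0);
  return 0;
}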
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
ldr(descriptors,
FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
Label not_smi;
JumpIfNotSmi(descriptors, &not_smi);
Register descriptors,
Register scratch) {
Register temp = descriptors;
ldr(temp, FieldMemOperand(map, Map::kTransitionsOrBackPointerOffset));
Label ok, fail;
CheckMap(temp,
scratch,
isolate()->factory()->fixed_array_map(),
&fail,
DONT_DO_SMI_CHECK);
ldr(descriptors, FieldMemOperand(temp, TransitionArray::kDescriptorsOffset));
jmp(&ok);
bind(&fail);
mov(descriptors, Operand(FACTORY->empty_descriptor_array()));
bind(&not_smi);
bind(&ok);
}
void MacroAssembler::EnumLength(Register dst, Register map) {
STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
and_(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
}
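
EnumLength masks rather than shifts because the field starts at bit 0 of the smi-tagged bit field 3; a sketch under the assumption of an 11-bit field (the real width comes from Map::EnumLengthBits):

#include <cassert>
#include <cstdint>

const uint32_t kEnumLengthMask = (1u << 11) - 1;  // assumed width

// Smi::FromInt(kMask) masks the payload while preserving the smi tag:
uint32_t EnumLengthAsSmi(uint32_t bit_field3_as_smi) {
  return bit_field3_as_smi & (kEnumLengthMask << 1);
}

int main() {
  uint32_t bf3 = 7u << 1;                     // smi-encoded enum length 7
  assert(EnumLengthAsSmi(bf3) == (7u << 1));  // result is still a smi
  return 0;
}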
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
Label next;
// Preload a couple of values used in the loop.
Register empty_fixed_array_value = r6;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
Register empty_descriptor_array_value = r7;
LoadRoot(empty_descriptor_array_value,
Heap::kEmptyDescriptorArrayRootIndex);
mov(r1, r0);
bind(&next);
Label next, start;
mov(r2, r0);
// Check that there are no elements. Register r1 contains the
// current JS object we've reached through the prototype chain.
ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
cmp(r2, empty_fixed_array_value);
b(ne, call_runtime);
// Check if the enum length field is properly initialized, indicating that
// there is an enum cache.
ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
// Check that instance descriptors are not empty so that we can
// check for an enum cache. Leave the map in r2 for the subsequent
// prototype load.
ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset));
JumpIfSmi(r3, call_runtime);
EnumLength(r3, r1);
cmp(r3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
b(eq, call_runtime);
// Check that there is an enum cache in the non-empty instance
// descriptors (r3). This is the case if the next enumeration
// index field does not contain a smi.
ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset));
JumpIfSmi(r3, call_runtime);
jmp(&start);
bind(&next);
ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
// For all objects but the receiver, check that the cache is empty.
Label check_prototype;
cmp(r1, r0);
b(eq, &check_prototype);
ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset));
cmp(r3, empty_fixed_array_value);
EnumLength(r3, r1);
cmp(r3, Operand(Smi::FromInt(0)));
b(ne, call_runtime);
bind(&start);
// Check that there are no elements. Register r2 contains the current JS
// object we've reached through the prototype chain.
ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
cmp(r2, empty_fixed_array_value);
b(ne, call_runtime);
// Load the prototype from the map and loop if non-null.
bind(&check_prototype);
ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset));
cmp(r1, null_value);
ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
cmp(r2, null_value);
b(ne, &next);
}
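// Editorial summary of the rewritten walk: the receiver itself may have
// any valid enum length except Map::kInvalidEnumCache, every prototype
// must report an enum length of zero (no enumerable own properties),
// and every object on the chain must have the empty fixed array as its
// elements; any violation branches to call_runtime.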

28
deps/v8/src/arm/macro-assembler-arm.h

@ -110,17 +110,18 @@ class MacroAssembler: public Assembler {
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
static int CallSize(Register target, Condition cond = al);
void Call(Register target, Condition cond = al);
static int CallSize(Address target,
RelocInfo::Mode rmode,
Condition cond = al);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
static int CallSizeNotPredictableCodeSize(Address target,
RelocInfo::Mode rmode,
Condition cond = al);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
static int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId,
Condition cond = al);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = al);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = al);
void Ret(Condition cond = al);
@ -499,8 +500,8 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the global context if the map in register
// map_in_out is the cached Array map in the global context of
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
@ -1268,7 +1269,10 @@ class MacroAssembler: public Assembler {
DoubleRegister temp_double_reg);
void LoadInstanceDescriptors(Register map, Register descriptors);
void LoadInstanceDescriptors(Register map,
Register descriptors,
Register scratch);
void EnumLength(Register dst, Register map);
// Activation support.
void EnterFrame(StackFrame::Type type);
@ -1376,7 +1380,7 @@ inline MemOperand ContextOperand(Register context, int index) {
inline MemOperand GlobalObjectOperand() {
return ContextOperand(cp, Context::GLOBAL_INDEX);
return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
}

9
deps/v8/src/arm/regexp-macro-assembler-arm.h

@ -35,14 +35,7 @@ namespace v8 {
namespace internal {
#ifdef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerARM: public RegExpMacroAssembler {
public:
RegExpMacroAssemblerARM();
virtual ~RegExpMacroAssemblerARM();
};
#else // V8_INTERPRETED_REGEXP
#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerARM(Mode mode, int registers_to_save, Zone* zone);

88
deps/v8/src/arm/simulator-arm.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -276,7 +276,7 @@ void ArmDebugger::Debug() {
// make them invisible to all commands.
UndoBreakpoints();
while (!done) {
while (!done && !sim_->has_bad_pc()) {
if (last_pc != sim_->get_pc()) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
@ -945,73 +945,31 @@ unsigned int Simulator::get_s_register(int sreg) const {
}
void Simulator::set_s_register_from_float(int sreg, const float flt) {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
// Read the bits from the single precision floating point value
// into the unsigned integer element of vfp_register[] given by index=sreg.
char buffer[sizeof(vfp_register[0])];
memcpy(buffer, &flt, sizeof(vfp_register[0]));
memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0]));
}
template<class InputType, int register_size>
void Simulator::SetVFPRegister(int reg_index, const InputType& value) {
ASSERT(reg_index >= 0);
if (register_size == 1) ASSERT(reg_index < num_s_registers);
if (register_size == 2) ASSERT(reg_index < num_d_registers);
void Simulator::set_s_register_from_sinteger(int sreg, const int sint) {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
// Read the bits from the integer value into the unsigned integer element of
// vfp_register[] given by index=sreg.
char buffer[sizeof(vfp_register[0])];
memcpy(buffer, &sint, sizeof(vfp_register[0]));
memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0]));
char buffer[register_size * sizeof(vfp_register[0])];
memcpy(buffer, &value, register_size * sizeof(vfp_register[0]));
memcpy(&vfp_register[reg_index * register_size], buffer,
register_size * sizeof(vfp_register[0]));
}
void Simulator::set_d_register_from_double(int dreg, const double& dbl) {
ASSERT((dreg >= 0) && (dreg < num_d_registers));
// Read the bits from the double precision floating point value into the two
// consecutive unsigned integer elements of vfp_register[] given by index
// 2*sreg and 2*sreg+1.
char buffer[2 * sizeof(vfp_register[0])];
memcpy(buffer, &dbl, 2 * sizeof(vfp_register[0]));
memcpy(&vfp_register[dreg * 2], buffer, 2 * sizeof(vfp_register[0]));
}
template<class ReturnType, int register_size>
ReturnType Simulator::GetFromVFPRegister(int reg_index) {
ASSERT(reg_index >= 0);
if (register_size == 1) ASSERT(reg_index < num_s_registers);
if (register_size == 2) ASSERT(reg_index < num_d_registers);
float Simulator::get_float_from_s_register(int sreg) {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
float sm_val = 0.0;
// Read the bits from the unsigned integer vfp_register[] array
// into the single precision floating point value and return it.
char buffer[sizeof(vfp_register[0])];
memcpy(buffer, &vfp_register[sreg], sizeof(vfp_register[0]));
memcpy(&sm_val, buffer, sizeof(vfp_register[0]));
return(sm_val);
}
int Simulator::get_sinteger_from_s_register(int sreg) {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
int sm_val = 0;
// Read the bits from the unsigned integer vfp_register[] array
// into the single precision floating point value and return it.
char buffer[sizeof(vfp_register[0])];
memcpy(buffer, &vfp_register[sreg], sizeof(vfp_register[0]));
memcpy(&sm_val, buffer, sizeof(vfp_register[0]));
return(sm_val);
}
double Simulator::get_double_from_d_register(int dreg) {
ASSERT((dreg >= 0) && (dreg < num_d_registers));
double dm_val = 0.0;
// Read the bits from the unsigned integer vfp_register[] array
// into the double precision floating point value and return it.
char buffer[2 * sizeof(vfp_register[0])];
memcpy(buffer, &vfp_register[2 * dreg], 2 * sizeof(vfp_register[0]));
memcpy(&dm_val, buffer, 2 * sizeof(vfp_register[0]));
return(dm_val);
ReturnType value = 0;
char buffer[register_size * sizeof(vfp_register[0])];
memcpy(buffer, &vfp_register[register_size * reg_index],
register_size * sizeof(vfp_register[0]));
memcpy(&value, buffer, register_size * sizeof(vfp_register[0]));
return value;
}
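
The two templates replace three near-identical memcpy pairs; as the inline wrappers added in simulator-arm.h (further down) show, register_size is 1 for the 32-bit s-registers and 2 for the 64-bit d-registers:

GetFromVFPRegister<float, 1>(sreg);    // was get_float_from_s_register
GetFromVFPRegister<int, 1>(sreg);      // was get_sinteger_from_s_register
GetFromVFPRegister<double, 2>(dreg);   // was get_double_from_d_register
SetVFPRegister<double, 2>(dreg, dbl);  // was set_d_register_from_double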
@ -2408,7 +2366,7 @@ void Simulator::DecodeType01(Instruction* instr) {
// Format(instr, "cmn'cond 'rn, 'imm");
alu_out = rn_val + shifter_operand;
SetNZFlags(alu_out);
SetCFlag(!CarryFrom(rn_val, shifter_operand));
SetCFlag(CarryFrom(rn_val, shifter_operand));
SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
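// Editorial note: CMN rn, op sets flags for rn + op, so C must be
// CarryFrom(rn_val, shifter_operand) itself; the dropped negation was a
// simulator-only bug. Example: CMN of 0xFFFFFFFF and 1 wraps to 0 with
// the carry flag set on real hardware, which the simulator now matches.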
} else {
// Other instructions matching this pattern are handled in the

36
deps/v8/src/arm/simulator-arm.h

@ -163,12 +163,30 @@ class Simulator {
// Support for VFP.
void set_s_register(int reg, unsigned int value);
unsigned int get_s_register(int reg) const;
void set_d_register_from_double(int dreg, const double& dbl);
double get_double_from_d_register(int dreg);
void set_s_register_from_float(int sreg, const float dbl);
float get_float_from_s_register(int sreg);
void set_s_register_from_sinteger(int reg, const int value);
int get_sinteger_from_s_register(int reg);
void set_d_register_from_double(int dreg, const double& dbl) {
SetVFPRegister<double, 2>(dreg, dbl);
}
double get_double_from_d_register(int dreg) {
return GetFromVFPRegister<double, 2>(dreg);
}
void set_s_register_from_float(int sreg, const float flt) {
SetVFPRegister<float, 1>(sreg, flt);
}
float get_float_from_s_register(int sreg) {
return GetFromVFPRegister<float, 1>(sreg);
}
void set_s_register_from_sinteger(int sreg, const int sint) {
SetVFPRegister<int, 1>(sreg, sint);
}
int get_sinteger_from_s_register(int sreg) {
return GetFromVFPRegister<int, 1>(sreg);
}
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int32_t value);
@ -332,6 +350,12 @@ class Simulator {
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();
template<class ReturnType, int register_size>
ReturnType GetFromVFPRegister(int reg_index);
template<class InputType, int register_size>
void SetVFPRegister(int reg_index, const InputType& value);
// Architecture state.
// Saturating instructions require a Q flag to indicate saturation.
// There is currently no way to read the CPSR directly, and thus read the Q

312
deps/v8/src/arm/stub-cache-arm.cc

@ -283,11 +283,12 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype) {
// Load the global or builtins object from the current context.
__ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
// Load the global context from the global or builtins object.
__ ldr(prototype,
FieldMemOperand(prototype, GlobalObject::kGlobalContextOffset));
// Load the function from the global context.
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
__ ldr(prototype,
FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
// Load the function from the native context.
__ ldr(prototype, MemOperand(prototype, Context::SlotOffset(index)));
// Load the initial map. The global functions all have initial maps.
__ ldr(prototype,
@ -304,13 +305,14 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
Label* miss) {
Isolate* isolate = masm->isolate();
// Check we're still in the same context.
__ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ Move(ip, isolate->global());
__ ldr(prototype,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ Move(ip, isolate->global_object());
__ cmp(prototype, ip);
__ b(ne, miss);
// Get the global function with the given index.
Handle<JSFunction> function(
JSFunction::cast(isolate->global_context()->get(index)));
JSFunction::cast(isolate->native_context()->get(index)));
// Load its initial map. The global functions all have initial maps.
__ Move(prototype, Handle<Map>(function->initial_map()));
// Load the prototype from the initial map.
@ -986,8 +988,8 @@ static void StoreIntAsFloat(MacroAssembler* masm,
Register fval,
Register scratch1,
Register scratch2) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
__ vmov(s0, ival);
__ add(scratch1, dst, Operand(wordoffset, LSL, 2));
__ vcvt_f32_s32(s0, s0);
@ -1230,6 +1232,45 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
}
void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Handle<AccessorInfo> callback,
Handle<String> name,
Label* miss) {
ASSERT(!receiver.is(scratch1));
ASSERT(!receiver.is(scratch2));
ASSERT(!receiver.is(scratch3));
// Load the properties dictionary.
Register dictionary = scratch1;
__ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
// Probe the dictionary.
Label probe_done;
StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
miss,
&probe_done,
dictionary,
name_reg,
scratch2,
scratch3);
__ bind(&probe_done);
// If probing finds an entry in the dictionary, scratch3 contains the
// pointer into the dictionary. Check that the value is the callback.
Register pointer = scratch3;
const int kElementsStartOffset = StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
const int kValueOffset = kElementsStartOffset + kPointerSize;
__ ldr(scratch2, FieldMemOperand(pointer, kValueOffset));
__ cmp(scratch2, Operand(callback));
__ b(ne, miss);
}
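// Editorial note: GeneratePositiveLookup leaves scratch3 pointing at the
// matching entry inside the StringDictionary. Entries are laid out as
// [key, value, details] triples starting at kElementsStartIndex, so the
// load at kValueOffset fetches the stored property value, and the
// compare confirms it is still exactly this AccessorInfo callback
// before the stub proceeds.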
void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Handle<JSObject> holder,
Register receiver,
@ -1237,6 +1278,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Handle<AccessorInfo> callback,
Handle<String> name,
Label* miss) {
@ -1247,6 +1289,11 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Register reg = CheckPrototypes(object, receiver, holder, scratch1,
scratch2, scratch3, name, miss);
if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
GenerateDictionaryLoadCallback(
reg, name_reg, scratch2, scratch3, scratch4, callback, name, miss);
}
// Build AccessorInfo::args_ list on the stack and push property name below
// the exit frame to make GC aware of them and store pointers to them.
__ push(receiver);
@ -1303,7 +1350,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// later.
bool compile_followup_inline = false;
if (lookup->IsFound() && lookup->IsCacheable()) {
if (lookup->type() == FIELD) {
if (lookup->IsField()) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsAccessorInfo()) {
@ -1377,7 +1424,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
miss);
}
if (lookup->type() == FIELD) {
if (lookup->IsField()) {
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
GenerateFastPropertyLoad(masm(), r0, holder_reg,
@ -1526,7 +1573,7 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
GenerateMissBranch();
// Return the generated code.
return GetCode(FIELD, name);
return GetCode(Code::FIELD, name);
}
@ -2071,7 +2118,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
GenerateMissBranch();
// Return the generated code.
return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@ -2089,11 +2136,11 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// -- sp[argc * 4] : receiver
// -----------------------------------
if (!CpuFeatures::IsSupported(VFP3)) {
if (!CpuFeatures::IsSupported(VFP2)) {
return Handle<Code>::null();
}
CpuFeatures::Scope scope_vfp3(VFP3);
CpuFeatures::Scope scope_vfp2(VFP2);
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
@ -2217,7 +2264,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
GenerateMissBranch();
// Return the generated code.
return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@ -2316,7 +2363,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
GenerateMissBranch();
// Return the generated code.
return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
}
@ -2533,7 +2580,7 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
GenerateMissBranch();
// Return the generated code.
return GetCode(INTERCEPTOR, name);
return GetCode(Code::INTERCEPTOR, name);
}
@ -2591,7 +2638,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
GenerateMissBranch();
// Return the generated code.
return GetCode(NORMAL, name);
return GetCode(Code::NORMAL, name);
}
@ -2619,14 +2666,17 @@ Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
return GetCode(transition.is_null()
? Code::FIELD
: Code::MAP_TRANSITION, name);
}
Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> object,
Handle<AccessorInfo> callback,
Handle<String> name) {
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<AccessorInfo> callback) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@ -2634,19 +2684,12 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// -- lr : return address
// -----------------------------------
Label miss;
// Check that the maps haven't changed.
__ JumpIfSmi(r1, &miss);
CheckPrototypes(receiver, r1, holder, r3, r4, r5, name, &miss);
// Check that the map of the object hasn't changed.
__ CheckMap(r1, r3, Handle<Map>(object->map()), &miss,
DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
__ CheckAccessGlobalProxy(r1, r3, &miss);
}
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Stub never generated for non-global objects that require access checks.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
__ push(r1); // receiver
__ mov(ip, Operand(callback)); // callback info
@ -2664,37 +2707,40 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(CALLBACKS, name);
return GetCode(Code::CALLBACKS, name);
}
Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
Handle<JSObject> receiver,
Handle<JSFunction> setter,
Handle<String> name) {
#undef __
#define __ ACCESS_MASM(masm)
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
// -- r2 : name
// -- lr : return address
// -----------------------------------
Label miss;
// Check that the map of the object hasn't changed.
__ CheckMap(r1, r3, Handle<Map>(receiver->map()), &miss, DO_SMI_CHECK,
ALLOW_ELEMENT_TRANSITION_MAPS);
{
FrameScope scope(masm(), StackFrame::INTERNAL);
FrameScope scope(masm, StackFrame::INTERNAL);
// Save value register, so we can restore it later.
__ push(r0);
// Call the JavaScript getter with the receiver and the value on the stack.
__ Push(r1, r0);
ParameterCount actual(1);
__ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
CALL_AS_METHOD);
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
__ Push(r1, r0);
ParameterCount actual(1);
__ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
CALL_AS_METHOD);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
}
// We have to return the passed value, not the return value of the setter.
__ pop(r0);
@ -2703,13 +2749,38 @@ Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
__ Ret();
}
#undef __
#define __ ACCESS_MASM(masm())
Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
// -- r2 : name
// -- lr : return address
// -----------------------------------
Label miss;
// Check that the maps haven't changed.
__ JumpIfSmi(r1, &miss);
CheckPrototypes(receiver, r1, holder, r3, r4, r5, name, &miss);
GenerateStoreViaSetter(masm(), setter);
__ bind(&miss);
Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(CALLBACKS, name);
return GetCode(Code::CALLBACKS, name);
}
@ -2754,7 +2825,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(INTERCEPTOR, name);
return GetCode(Code::INTERCEPTOR, name);
}
@ -2800,7 +2871,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(NORMAL, name);
return GetCode(Code::NORMAL, name);
}
@ -2835,7 +2906,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
return GetCode(NONEXISTENT, factory()->empty_string());
return GetCode(Code::NONEXISTENT, factory()->empty_string());
}
@ -2855,7 +2926,7 @@ Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
return GetCode(FIELD, name);
return GetCode(Code::FIELD, name);
}
@ -2870,16 +2941,53 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
// -- lr : return address
// -----------------------------------
Label miss;
GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, callback, name,
GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, r5, callback, name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
return GetCode(CALLBACKS, name);
return GetCode(Code::CALLBACKS, name);
}
#undef __
#define __ ACCESS_MASM(masm)
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
Handle<JSFunction> getter) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
// -- lr : return address
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
__ push(r0);
ParameterCount actual(0);
__ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
CALL_AS_METHOD);
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
}
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
__ Ret();
}
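
Note the null-handle convention shared by GenerateLoadViaGetter above and GenerateStoreViaSetter earlier: when the JSFunction handle is empty, no call is emitted and the helper only records the PC offset at which execution resumes after deoptimization. The builtins.cc hunk later in this diff is the caller of that mode; quoted here for context:

// From the builtins.cc hunk below: the *_ForDeopt builtins pass an empty
// handle, so the shared helpers record only the deopt continuation point
// instead of emitting a real getter/setter call.
static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
  LoadStubCompiler::GenerateLoadViaGetter(masm, Handle<JSFunction>());
}

static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
  StoreStubCompiler::GenerateStoreViaSetter(masm, Handle<JSFunction>());
}
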
#undef __
#define __ ACCESS_MASM(masm())
Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
Handle<String> name,
Handle<JSObject> receiver,
@ -2896,25 +3004,13 @@ Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
__ JumpIfSmi(r0, &miss);
CheckPrototypes(receiver, r0, holder, r3, r4, r1, name, &miss);
{
FrameScope scope(masm(), StackFrame::INTERNAL);
// Call the JavaScript getter with the receiver on the stack.
__ push(r0);
ParameterCount actual(0);
__ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
CALL_AS_METHOD);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
__ Ret();
GenerateLoadViaGetter(masm(), getter);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
return GetCode(CALLBACKS, name);
return GetCode(Code::CALLBACKS, name);
}
@ -2934,7 +3030,7 @@ Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
return GetCode(CONSTANT_FUNCTION, name);
return GetCode(Code::CONSTANT_FUNCTION, name);
}
@ -2956,7 +3052,7 @@ Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
return GetCode(INTERCEPTOR, name);
return GetCode(Code::INTERCEPTOR, name);
}
@ -2998,7 +3094,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
return GetCode(NORMAL, name);
return GetCode(Code::NORMAL, name);
}
@ -3021,7 +3117,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
return GetCode(FIELD, name);
return GetCode(Code::FIELD, name);
}
@ -3041,12 +3137,12 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
__ cmp(r0, Operand(name));
__ b(ne, &miss);
GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, callback, name,
GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, r5, callback, name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
return GetCode(CALLBACKS, name);
return GetCode(Code::CALLBACKS, name);
}
@ -3071,7 +3167,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
return GetCode(CONSTANT_FUNCTION, name);
return GetCode(Code::CONSTANT_FUNCTION, name);
}
@ -3097,7 +3193,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
return GetCode(INTERCEPTOR, name);
return GetCode(Code::INTERCEPTOR, name);
}
@ -3118,7 +3214,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
return GetCode(CALLBACKS, name);
return GetCode(Code::CALLBACKS, name);
}
@ -3144,7 +3240,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
return GetCode(CALLBACKS, name);
return GetCode(Code::CALLBACKS, name);
}
@ -3169,7 +3265,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
__ DecrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
return GetCode(CALLBACKS, name);
return GetCode(Code::CALLBACKS, name);
}
@ -3189,7 +3285,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(NORMAL, factory()->empty_string());
return GetCode(Code::NORMAL, factory()->empty_string());
}
@ -3217,7 +3313,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
__ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
// Return the generated code.
return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
@ -3256,7 +3352,9 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
return GetCode(transition.is_null()
? Code::FIELD
: Code::MAP_TRANSITION, name);
}
@ -3280,7 +3378,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(NORMAL, factory()->empty_string());
return GetCode(Code::NORMAL, factory()->empty_string());
}
@ -3319,7 +3417,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
__ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
// Return the generated code.
return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
}
@ -3549,8 +3647,8 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register scratch1,
DwVfpRegister double_scratch0,
Label* fail) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
Label key_ok;
// Check for smi or a smi inside a heap number. We convert the heap
// number and check if the conversion is exact and fits into the smi
@ -3636,8 +3734,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ ldr(value, MemOperand(r3, key, LSL, 1));
break;
case EXTERNAL_FLOAT_ELEMENTS:
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
__ add(r2, r3, Operand(key, LSL, 1));
__ vldr(s0, r2, 0);
} else {
@ -3645,8 +3743,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case EXTERNAL_DOUBLE_ELEMENTS:
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
__ add(r2, r3, Operand(key, LSL, 2));
__ vldr(d0, r2, 0);
} else {
@ -3697,8 +3795,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// Now we can use r0 for the result as key is not needed any more.
__ mov(r0, r5);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
__ vmov(s0, value);
__ vcvt_f64_s32(d0, s0);
__ sub(r3, r0, Operand(kHeapObjectTag));
@ -3725,8 +3823,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
Label box_int, done;
__ tst(value, Operand(0xC0000000));
__ b(ne, &box_int);
@ -3789,8 +3887,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
} else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
@ -3857,8 +3955,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ Ret();
}
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
@ -3983,7 +4081,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ add(r3, r3, Operand(key, LSL, 2));
// r3: effective address of the double element
FloatingPointHelper::Destination destination;
if (CpuFeatures::IsSupported(VFP3)) {
if (CpuFeatures::IsSupported(VFP2)) {
destination = FloatingPointHelper::kVFPRegisters;
} else {
destination = FloatingPointHelper::kCoreRegisters;
@ -3993,7 +4091,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
d0, r6, r7, // These are: double_dst, dst1, dst2.
r4, s2); // These are: scratch2, single_scratch.
if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatures::Scope scope(VFP3);
CpuFeatures::Scope scope(VFP2);
__ vstr(d0, r3, 0);
} else {
__ str(r6, MemOperand(r3, 0));
@ -4028,8 +4126,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
// vldr requires offset to be a multiple of 4 so we can not
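
The stub-cache hunks above all make the same change: feature guards drop from VFP3 to VFP2 because the guarded instructions (vldr, vstr, vcvt and friends) only require VFP2, and testing for VFP3 needlessly pushed VFP2-only ARM parts onto the core-register fallback. A toy model of the dispatch shape, with stand-ins for V8's CpuFeatures probe (not the real API):

#include <cstdio>

enum CpuFeature { VFP2, VFP3 };

static bool IsSupported(CpuFeature f) {
  return f == VFP2;  // model a VFP2-only CPU; real code probes the hardware
}

static const char* SelectDoubleLoad() {
  // Test the weakest sufficient feature; keep a fallback for its absence.
  if (IsSupported(VFP2)) return "vldr d0, [addr]  (VFP register path)";
  return "ldr r6/r7 pair   (core register path)";
}

int main() {
  std::puts(SelectDoubleLoad());
  return 0;
}
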

151 deps/v8/src/array.js

@ -777,78 +777,103 @@ function ArraySort(comparefn) {
}
};
var QuickSort = function QuickSort(a, from, to) {
// Insertion sort is faster for short arrays.
if (to - from <= 10) {
InsertionSort(a, from, to);
return;
var GetThirdIndex = function(a, from, to) {
var t_array = [];
// Use both 'from' and 'to' to determine the pivot candidates.
var increment = 200 + ((to - from) & 15);
for (var i = from + 1; i < to - 1; i += increment) {
t_array.push([i, a[i]]);
}
// Find a pivot as the median of first, last and middle element.
var v0 = a[from];
var v1 = a[to - 1];
var middle_index = from + ((to - from) >> 1);
var v2 = a[middle_index];
var c01 = %_CallFunction(receiver, v0, v1, comparefn);
if (c01 > 0) {
// v1 < v0, so swap them.
var tmp = v0;
v0 = v1;
v1 = tmp;
} // v0 <= v1.
var c02 = %_CallFunction(receiver, v0, v2, comparefn);
if (c02 >= 0) {
// v2 <= v0 <= v1.
var tmp = v0;
v0 = v2;
v2 = v1;
v1 = tmp;
} else {
// v0 <= v1 && v0 < v2
var c12 = %_CallFunction(receiver, v1, v2, comparefn);
if (c12 > 0) {
// v0 <= v2 < v1
var tmp = v1;
v1 = v2;
v2 = tmp;
t_array.sort(function(a, b) {
return %_CallFunction(receiver, a[1], b[1], comparefn) } );
var third_index = t_array[t_array.length >> 1][0];
return third_index;
}
var QuickSort = function QuickSort(a, from, to) {
var third_index = 0;
while (true) {
// Insertion sort is faster for short arrays.
if (to - from <= 10) {
InsertionSort(a, from, to);
return;
}
}
// v0 <= v1 <= v2
a[from] = v0;
a[to - 1] = v2;
var pivot = v1;
var low_end = from + 1; // Upper bound of elements lower than pivot.
var high_start = to - 1; // Lower bound of elements greater than pivot.
a[middle_index] = a[low_end];
a[low_end] = pivot;
// From low_end to i are elements equal to pivot.
// From i to high_start are elements that haven't been compared yet.
partition: for (var i = low_end + 1; i < high_start; i++) {
var element = a[i];
var order = %_CallFunction(receiver, element, pivot, comparefn);
if (order < 0) {
a[i] = a[low_end];
a[low_end] = element;
low_end++;
} else if (order > 0) {
do {
high_start--;
if (high_start == i) break partition;
var top_elem = a[high_start];
order = %_CallFunction(receiver, top_elem, pivot, comparefn);
} while (order > 0);
a[i] = a[high_start];
a[high_start] = element;
if (to - from > 1000) {
third_index = GetThirdIndex(a, from, to);
} else {
third_index = from + ((to - from) >> 1);
}
// Find a pivot as the median of first, last and middle element.
var v0 = a[from];
var v1 = a[to - 1];
var v2 = a[third_index];
var c01 = %_CallFunction(receiver, v0, v1, comparefn);
if (c01 > 0) {
// v1 < v0, so swap them.
var tmp = v0;
v0 = v1;
v1 = tmp;
} // v0 <= v1.
var c02 = %_CallFunction(receiver, v0, v2, comparefn);
if (c02 >= 0) {
// v2 <= v0 <= v1.
var tmp = v0;
v0 = v2;
v2 = v1;
v1 = tmp;
} else {
// v0 <= v1 && v0 < v2
var c12 = %_CallFunction(receiver, v1, v2, comparefn);
if (c12 > 0) {
// v0 <= v2 < v1
var tmp = v1;
v1 = v2;
v2 = tmp;
}
}
// v0 <= v1 <= v2
a[from] = v0;
a[to - 1] = v2;
var pivot = v1;
var low_end = from + 1; // Upper bound of elements lower than pivot.
var high_start = to - 1; // Lower bound of elements greater than pivot.
a[third_index] = a[low_end];
a[low_end] = pivot;
// From low_end to i are elements equal to pivot.
// From i to high_start are elements that haven't been compared yet.
partition: for (var i = low_end + 1; i < high_start; i++) {
var element = a[i];
var order = %_CallFunction(receiver, element, pivot, comparefn);
if (order < 0) {
element = a[i];
a[i] = a[low_end];
a[low_end] = element;
low_end++;
} else if (order > 0) {
do {
high_start--;
if (high_start == i) break partition;
var top_elem = a[high_start];
order = %_CallFunction(receiver, top_elem, pivot, comparefn);
} while (order > 0);
a[i] = a[high_start];
a[high_start] = element;
if (order < 0) {
element = a[i];
a[i] = a[low_end];
a[low_end] = element;
low_end++;
}
}
}
if (to - high_start < low_end - from) {
QuickSort(a, high_start, to);
to = low_end;
} else {
QuickSort(a, from, low_end);
from = high_start;
}
}
QuickSort(a, from, low_end);
QuickSort(a, high_start, to);
};
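
ArraySort's QuickSort changes in two ways: ranges above 1000 elements pick the middle pivot candidate via GetThirdIndex, which samples roughly every 200th element and returns the index of the median sample; and one of the two recursive calls becomes a loop that always recurses into the smaller partition, bounding stack depth to O(log n) regardless of comparefn. A compilable C++ sketch of the same structure (plain ints and operator< stand in for %_CallFunction with the caller's comparator; std::sort over tiny ranges stands in for InsertionSort):

#include <algorithm>
#include <utility>
#include <vector>

// For ranges over 1000 elements, sample roughly every 200th element and
// return the index of the median sample (GetThirdIndex in the diff).
static int GetThirdIndex(const std::vector<int>& a, int from, int to) {
  std::vector<std::pair<int, int> > samples;  // (value, index)
  int increment = 200 + ((to - from) & 15);
  for (int i = from + 1; i < to - 1; i += increment)
    samples.push_back(std::make_pair(a[i], i));
  std::sort(samples.begin(), samples.end());
  return samples[samples.size() / 2].second;
}

static void QuickSort(std::vector<int>& a, int from, int to) {
  while (to - from > 10) {
    int third = (to - from > 1000) ? GetThirdIndex(a, from, to)
                                   : from + ((to - from) >> 1);
    // Pivot is the median of the first, last and "third" elements.
    int pivot = std::max(std::min(a[from], a[to - 1]),
                         std::min(std::max(a[from], a[to - 1]), a[third]));
    // Three-way partition: [from,low_end) < pivot,
    // [low_end,high_start) == pivot, [high_start,to) > pivot.
    std::vector<int>::iterator mid =
        std::partition(a.begin() + from, a.begin() + to,
                       [&](int x) { return x < pivot; });
    std::vector<int>::iterator high =
        std::partition(mid, a.begin() + to, [&](int x) { return x == pivot; });
    int low_end = static_cast<int>(mid - a.begin());
    int high_start = static_cast<int>(high - a.begin());
    // Recurse into the smaller partition and loop on the larger one; this
    // replaces the old double recursion and bounds stack depth to O(log n).
    if (to - high_start < low_end - from) {
      QuickSort(a, high_start, to);
      to = low_end;
    } else {
      QuickSort(a, from, low_end);
      from = high_start;
    }
  }
  std::sort(a.begin() + from, a.begin() + to);  // InsertionSort stand-in
}
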
// Copy elements in the range 0..length from obj's prototype chain

51 deps/v8/src/assembler.cc

@ -141,7 +141,7 @@ int Label::pos() const {
// an iteration.
//
// The encoding relies on the fact that there are fewer than 14
// different non-compactly encoded relocation modes.
// different relocation modes using standard non-compact encoding.
//
// The first byte of a relocation record has a tag in its low 2 bits:
// Here are the record schemes, depending on the low tag and optional higher
@ -173,7 +173,9 @@ int Label::pos() const {
// 00 [4 bit middle_tag] 11 followed by
// 00 [6 bit pc delta]
//
// 1101: not used (would allow one more relocation mode to be added)
// 1101: constant pool. Used on ARM only for now.
// The format is: 11 1101 11
// signed int (size of the constant pool).
// 1110: long_data_record
// The format is: [2-bit data_type_tag] 1110 11
// signed intptr_t, lowest byte written first
@ -194,7 +196,7 @@ int Label::pos() const {
// dropped, and last non-zero chunk tagged with 1.)
const int kMaxRelocModes = 14;
const int kMaxStandardNonCompactModes = 14;
const int kTagBits = 2;
const int kTagMask = (1 << kTagBits) - 1;
@ -228,6 +230,9 @@ const int kNonstatementPositionTag = 1;
const int kStatementPositionTag = 2;
const int kCommentTag = 3;
const int kConstPoolExtraTag = kPCJumpExtraTag - 2;
const int kConstPoolTag = 3;
uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
// Return if the pc_delta can fit in kSmallPCDeltaBits bits.
@ -285,6 +290,15 @@ void RelocInfoWriter::WriteExtraTaggedIntData(int data_delta, int top_tag) {
}
}
void RelocInfoWriter::WriteExtraTaggedConstPoolData(int data) {
WriteExtraTag(kConstPoolExtraTag, kConstPoolTag);
for (int i = 0; i < kIntSize; i++) {
*--pos_ = static_cast<byte>(data);
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
data = data >> kBitsPerByte;
}
}
void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
WriteExtraTag(kDataJumpExtraTag, top_tag);
for (int i = 0; i < kIntptrSize; i++) {
@ -300,8 +314,8 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
byte* begin_pos = pos_;
#endif
ASSERT(rinfo->pc() - last_pc_ >= 0);
ASSERT(RelocInfo::NUMBER_OF_MODES - RelocInfo::LAST_COMPACT_ENUM <=
kMaxRelocModes);
ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM
<= kMaxStandardNonCompactModes);
// Use unsigned delta-encoding for pc.
uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
RelocInfo::Mode rmode = rinfo->rmode();
@ -347,6 +361,9 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
WriteExtraTaggedData(rinfo->data(), kCommentTag);
ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
} else if (RelocInfo::IsConstPool(rmode)) {
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
WriteExtraTaggedConstPoolData(static_cast<int>(rinfo->data()));
} else {
ASSERT(rmode > RelocInfo::LAST_COMPACT_ENUM);
int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
@ -397,6 +414,15 @@ void RelocIterator::AdvanceReadId() {
}
void RelocIterator::AdvanceReadConstPoolData() {
int x = 0;
for (int i = 0; i < kIntSize; i++) {
x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
}
rinfo_.data_ = x;
}
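
WriteExtraTaggedConstPoolData and AdvanceReadConstPoolData above store the constant pool size one byte at a time, lowest byte first, into a stream written backwards; the sign survives because right shift on a signed int is arithmetic (which the comment notes is tested in test-utils.cc). A self-contained round trip with kIntSize and kBitsPerByte fixed at their values, 4 and 8:

#include <cassert>
#include <cstdint>

int main() {
  uint8_t buf[4];

  int data = -12345;  // e.g. a constant pool size; the sign must survive
  uint8_t* wpos = buf + 4;  // the reloc stream grows downward (*--pos_)
  for (int i = 0; i < 4; i++) {
    *--wpos = static_cast<uint8_t>(data);  // lowest byte written first
    data = data >> 8;  // arithmetic shift on a signed int keeps the sign
  }

  uint8_t* rpos = buf + 4;  // RelocIterator also walks backward
  uint32_t x = 0;  // V8 accumulates in int; unsigned avoids shift overflow here
  for (int i = 0; i < 4; i++) {
    x |= static_cast<uint32_t>(*--rpos) << (i * 8);
  }
  assert(static_cast<int>(x) == -12345);
  return 0;
}
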
void RelocIterator::AdvanceReadPosition() {
int x = 0;
for (int i = 0; i < kIntSize; i++) {
@ -500,8 +526,7 @@ void RelocIterator::next() {
ASSERT(tag == kDefaultTag);
int extra_tag = GetExtraTag();
if (extra_tag == kPCJumpExtraTag) {
int top_tag = GetTopTag();
if (top_tag == kVariableLengthPCJumpTopTag) {
if (GetTopTag() == kVariableLengthPCJumpTopTag) {
AdvanceReadVariableLengthPCJump();
} else {
AdvanceReadPC();
@ -531,6 +556,13 @@ void RelocIterator::next() {
}
Advance(kIntptrSize);
}
} else if ((extra_tag == kConstPoolExtraTag) &&
(GetTopTag() == kConstPoolTag)) {
if (SetMode(RelocInfo::CONST_POOL)) {
AdvanceReadConstPoolData();
return;
}
Advance(kIntSize);
} else {
AdvanceReadPC();
int rmode = extra_tag + RelocInfo::LAST_COMPACT_ENUM;
@ -613,6 +645,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "external reference";
case RelocInfo::INTERNAL_REFERENCE:
return "internal reference";
case RelocInfo::CONST_POOL:
return "constant pool";
case RelocInfo::DEBUG_BREAK_SLOT:
#ifndef ENABLE_DEBUGGER_SUPPORT
UNREACHABLE();
@ -698,6 +732,7 @@ void RelocInfo::Verify() {
case STATEMENT_POSITION:
case EXTERNAL_REFERENCE:
case INTERNAL_REFERENCE:
case CONST_POOL:
case DEBUG_BREAK_SLOT:
case NONE:
break;
@ -1057,7 +1092,7 @@ ExternalReference ExternalReference::re_word_character_map() {
ExternalReference ExternalReference::address_of_static_offsets_vector(
Isolate* isolate) {
return ExternalReference(
OffsetsVector::static_offsets_vector_address(isolate));
reinterpret_cast<Address>(isolate->jsregexp_static_offsets_vector()));
}
ExternalReference ExternalReference::address_of_regexp_stack_memory_address(

15 deps/v8/src/assembler.h

@ -51,7 +51,6 @@ class ApiFunction;
namespace internal {
struct StatsCounter;
const unsigned kNoASTId = -1;
// -----------------------------------------------------------------------------
// Platform independent assembler base class.
@ -204,14 +203,19 @@ class RelocInfo BASE_EMBEDDED {
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
// Marks a constant pool. Only used on ARM.
// It uses a custom noncompact encoding.
CONST_POOL,
// add more as needed
// Pseudo-types
NUMBER_OF_MODES, // There are at most 14 modes with noncompact encoding.
NUMBER_OF_MODES, // There are at most 15 modes with noncompact encoding.
NONE, // never recorded
LAST_CODE_ENUM = DEBUG_BREAK,
LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL,
// Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding.
LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID
LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID,
LAST_STANDARD_NONCOMPACT_ENUM = INTERNAL_REFERENCE
};
@ -240,6 +244,9 @@ class RelocInfo BASE_EMBEDDED {
static inline bool IsComment(Mode mode) {
return mode == COMMENT;
}
static inline bool IsConstPool(Mode mode) {
return mode == CONST_POOL;
}
static inline bool IsPosition(Mode mode) {
return mode == POSITION || mode == STATEMENT_POSITION;
}
@ -416,6 +423,7 @@ class RelocInfoWriter BASE_EMBEDDED {
inline void WriteTaggedPC(uint32_t pc_delta, int tag);
inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
inline void WriteExtraTaggedIntData(int data_delta, int top_tag);
inline void WriteExtraTaggedConstPoolData(int data);
inline void WriteExtraTaggedData(intptr_t data_delta, int top_tag);
inline void WriteTaggedData(intptr_t data_delta, int tag);
inline void WriteExtraTag(int extra_tag, int top_tag);
@ -466,6 +474,7 @@ class RelocIterator: public Malloced {
void ReadTaggedPC();
void AdvanceReadPC();
void AdvanceReadId();
void AdvanceReadConstPoolData();
void AdvanceReadPosition();
void AdvanceReadData();
void AdvanceReadVariableLengthPCJump();

60 deps/v8/src/ast.cc

@ -85,8 +85,8 @@ VariableProxy::VariableProxy(Isolate* isolate, Variable* var)
VariableProxy::VariableProxy(Isolate* isolate,
Handle<String> name,
bool is_this,
int position,
Interface* interface)
Interface* interface,
int position)
: Expression(isolate),
name_(name),
var_(NULL),
@ -125,7 +125,6 @@ Assignment::Assignment(Isolate* isolate,
value_(value),
pos_(pos),
binary_operation_(NULL),
compound_load_id_(kNoNumber),
assignment_id_(GetNextId(isolate)),
block_start_(false),
block_end_(false),
@ -156,6 +155,11 @@ bool FunctionLiteral::AllowsLazyCompilation() {
}
bool FunctionLiteral::AllowsLazyCompilationWithoutContext() {
return scope()->AllowsLazyCompilationWithoutContext();
}
int FunctionLiteral::start_position() const {
return scope()->start_position();
}
@ -429,7 +433,7 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
zone);
} else if (oracle->LoadIsMegamorphicWithTypeInfo(this)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_);
oracle->CollectKeyedReceiverTypes(PropertyFeedbackId(), &receiver_types_);
}
}
@ -438,7 +442,8 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle,
Zone* zone) {
Property* prop = target()->AsProperty();
ASSERT(prop != NULL);
is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
TypeFeedbackId id = AssignmentFeedbackId();
is_monomorphic_ = oracle->StoreIsMonomorphicNormal(id);
receiver_types_.Clear();
if (prop->key()->IsPropertyName()) {
Literal* lit_key = prop->key()->AsLiteral();
@ -447,24 +452,26 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle,
oracle->StoreReceiverTypes(this, name, &receiver_types_);
} else if (is_monomorphic_) {
// Record receiver type for monomorphic keyed stores.
receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this), zone);
} else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
receiver_types_.Add(oracle->StoreMonomorphicReceiverType(id), zone);
} else if (oracle->StoreIsMegamorphicWithTypeInfo(id)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_);
oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
}
}
void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
Zone* zone) {
is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
TypeFeedbackId id = CountStoreFeedbackId();
is_monomorphic_ = oracle->StoreIsMonomorphicNormal(id);
receiver_types_.Clear();
if (is_monomorphic_) {
// Record receiver type for monomorphic keyed stores.
receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this), zone);
} else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
receiver_types_.Add(
oracle->StoreMonomorphicReceiverType(id), zone);
} else if (oracle->StoreIsMegamorphicWithTypeInfo(id)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_);
oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
}
}
@ -498,7 +505,7 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
}
LookupResult lookup(type->GetIsolate());
while (true) {
type->LookupInDescriptors(NULL, *name, &lookup);
type->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsFound()) {
switch (lookup.type()) {
case CONSTANT_FUNCTION:
@ -513,10 +520,9 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
case INTERCEPTOR:
// We don't know the target.
return false;
case MAP_TRANSITION:
case CONSTANT_TRANSITION:
case NULL_DESCRIPTOR:
// Perhaps something interesting is up in the prototype chain...
case TRANSITION:
case NONEXISTENT:
UNREACHABLE();
break;
}
}
@ -1027,6 +1033,14 @@ CaseClause::CaseClause(Isolate* isolate,
increase_node_count(); \
add_flag(kDontSelfOptimize); \
}
#define DONT_CACHE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
add_flag(kDontOptimize); \
add_flag(kDontInline); \
add_flag(kDontSelfOptimize); \
add_flag(kDontCache); \
}
REGULAR_NODE(VariableDeclaration)
REGULAR_NODE(FunctionDeclaration)
@ -1041,6 +1055,7 @@ REGULAR_NODE(SwitchStatement)
REGULAR_NODE(Conditional)
REGULAR_NODE(Literal)
REGULAR_NODE(ObjectLiteral)
REGULAR_NODE(RegExpLiteral)
REGULAR_NODE(Assignment)
REGULAR_NODE(Throw)
REGULAR_NODE(Property)
@ -1057,10 +1072,13 @@ REGULAR_NODE(CallNew)
// LOOKUP variables only result from constructs that cannot be inlined anyway.
REGULAR_NODE(VariableProxy)
// We currently do not optimize any modules. Note in particular, that module
// instance objects associated with ModuleLiterals are allocated during
// scope resolution, and references to them are embedded into the code.
// That code may hence neither be cached nor re-compiled.
DONT_OPTIMIZE_NODE(ModuleDeclaration)
DONT_OPTIMIZE_NODE(ImportDeclaration)
DONT_OPTIMIZE_NODE(ExportDeclaration)
DONT_OPTIMIZE_NODE(ModuleLiteral)
DONT_OPTIMIZE_NODE(ModuleVariable)
DONT_OPTIMIZE_NODE(ModulePath)
DONT_OPTIMIZE_NODE(ModuleUrl)
@ -1070,15 +1088,16 @@ DONT_OPTIMIZE_NODE(TryFinallyStatement)
DONT_OPTIMIZE_NODE(DebuggerStatement)
DONT_OPTIMIZE_NODE(SharedFunctionInfoLiteral)
DONT_INLINE_NODE(FunctionLiteral)
DONT_INLINE_NODE(RegExpLiteral) // TODO(1322): Allow materialized literals.
DONT_INLINE_NODE(ArrayLiteral) // TODO(1322): Allow materialized literals.
DONT_INLINE_NODE(FunctionLiteral)
DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)
DONT_SELFOPTIMIZE_NODE(ForStatement)
DONT_SELFOPTIMIZE_NODE(ForInStatement)
DONT_CACHE_NODE(ModuleLiteral)
void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
increase_node_count();
if (node->is_jsruntime()) {
@ -1099,6 +1118,7 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
#undef DONT_OPTIMIZE_NODE
#undef DONT_INLINE_NODE
#undef DONT_SELFOPTIMIZE_NODE
#undef DONT_CACHE_NODE
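
The new DONT_CACHE_NODE tier exists because, per the comment above, module instance objects are allocated during scope resolution and embedded directly in generated code, so such code may neither be cached nor recompiled. A compilable miniature of the macro pattern (the flag names are V8's; the visitor is otherwise simplified):

#include <set>

enum Flag { kDontOptimize, kDontInline, kDontSelfOptimize, kDontCache };

struct AstConstructionVisitor {
  int node_count;
  std::set<Flag> flags;
  AstConstructionVisitor() : node_count(0) {}
  void increase_node_count() { node_count++; }
  void add_flag(Flag f) { flags.insert(f); }

#define DONT_CACHE_NODE(NodeType)   \
  void Visit##NodeType() {          \
    increase_node_count();          \
    add_flag(kDontOptimize);        \
    add_flag(kDontInline);          \
    add_flag(kDontSelfOptimize);    \
    add_flag(kDontCache);           \
  }

  DONT_CACHE_NODE(ModuleLiteral)  // expands to VisitModuleLiteral()
#undef DONT_CACHE_NODE
};

int main() {
  AstConstructionVisitor v;
  v.VisitModuleLiteral();
  return v.flags.count(kDontCache) == 1 ? 0 : 1;
}
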
Handle<String> Literal::ToString() {

401 deps/v8/src/ast.h (file diff suppressed because it is too large)

793 deps/v8/src/bootstrapper.cc (file diff suppressed because it is too large)

4 deps/v8/src/bootstrapper.h

@ -104,7 +104,7 @@ class Bootstrapper {
void DetachGlobal(Handle<Context> env);
// Reattach an outer global object to an environment.
void ReattachGlobal(Handle<Context> env, Handle<Object> global_object);
void ReattachGlobal(Handle<Context> env, Handle<JSGlobalProxy> global_proxy);
// Traverses the pointers for memory management.
void Iterate(ObjectVisitor* v);
@ -126,7 +126,7 @@ class Bootstrapper {
char* AllocateAutoDeletedArray(int bytes);
// Used for new context creation.
bool InstallExtensions(Handle<Context> global_context,
bool InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions);
SourceCodeCache* extensions_cache() { return &extensions_cache_; }

43 deps/v8/src/builtins.cc

@ -35,6 +35,7 @@
#include "ic-inl.h"
#include "heap-profiler.h"
#include "mark-compact.h"
#include "stub-cache.h"
#include "vm-state-inl.h"
namespace v8 {
@ -199,11 +200,11 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
array->set_length(Smi::FromInt(0));
array->set_elements(heap->empty_fixed_array());
if (!FLAG_smi_only_arrays) {
Context* global_context = isolate->context()->global_context();
Context* native_context = isolate->context()->native_context();
if (array->GetElementsKind() == GetInitialFastElementsKind() &&
!global_context->js_array_maps()->IsUndefined()) {
!native_context->js_array_maps()->IsUndefined()) {
FixedArray* map_array =
FixedArray::cast(global_context->js_array_maps());
FixedArray::cast(native_context->js_array_maps());
array->set_map(Map::cast(map_array->
get(TERMINAL_FAST_ELEMENTS_KIND)));
}
@ -312,7 +313,7 @@ BUILTIN(InternalArrayCodeGeneric) {
return ArrayCodeGenericCommon(
&args,
isolate,
isolate->context()->global_context()->internal_array_function());
isolate->context()->native_context()->internal_array_function());
}
@ -320,7 +321,7 @@ BUILTIN(ArrayCodeGeneric) {
return ArrayCodeGenericCommon(
&args,
isolate,
isolate->context()->global_context()->array_function());
isolate->context()->native_context()->array_function());
}
@ -402,7 +403,7 @@ static FixedArray* LeftTrimFixedArray(Heap* heap,
static bool ArrayPrototypeHasNoElements(Heap* heap,
Context* global_context,
Context* native_context,
JSObject* array_proto) {
// This method depends on non writability of Object and Array prototype
// fields.
@ -411,7 +412,7 @@ static bool ArrayPrototypeHasNoElements(Heap* heap,
Object* proto = array_proto->GetPrototype();
if (proto == heap->null_value()) return false;
array_proto = JSObject::cast(proto);
if (array_proto != global_context->initial_object_prototype()) return false;
if (array_proto != native_context->initial_object_prototype()) return false;
if (array_proto->elements() != heap->empty_fixed_array()) return false;
return array_proto->GetPrototype()->IsNull();
}
@ -461,11 +462,11 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
JSArray* receiver) {
if (!FLAG_clever_optimizations) return false;
Context* global_context = heap->isolate()->context()->global_context();
Context* native_context = heap->isolate()->context()->native_context();
JSObject* array_proto =
JSObject::cast(global_context->array_function()->prototype());
JSObject::cast(native_context->array_function()->prototype());
return receiver->GetPrototype() == array_proto &&
ArrayPrototypeHasNoElements(heap, global_context, array_proto);
ArrayPrototypeHasNoElements(heap, native_context, array_proto);
}
@ -476,7 +477,7 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
HandleScope handleScope(isolate);
Handle<Object> js_builtin =
GetProperty(Handle<JSObject>(isolate->global_context()->builtins()),
GetProperty(Handle<JSObject>(isolate->native_context()->builtins()),
name);
Handle<JSFunction> function = Handle<JSFunction>::cast(js_builtin);
int argc = args.length() - 1;
@ -706,7 +707,7 @@ BUILTIN(ArraySlice) {
// Array.slice(arguments, ...) is quite a common idiom (notably more
// than 50% of invocations in Web apps). Treat it in C++ as well.
Map* arguments_map =
isolate->context()->global_context()->arguments_boilerplate()->map();
isolate->context()->native_context()->arguments_boilerplate()->map();
bool is_arguments_object_with_fast_elements =
receiver->IsJSObject()
@ -943,10 +944,10 @@ BUILTIN(ArraySplice) {
BUILTIN(ArrayConcat) {
Heap* heap = isolate->heap();
Context* global_context = isolate->context()->global_context();
Context* native_context = isolate->context()->native_context();
JSObject* array_proto =
JSObject::cast(global_context->array_function()->prototype());
if (!ArrayPrototypeHasNoElements(heap, global_context, array_proto)) {
JSObject::cast(native_context->array_function()->prototype());
if (!ArrayPrototypeHasNoElements(heap, native_context, array_proto)) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
@ -1148,6 +1149,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
result = heap->undefined_value();
} else {
result = *reinterpret_cast<Object**>(*value);
result->VerifyApiCallResultType();
}
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
@ -1224,6 +1226,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
result = heap->undefined_value();
} else {
result = *reinterpret_cast<Object**>(*value);
result->VerifyApiCallResultType();
}
}
// Check for exceptions and return result.
@ -1291,6 +1294,11 @@ static void Generate_LoadIC_Normal(MacroAssembler* masm) {
}
static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
LoadStubCompiler::GenerateLoadViaGetter(masm, Handle<JSFunction>());
}
static void Generate_KeyedLoadIC_Initialize(MacroAssembler* masm) {
KeyedLoadIC::GenerateInitialize(masm);
}
@ -1388,6 +1396,11 @@ static void Generate_StoreIC_GlobalProxy_Strict(MacroAssembler* masm) {
}
static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
StoreStubCompiler::GenerateStoreViaSetter(masm, Handle<JSFunction>());
}
static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
KeyedStoreIC::GenerateGeneric(masm, kNonStrictMode);
}

10 deps/v8/src/builtins.h

@ -66,6 +66,8 @@ enum BuiltinExtraArguments {
#define BUILTIN_LIST_A(V) \
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(InRecompileQueue, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
@ -80,6 +82,8 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(LazyRecompile, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(ParallelRecompile, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
@ -118,6 +122,8 @@ enum BuiltinExtraArguments {
V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
\
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
@ -152,6 +158,8 @@ enum BuiltinExtraArguments {
V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
kStrictMode) \
V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC, \
kStrictMode) \
V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
kStrictMode) \
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
@ -347,6 +355,8 @@ class Builtins {
static void Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
BuiltinExtraArguments extra_args);
static void Generate_InRecompileQueue(MacroAssembler* masm);
static void Generate_ParallelRecompile(MacroAssembler* masm);
static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
static void Generate_JSConstructStubApi(MacroAssembler* masm);

8 deps/v8/src/checks.h

@ -284,4 +284,12 @@ extern bool FLAG_enable_slow_asserts;
#define ASSERT_NOT_NULL(p) ASSERT_NE(NULL, p)
// "Extra checks" are lightweight checks that are enabled in some release
// builds.
#ifdef ENABLE_EXTRA_CHECKS
#define EXTRA_CHECK(condition) CHECK(condition)
#else
#define EXTRA_CHECK(condition) ((void) 0)
#endif
#endif // V8_CHECKS_H_
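
The new EXTRA_CHECK macro gives call sites a single name whose cost is decided per build: a full CHECK when ENABLE_EXTRA_CHECKS is defined, a no-op otherwise. It plausibly backs release-build validations like the result->VerifyApiCallResultType() calls added in the builtins.cc hunk above (an assumption; that function's body is not part of this diff). A self-contained sketch, with a toy CHECK standing in for V8's fatal-error macro:

#include <cstdio>
#include <cstdlib>

#define CHECK(condition)                                       \
  do {                                                         \
    if (!(condition)) {                                        \
      std::fprintf(stderr, "Check failed: %s\n", #condition);  \
      std::abort();                                            \
    }                                                          \
  } while (false)

#ifdef ENABLE_EXTRA_CHECKS
#define EXTRA_CHECK(condition) CHECK(condition)
#else
#define EXTRA_CHECK(condition) ((void) 0)
#endif

int main(int argc, char** argv) {
  (void) argc;
  (void) argv;
  EXTRA_CHECK(argc > 0);  // costs nothing unless built with -DENABLE_EXTRA_CHECKS
  return 0;
}
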

24 deps/v8/src/code-stubs.cc

@ -194,7 +194,7 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
flags));
if (probe->IsCode()) {
*code_out = Code::cast(*probe);
ASSERT(op_ == (*code_out)->compare_operation());
ASSERT(op_ == (*code_out)->compare_operation() + Token::EQ);
return true;
}
return false;
@ -478,4 +478,26 @@ void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
KeyedStoreIC::GenerateRuntimeSetProperty(masm, strict_mode_);
}
FunctionEntryHook ProfileEntryHookStub::entry_hook_ = NULL;
void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
intptr_t stack_pointer) {
if (entry_hook_ != NULL)
entry_hook_(function, stack_pointer);
}
bool ProfileEntryHookStub::SetFunctionEntryHook(FunctionEntryHook entry_hook) {
// We don't allow setting a new entry hook over one that's
// already active, as the hooks won't stack.
if (entry_hook != 0 && entry_hook_ != 0)
return false;
entry_hook_ = entry_hook;
return true;
}
} } // namespace v8::internal
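
SetFunctionEntryHook's contract above is set-once: a live hook can be cleared but not replaced, since the trampoline calls exactly one hook. A runnable miniature of those semantics, using the (intptr_t, intptr_t) signature visible in EntryHookTrampoline:

#include <cstddef>
#include <cstdint>

typedef void (*FunctionEntryHook)(intptr_t function, intptr_t stack_pointer);

static FunctionEntryHook entry_hook_ = NULL;

// Refuses to replace a live hook with another non-NULL one; hooks don't stack.
static bool SetFunctionEntryHook(FunctionEntryHook entry_hook) {
  if (entry_hook != NULL && entry_hook_ != NULL) return false;
  entry_hook_ = entry_hook;  // passing NULL clears the hook
  return true;
}

static void MyHook(intptr_t, intptr_t) {}

int main() {
  bool installed = SetFunctionEntryHook(MyHook);  // true: slot was empty
  bool replaced = SetFunctionEntryHook(MyHook);   // false: one is active
  bool cleared = SetFunctionEntryHook(NULL);      // true: clearing is allowed
  return (installed && !replaced && cleared) ? 0 : 1;
}
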

45 deps/v8/src/code-stubs.h

@ -73,7 +73,8 @@ namespace internal {
V(DebuggerStatement) \
V(StringDictionaryLookup) \
V(ElementsTransitionAndStore) \
V(StoreArrayLiteralElement)
V(StoreArrayLiteralElement) \
V(ProfileEntryHook)
// List of code stubs only used on ARM platforms.
#ifdef V8_TARGET_ARCH_ARM
@ -161,10 +162,6 @@ class CodeStub BASE_EMBEDDED {
// Lookup the code in the (possibly custom) cache.
bool FindCodeInCache(Code** code_out);
protected:
static const int kMajorBits = 6;
static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
private:
// Nonvirtual wrapper around the stub-specific Generate function. Call
// this function to set up the macro assembler and generate the code.
@ -222,8 +219,9 @@ class CodeStub BASE_EMBEDDED {
MajorKeyBits::encode(MajorKey());
}
class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
class MinorKeyBits: public BitField<uint32_t, kMajorBits, kMinorBits> {};
class MajorKeyBits: public BitField<uint32_t, 0, kStubMajorKeyBits> {};
class MinorKeyBits: public BitField<uint32_t,
kStubMajorKeyBits, kStubMinorKeyBits> {}; // NOLINT
friend class BreakPointIterator;
};
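
The key-packing change above replaces CodeStub's private kMajorBits/kMinorBits (6 and kBitsPerInt - kSmiTagSize - 6, per the deleted lines) with shared kStubMajorKeyBits/kStubMinorKeyBits constants; the BitField layout itself is unchanged. A minimal model of that packing, assuming the new constants keep the old values:

#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value & kMask) >> shift);
  }
};

// Values taken from the deleted kMajorBits/kMinorBits (kBitsPerInt == 32,
// kSmiTagSize == 1); the new central constants presumably match.
static const int kStubMajorKeyBits = 6;
static const int kStubMinorKeyBits = 32 - 1 - kStubMajorKeyBits;

typedef BitField<uint32_t, 0, kStubMajorKeyBits> MajorKeyBits;
typedef BitField<uint32_t, kStubMajorKeyBits, kStubMinorKeyBits> MinorKeyBits;

int main() {
  uint32_t key = MinorKeyBits::encode(1234) | MajorKeyBits::encode(17);
  assert(MajorKeyBits::decode(key) == 17);
  assert(MinorKeyBits::decode(key) == 1234);
  return 0;
}
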
@ -498,7 +496,7 @@ class ICCompareStub: public CodeStub {
virtual void FinishCode(Handle<Code> code) {
code->set_compare_state(state_);
code->set_compare_operation(op_);
code->set_compare_operation(op_ - Token::EQ);
}
virtual CodeStub::Major MajorKey() { return CompareIC; }
@ -1145,6 +1143,37 @@ class StoreArrayLiteralElementStub : public CodeStub {
DISALLOW_COPY_AND_ASSIGN(StoreArrayLiteralElementStub);
};
class ProfileEntryHookStub : public CodeStub {
public:
explicit ProfileEntryHookStub() {}
// The profile entry hook function is not allowed to cause a GC.
virtual bool SometimesSetsUpAFrame() { return false; }
// Generates a call to the entry hook if it's enabled.
static void MaybeCallEntryHook(MacroAssembler* masm);
// Sets or unsets the entry hook function. Returns true on success,
// false on an attempt to replace a non-NULL entry hook with another
// non-NULL hook.
static bool SetFunctionEntryHook(FunctionEntryHook entry_hook);
private:
static void EntryHookTrampoline(intptr_t function,
intptr_t stack_pointer);
Major MajorKey() { return ProfileEntryHook; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
// The current function entry hook.
static FunctionEntryHook entry_hook_;
DISALLOW_COPY_AND_ASSIGN(ProfileEntryHookStub);
};
} } // namespace v8::internal
#endif // V8_CODE_STUBS_H_

27 deps/v8/src/collection.js

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -79,7 +79,12 @@ function SetDelete(key) {
if (IS_UNDEFINED(key)) {
key = undefined_sentinel;
}
return %SetDelete(this, key);
if (%SetHas(this, key)) {
%SetDelete(this, key);
return true;
} else {
return false;
}
}
@ -124,7 +129,7 @@ function MapHas(key) {
if (IS_UNDEFINED(key)) {
key = undefined_sentinel;
}
return !IS_UNDEFINED(%MapGet(this, key));
return %MapHas(this, key);
}
@ -136,12 +141,7 @@ function MapDelete(key) {
if (IS_UNDEFINED(key)) {
key = undefined_sentinel;
}
if (!IS_UNDEFINED(%MapGet(this, key))) {
%MapSet(this, key, void 0);
return true;
} else {
return false;
}
return %MapDelete(this, key);
}
@ -186,7 +186,7 @@ function WeakMapHas(key) {
if (!IS_SPEC_OBJECT(key)) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
return !IS_UNDEFINED(%WeakMapGet(this, key));
return %WeakMapHas(this, key);
}
@ -198,12 +198,7 @@ function WeakMapDelete(key) {
if (!IS_SPEC_OBJECT(key)) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
if (!IS_UNDEFINED(%WeakMapGet(this, key))) {
%WeakMapSet(this, key, void 0);
return true;
} else {
return false;
}
return %WeakMapDelete(this, key);
}
// -------------------------------------------------------------------
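
These collection.js changes make has and delete report presence correctly: Map and WeakMap now call dedicated %MapHas/%MapDelete/%WeakMapHas/%WeakMapDelete runtime functions, and Set.delete checks %SetHas first so it can return a boolean. The old get-and-overwrite emulation could not distinguish a key mapped to undefined from an absent key. The diff itself is JavaScript; as a C++ analogy, the new delete contract matches std::map::erase:

#include <map>
#include <string>

int main() {
  std::map<std::string, int> m;
  m["a"] = 1;
  bool deleted = m.erase("a") > 0;  // true: the key existed and was removed
  bool missing = m.erase("a") > 0;  // false: already gone
  return (deleted && !missing) ? 0 : 1;
}
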

38 deps/v8/src/compilation-cache.cc

@ -165,10 +165,12 @@ bool CompilationCacheScript::HasOrigin(
// be cached in the same script generation. Currently the first use
// will be cached, but subsequent code from different source / line
// won't.
Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
Handle<Object> name,
int line_offset,
int column_offset) {
Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(
Handle<String> source,
Handle<Object> name,
int line_offset,
int column_offset,
Handle<Context> context) {
Object* result = NULL;
int generation;
@ -177,7 +179,7 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
{ HandleScope scope(isolate());
for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
Handle<Object> probe(table->Lookup(*source), isolate());
Handle<Object> probe(table->Lookup(*source, *context), isolate());
if (probe->IsSharedFunctionInfo()) {
Handle<SharedFunctionInfo> function_info =
Handle<SharedFunctionInfo>::cast(probe);
@ -214,7 +216,7 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
ASSERT(HasOrigin(shared, name, line_offset, column_offset));
// If the script was found in a later generation, we promote it to
// the first generation to let it survive longer in the cache.
if (generation != 0) Put(source, shared);
if (generation != 0) Put(source, context, shared);
isolate()->counters()->compilation_cache_hits()->Increment();
return shared;
} else {
@ -226,25 +228,28 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
MaybeObject* CompilationCacheScript::TryTablePut(
Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info) {
Handle<CompilationCacheTable> table = GetFirstTable();
return table->Put(*source, *function_info);
return table->Put(*source, *context, *function_info);
}
Handle<CompilationCacheTable> CompilationCacheScript::TablePut(
Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info) {
CALL_HEAP_FUNCTION(isolate(),
TryTablePut(source, function_info),
TryTablePut(source, context, function_info),
CompilationCacheTable);
}
void CompilationCacheScript::Put(Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info) {
HandleScope scope(isolate());
SetFirstTable(TablePut(source, function_info));
SetFirstTable(TablePut(source, context, function_info));
}
@ -380,15 +385,17 @@ void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
}
Handle<SharedFunctionInfo> CompilationCache::LookupScript(Handle<String> source,
Handle<Object> name,
int line_offset,
int column_offset) {
Handle<SharedFunctionInfo> CompilationCache::LookupScript(
Handle<String> source,
Handle<Object> name,
int line_offset,
int column_offset,
Handle<Context> context) {
if (!IsEnabled()) {
return Handle<SharedFunctionInfo>::null();
}
return script_.Lookup(source, name, line_offset, column_offset);
return script_.Lookup(source, name, line_offset, column_offset, context);
}
@ -426,12 +433,13 @@ Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
void CompilationCache::PutScript(Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info) {
if (!IsEnabled()) {
return;
}
script_.Put(source, function_info);
script_.Put(source, context, function_info);
}

21 deps/v8/src/compilation-cache.h

@ -98,16 +98,23 @@ class CompilationCacheScript : public CompilationSubCache {
Handle<SharedFunctionInfo> Lookup(Handle<String> source,
Handle<Object> name,
int line_offset,
int column_offset);
void Put(Handle<String> source, Handle<SharedFunctionInfo> function_info);
int column_offset,
Handle<Context> context);
void Put(Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info);
private:
MUST_USE_RESULT MaybeObject* TryTablePut(
Handle<String> source, Handle<SharedFunctionInfo> function_info);
Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info);
// Note: Returns a new hash table if operation results in expansion.
Handle<CompilationCacheTable> TablePut(
Handle<String> source, Handle<SharedFunctionInfo> function_info);
Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info);
bool HasOrigin(Handle<SharedFunctionInfo> function_info,
Handle<Object> name,
@ -122,7 +129,7 @@ class CompilationCacheScript : public CompilationSubCache {
// Sub-cache for eval scripts. Two caches for eval are used. One for eval calls
// in global contexts and one for eval calls in other contexts. The cache
// in native contexts and one for eval calls in other contexts. The cache
// considers the following pieces of information when checking for matching
// entries:
// 1. The source string.
@ -204,7 +211,8 @@ class CompilationCache {
Handle<SharedFunctionInfo> LookupScript(Handle<String> source,
Handle<Object> name,
int line_offset,
int column_offset);
int column_offset,
Handle<Context> context);
// Finds the shared function info for a source string for eval in a
// given context. Returns an empty handle if the cache doesn't
@ -223,6 +231,7 @@ class CompilationCache {
// Associate the (source, kind) pair to the shared function
// info. This may overwrite an existing mapping.
void PutScript(Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info);
// Associate the (source, context->closure()->shared(), kind) triple
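
The compilation-cache hunks above thread a Handle<Context> through Lookup, Put and TablePut, so a script's cache entry is now found only from the context that compiled it; this matches the global-context to native-context renaming visible in the builtins.cc and compiler.cc hunks. An illustrative stand-alone model of the new keying, not V8's actual hash table:

#include <cstddef>
#include <map>
#include <string>
#include <utility>

struct SharedFunctionInfo { /* compiled-script stand-in */ };

typedef int ContextId;  // stands in for Handle<Context> identity
typedef std::pair<std::string, ContextId> ScriptKey;

class ScriptCache {
 public:
  void Put(const std::string& source, ContextId context,
           SharedFunctionInfo* info) {
    table_[ScriptKey(source, context)] = info;
  }
  SharedFunctionInfo* Lookup(const std::string& source, ContextId context) {
    std::map<ScriptKey, SharedFunctionInfo*>::iterator it =
        table_.find(ScriptKey(source, context));
    return it == table_.end() ? NULL : it->second;
  }
 private:
  std::map<ScriptKey, SharedFunctionInfo*> table_;
};

int main() {
  ScriptCache cache;
  SharedFunctionInfo a, b;
  cache.Put("x = 1", 1, &a);
  cache.Put("x = 1", 2, &b);  // same source, different context: no collision
  return (cache.Lookup("x = 1", 1) == &a &&
          cache.Lookup("x = 1", 2) == &b) ? 0 : 1;
}
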

557 deps/v8/src/compiler.cc

@ -51,7 +51,7 @@ namespace v8 {
namespace internal {
CompilationInfo::CompilationInfo(Handle<Script> script)
CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone)
: isolate_(script->GetIsolate()),
flags_(LanguageModeField::encode(CLASSIC_MODE)),
function_(NULL),
@ -60,12 +60,15 @@ CompilationInfo::CompilationInfo(Handle<Script> script)
script_(script),
extension_(NULL),
pre_parse_data_(NULL),
osr_ast_id_(AstNode::kNoNumber) {
osr_ast_id_(BailoutId::None()),
zone_(zone),
deferred_handles_(NULL) {
Initialize(BASE);
}
CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
Zone* zone)
: isolate_(shared_info->GetIsolate()),
flags_(LanguageModeField::encode(CLASSIC_MODE) |
IsLazy::encode(true)),
@ -76,12 +79,14 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
script_(Handle<Script>(Script::cast(shared_info->script()))),
extension_(NULL),
pre_parse_data_(NULL),
osr_ast_id_(AstNode::kNoNumber) {
osr_ast_id_(BailoutId::None()),
zone_(zone),
deferred_handles_(NULL) {
Initialize(BASE);
}
CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
: isolate_(closure->GetIsolate()),
flags_(LanguageModeField::encode(CLASSIC_MODE) |
IsLazy::encode(true)),
@ -93,11 +98,19 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
script_(Handle<Script>(Script::cast(shared_info_->script()))),
extension_(NULL),
pre_parse_data_(NULL),
osr_ast_id_(AstNode::kNoNumber) {
context_(closure->context()),
osr_ast_id_(BailoutId::None()),
zone_(zone),
deferred_handles_(NULL) {
Initialize(BASE);
}
CompilationInfo::~CompilationInfo() {
delete deferred_handles_;
}
// Disable optimization for the rest of the compilation pipeline.
void CompilationInfo::DisableOptimization() {
bool is_optimizable_closure =
@ -118,7 +131,7 @@ bool CompilationInfo::ShouldSelfOptimize() {
FLAG_crankshaft &&
!function()->flags()->Contains(kDontSelfOptimize) &&
!function()->flags()->Contains(kDontOptimize) &&
function()->scope()->AllowsLazyRecompilation() &&
function()->scope()->AllowsLazyCompilation() &&
(shared_info().is_null() || !shared_info()->optimization_disabled());
}
@ -137,9 +150,8 @@ void CompilationInfo::AbortOptimization() {
// all. However crankshaft support recompilation of functions, so in this case
// the full compiler need not be be used if a debugger is attached, but only if
// break points has actually been set.
static bool is_debugging_active() {
static bool IsDebuggerActive(Isolate* isolate) {
#ifdef ENABLE_DEBUGGER_SUPPORT
Isolate* isolate = Isolate::Current();
return V8::UseCrankshaft() ?
isolate->debug()->has_break_points() :
isolate->debugger()->IsDebuggerActive();
@ -149,27 +161,32 @@ static bool is_debugging_active() {
}
static bool AlwaysFullCompiler() {
return FLAG_always_full_compiler || is_debugging_active();
static bool AlwaysFullCompiler(Isolate* isolate) {
return FLAG_always_full_compiler || IsDebuggerActive(isolate);
}
static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
void OptimizingCompiler::RecordOptimizationStats() {
Handle<JSFunction> function = info()->closure();
int opt_count = function->shared()->opt_count();
function->shared()->set_opt_count(opt_count + 1);
double ms = static_cast<double>(OS::Ticks() - start) / 1000;
double ms_creategraph =
static_cast<double>(time_taken_to_create_graph_) / 1000;
double ms_optimize = static_cast<double>(time_taken_to_optimize_) / 1000;
double ms_codegen = static_cast<double>(time_taken_to_codegen_) / 1000;
if (FLAG_trace_opt) {
PrintF("[optimizing: ");
function->PrintName();
PrintF(" / %" V8PRIxPTR, reinterpret_cast<intptr_t>(*function));
PrintF(" - took %0.3f ms]\n", ms);
PrintF(" - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph, ms_optimize,
ms_codegen);
}
if (FLAG_trace_opt_stats) {
static double compilation_time = 0.0;
static int compiled_functions = 0;
static int code_size = 0;
compilation_time += ms;
compilation_time += (ms_creategraph + ms_optimize + ms_codegen);
compiled_functions++;
code_size += function->shared()->SourceSize();
PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
@ -180,46 +197,54 @@ static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
}
// A return value of true indicates the compilation pipeline is still
// going, not necessarily that we optimized the code.
static bool MakeCrankshaftCode(CompilationInfo* info) {
// Test if we can optimize this function when asked to. We can only
// do this after the scopes are computed.
if (!V8::UseCrankshaft()) {
info->DisableOptimization();
}
OptimizingCompiler compiler(info);
OptimizingCompiler::Status status = compiler.CreateGraph();
// In case we are not optimizing simply return the code from
// the full code generator.
if (!info->IsOptimizing()) {
return FullCodeGenerator::MakeCode(info);
if (status != OptimizingCompiler::SUCCEEDED) {
return status != OptimizingCompiler::FAILED;
}
status = compiler.OptimizeGraph();
if (status != OptimizingCompiler::SUCCEEDED) {
status = compiler.AbortOptimization();
return status != OptimizingCompiler::FAILED;
}
status = compiler.GenerateAndInstallCode();
return status != OptimizingCompiler::FAILED;
}
// We should never arrive here if there is not code object on the
OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
ASSERT(V8::UseCrankshaft());
ASSERT(info()->IsOptimizing());
ASSERT(!info()->IsCompilingForDebugging());
// We should never arrive here if there is no code object on the
// shared function object.
Handle<Code> code(info->shared_info()->code());
Handle<Code> code(info()->shared_info()->code());
ASSERT(code->kind() == Code::FUNCTION);
// We should never arrive here if optimization has been disabled on the
// shared function info.
ASSERT(!info->shared_info()->optimization_disabled());
ASSERT(!info()->shared_info()->optimization_disabled());
// Fall back to using the full code generator if it's not possible
// to use the Hydrogen-based optimizing compiler. We already have
// generated code for this from the shared function object.
if (AlwaysFullCompiler()) {
info->SetCode(code);
return true;
if (AlwaysFullCompiler(info()->isolate())) {
info()->SetCode(code);
return SetLastStatus(BAILED_OUT);
}
// Limit the number of times we re-compile a functions with
// the optimizing compiler.
const int kMaxOptCount =
FLAG_deopt_every_n_times == 0 ? Compiler::kDefaultMaxOptCount : 1000;
if (info->shared_info()->opt_count() > kMaxOptCount) {
info->AbortOptimization();
info->shared_info()->DisableOptimization();
// True indicates the compilation pipeline is still going, not
// necessarily that we optimized the code.
return true;
FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000;
if (info()->shared_info()->opt_count() > kMaxOptCount) {
info()->set_bailout_reason("optimized too many times");
return AbortOptimization();
}
// Due to an encoding limit on LUnallocated operands in the Lithium
@ -230,27 +255,28 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
// The encoding is as a signed value, with parameters and receiver using
// the negative indices and locals the non-negative ones.
const int parameter_limit = -LUnallocated::kMinFixedIndex;
Scope* scope = info()->scope();
if ((scope->num_parameters() + 1) > parameter_limit) {
info()->set_bailout_reason("too many parameters");
return AbortOptimization();
}
const int locals_limit = LUnallocated::kMaxFixedIndex;
Scope* scope = info->scope();
if ((scope->num_parameters() + 1) > parameter_limit ||
(info->osr_ast_id() != AstNode::kNoNumber &&
scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit)) {
info->AbortOptimization();
info->shared_info()->DisableOptimization();
// True indicates the compilation pipeline is still going, not
// necessarily that we optimized the code.
return true;
if (!info()->osr_ast_id().IsNone() &&
scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit) {
info()->set_bailout_reason("too many parameters/locals");
return AbortOptimization();
}
// Take --hydrogen-filter into account.
Handle<String> name = info->function()->debug_name();
Handle<String> name = info()->function()->debug_name();
if (*FLAG_hydrogen_filter != '\0') {
Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
if ((filter[0] == '-'
&& name->IsEqualTo(filter.SubVector(1, filter.length())))
|| (filter[0] != '-' && !name->IsEqualTo(filter))) {
info->SetCode(code);
return true;
info()->SetCode(code);
return SetLastStatus(BAILED_OUT);
}
}
@ -258,20 +284,21 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
// doesn't have deoptimization support. Alternatively, we may decide to
// run the full code generator to get a baseline for the compile-time
// performance of the hydrogen-based compiler.
int64_t start = OS::Ticks();
bool should_recompile = !info->shared_info()->has_deoptimization_support();
Timer t(this, &time_taken_to_create_graph_);
bool should_recompile = !info()->shared_info()->has_deoptimization_support();
if (should_recompile || FLAG_hydrogen_stats) {
HPhase phase(HPhase::kFullCodeGen);
CompilationInfo unoptimized(info->shared_info());
CompilationInfoWithZone unoptimized(info()->shared_info());
// Note that we use the same AST that we will use for generating the
// optimized code.
unoptimized.SetFunction(info->function());
unoptimized.SetScope(info->scope());
unoptimized.SetFunction(info()->function());
unoptimized.SetScope(info()->scope());
unoptimized.SetContext(info()->context());
if (should_recompile) unoptimized.EnableDeoptimizationSupport();
bool succeeded = FullCodeGenerator::MakeCode(&unoptimized);
if (should_recompile) {
if (!succeeded) return false;
Handle<SharedFunctionInfo> shared = info->shared_info();
if (!succeeded) return SetLastStatus(FAILED);
Handle<SharedFunctionInfo> shared = info()->shared_info();
shared->EnableDeoptimizationSupport(*unoptimized.code());
// The existing unoptimized code was replaced with the new one.
Compiler::RecordFunctionCompilation(
@ -285,51 +312,93 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
// is safe as long as the unoptimized code has deoptimization
// support.
ASSERT(FLAG_always_opt || code->optimizable());
ASSERT(info->shared_info()->has_deoptimization_support());
ASSERT(info()->shared_info()->has_deoptimization_support());
if (FLAG_trace_hydrogen) {
PrintF("-----------------------------------------------------------\n");
PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
HTracer::Instance()->TraceCompilation(info->function());
HTracer::Instance()->TraceCompilation(info()->function());
}
Handle<Context> global_context(info->closure()->context()->global_context());
TypeFeedbackOracle oracle(code, global_context, info->isolate(),
info->isolate()->zone());
HGraphBuilder builder(info, &oracle, info->isolate()->zone());
Handle<Context> native_context(
info()->closure()->context()->native_context());
oracle_ = new(info()->zone()) TypeFeedbackOracle(
code, native_context, info()->isolate(), info()->zone());
graph_builder_ = new(info()->zone()) HGraphBuilder(info(), oracle_);
HPhase phase(HPhase::kTotal);
HGraph* graph = builder.CreateGraph();
if (info->isolate()->has_pending_exception()) {
info->SetCode(Handle<Code>::null());
return false;
graph_ = graph_builder_->CreateGraph();
if (info()->isolate()->has_pending_exception()) {
info()->SetCode(Handle<Code>::null());
return SetLastStatus(FAILED);
}
if (graph != NULL) {
Handle<Code> optimized_code = graph->Compile(info, graph->zone());
if (!optimized_code.is_null()) {
info->SetCode(optimized_code);
FinishOptimization(info->closure(), start);
return true;
// The function being compiled may have bailed out due to an inline
// candidate bailing out. In such a case, we don't disable
// optimization on the shared_info.
ASSERT(!graph_builder_->inline_bailout() || graph_ == NULL);
if (graph_ == NULL) {
if (graph_builder_->inline_bailout()) {
info_->AbortOptimization();
return SetLastStatus(BAILED_OUT);
} else {
return AbortOptimization();
}
}
return SetLastStatus(SUCCEEDED);
}
OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
AssertNoAllocation no_gc;
NoHandleAllocation no_handles;
ASSERT(last_status() == SUCCEEDED);
Timer t(this, &time_taken_to_optimize_);
ASSERT(graph_ != NULL);
SmartArrayPointer<char> bailout_reason;
if (!graph_->Optimize(&bailout_reason)) {
if (!bailout_reason.is_empty()) graph_builder_->Bailout(*bailout_reason);
return SetLastStatus(BAILED_OUT);
} else {
chunk_ = LChunk::NewChunk(graph_);
if (chunk_ == NULL) {
return SetLastStatus(BAILED_OUT);
}
}
return SetLastStatus(SUCCEEDED);
}
// Keep using the shared code.
info->AbortOptimization();
if (!builder.inline_bailout()) {
// Mark the shared code as unoptimizable unless it was an inlined
// function that bailed out.
info->shared_info()->DisableOptimization();
OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
ASSERT(last_status() == SUCCEEDED);
Timer timer(this, &time_taken_to_codegen_);
ASSERT(chunk_ != NULL);
ASSERT(graph_ != NULL);
Handle<Code> optimized_code = chunk_->Codegen();
if (optimized_code.is_null()) {
info()->set_bailout_reason("code generation failed");
return AbortOptimization();
}
// True indicates the compilation pipeline is still going, not necessarily
// that we optimized the code.
return true;
info()->SetCode(optimized_code);
RecordOptimizationStats();
return SetLastStatus(SUCCEEDED);
}
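For reference, the three phases above are intended to run in sequence, checking the status between steps. A minimal single-threaded driver might look like the following sketch (built only from the methods in this change; the function name is invented):

static bool RunCrankshaftPipeline(CompilationInfo* info) {
  OptimizingCompiler compiler(info);
  OptimizingCompiler::Status status = compiler.CreateGraph();
  if (status == OptimizingCompiler::SUCCEEDED) {
    status = compiler.OptimizeGraph();
  }
  if (status == OptimizingCompiler::SUCCEEDED) {
    status = compiler.GenerateAndInstallCode();
  }
  // FAILED aborts compilation; BAILED_OUT falls back to the unoptimized
  // code, so the pipeline is still considered to be "going".
  return status != OptimizingCompiler::FAILED;
}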
static bool GenerateCode(CompilationInfo* info) {
return info->IsCompilingForDebugging() || !V8::UseCrankshaft() ?
FullCodeGenerator::MakeCode(info) :
MakeCrankshaftCode(info);
bool is_optimizing = V8::UseCrankshaft() &&
!info->IsCompilingForDebugging() &&
info->IsOptimizing();
if (is_optimizing) {
return MakeCrankshaftCode(info);
} else {
if (info->IsOptimizing()) {
// Have the CompilationInfo decide if the compilation should be
// BASE or NONOPT.
info->DisableOptimization();
}
return FullCodeGenerator::MakeCode(info);
}
}
@ -348,7 +417,7 @@ bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) {
bool succeeded = MakeCode(info);
if (!info->shared_info().is_null()) {
Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope(),
info->isolate()->zone());
info->zone());
info->shared_info()->set_scope_info(*scope_info);
}
return succeeded;
@ -358,12 +427,12 @@ bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) {
static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
Isolate* isolate = info->isolate();
ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);
PostponeInterruptsScope postpone(isolate);
ASSERT(!isolate->global_context().is_null());
ASSERT(!isolate->native_context().is_null());
Handle<Script> script = info->script();
script->set_context_data((*isolate->global_context())->data());
script->set_context_data((*isolate->native_context())->data());
#ifdef ENABLE_DEBUGGER_SUPPORT
if (info->is_eval()) {
@ -422,7 +491,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
lit->name(),
lit->materialized_literal_count(),
info->code(),
ScopeInfo::Create(info->scope(), info->isolate()->zone()));
ScopeInfo::Create(info->scope(), info->zone()));
ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
Compiler::SetFunctionInfo(result, lit, true, script);
@ -464,7 +533,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
script, Debugger::NO_AFTER_COMPILE_FLAGS);
#endif
live_edit_tracker.RecordFunctionInfo(result, lit, isolate->zone());
live_edit_tracker.RecordFunctionInfo(result, lit, info->zone());
return result;
}
@ -474,6 +543,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
Handle<Object> script_name,
int line_offset,
int column_offset,
Handle<Context> context,
v8::Extension* extension,
ScriptDataImpl* pre_data,
Handle<Object> script_data,
@ -494,7 +564,8 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
result = compilation_cache->LookupScript(source,
script_name,
line_offset,
column_offset);
column_offset,
context);
}
if (result.is_null()) {
@ -522,16 +593,17 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
: *script_data);
// Compile the function and add it to the cache.
CompilationInfo info(script);
CompilationInfoWithZone info(script);
info.MarkAsGlobal();
info.SetExtension(extension);
info.SetPreParseData(pre_data);
info.SetContext(context);
if (FLAG_use_strict) {
info.SetLanguageMode(FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE);
}
result = MakeFunctionInfo(&info);
if (extension == NULL && !result.is_null()) {
compilation_cache->PutScript(source, result);
if (extension == NULL && !result.is_null() && !result->dont_cache()) {
compilation_cache->PutScript(source, context, result);
}
} else {
if (result->ic_age() != HEAP->global_ic_age()) {
@ -570,16 +642,16 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
if (result.is_null()) {
// Create a script object describing the script to be compiled.
Handle<Script> script = isolate->factory()->NewScript(source);
CompilationInfo info(script);
CompilationInfoWithZone info(script);
info.MarkAsEval();
if (is_global) info.MarkAsGlobal();
info.SetLanguageMode(language_mode);
info.SetCallingContext(context);
info.SetContext(context);
result = MakeFunctionInfo(&info);
if (!result.is_null()) {
// Explicitly disable optimization for eval code. We're not yet prepared
// to handle eval-code in the optimizing compiler.
result->DisableOptimization();
result->DisableOptimization("eval");
// If caller is strict mode, the result must be in strict mode or
// extended mode as well, but not the other way around. Consider:
@ -589,8 +661,10 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
// extended mode.
ASSERT(language_mode != EXTENDED_MODE ||
result->is_extended_mode());
compilation_cache->PutEval(
source, context, is_global, result, scope_position);
if (!result->dont_cache()) {
compilation_cache->PutEval(
source, context, is_global, result, scope_position);
}
}
} else {
if (result->ic_age() != HEAP->global_ic_age()) {
@ -602,10 +676,113 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
}
static bool InstallFullCode(CompilationInfo* info) {
// Update the shared function info with the compiled code and the
// scope info. Please note that the order of the shared function
// info initialization is important since set_scope_info might
// trigger a GC, causing the ASSERT below to be invalid if the code
// was flushed. By setting the code object last we avoid this.
Handle<SharedFunctionInfo> shared = info->shared_info();
Handle<Code> code = info->code();
Handle<JSFunction> function = info->closure();
Handle<ScopeInfo> scope_info =
ScopeInfo::Create(info->scope(), info->zone());
shared->set_scope_info(*scope_info);
shared->set_code(*code);
if (!function.is_null()) {
function->ReplaceCode(*code);
ASSERT(!function->IsOptimized());
}
// Set the expected number of properties for instances.
FunctionLiteral* lit = info->function();
int expected = lit->expected_property_count();
SetExpectedNofPropertiesFromEstimate(shared, expected);
// Set the optimization hints after performing lazy compilation, as
// these are not set when the function is set up as a lazily
// compiled function.
shared->SetThisPropertyAssignmentsInfo(
lit->has_only_simple_this_property_assignments(),
*lit->this_property_assignments());
// Check the function has compiled code.
ASSERT(shared->is_compiled());
shared->set_code_age(0);
shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
shared->set_dont_inline(lit->flags()->Contains(kDontInline));
shared->set_ast_node_count(lit->ast_node_count());
if (V8::UseCrankshaft() &&
!function.is_null() &&
!shared->optimization_disabled()) {
// If we're asked to always optimize, we compile the optimized
// version of the function right away - unless the debugger is
// active as it makes no sense to compile optimized code then.
if (FLAG_always_opt &&
!Isolate::Current()->DebuggerHasBreakPoints()) {
CompilationInfoWithZone optimized(function);
optimized.SetOptimizing(BailoutId::None());
return Compiler::CompileLazy(&optimized);
}
}
return true;
}
static void InstallCodeCommon(CompilationInfo* info) {
Handle<SharedFunctionInfo> shared = info->shared_info();
Handle<Code> code = info->code();
ASSERT(!code.is_null());
// Set optimizable to false if this is disallowed by the shared
// function info, e.g., we might have flushed the code and must
// reset this bit when lazy compiling the code again.
if (shared->optimization_disabled()) code->set_optimizable(false);
Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
}
static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
Handle<Code> code = info->code();
Handle<JSFunction> function = info->closure();
if (FLAG_cache_optimized_code && code->kind() == Code::OPTIMIZED_FUNCTION) {
Handle<SharedFunctionInfo> shared(function->shared());
Handle<FixedArray> literals(function->literals());
Handle<Context> native_context(function->context()->native_context());
SharedFunctionInfo::AddToOptimizedCodeMap(
shared, native_context, code, literals);
}
}
static bool InstallCodeFromOptimizedCodeMap(CompilationInfo* info) {
if (FLAG_cache_optimized_code && info->IsOptimizing()) {
Handle<SharedFunctionInfo> shared = info->shared_info();
Handle<JSFunction> function = info->closure();
ASSERT(!function.is_null());
Handle<Context> native_context(function->context()->native_context());
int index = shared->SearchOptimizedCodeMap(*native_context);
if (index > 0) {
if (FLAG_trace_opt) {
PrintF("[found optimized code for: ");
function->PrintName();
PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(*function));
}
// Caching of optimized code enabled and optimized code found.
shared->InstallFromOptimizedCodeMap(*function, index);
return true;
}
}
return false;
}
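Together with InsertCodeIntoOptimizedCodeMap above, this implements a per-native-context memoization of optimized code on the SharedFunctionInfo. Conceptually, each entry in the map carries three values (an illustration of the logical shape only; the actual storage is a flat FixedArray of triples, not a struct):

// Illustration only: the logical contents of one optimized-code-map entry.
struct OptimizedCodeMapEntry {
  Context* native_context;  // cache key, matched by SearchOptimizedCodeMap
  Code* code;               // optimized code specialized to that context
  FixedArray* literals;     // the context-dependent literals array
};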
bool Compiler::CompileLazy(CompilationInfo* info) {
Isolate* isolate = info->isolate();
ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);
// The VM is in the COMPILER state until exiting this function.
VMState state(isolate, COMPILER);
@ -616,6 +793,8 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
int compiled_size = shared->end_position() - shared->start_position();
isolate->counters()->total_compile_size()->Increment(compiled_size);
if (InstallCodeFromOptimizedCodeMap(info)) return true;
// Generate the AST for the lazily compiled function.
if (ParserApi::Parse(info, kNoParsingFlags)) {
// Measure how long it takes to do the lazy compilation; only take the
@ -634,69 +813,17 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
isolate->StackOverflow();
}
} else {
ASSERT(!info->code().is_null());
Handle<Code> code = info->code();
// Set optimizable to false if this is disallowed by the shared
// function info, e.g., we might have flushed the code and must
// reset this bit when lazy compiling the code again.
if (shared->optimization_disabled()) code->set_optimizable(false);
Handle<JSFunction> function = info->closure();
RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
InstallCodeCommon(info);
if (info->IsOptimizing()) {
Handle<Code> code = info->code();
ASSERT(shared->scope_info() != ScopeInfo::Empty());
function->ReplaceCode(*code);
info->closure()->ReplaceCode(*code);
InsertCodeIntoOptimizedCodeMap(info);
return true;
} else {
// Update the shared function info with the compiled code and the
// scope info. Please note that the order of the shared function
// info initialization is important since set_scope_info might
// trigger a GC, causing the ASSERT below to be invalid if the code
// was flushed. By setting the code object last we avoid this.
Handle<ScopeInfo> scope_info =
ScopeInfo::Create(info->scope(), info->isolate()->zone());
shared->set_scope_info(*scope_info);
shared->set_code(*code);
if (!function.is_null()) {
function->ReplaceCode(*code);
ASSERT(!function->IsOptimized());
}
// Set the expected number of properties for instances.
FunctionLiteral* lit = info->function();
int expected = lit->expected_property_count();
SetExpectedNofPropertiesFromEstimate(shared, expected);
// Set the optimization hints after performing lazy compilation, as
// these are not set when the function is set up as a lazily
// compiled function.
shared->SetThisPropertyAssignmentsInfo(
lit->has_only_simple_this_property_assignments(),
*lit->this_property_assignments());
// Check the function has compiled code.
ASSERT(shared->is_compiled());
shared->set_code_age(0);
shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
shared->set_dont_inline(lit->flags()->Contains(kDontInline));
shared->set_ast_node_count(lit->ast_node_count());
if (V8::UseCrankshaft()&&
!function.is_null() &&
!shared->optimization_disabled()) {
// If we're asked to always optimize, we compile the optimized
// version of the function right away - unless the debugger is
// active as it makes no sense to compile optimized code then.
if (FLAG_always_opt &&
!Isolate::Current()->DebuggerHasBreakPoints()) {
CompilationInfo optimized(function);
optimized.SetOptimizing(AstNode::kNoNumber);
return CompileLazy(&optimized);
}
}
return InstallFullCode(info);
}
return true;
}
}
@ -705,10 +832,97 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
}
void Compiler::RecompileParallel(Handle<JSFunction> closure) {
if (closure->IsInRecompileQueue()) return;
ASSERT(closure->IsMarkedForParallelRecompilation());
Isolate* isolate = closure->GetIsolate();
if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
if (FLAG_trace_parallel_recompilation) {
PrintF(" ** Compilation queue, will retry opting on next run.\n");
}
return;
}
SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(closure));
VMState state(isolate, PARALLEL_COMPILER_PROLOGUE);
PostponeInterruptsScope postpone(isolate);
Handle<SharedFunctionInfo> shared = info->shared_info();
int compiled_size = shared->end_position() - shared->start_position();
isolate->counters()->total_compile_size()->Increment(compiled_size);
info->SetOptimizing(BailoutId::None());
{
CompilationHandleScope handle_scope(*info);
if (InstallCodeFromOptimizedCodeMap(*info)) return;
if (ParserApi::Parse(*info, kNoParsingFlags)) {
LanguageMode language_mode = info->function()->language_mode();
info->SetLanguageMode(language_mode);
shared->set_language_mode(language_mode);
info->SaveHandles();
if (Rewriter::Rewrite(*info) && Scope::Analyze(*info)) {
OptimizingCompiler* compiler =
new(info->zone()) OptimizingCompiler(*info);
OptimizingCompiler::Status status = compiler->CreateGraph();
if (status == OptimizingCompiler::SUCCEEDED) {
isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
shared->code()->set_profiler_ticks(0);
closure->ReplaceCode(isolate->builtins()->builtin(
Builtins::kInRecompileQueue));
info.Detach();
} else if (status == OptimizingCompiler::BAILED_OUT) {
isolate->clear_pending_exception();
InstallFullCode(*info);
}
}
}
}
if (isolate->has_pending_exception()) {
isolate->clear_pending_exception();
}
}
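Note the division of labor: RecompileParallel runs CreateGraph on the main thread, the optimizing compiler thread runs OptimizeGraph (which, per the scopes inside OptimizeGraph, performs no allocation), and InstallOptimizedCode finishes on the main thread. A simplified sketch of the compiler thread's consumer loop follows; NextInput and QueueForInstall are assumed stand-ins, and only the OptimizingCompiler calls come from this change:

void CompilerThreadLoop(Isolate* isolate) {
  while (OptimizingCompiler* job = NextInput(isolate)) {  // assumed helper
    OptimizingCompiler::Status status = job->OptimizeGraph();
    USE(status);  // failures/bailouts are handled in InstallOptimizedCode
    QueueForInstall(isolate, job);                        // assumed helper
  }
}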
void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
SmartPointer<CompilationInfo> info(optimizing_compiler->info());
// If crankshaft succeeded, install the optimized code else install
// the unoptimized code.
OptimizingCompiler::Status status = optimizing_compiler->last_status();
if (status != OptimizingCompiler::SUCCEEDED) {
optimizing_compiler->info()->set_bailout_reason(
"failed/bailed out last time");
status = optimizing_compiler->AbortOptimization();
} else {
status = optimizing_compiler->GenerateAndInstallCode();
ASSERT(status == OptimizingCompiler::SUCCEEDED ||
status == OptimizingCompiler::BAILED_OUT);
}
InstallCodeCommon(*info);
if (status == OptimizingCompiler::SUCCEEDED) {
Handle<Code> code = info->code();
ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty());
info->closure()->ReplaceCode(*code);
if (info->shared_info()->SearchOptimizedCodeMap(
info->closure()->context()->native_context()) == -1) {
InsertCodeIntoOptimizedCodeMap(*info);
}
} else {
info->SetCode(Handle<Code>(info->shared_info()->code()));
InstallFullCode(*info);
}
}
Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
Handle<Script> script) {
// Precondition: code has been parsed and scopes have been analyzed.
CompilationInfo info(script);
CompilationInfoWithZone info(script);
info.SetFunction(literal);
info.SetScope(literal->scope());
info.SetLanguageMode(literal->scope()->language_mode());
@ -719,19 +933,24 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
// builtins cannot be handled lazily by the parser, since we have to know
// if a function uses the special natives syntax, which is something the
// parser records.
// If the debugger requests compilation for break points, we cannot be
// aggressive about lazy compilation, because it might trigger compilation
// of functions without an outer context when setting a breakpoint through
// Debug::FindSharedFunctionInfoInScript.
bool allow_lazy_without_ctx = literal->AllowsLazyCompilationWithoutContext();
bool allow_lazy = literal->AllowsLazyCompilation() &&
!LiveEditFunctionTracker::IsActive(info.isolate());
!LiveEditFunctionTracker::IsActive(info.isolate()) &&
(!info.isolate()->DebuggerHasBreakPoints() || allow_lazy_without_ctx);
Handle<ScopeInfo> scope_info(ScopeInfo::Empty());
// Generate code
if (FLAG_lazy && allow_lazy) {
if (FLAG_lazy && allow_lazy && !literal->is_parenthesized()) {
Handle<Code> code = info.isolate()->builtins()->LazyCompile();
info.SetCode(code);
} else if ((V8::UseCrankshaft() && MakeCrankshaftCode(&info)) ||
(!V8::UseCrankshaft() && FullCodeGenerator::MakeCode(&info))) {
} else if (GenerateCode(&info)) {
ASSERT(!info.code().is_null());
scope_info = ScopeInfo::Create(info.scope(), info.isolate()->zone());
scope_info = ScopeInfo::Create(info.scope(), info.zone());
} else {
return Handle<SharedFunctionInfo>::null();
}
@ -745,12 +964,13 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
SetFunctionInfo(result, literal, false, script);
RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
result->set_allows_lazy_compilation(allow_lazy);
result->set_allows_lazy_compilation_without_context(allow_lazy_without_ctx);
// Set the expected number of properties for instances and return
// the resulting function.
SetExpectedNofPropertiesFromEstimate(result,
literal->expected_property_count());
live_edit_tracker.RecordFunctionInfo(result, literal, info.isolate()->zone());
live_edit_tracker.RecordFunctionInfo(result, literal, info.zone());
return result;
}
@ -777,6 +997,8 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
lit->has_only_simple_this_property_assignments(),
*lit->this_property_assignments());
function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
function_info->set_allows_lazy_compilation_without_context(
lit->AllowsLazyCompilationWithoutContext());
function_info->set_language_mode(lit->language_mode());
function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
@ -784,6 +1006,7 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
function_info->set_is_function(lit->is_function());
function_info->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
function_info->set_dont_inline(lit->flags()->Contains(kDontInline));
function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
}
@ -796,7 +1019,7 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
// Log the code generation. If source information is available include
// script name and line number. Check explicitly whether logging is
// enabled as finding the line number is not free.
if (info->isolate()->logger()->is_logging() ||
if (info->isolate()->logger()->is_logging_code_events() ||
CpuProfiler::is_profiling(info->isolate())) {
Handle<Script> script = info->script();
Handle<Code> code = info->code();

198
deps/v8/src/compiler.h

@ -39,16 +39,21 @@ class ScriptDataImpl;
// CompilationInfo encapsulates some information known at compile time. It
// is constructed based on the resources available at compile-time.
class CompilationInfo BASE_EMBEDDED {
class CompilationInfo {
public:
explicit CompilationInfo(Handle<Script> script);
explicit CompilationInfo(Handle<SharedFunctionInfo> shared_info);
explicit CompilationInfo(Handle<JSFunction> closure);
CompilationInfo(Handle<Script> script, Zone* zone);
CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
CompilationInfo(Handle<JSFunction> closure, Zone* zone);
virtual ~CompilationInfo();
Isolate* isolate() {
ASSERT(Isolate::Current() == isolate_);
return isolate_;
}
Zone* zone() {
return zone_;
}
bool is_lazy() const { return IsLazy::decode(flags_); }
bool is_eval() const { return IsEval::decode(flags_); }
bool is_global() const { return IsGlobal::decode(flags_); }
@ -67,8 +72,8 @@ class CompilationInfo BASE_EMBEDDED {
Handle<Script> script() const { return script_; }
v8::Extension* extension() const { return extension_; }
ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
Handle<Context> calling_context() const { return calling_context_; }
int osr_ast_id() const { return osr_ast_id_; }
Handle<Context> context() const { return context_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
void MarkAsEval() {
ASSERT(!is_lazy());
@ -115,13 +120,8 @@ class CompilationInfo BASE_EMBEDDED {
ASSERT(!is_lazy());
pre_parse_data_ = pre_parse_data;
}
void SetCallingContext(Handle<Context> context) {
ASSERT(is_eval());
calling_context_ = context;
}
void SetOsrAstId(int osr_ast_id) {
ASSERT(IsOptimizing());
osr_ast_id_ = osr_ast_id;
void SetContext(Handle<Context> context) {
context_ = context;
}
void MarkCompilingForDebugging(Handle<Code> current_code) {
ASSERT(mode_ != OPTIMIZE);
@ -138,17 +138,18 @@ class CompilationInfo BASE_EMBEDDED {
}
bool has_global_object() const {
return !closure().is_null() && (closure()->context()->global() != NULL);
return !closure().is_null() &&
(closure()->context()->global_object() != NULL);
}
GlobalObject* global_object() const {
return has_global_object() ? closure()->context()->global() : NULL;
return has_global_object() ? closure()->context()->global_object() : NULL;
}
// Accessors for the different compilation modes.
bool IsOptimizing() const { return mode_ == OPTIMIZE; }
bool IsOptimizable() const { return mode_ == BASE; }
void SetOptimizing(int osr_ast_id) {
void SetOptimizing(BailoutId osr_ast_id) {
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
}
@ -170,6 +171,21 @@ class CompilationInfo BASE_EMBEDDED {
// current compilation pipeline.
void AbortOptimization();
void set_deferred_handles(DeferredHandles* deferred_handles) {
ASSERT(deferred_handles_ == NULL);
deferred_handles_ = deferred_handles;
}
void SaveHandles() {
SaveHandle(&closure_);
SaveHandle(&shared_info_);
SaveHandle(&context_);
SaveHandle(&script_);
}
const char* bailout_reason() const { return bailout_reason_; }
void set_bailout_reason(const char* reason) { bailout_reason_ = reason; }
private:
Isolate* isolate_;
@ -184,8 +200,6 @@ class CompilationInfo BASE_EMBEDDED {
NONOPT
};
CompilationInfo() : function_(NULL) {}
void Initialize(Mode mode) {
mode_ = V8::UseCrankshaft() ? mode : NONOPT;
ASSERT(!script_.is_null());
@ -196,6 +210,7 @@ class CompilationInfo BASE_EMBEDDED {
ASSERT(language_mode() == CLASSIC_MODE);
SetLanguageMode(shared_info_->language_mode());
}
set_bailout_reason("unknown");
}
void SetMode(Mode mode) {
@ -246,18 +261,148 @@ class CompilationInfo BASE_EMBEDDED {
v8::Extension* extension_;
ScriptDataImpl* pre_parse_data_;
// The context of the caller is needed for eval code, and will be a null
// handle otherwise.
Handle<Context> calling_context_;
// The context of the caller for eval code, and the global context for a
// global script. Will be a null handle otherwise.
Handle<Context> context_;
// Compilation mode flag and whether deoptimization is allowed.
Mode mode_;
int osr_ast_id_;
BailoutId osr_ast_id_;
// The zone from which the compilation pipeline working on this
// CompilationInfo allocates.
Zone* zone_;
DeferredHandles* deferred_handles_;
template<typename T>
void SaveHandle(Handle<T> *object) {
if (!object->is_null()) {
Handle<T> handle(*(*object));
*object = handle;
}
}
const char* bailout_reason_;
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
// Exactly like a CompilationInfo, except also creates and enters a
// Zone on construction and deallocates it on exit.
class CompilationInfoWithZone: public CompilationInfo {
public:
explicit CompilationInfoWithZone(Handle<Script> script)
: CompilationInfo(script, &zone_),
zone_(script->GetIsolate()),
zone_scope_(&zone_, DELETE_ON_EXIT) {}
explicit CompilationInfoWithZone(Handle<SharedFunctionInfo> shared_info)
: CompilationInfo(shared_info, &zone_),
zone_(shared_info->GetIsolate()),
zone_scope_(&zone_, DELETE_ON_EXIT) {}
explicit CompilationInfoWithZone(Handle<JSFunction> closure)
: CompilationInfo(closure, &zone_),
zone_(closure->GetIsolate()),
zone_scope_(&zone_, DELETE_ON_EXIT) {}
private:
Zone zone_;
ZoneScope zone_scope_;
};
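A hypothetical usage sketch: because the Zone's lifetime is tied to the info object, a stack-allocated CompilationInfoWithZone needs no explicit zone bookkeeping (only CompileFunctionOnce is invented here; the calls on info are from this change):

void CompileFunctionOnce(Handle<JSFunction> closure) {
  CompilationInfoWithZone info(closure);  // creates and enters a Zone
  info.SetOptimizing(BailoutId::None());
  Compiler::CompileLazy(&info);
}  // ~CompilationInfoWithZone: the DELETE_ON_EXIT scope frees the zone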
// A wrapper around a CompilationInfo that detaches the Handles from
// the underlying DeferredHandleScope and stores them in info_ on
// destruction.
class CompilationHandleScope BASE_EMBEDDED {
public:
explicit CompilationHandleScope(CompilationInfo* info)
: deferred_(info->isolate()), info_(info) {}
~CompilationHandleScope() {
info_->set_deferred_handles(deferred_.Detach());
}
private:
DeferredHandleScope deferred_;
CompilationInfo* info_;
};
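RecompileParallel above shows the intended pattern; in isolation it reduces to the following sketch, using only names from this change:

// Handles allocated while the scope is live are detached into the
// CompilationInfo on scope exit, so they stay valid for the compiler thread.
void ParseForBackgroundCompile(CompilationInfo* info) {
  CompilationHandleScope handle_scope(info);
  ParserApi::Parse(info, kNoParsingFlags);  // allocates handles
}  // destructor runs info->set_deferred_handles(deferred_.Detach())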
class HGraph;
class HGraphBuilder;
class LChunk;
// A helper class that calls the three compilation phases in
// Crankshaft and keeps track of its state. The three phases
// CreateGraph, OptimizeGraph and GenerateAndInstallCode can either
// fail, bail out to the full code generator, or succeed. Apart from
// their return value, the status of the phase last run can be checked
// using last_status().
class OptimizingCompiler: public ZoneObject {
public:
explicit OptimizingCompiler(CompilationInfo* info)
: info_(info),
oracle_(NULL),
graph_builder_(NULL),
graph_(NULL),
chunk_(NULL),
time_taken_to_create_graph_(0),
time_taken_to_optimize_(0),
time_taken_to_codegen_(0),
last_status_(FAILED) { }
enum Status {
FAILED, BAILED_OUT, SUCCEEDED
};
MUST_USE_RESULT Status CreateGraph();
MUST_USE_RESULT Status OptimizeGraph();
MUST_USE_RESULT Status GenerateAndInstallCode();
Status last_status() const { return last_status_; }
CompilationInfo* info() const { return info_; }
MUST_USE_RESULT Status AbortOptimization() {
info_->AbortOptimization();
info_->shared_info()->DisableOptimization(info_->bailout_reason());
return SetLastStatus(BAILED_OUT);
}
private:
CompilationInfo* info_;
TypeFeedbackOracle* oracle_;
HGraphBuilder* graph_builder_;
HGraph* graph_;
LChunk* chunk_;
int64_t time_taken_to_create_graph_;
int64_t time_taken_to_optimize_;
int64_t time_taken_to_codegen_;
Status last_status_;
MUST_USE_RESULT Status SetLastStatus(Status status) {
last_status_ = status;
return last_status_;
}
void RecordOptimizationStats();
struct Timer {
Timer(OptimizingCompiler* compiler, int64_t* location)
: compiler_(compiler),
start_(OS::Ticks()),
location_(location) { }
~Timer() {
*location_ += (OS::Ticks() - start_);
}
OptimizingCompiler* compiler_;
int64_t start_;
int64_t* location_;
};
};
// The V8 compiler
//
// General strategy: Source code is translated into an anonymous function w/o
@ -271,10 +416,6 @@ class CompilationInfo BASE_EMBEDDED {
class Compiler : public AllStatic {
public:
// Default maximum number of function optimization attempts before we
// give up.
static const int kDefaultMaxOptCount = 10;
static const int kMaxInliningLevels = 3;
// Call count before primitive functions trigger their own optimization.
@ -289,6 +430,7 @@ class Compiler : public AllStatic {
Handle<Object> script_name,
int line_offset,
int column_offset,
Handle<Context> context,
v8::Extension* extension,
ScriptDataImpl* pre_data,
Handle<Object> script_data,
@ -305,6 +447,8 @@ class Compiler : public AllStatic {
// success and false if the compilation resulted in a stack overflow.
static bool CompileLazy(CompilationInfo* info);
static void RecompileParallel(Handle<JSFunction> function);
// Compile a shared function info object (the function is possibly lazily
// compiled).
static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node,
@ -316,6 +460,8 @@ class Compiler : public AllStatic {
bool is_toplevel,
Handle<Script> script);
static void InstallOptimizedCode(OptimizingCompiler* info);
#ifdef ENABLE_DEBUGGER_SUPPORT
static bool MakeCodeForLiveEdit(CompilationInfo* info);
#endif

53
deps/v8/src/contexts.cc

@ -36,7 +36,7 @@ namespace internal {
Context* Context::declaration_context() {
Context* current = this;
while (!current->IsFunctionContext() && !current->IsGlobalContext()) {
while (!current->IsFunctionContext() && !current->IsNativeContext()) {
current = current->previous();
ASSERT(current->closure() == closure());
}
@ -45,7 +45,7 @@ Context* Context::declaration_context() {
JSBuiltinsObject* Context::builtins() {
GlobalObject* object = global();
GlobalObject* object = global_object();
if (object->IsJSGlobalObject()) {
return JSGlobalObject::cast(object)->builtins();
} else {
@ -55,19 +55,19 @@ JSBuiltinsObject* Context::builtins() {
}
Context* Context::global_context() {
Context* Context::native_context() {
// Fast case: the global object for this context has been set. In
// that case, the global object has a direct pointer to the native
// context.
if (global()->IsGlobalObject()) {
return global()->global_context();
if (global_object()->IsGlobalObject()) {
return global_object()->native_context();
}
// During bootstrapping, the global object might not be set and we
// have to search the context chain to find the global context.
// have to search the context chain to find the native context.
ASSERT(Isolate::Current()->bootstrapper()->IsActive());
Context* current = this;
while (!current->IsGlobalContext()) {
while (!current->IsNativeContext()) {
JSFunction* closure = JSFunction::cast(current->closure());
current = Context::cast(closure->context());
}
@ -76,11 +76,11 @@ Context* Context::global_context() {
JSObject* Context::global_proxy() {
return global_context()->global_proxy_object();
return native_context()->global_proxy_object();
}
void Context::set_global_proxy(JSObject* object) {
global_context()->set_global_proxy_object(object);
native_context()->set_global_proxy_object(object);
}
@ -106,12 +106,12 @@ Handle<Object> Context::Lookup(Handle<String> name,
do {
if (FLAG_trace_contexts) {
PrintF(" - looking in context %p", reinterpret_cast<void*>(*context));
if (context->IsGlobalContext()) PrintF(" (global context)");
if (context->IsNativeContext()) PrintF(" (native context)");
PrintF("\n");
}
// 1. Check global objects, subjects of with, and extension objects.
if (context->IsGlobalContext() ||
if (context->IsNativeContext() ||
context->IsWithContext() ||
(context->IsFunctionContext() && context->has_extension())) {
Handle<JSObject> object(JSObject::cast(context->extension()), isolate);
@ -226,7 +226,7 @@ Handle<Object> Context::Lookup(Handle<String> name,
}
// 3. Prepare to continue with the previous (next outermost) context.
if (context->IsGlobalContext()) {
if (context->IsNativeContext()) {
follow_context_chain = false;
} else {
context = Handle<Context>(context->previous(), isolate);
@ -241,19 +241,21 @@ Handle<Object> Context::Lookup(Handle<String> name,
void Context::AddOptimizedFunction(JSFunction* function) {
ASSERT(IsGlobalContext());
ASSERT(IsNativeContext());
#ifdef DEBUG
Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
while (!element->IsUndefined()) {
CHECK(element != function);
element = JSFunction::cast(element)->next_function_link();
if (FLAG_enable_slow_asserts) {
Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
while (!element->IsUndefined()) {
CHECK(element != function);
element = JSFunction::cast(element)->next_function_link();
}
}
CHECK(function->next_function_link()->IsUndefined());
// Check that the context belongs to the weak global contexts list.
// Check that the context belongs to the weak native contexts list.
bool found = false;
Object* context = GetHeap()->global_contexts_list();
Object* context = GetHeap()->native_contexts_list();
while (!context->IsUndefined()) {
if (context == this) {
found = true;
@ -269,7 +271,7 @@ void Context::AddOptimizedFunction(JSFunction* function) {
void Context::RemoveOptimizedFunction(JSFunction* function) {
ASSERT(IsGlobalContext());
ASSERT(IsNativeContext());
Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
JSFunction* prev = NULL;
while (!element->IsUndefined()) {
@ -293,7 +295,7 @@ void Context::RemoveOptimizedFunction(JSFunction* function) {
Object* Context::OptimizedFunctionsListHead() {
ASSERT(IsGlobalContext());
ASSERT(IsNativeContext());
return get(OPTIMIZED_FUNCTIONS_LIST);
}
@ -304,10 +306,15 @@ void Context::ClearOptimizedFunctions() {
#ifdef DEBUG
bool Context::IsBootstrappingOrContext(Object* object) {
bool Context::IsBootstrappingOrValidParentContext(
Object* object, Context* child) {
// During bootstrapping we allow all objects to pass as
// contexts. This is necessary to fix circular dependencies.
return Isolate::Current()->bootstrapper()->IsActive() || object->IsContext();
if (Isolate::Current()->bootstrapper()->IsActive()) return true;
if (!object->IsContext()) return false;
Context* context = Context::cast(object);
return context->IsNativeContext() || context->IsGlobalContext() ||
context->IsModuleContext() || !child->IsModuleContext();
}

64
deps/v8/src/contexts.h

@ -96,7 +96,7 @@ enum BindingFlags {
// must always be allocated via Heap::AllocateContext() or
// Factory::NewContext.
#define GLOBAL_CONTEXT_FIELDS(V) \
#define NATIVE_CONTEXT_FIELDS(V) \
V(GLOBAL_PROXY_INDEX, JSObject, global_proxy_object) \
V(SECURITY_TOKEN_INDEX, Object, security_token) \
V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \
@ -190,16 +190,19 @@ enum BindingFlags {
// Dynamically declared variables/functions are also added
// to lazily allocated extension object. Context::Lookup
// searches the extension object for properties.
// For global and block contexts, contains the respective
// ScopeInfo.
// For module contexts, points back to the respective JSModule.
//
// [ global ] A pointer to the global object. Provided for quick
// [ global_object ] A pointer to the global object. Provided for quick
// access to the global object from inside the code (since
// we always have a context pointer).
//
// In addition, function contexts may have statically allocated context slots
// to store local variables/functions that are accessed from inner functions
// (via static context addresses) or through 'eval' (dynamic context lookups).
// Finally, the global context contains additional slots for fast access to
// global properties.
// Finally, the native context contains additional slots for fast access to
// native properties.
class Context: public FixedArray {
public:
@ -217,15 +220,15 @@ class Context: public FixedArray {
// The extension slot is used for either the global object (in global
// contexts), eval extension object (function contexts), subject of with
// (with contexts), or the variable name (catch contexts), the serialized
// scope info (block contexts).
// scope info (block contexts), or the module instance (module contexts).
EXTENSION_INDEX,
GLOBAL_INDEX,
GLOBAL_OBJECT_INDEX,
MIN_CONTEXT_SLOTS,
// This slot holds the thrown value in catch contexts.
THROWN_OBJECT_INDEX = MIN_CONTEXT_SLOTS,
// These slots are only in global contexts.
// These slots are only in native contexts.
GLOBAL_PROXY_INDEX = MIN_CONTEXT_SLOTS,
SECURITY_TOKEN_INDEX,
ARGUMENTS_BOILERPLATE_INDEX,
@ -292,7 +295,7 @@ class Context: public FixedArray {
NEXT_CONTEXT_LINK, // Weak.
// Total number of slots.
GLOBAL_CONTEXT_SLOTS,
NATIVE_CONTEXT_SLOTS,
FIRST_WEAK_SLOT = OPTIMIZED_FUNCTIONS_LIST
};
@ -303,7 +306,7 @@ class Context: public FixedArray {
Context* previous() {
Object* result = unchecked_previous();
ASSERT(IsBootstrappingOrContext(result));
ASSERT(IsBootstrappingOrValidParentContext(result, this));
return reinterpret_cast<Context*>(result);
}
void set_previous(Context* context) { set(PREVIOUS_INDEX, context); }
@ -312,16 +315,21 @@ class Context: public FixedArray {
Object* extension() { return get(EXTENSION_INDEX); }
void set_extension(Object* object) { set(EXTENSION_INDEX, object); }
JSModule* module() { return JSModule::cast(get(EXTENSION_INDEX)); }
void set_module(JSModule* module) { set(EXTENSION_INDEX, module); }
// Get the context where var declarations will be hoisted to, which
// may be the context itself.
Context* declaration_context();
GlobalObject* global() {
Object* result = get(GLOBAL_INDEX);
GlobalObject* global_object() {
Object* result = get(GLOBAL_OBJECT_INDEX);
ASSERT(IsBootstrappingOrGlobalObject(result));
return reinterpret_cast<GlobalObject*>(result);
}
void set_global(GlobalObject* global) { set(GLOBAL_INDEX, global); }
void set_global_object(GlobalObject* object) {
set(GLOBAL_OBJECT_INDEX, object);
}
// Returns a JSGlobalProxy object or null.
JSObject* global_proxy();
@ -330,11 +338,11 @@ class Context: public FixedArray {
// The builtins object.
JSBuiltinsObject* builtins();
// Compute the global context by traversing the context chain.
Context* global_context();
// Compute the native context by traversing the context chain.
Context* native_context();
// Predicates for context types. IsGlobalContext is defined on Object
// because we frequently have to know if arbitrary objects are global
// Predicates for context types. IsNativeContext is defined on Object
// because we frequently have to know if arbitrary objects are native
// contexts.
bool IsFunctionContext() {
Map* map = this->map();
@ -356,30 +364,34 @@ class Context: public FixedArray {
Map* map = this->map();
return map == map->GetHeap()->module_context_map();
}
bool IsGlobalContext() {
Map* map = this->map();
return map == map->GetHeap()->global_context_map();
}
// Tells whether the global context is marked with out of memory.
// Tells whether the native context is marked with out of memory.
inline bool has_out_of_memory();
// Mark the global context with out of memory.
// Mark the native context with out of memory.
inline void mark_out_of_memory();
// A global context holds a list of all functions which have been optimized.
// A native context holds a list of all functions which have been optimized.
void AddOptimizedFunction(JSFunction* function);
void RemoveOptimizedFunction(JSFunction* function);
Object* OptimizedFunctionsListHead();
void ClearOptimizedFunctions();
#define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \
#define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \
void set_##name(type* value) { \
ASSERT(IsGlobalContext()); \
ASSERT(IsNativeContext()); \
set(index, value); \
} \
type* name() { \
ASSERT(IsGlobalContext()); \
ASSERT(IsNativeContext()); \
return type::cast(get(index)); \
}
GLOBAL_CONTEXT_FIELDS(GLOBAL_CONTEXT_FIELD_ACCESSORS)
#undef GLOBAL_CONTEXT_FIELD_ACCESSORS
NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSORS)
#undef NATIVE_CONTEXT_FIELD_ACCESSORS
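Written out for one concrete field from the NATIVE_CONTEXT_FIELDS list, V(SECURITY_TOKEN_INDEX, Object, security_token), the macro expands to:

void set_security_token(Object* value) {
  ASSERT(IsNativeContext());
  set(SECURITY_TOKEN_INDEX, value);
}
Object* security_token() {
  ASSERT(IsNativeContext());
  return Object::cast(get(SECURITY_TOKEN_INDEX));
}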
// Lookup the slot called name, starting with the current context.
// There are three possibilities:
@ -409,7 +421,7 @@ class Context: public FixedArray {
return kHeaderSize + index * kPointerSize - kHeapObjectTag;
}
static const int kSize = kHeaderSize + GLOBAL_CONTEXT_SLOTS * kPointerSize;
static const int kSize = kHeaderSize + NATIVE_CONTEXT_SLOTS * kPointerSize;
// GC support.
typedef FixedBodyDescriptor<
@ -426,7 +438,7 @@ class Context: public FixedArray {
#ifdef DEBUG
// Bootstrapping-aware type checks.
static bool IsBootstrappingOrContext(Object* object);
static bool IsBootstrappingOrValidParentContext(Object* object, Context* kid);
static bool IsBootstrappingOrGlobalObject(Object* object);
#endif
};

36
deps/v8/src/conversions-inl.h

@ -51,6 +51,11 @@ inline double JunkStringValue() {
}
inline double SignedZero(bool negative) {
return negative ? uint64_to_double(Double::kSignMask) : 0.0;
}
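SignedZero materializes -0.0 from the IEEE sign bit. A self-contained illustration of the same trick in plain C++ (independent of V8's Double helpers; all names below are invented):

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

static double SignBitZero(bool negative) {
  uint64_t bits = negative ? (1ULL << 63) : 0;  // analogue of Double::kSignMask
  double result;
  memcpy(&result, &bits, sizeof(result));
  return result;
}

int main() {
  printf("%d\n", SignBitZero(true) == 0.0);         // 1: -0.0 compares equal to 0.0
  printf("%d\n", std::signbit(SignBitZero(true)));  // 1: but the sign bit is set
  return 0;
}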
// The fast double-to-unsigned-int conversion routine does not guarantee
// rounding towards zero, or any reasonable value if the argument is larger
// than what fits in an unsigned 32-bit integer.
@ -263,6 +268,7 @@ double InternalStringToInt(UnicodeCache* unicode_cache,
if (radix == 0) {
// Radix detection.
radix = 10;
if (*current == '0') {
++current;
if (current == end) return SignedZero(negative);
@ -271,11 +277,8 @@ double InternalStringToInt(UnicodeCache* unicode_cache,
++current;
if (current == end) return JunkStringValue();
} else {
radix = 8;
leading_zero = true;
}
} else {
radix = 10;
}
} else if (radix == 16) {
if (*current == '0') {
@ -459,16 +462,23 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
int insignificant_digits = 0;
bool nonzero_digit_dropped = false;
bool negative = false;
enum Sign {
NONE,
NEGATIVE,
POSITIVE
};
Sign sign = NONE;
if (*current == '+') {
// Ignore leading sign.
++current;
if (current == end) return JunkStringValue();
sign = POSITIVE;
} else if (*current == '-') {
++current;
if (current == end) return JunkStringValue();
negative = true;
sign = NEGATIVE;
}
static const char kInfinitySymbol[] = "Infinity";
@ -483,34 +493,34 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
}
ASSERT(buffer_pos == 0);
return negative ? -V8_INFINITY : V8_INFINITY;
return (sign == NEGATIVE) ? -V8_INFINITY : V8_INFINITY;
}
bool leading_zero = false;
if (*current == '0') {
++current;
if (current == end) return SignedZero(negative);
if (current == end) return SignedZero(sign == NEGATIVE);
leading_zero = true;
// It could be hexadecimal value.
if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
++current;
if (current == end || !isDigit(*current, 16)) {
if (current == end || !isDigit(*current, 16) || sign != NONE) {
return JunkStringValue(); // "0x".
}
return InternalStringToIntDouble<4>(unicode_cache,
current,
end,
negative,
false,
allow_trailing_junk);
}
// Ignore leading zeros in the integer part.
while (*current == '0') {
++current;
if (current == end) return SignedZero(negative);
if (current == end) return SignedZero(sign == NEGATIVE);
}
}
@ -555,7 +565,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
// leading zeros (if any).
while (*current == '0') {
++current;
if (current == end) return SignedZero(negative);
if (current == end) return SignedZero(sign == NEGATIVE);
exponent--; // Move this 0 into the exponent.
}
}
@ -647,7 +657,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
return InternalStringToIntDouble<3>(unicode_cache,
buffer,
buffer + buffer_pos,
negative,
sign == NEGATIVE,
allow_trailing_junk);
}
@ -660,7 +670,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
buffer[buffer_pos] = '\0';
double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
return negative ? -converted : converted;
return (sign == NEGATIVE) ? -converted : converted;
}
} } // namespace v8::internal

11
deps/v8/src/conversions.h

@ -52,8 +52,13 @@ inline bool isDigit(int x, int radix) {
}
inline double SignedZero(bool negative) {
return negative ? -0.0 : 0.0;
// The fast double-to-(unsigned-)int conversion routine does not guarantee
// rounding towards zero.
// For NaN and values outside the int range, return INT_MIN or INT_MAX.
inline int FastD2IChecked(double x) {
if (!(x >= INT_MIN)) return INT_MIN; // Negation to catch NaNs.
if (x > INT_MAX) return INT_MAX;
return static_cast<int>(x);
}
@ -62,8 +67,6 @@ inline double SignedZero(bool negative) {
// The result is unspecified if x is infinite or NaN, or if the rounded
// integer value is outside the range of type int.
inline int FastD2I(double x) {
// The static_cast convertion from double to int used to be slow, but
// as new benchmarks show, now it is much faster than lrint().
return static_cast<int>(x);
}
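A self-contained check of FastD2IChecked's clamping behavior (the function body is copied from above; the driver is a sketch):

#include <cassert>
#include <climits>
#include <limits>

inline int FastD2IChecked(double x) {
  if (!(x >= INT_MIN)) return INT_MIN;  // negated test also catches NaN
  if (x > INT_MAX) return INT_MAX;
  return static_cast<int>(x);
}

int main() {
  assert(FastD2IChecked(3.9) == 3);         // in range: truncates toward zero
  assert(FastD2IChecked(1e12) == INT_MAX);  // too large: clamped
  assert(FastD2IChecked(-1e12) == INT_MIN); // too small: clamped
  assert(FastD2IChecked(std::numeric_limits<double>::quiet_NaN()) == INT_MIN);
  return 0;
}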

26
deps/v8/src/counters.cc

@ -1,4 +1,4 @@
// Copyright 2007-2008 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -64,9 +64,20 @@ void StatsCounterTimer::Stop() {
counter_.Increment(milliseconds);
}
void Histogram::AddSample(int sample) {
if (Enabled()) {
Isolate::Current()->stats_table()->AddHistogramSample(histogram_, sample);
}
}
void* Histogram::CreateHistogram() const {
return Isolate::Current()->stats_table()->
CreateHistogram(name_, min_, max_, num_buckets_);
}
// Start the timer.
void HistogramTimer::Start() {
if (GetHistogram() != NULL) {
if (histogram_.Enabled()) {
stop_time_ = 0;
start_time_ = OS::Ticks();
}
@ -74,20 +85,13 @@ void HistogramTimer::Start() {
// Stop the timer and record the results.
void HistogramTimer::Stop() {
if (histogram_ != NULL) {
if (histogram_.Enabled()) {
stop_time_ = OS::Ticks();
// Compute the delta between start and stop, in milliseconds.
int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
Isolate::Current()->stats_table()->
AddHistogramSample(histogram_, milliseconds);
histogram_.AddSample(milliseconds);
}
}
void* HistogramTimer::CreateHistogram() const {
return Isolate::Current()->stats_table()->
CreateHistogram(name_, 0, 10000, 50);
}
} } // namespace v8::internal

60
deps/v8/src/counters.h

@ -1,4 +1,4 @@
// Copyright 2007-2008 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -169,8 +169,7 @@ struct StatsCounter {
protected:
// Returns the cached address of this counter location.
int* GetPtr() {
if (lookup_done_)
return ptr_;
if (lookup_done_) return ptr_;
lookup_done_ = true;
ptr_ = FindLocationInStatsTable();
return ptr_;
@ -199,25 +198,30 @@ struct StatsCounterTimer {
}
};
// A HistogramTimer allows distributions of results to be created
// HistogramTimer t = { L"foo", NULL, false, 0, 0 };
struct HistogramTimer {
// A Histogram represents a dynamically created histogram in the StatsTable.
//
// This class is designed to be POD initialized. It will be registered with
// the histogram system on first use. For example:
// Histogram h = { "myhist", 0, 10000, 50, NULL, false };
struct Histogram {
const char* name_;
int min_;
int max_;
int num_buckets_;
void* histogram_;
bool lookup_done_;
int64_t start_time_;
int64_t stop_time_;
// Start the timer.
void Start();
// Add a single sample to this histogram.
void AddSample(int sample);
// Stop the timer and record the results.
void Stop();
// Returns true if this histogram is enabled.
bool Enabled() {
return GetHistogram() != NULL;
}
// Returns true if the timer is running.
bool Running() {
return (histogram_ != NULL) && (start_time_ != 0) && (stop_time_ == 0);
// Reset the cached internal pointer.
void Reset() {
lookup_done_ = false;
}
protected:
@ -234,6 +238,30 @@ struct HistogramTimer {
void* CreateHistogram() const;
};
// A HistogramTimer allows distributions of timed results to be created.
// HistogramTimer t = { {"foo", 0, 10000, 50, NULL, false}, 0, 0 };
struct HistogramTimer {
Histogram histogram_;
int64_t start_time_;
int64_t stop_time_;
// Start the timer.
void Start();
// Stop the timer and record the results.
void Stop();
// Returns true if the timer is running.
bool Running() {
return histogram_.Enabled() && (start_time_ != 0) && (stop_time_ == 0);
}
void Reset() {
histogram_.Reset();
}
};
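A usage sketch of the embedded-Histogram layout (the initializer mirrors the comment above; the timer and function names are invented):

static HistogramTimer parse_timer =
    { { "V8.ParseSketch", 0, 10000, 50, NULL, false }, 0, 0 };

void TimedParse() {
  parse_timer.Start();  // records start_time_ if the histogram is enabled
  // ... do the work being measured ...
  parse_timer.Stop();   // adds the elapsed milliseconds as a sample
}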
// Helper class for scoping a HistogramTimer.
class HistogramTimerScope BASE_EMBEDDED {
public:

2
deps/v8/src/cpu-profiler.h

@ -188,7 +188,7 @@ class ProfilerEventsProcessor : public Thread {
#define PROFILE(isolate, Call) \
LOG(isolate, Call); \
LOG_CODE_EVENT(isolate, Call); \
do { \
if (v8::internal::CpuProfiler::is_profiling(isolate)) { \
v8::internal::CpuProfiler::Call; \

495
deps/v8/src/d8.cc

@ -284,9 +284,9 @@ Handle<Value> Shell::Load(const Arguments& args) {
return Undefined();
}
static size_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
if (value_in->IsUint32()) {
return value_in->Uint32Value();
static int32_t convertToInt(Local<Value> value_in, TryCatch* try_catch) {
if (value_in->IsInt32()) {
return value_in->Int32Value();
}
Local<Value> number = value_in->ToNumber();
@ -296,7 +296,15 @@ static size_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
Local<Int32> int32 = number->ToInt32();
if (try_catch->HasCaught() || int32.IsEmpty()) return 0;
int32_t raw_value = int32->Int32Value();
int32_t value = int32->Int32Value();
if (try_catch->HasCaught()) return 0;
return value;
}
static int32_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
int32_t raw_value = convertToInt(value_in, try_catch);
if (try_catch->HasCaught()) return 0;
if (raw_value < 0) {
@ -312,14 +320,18 @@ static size_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
ThrowException(
String::New("Array length exceeds maximum length."));
}
return static_cast<size_t>(raw_value);
return raw_value;
}
// TODO(rossberg): should replace these by proper uses of HasInstance,
// once we figure out a good way to make the templates global.
const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_";
const char kArrayMarkerPropName[] = "d8::_is_typed_array_";
Handle<Value> Shell::CreateExternalArrayBuffer(int32_t length) {
Handle<Value> Shell::CreateExternalArrayBuffer(Handle<Object> buffer,
int32_t length) {
static const int32_t kMaxSize = 0x7fffffff;
// Make sure the total size fits into a (signed) int.
if (length < 0 || length > kMaxSize) {
@ -327,11 +339,10 @@ Handle<Value> Shell::CreateExternalArrayBuffer(int32_t length) {
}
uint8_t* data = new uint8_t[length];
if (data == NULL) {
return ThrowException(String::New("Memory allocation failed."));
return ThrowException(String::New("Memory allocation failed"));
}
memset(data, 0, length);
Handle<Object> buffer = Object::New();
buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True());
Persistent<Object> persistent_array = Persistent<Object>::New(buffer);
persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
@ -346,28 +357,73 @@ Handle<Value> Shell::CreateExternalArrayBuffer(int32_t length) {
}
Handle<Value> Shell::CreateExternalArrayBuffer(const Arguments& args) {
Handle<Value> Shell::ArrayBuffer(const Arguments& args) {
if (!args.IsConstructCall()) {
Handle<Value>* rec_args = new Handle<Value>[args.Length()];
for (int i = 0; i < args.Length(); ++i) rec_args[i] = args[i];
Handle<Value> result = args.Callee()->NewInstance(args.Length(), rec_args);
delete[] rec_args;
return result;
}
if (args.Length() == 0) {
return ThrowException(
String::New("ArrayBuffer constructor must have one parameter."));
String::New("ArrayBuffer constructor must have one argument"));
}
TryCatch try_catch;
int32_t length = convertToUint(args[0], &try_catch);
if (try_catch.HasCaught()) return try_catch.Exception();
if (try_catch.HasCaught()) return try_catch.ReThrow();
return CreateExternalArrayBuffer(length);
return CreateExternalArrayBuffer(args.This(), length);
}
Handle<Object> Shell::CreateExternalArray(Handle<Object> array,
Handle<Object> buffer,
ExternalArrayType type,
int32_t length,
int32_t byteLength,
int32_t byteOffset,
int32_t element_size) {
ASSERT(element_size == 1 || element_size == 2 ||
element_size == 4 || element_size == 8);
ASSERT(byteLength == length * element_size);
void* data = buffer->GetIndexedPropertiesExternalArrayData();
ASSERT(data != NULL);
array->SetIndexedPropertiesToExternalArrayData(
static_cast<uint8_t*>(data) + byteOffset, type, length);
array->SetHiddenValue(String::New(kArrayMarkerPropName), Int32::New(type));
array->Set(String::New("byteLength"), Int32::New(byteLength), ReadOnly);
array->Set(String::New("byteOffset"), Int32::New(byteOffset), ReadOnly);
array->Set(String::New("length"), Int32::New(length), ReadOnly);
array->Set(String::New("BYTES_PER_ELEMENT"), Int32::New(element_size));
array->Set(String::New("buffer"), buffer, ReadOnly);
return array;
}
Handle<Value> Shell::CreateExternalArray(const Arguments& args,
ExternalArrayType type,
int32_t element_size) {
if (!args.IsConstructCall()) {
Handle<Value>* rec_args = new Handle<Value>[args.Length()];
for (int i = 0; i < args.Length(); ++i) rec_args[i] = args[i];
Handle<Value> result = args.Callee()->NewInstance(args.Length(), rec_args);
delete[] rec_args;
return result;
}
TryCatch try_catch;
ASSERT(element_size == 1 || element_size == 2 ||
element_size == 4 || element_size == 8);
// Currently, only the following constructors are supported:
// All of the following constructors are supported:
// TypedArray(unsigned long length)
// TypedArray(type[] array)
// TypedArray(TypedArray array)
// TypedArray(ArrayBuffer buffer,
// optional unsigned long byteOffset,
// optional unsigned long length)
@ -375,29 +431,31 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
int32_t length;
int32_t byteLength;
int32_t byteOffset;
bool init_from_array = false;
if (args.Length() == 0) {
return ThrowException(
String::New("Array constructor must have at least one parameter."));
String::New("Array constructor must have at least one argument"));
}
if (args[0]->IsObject() &&
!args[0]->ToObject()->GetHiddenValue(
String::New(kArrayBufferMarkerPropName)).IsEmpty()) {
!args[0]->ToObject()->GetHiddenValue(
String::New(kArrayBufferMarkerPropName)).IsEmpty()) {
// Construct from ArrayBuffer.
buffer = args[0]->ToObject();
int32_t bufferLength =
convertToUint(buffer->Get(String::New("byteLength")), &try_catch);
if (try_catch.HasCaught()) return try_catch.Exception();
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (args.Length() < 2 || args[1]->IsUndefined()) {
byteOffset = 0;
} else {
byteOffset = convertToUint(args[1], &try_catch);
if (try_catch.HasCaught()) return try_catch.Exception();
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (byteOffset > bufferLength) {
return ThrowException(String::New("byteOffset out of bounds"));
}
if (byteOffset % element_size != 0) {
return ThrowException(
String::New("byteOffset must be multiple of element_size"));
String::New("byteOffset must be multiple of element size"));
}
}
@ -406,41 +464,312 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
length = byteLength / element_size;
if (byteLength % element_size != 0) {
return ThrowException(
String::New("buffer size must be multiple of element_size"));
String::New("buffer size must be multiple of element size"));
}
} else {
length = convertToUint(args[2], &try_catch);
if (try_catch.HasCaught()) return try_catch.Exception();
if (try_catch.HasCaught()) return try_catch.ReThrow();
byteLength = length * element_size;
if (byteOffset + byteLength > bufferLength) {
return ThrowException(String::New("length out of bounds"));
}
}
} else {
if (args[0]->IsObject() &&
args[0]->ToObject()->Has(String::New("length"))) {
// Construct from array.
length = convertToUint(
args[0]->ToObject()->Get(String::New("length")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
init_from_array = true;
} else {
// Construct from size.
length = convertToUint(args[0], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
}
byteLength = length * element_size;
byteOffset = 0;
Handle<Object> global = Context::GetCurrent()->Global();
Handle<Value> array_buffer = global->Get(String::New("ArrayBuffer"));
ASSERT(!try_catch.HasCaught() && array_buffer->IsFunction());
Handle<Value> buffer_args[] = { Uint32::New(byteLength) };
Handle<Value> result = Handle<Function>::Cast(array_buffer)->NewInstance(
1, buffer_args);
if (try_catch.HasCaught()) return result;
buffer = result->ToObject();
}
Handle<Object> array = CreateExternalArray(
args.This(), buffer, type, length, byteLength, byteOffset, element_size);
if (init_from_array) {
Handle<Object> init = args[0]->ToObject();
for (int i = 0; i < length; ++i) array->Set(i, init->Get(i));
}
return array;
}
Handle<Value> Shell::ArrayBufferSlice(const Arguments& args) {
TryCatch try_catch;
if (!args.This()->IsObject()) {
return ThrowException(
String::New("'slice' invoked on non-object receiver"));
}
Local<Object> self = args.This();
Local<Value> marker =
self->GetHiddenValue(String::New(kArrayBufferMarkerPropName));
if (marker.IsEmpty()) {
return ThrowException(
String::New("'slice' invoked on wrong receiver type"));
}
int32_t length =
convertToUint(self->Get(String::New("byteLength")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (args.Length() == 0) {
return ThrowException(
String::New("'slice' must have at least one argument"));
}
int32_t begin = convertToInt(args[0], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (begin < 0) begin += length;
if (begin < 0) begin = 0;
if (begin > length) begin = length;
int32_t end;
if (args.Length() < 2 || args[1]->IsUndefined()) {
end = length;
} else {
end = convertToInt(args[1], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (end < 0) end += length;
if (end < 0) end = 0;
if (end > length) end = length;
if (end < begin) end = begin;
}
Local<Function> constructor = Local<Function>::Cast(self->GetConstructor());
Handle<Value> new_args[] = { Uint32::New(end - begin) };
Handle<Value> result = constructor->NewInstance(1, new_args);
if (try_catch.HasCaught()) return result;
Handle<Object> buffer = result->ToObject();
uint8_t* dest =
static_cast<uint8_t*>(buffer->GetIndexedPropertiesExternalArrayData());
uint8_t* src = begin + static_cast<uint8_t*>(
self->GetIndexedPropertiesExternalArrayData());
memcpy(dest, src, end - begin);
return buffer;
}
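The clamping above gives slice the usual negative-index semantics; a sketch of the resulting behavior:

var buf = new ArrayBuffer(8);
buf.slice(-4);     // begin = 8 + (-4) = 4, end = 8: copies the last 4 bytes
buf.slice(2, -2);  // end = 8 + (-2) = 6: copies bytes 2..5
buf.slice(6, 2);   // end is clamped up to begin: a 0-byte buffer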
Handle<Value> Shell::ArraySubArray(const Arguments& args) {
TryCatch try_catch;
if (!args.This()->IsObject()) {
return ThrowException(
String::New("'subarray' invoked on non-object receiver"));
}
Local<Object> self = args.This();
Local<Value> marker = self->GetHiddenValue(String::New(kArrayMarkerPropName));
if (marker.IsEmpty()) {
return ThrowException(
String::New("'subarray' invoked on wrong receiver type"));
}
Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t length =
convertToUint(self->Get(String::New("length")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t byteOffset =
convertToUint(self->Get(String::New("byteOffset")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t element_size =
convertToUint(self->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (args.Length() == 0) {
return ThrowException(
String::New("'subarray' must have at least one argument"));
}
int32_t begin = convertToInt(args[0], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (begin < 0) begin += length;
if (begin < 0) begin = 0;
if (begin > length) begin = length;
int32_t end;
if (args.Length() < 2 || args[1]->IsUndefined()) {
end = length;
} else {
end = convertToInt(args[1], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (end < 0) end += length;
if (end < 0) end = 0;
if (end > length) end = length;
if (end < begin) end = begin;
}
length = end - begin;
byteOffset += begin * element_size;
Local<Function> constructor = Local<Function>::Cast(self->GetConstructor());
Handle<Value> construct_args[] = {
buffer, Uint32::New(byteOffset), Uint32::New(length)
};
return constructor->NewInstance(3, construct_args);
}
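Because subarray re-invokes the constructor with (buffer, byteOffset, length), the result is a view over the same storage, unlike slice above, which memcpys into a fresh buffer. A sketch:

var a = new Uint8Array(8);
var v = a.subarray(2, 6);  // same buffer, byteOffset 2, length 4
v[0] = 42;
a[2];                      // 42 -- writes through the view are visible in a
v.buffer === a.buffer;     // true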
Handle<Value> Shell::ArraySet(const Arguments& args) {
TryCatch try_catch;
if (!args.This()->IsObject()) {
return ThrowException(
String::New("'set' invoked on non-object receiver"));
}
Local<Object> self = args.This();
Local<Value> marker = self->GetHiddenValue(String::New(kArrayMarkerPropName));
if (marker.IsEmpty()) {
return ThrowException(
String::New("'set' invoked on wrong receiver type"));
}
int32_t length =
convertToUint(self->Get(String::New("length")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t element_size =
convertToUint(self->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (args.Length() == 0) {
return ThrowException(
String::New("'set' must have at least one argument"));
}
if (!args[0]->IsObject() ||
!args[0]->ToObject()->Has(String::New("length"))) {
return ThrowException(
String::New("'set' invoked with non-array argument"));
}
Handle<Object> source = args[0]->ToObject();
int32_t source_length =
convertToUint(source->Get(String::New("length")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t offset;
if (args.Length() < 2 || args[1]->IsUndefined()) {
offset = 0;
} else {
offset = convertToUint(args[1], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
}
if (offset + source_length > length) {
return ThrowException(String::New("offset or source length out of bounds"));
}
int32_t source_element_size;
if (source->GetHiddenValue(String::New(kArrayMarkerPropName)).IsEmpty()) {
source_element_size = 0;
} else {
source_element_size =
convertToUint(source->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
}
if (element_size == source_element_size &&
self->GetConstructor()->StrictEquals(source->GetConstructor())) {
// Use memmove on the array buffers.
Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
Handle<Object> source_buffer =
source->Get(String::New("buffer"))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t byteOffset =
convertToUint(self->Get(String::New("byteOffset")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t source_byteOffset =
convertToUint(source->Get(String::New("byteOffset")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
uint8_t* dest = byteOffset + offset * element_size + static_cast<uint8_t*>(
buffer->GetIndexedPropertiesExternalArrayData());
uint8_t* src = source_byteOffset + static_cast<uint8_t*>(
source_buffer->GetIndexedPropertiesExternalArrayData());
memmove(dest, src, source_length * element_size);
} else if (source_element_size == 0) {
// Source is not a typed array, copy element-wise sequentially.
for (int i = 0; i < source_length; ++i) {
self->Set(offset + i, source->Get(i));
if (try_catch.HasCaught()) return try_catch.ReThrow();
}
} else {
// Need to copy element-wise to make the right conversions.
Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
Handle<Object> source_buffer =
source->Get(String::New("buffer"))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (buffer->StrictEquals(source_buffer)) {
// Same backing store, need to handle overlap correctly.
// This gets a bit tricky in the case of different element sizes
// (which, of course, is extremely unlikely to ever occur in practice).
int32_t byteOffset =
convertToUint(self->Get(String::New("byteOffset")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t source_byteOffset =
convertToUint(source->Get(String::New("byteOffset")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
// Copy as much as we can from left to right.
int i = 0;
int32_t next_dest_offset = byteOffset + (offset + 1) * element_size;
int32_t next_src_offset = source_byteOffset + source_element_size;
while (i < length && next_dest_offset <= next_src_offset) {
self->Set(offset + i, source->Get(i));
++i;
next_dest_offset += element_size;
next_src_offset += source_element_size;
}
// Of what's left, copy as much as we can from right to left.
int j = length - 1;
int32_t dest_offset = byteOffset + (offset + j) * element_size;
int32_t src_offset = source_byteOffset + j * source_element_size;
while (j >= i && dest_offset >= src_offset) {
self->Set(offset + j, source->Get(j));
--j;
dest_offset -= element_size;
src_offset -= source_element_size;
}
// There can be at most 8 entries left in the middle that need buffering
// (because the largest element_size is 8 times the smallest).
ASSERT(j+1 - i <= 8);
Handle<Value> temp[8];
for (int k = i; k <= j; ++k) {
temp[k - i] = source->Get(k);
}
for (int k = i; k <= j; ++k) {
self->Set(offset + k, temp[k - i]);
}
} else {
// Different backing stores, safe to copy element-wise sequentially.
for (int i = 0; i < source_length; ++i)
self->Set(offset + i, source->Get(i));
}
}
return Undefined();
}
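Of the three paths above (memmove for same-typed views, plain element copy for ordinary arrays, and the two-pass copy plus small buffer for overlapping views of different types), the same-typed overlapping case is the easiest to observe; a sketch:

var a = new Uint8Array([0, 1, 2, 3, 4, 5]);
a.set(a.subarray(0, 4), 2);  // source and destination share one buffer
// a is now [0, 1, 0, 1, 2, 3]: memmove semantics, the source is read
// as it was before the copy started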
void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
HandleScope scope;
int32_t length =
@ -451,11 +780,6 @@ void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
}
Handle<Value> Shell::ArrayBuffer(const Arguments& args) {
return CreateExternalArrayBuffer(args);
}
Handle<Value> Shell::Int8Array(const Arguments& args) {
return CreateExternalArray(args, v8::kExternalByteArray, sizeof(int8_t));
}
@ -472,8 +796,8 @@ Handle<Value> Shell::Int16Array(const Arguments& args) {
Handle<Value> Shell::Uint16Array(const Arguments& args) {
return CreateExternalArray(
args, kExternalUnsignedShortArray, sizeof(uint16_t));
}
@ -488,18 +812,18 @@ Handle<Value> Shell::Uint32Array(const Arguments& args) {
Handle<Value> Shell::Float32Array(const Arguments& args) {
return CreateExternalArray(
args, kExternalFloatArray, sizeof(float)); // NOLINT
}
Handle<Value> Shell::Float64Array(const Arguments& args) {
return CreateExternalArray(
args, kExternalDoubleArray, sizeof(double)); // NOLINT
}
Handle<Value> Shell::Uint8ClampedArray(const Arguments& args) {
return CreateExternalArray(args, kExternalPixelArray, sizeof(uint8_t));
}
@ -729,7 +1053,7 @@ void Shell::InstallUtilityScript() {
i::Debug* debug = i::Isolate::Current()->debug();
debug->Load();
i::Handle<i::JSObject> js_debug
= i::Handle<i::JSObject>(debug->debug_context()->global_object());
utility_context_->Global()->Set(String::New("$debug"),
Utils::ToLocal(js_debug));
debug->debug_context()->set_security_token(HEAP->undefined_value());
@ -794,6 +1118,27 @@ class BZip2Decompressor : public v8::StartupDataDecompressor {
};
#endif
Handle<FunctionTemplate> Shell::CreateArrayBufferTemplate(
InvocationCallback fun) {
Handle<FunctionTemplate> buffer_template = FunctionTemplate::New(fun);
Local<Template> proto_template = buffer_template->PrototypeTemplate();
proto_template->Set(String::New("slice"),
FunctionTemplate::New(ArrayBufferSlice));
return buffer_template;
}
Handle<FunctionTemplate> Shell::CreateArrayTemplate(InvocationCallback fun) {
Handle<FunctionTemplate> array_template = FunctionTemplate::New(fun);
Local<Template> proto_template = array_template->PrototypeTemplate();
proto_template->Set(String::New("set"), FunctionTemplate::New(ArraySet));
proto_template->Set(String::New("subarray"),
FunctionTemplate::New(ArraySubArray));
return array_template;
}
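The prototype methods installed by these two templates are then visible on every instance; a quick check from script:

var a = new Uint8Array(4);
typeof a.set;       // "function"
typeof a.subarray;  // "function"
var buf = new ArrayBuffer(4);
typeof buf.slice;   // "function"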
Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
Handle<ObjectTemplate> global_template = ObjectTemplate::New();
global_template->Set(String::New("print"), FunctionTemplate::New(Print));
@ -812,26 +1157,28 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
FunctionTemplate::New(DisableProfiler));
// Bind the handlers for external arrays.
PropertyAttribute attr =
static_cast<PropertyAttribute>(ReadOnly | DontDelete);
global_template->Set(String::New("ArrayBuffer"),
FunctionTemplate::New(ArrayBuffer));
CreateArrayBufferTemplate(ArrayBuffer), attr);
global_template->Set(String::New("Int8Array"),
FunctionTemplate::New(Int8Array));
CreateArrayTemplate(Int8Array), attr);
global_template->Set(String::New("Uint8Array"),
FunctionTemplate::New(Uint8Array));
CreateArrayTemplate(Uint8Array), attr);
global_template->Set(String::New("Int16Array"),
FunctionTemplate::New(Int16Array));
CreateArrayTemplate(Int16Array), attr);
global_template->Set(String::New("Uint16Array"),
FunctionTemplate::New(Uint16Array));
CreateArrayTemplate(Uint16Array), attr);
global_template->Set(String::New("Int32Array"),
FunctionTemplate::New(Int32Array));
CreateArrayTemplate(Int32Array), attr);
global_template->Set(String::New("Uint32Array"),
FunctionTemplate::New(Uint32Array));
CreateArrayTemplate(Uint32Array), attr);
global_template->Set(String::New("Float32Array"),
FunctionTemplate::New(Float32Array));
CreateArrayTemplate(Float32Array), attr);
global_template->Set(String::New("Float64Array"),
FunctionTemplate::New(Float64Array));
global_template->Set(String::New("PixelArray"),
FunctionTemplate::New(PixelArray));
CreateArrayTemplate(Float64Array), attr);
global_template->Set(String::New("Uint8ClampedArray"),
CreateArrayTemplate(Uint8ClampedArray), attr);
#ifdef LIVE_OBJECT_LIST
global_template->Set(String::New("lol_is_enabled"), True());
@ -864,7 +1211,7 @@ void Shell::Initialize() {
// Set up counters
if (i::StrLength(i::FLAG_map_counters) != 0)
MapCounters(i::FLAG_map_counters);
if (i::FLAG_dump_counters || i::FLAG_track_gc_object_stats) {
V8::SetCounterFunction(LookupCounter);
V8::SetCreateHistogramFunction(CreateHistogram);
V8::SetAddHistogramSampleFunction(AddHistogramSample);
@ -954,20 +1301,24 @@ void Shell::OnExit() {
counters[j].key = i.CurrentKey();
}
qsort(counters, number_of_counters, sizeof(counters[0]), CompareKeys);
printf("+--------------------------------------------+-------------+\n");
printf("| Name | Value |\n");
printf("+--------------------------------------------+-------------+\n");
printf("+----------------------------------------------------------------+"
"-------------+\n");
printf("| Name |"
" Value |\n");
printf("+----------------------------------------------------------------+"
"-------------+\n");
for (j = 0; j < number_of_counters; j++) {
Counter* counter = counters[j].counter;
const char* key = counters[j].key;
if (counter->is_histogram()) {
printf("| c:%-40s | %11i |\n", key, counter->count());
printf("| t:%-40s | %11i |\n", key, counter->sample_total());
printf("| c:%-60s | %11i |\n", key, counter->count());
printf("| t:%-60s | %11i |\n", key, counter->sample_total());
} else {
printf("| %-42s | %11i |\n", key, counter->count());
printf("| %-62s | %11i |\n", key, counter->count());
}
}
printf("+--------------------------------------------+-------------+\n");
printf("+----------------------------------------------------------------+"
"-------------+\n");
delete [] counters;
}
delete counters_file_;
@ -1238,6 +1589,11 @@ void SourceGroup::ExecuteInThread() {
Execute();
}
context.Dispose();
if (Shell::options.send_idle_notification) {
const int kLongIdlePauseInMs = 1000;
V8::ContextDisposedNotification();
V8::IdleNotification(kLongIdlePauseInMs);
}
}
if (done_semaphore_ != NULL) done_semaphore_->Signal();
} while (!Shell::options.last_run);
@ -1283,6 +1639,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--test") == 0) {
options.test_shell = true;
argv[i] = NULL;
} else if (strcmp(argv[i], "--send-idle-notification") == 0) {
options.send_idle_notification = true;
argv[i] = NULL;
} else if (strcmp(argv[i], "--preemption") == 0) {
#ifdef V8_SHARED
printf("D8 with shared library does not support multi-threading\n");
@ -1439,13 +1798,11 @@ int Shell::RunMain(int argc, char* argv[]) {
}
if (!options.last_run) {
context.Dispose();
if (options.send_idle_notification) {
const int kLongIdlePauseInMs = 1000;
V8::ContextDisposedNotification();
V8::IdleNotification(kLongIdlePauseInMs);
}
}
#ifndef V8_SHARED

24
deps/v8/src/d8.h

@ -31,7 +31,7 @@
#ifndef V8_SHARED
#include "allocation.h"
#include "hashmap.h"
#include "smart-array-pointer.h"
#include "smart-pointers.h"
#include "v8.h"
#else
#include "../include/v8.h"
@ -67,7 +67,7 @@ class CounterCollection {
CounterCollection();
Counter* GetNextCounter();
private:
static const unsigned kMaxCounters = 512;
uint32_t magic_number_;
uint32_t max_counters_;
uint32_t max_name_size_;
@ -227,6 +227,7 @@ class ShellOptions {
#endif // V8_SHARED
script_executed(false),
last_run(true),
send_idle_notification(false),
stress_opt(false),
stress_deopt(false),
interactive_shell(false),
@ -249,6 +250,7 @@ class ShellOptions {
#endif // V8_SHARED
bool script_executed;
bool last_run;
bool send_idle_notification;
bool stress_opt;
bool stress_deopt;
bool interactive_shell;
@ -322,7 +324,10 @@ class Shell : public i::AllStatic {
static Handle<Value> Uint32Array(const Arguments& args);
static Handle<Value> Float32Array(const Arguments& args);
static Handle<Value> Float64Array(const Arguments& args);
static Handle<Value> Uint8ClampedArray(const Arguments& args);
static Handle<Value> ArrayBufferSlice(const Arguments& args);
static Handle<Value> ArraySubArray(const Arguments& args);
static Handle<Value> ArraySet(const Arguments& args);
// The OS object on the global object contains methods for performing
// operating system calls:
//
@ -383,8 +388,17 @@ class Shell : public i::AllStatic {
static void RunShell();
static bool SetOptions(int argc, char* argv[]);
static Handle<ObjectTemplate> CreateGlobalTemplate();
static Handle<Value> CreateExternalArrayBuffer(const Arguments& args);
static Handle<FunctionTemplate> CreateArrayBufferTemplate(InvocationCallback);
static Handle<FunctionTemplate> CreateArrayTemplate(InvocationCallback);
static Handle<Value> CreateExternalArrayBuffer(Handle<Object> buffer,
int32_t size);
static Handle<Object> CreateExternalArray(Handle<Object> array,
Handle<Object> buffer,
ExternalArrayType type,
int32_t length,
int32_t byteLength,
int32_t byteOffset,
int32_t element_size);
static Handle<Value> CreateExternalArray(const Arguments& args,
ExternalArrayType type,
int32_t element_size);

41
deps/v8/src/date.js

@ -318,7 +318,6 @@ function DateNow() {
// ECMA 262 - 15.9.5.2
function DateToString() {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this)
if (NUMBER_IS_NAN(t)) return kInvalidDate;
var time_zone_string = LocalTimezoneString(this)
@ -328,7 +327,6 @@ function DateToString() {
// ECMA 262 - 15.9.5.3
function DateToDateString() {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
return DateString(this);
@ -337,7 +335,6 @@ function DateToDateString() {
// ECMA 262 - 15.9.5.4
function DateToTimeString() {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
var time_zone_string = LocalTimezoneString(this);
@ -353,7 +350,6 @@ function DateToLocaleString() {
// ECMA 262 - 15.9.5.6
function DateToLocaleDateString() {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
return LongDateString(this);
@ -362,7 +358,6 @@ function DateToLocaleDateString() {
// ECMA 262 - 15.9.5.7
function DateToLocaleTimeString() {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
return TimeString(this);
@ -371,133 +366,114 @@ function DateToLocaleTimeString() {
// ECMA 262 - 15.9.5.8
function DateValueOf() {
CHECK_DATE(this);
return UTC_DATE_VALUE(this);
}
// ECMA 262 - 15.9.5.9
function DateGetTime() {
CHECK_DATE(this);
return UTC_DATE_VALUE(this);
}
// ECMA 262 - 15.9.5.10
function DateGetFullYear() {
CHECK_DATE(this);
return LOCAL_YEAR(this);
}
// ECMA 262 - 15.9.5.11
function DateGetUTCFullYear() {
CHECK_DATE(this);
return UTC_YEAR(this);
}
// ECMA 262 - 15.9.5.12
function DateGetMonth() {
CHECK_DATE(this);
return LOCAL_MONTH(this);
}
// ECMA 262 - 15.9.5.13
function DateGetUTCMonth() {
CHECK_DATE(this);
return UTC_MONTH(this);
}
// ECMA 262 - 15.9.5.14
function DateGetDate() {
CHECK_DATE(this);
return LOCAL_DAY(this);
}
// ECMA 262 - 15.9.5.15
function DateGetUTCDate() {
CHECK_DATE(this);
return UTC_DAY(this);
}
// ECMA 262 - 15.9.5.16
function DateGetDay() {
CHECK_DATE(this);
return LOCAL_WEEKDAY(this);
}
// ECMA 262 - 15.9.5.17
function DateGetUTCDay() {
CHECK_DATE(this);
return UTC_WEEKDAY(this);
}
// ECMA 262 - 15.9.5.18
function DateGetHours() {
CHECK_DATE(this);
return LOCAL_HOUR(this);
}
// ECMA 262 - 15.9.5.19
function DateGetUTCHours() {
CHECK_DATE(this);
return UTC_HOUR(this);
}
// ECMA 262 - 15.9.5.20
function DateGetMinutes() {
CHECK_DATE(this);
return LOCAL_MIN(this);
}
// ECMA 262 - 15.9.5.21
function DateGetUTCMinutes() {
CHECK_DATE(this);
return UTC_MIN(this);
}
// ECMA 262 - 15.9.5.22
function DateGetSeconds() {
CHECK_DATE(this);
return LOCAL_SEC(this);
}
// ECMA 262 - 15.9.5.23
function DateGetUTCSeconds() {
CHECK_DATE(this);
return UTC_SEC(this)
}
// ECMA 262 - 15.9.5.24
function DateGetMilliseconds() {
CHECK_DATE(this);
return LOCAL_MS(this);
}
// ECMA 262 - 15.9.5.25
function DateGetUTCMilliseconds() {
CHECK_DATE(this);
return UTC_MS(this);
}
// ECMA 262 - 15.9.5.26
function DateGetTimezoneOffset() {
CHECK_DATE(this);
return TIMEZONE_OFFSET(this);
}
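These getters validate the receiver with CHECK_DATE alone, so calling them on a non-Date throws instead of reading through an arbitrary object; illustratively:

Date.prototype.getHours.call(new Date());  // a number
Date.prototype.getHours.call({});          // throws TypeError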
@ -512,7 +488,6 @@ function DateSetTime(ms) {
// ECMA 262 - 15.9.5.28
function DateSetMilliseconds(ms) {
CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
ms = ToNumber(ms);
var time = MakeTime(LOCAL_HOUR(this), LOCAL_MIN(this), LOCAL_SEC(this), ms);
@ -522,7 +497,6 @@ function DateSetMilliseconds(ms) {
// ECMA 262 - 15.9.5.29
function DateSetUTCMilliseconds(ms) {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
ms = ToNumber(ms);
var time = MakeTime(UTC_HOUR(this),
@ -535,7 +509,6 @@ function DateSetUTCMilliseconds(ms) {
// ECMA 262 - 15.9.5.30
function DateSetSeconds(sec, ms) {
CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
sec = ToNumber(sec);
ms = %_ArgumentsLength() < 2 ? LOCAL_MS(this) : ToNumber(ms);
@ -546,7 +519,6 @@ function DateSetSeconds(sec, ms) {
// ECMA 262 - 15.9.5.31
function DateSetUTCSeconds(sec, ms) {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
sec = ToNumber(sec);
ms = %_ArgumentsLength() < 2 ? UTC_MS(this) : ToNumber(ms);
@ -557,7 +529,6 @@ function DateSetUTCSeconds(sec, ms) {
// ECMA 262 - 15.9.5.33
function DateSetMinutes(min, sec, ms) {
CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
min = ToNumber(min);
var argc = %_ArgumentsLength();
@ -570,7 +541,6 @@ function DateSetMinutes(min, sec, ms) {
// ECMA 262 - 15.9.5.34
function DateSetUTCMinutes(min, sec, ms) {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
min = ToNumber(min);
var argc = %_ArgumentsLength();
@ -583,7 +553,6 @@ function DateSetUTCMinutes(min, sec, ms) {
// ECMA 262 - 15.9.5.35
function DateSetHours(hour, min, sec, ms) {
CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
hour = ToNumber(hour);
var argc = %_ArgumentsLength();
@ -597,7 +566,6 @@ function DateSetHours(hour, min, sec, ms) {
// ECMA 262 - 15.9.5.34
function DateSetUTCHours(hour, min, sec, ms) {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
hour = ToNumber(hour);
var argc = %_ArgumentsLength();
@ -611,7 +579,6 @@ function DateSetUTCHours(hour, min, sec, ms) {
// ECMA 262 - 15.9.5.36
function DateSetDate(date) {
CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
date = ToNumber(date);
var day = MakeDay(LOCAL_YEAR(this), LOCAL_MONTH(this), date);
@ -621,7 +588,6 @@ function DateSetDate(date) {
// ECMA 262 - 15.9.5.37
function DateSetUTCDate(date) {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
date = ToNumber(date);
var day = MakeDay(UTC_YEAR(this), UTC_MONTH(this), date);
@ -631,7 +597,6 @@ function DateSetUTCDate(date) {
// ECMA 262 - 15.9.5.38
function DateSetMonth(month, date) {
CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? LOCAL_DAY(this) : ToNumber(date);
@ -642,7 +607,6 @@ function DateSetMonth(month, date) {
// ECMA 262 - 15.9.5.39
function DateSetUTCMonth(month, date) {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? UTC_DAY(this) : ToNumber(date);
@ -653,7 +617,6 @@ function DateSetUTCMonth(month, date) {
// ECMA 262 - 15.9.5.40
function DateSetFullYear(year, month, date) {
CHECK_DATE(this);
var t = LOCAL_DATE_VALUE(this);
year = ToNumber(year);
var argc = %_ArgumentsLength();
@ -674,7 +637,6 @@ function DateSetFullYear(year, month, date) {
// ECMA 262 - 15.9.5.41
function DateSetUTCFullYear(year, month, date) {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
year = ToNumber(year);
var argc = %_ArgumentsLength();
@ -695,7 +657,6 @@ function DateSetUTCFullYear(year, month, date) {
// ECMA 262 - 15.9.5.42
function DateToUTCString() {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
// Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT
@ -709,7 +670,6 @@ function DateToUTCString() {
// ECMA 262 - B.2.4
function DateGetYear() {
CHECK_DATE(this);
return LOCAL_YEAR(this) - 1900;
}
@ -757,7 +717,6 @@ function PadInt(n, digits) {
// ECMA 262 - 15.9.5.43
function DateToISOString() {
CHECK_DATE(this);
var t = UTC_DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) throw MakeRangeError("invalid_time_value", []);
var year = this.getUTCFullYear();

38
deps/v8/src/debug-debugger.js

@ -1449,6 +1449,8 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
this.profileRequest_(request, response);
} else if (request.command == 'changelive') {
this.changeLiveRequest_(request, response);
} else if (request.command == 'restartframe') {
this.restartFrameRequest_(request, response);
} else if (request.command == 'flags') {
this.debuggerFlagsRequest_(request, response);
} else if (request.command == 'v8flags') {
@ -2076,7 +2078,7 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
// Global evaluate.
if (global) {
// Evaluate in the native context.
response.body = this.exec_state_.evaluateGlobal(
expression, Boolean(disable_break), additional_context_object);
return;
@ -2358,9 +2360,6 @@ DebugCommandProcessor.prototype.profileRequest_ = function(request, response) {
DebugCommandProcessor.prototype.changeLiveRequest_ = function(
request, response) {
if (!request.arguments) {
return response.failed('Missing arguments');
}
@ -2398,6 +2397,37 @@ DebugCommandProcessor.prototype.changeLiveRequest_ = function(
};
DebugCommandProcessor.prototype.restartFrameRequest_ = function(
request, response) {
if (!request.arguments) {
return response.failed('Missing arguments');
}
var frame = request.arguments.frame;
// No frames to evaluate in frame.
if (this.exec_state_.frameCount() == 0) {
return response.failed('No frames');
}
var frame_mirror;
// Check whether a frame was specified.
if (!IS_UNDEFINED(frame)) {
var frame_number = %ToNumber(frame);
if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
return response.failed('Invalid frame "' + frame + '"');
}
// Restart specified frame.
frame_mirror = this.exec_state_.frame(frame_number);
} else {
// Restart selected frame.
frame_mirror = this.exec_state_.frame();
}
var result_description = Debug.LiveEdit.RestartFrame(frame_mirror);
response.body = {result: result_description};
};
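For reference, a 'restartframe' request in the debugger JSON protocol handled above looks roughly like this (seq value illustrative; omit arguments.frame to restart the currently selected frame):

var request = {
  seq: 42,
  type: 'request',
  command: 'restartframe',
  arguments: { frame: 0 }  // index checked against frameCount()
};
// On success, response.body.result holds the description returned by
// Debug.LiveEdit.RestartFrame.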
DebugCommandProcessor.prototype.debuggerFlagsRequest_ = function(request,
response) {
// Check for legal request.

335
deps/v8/src/debug.cc

@ -97,8 +97,8 @@ static v8::Handle<v8::Context> GetDebugEventContext(Isolate* isolate) {
// Isolate::context() may have been NULL when "script collected" event
// occurred.
if (context.is_null()) return v8::Local<v8::Context>();
Handle<Context> native_context(context->native_context());
return v8::Utils::ToLocal(native_context);
}
@ -698,7 +698,7 @@ void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data) {
// We need to clear all breakpoints associated with the function to restore
// original code and avoid patching the code twice later because
// the function will live in the heap until next gc, and can be found by
// Debug::FindSharedFunctionInfoInScript.
BreakLocationIterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
it.ClearAllDebugBreak();
debug->RemoveDebugInfo(node->debug_info());
@ -745,12 +745,15 @@ bool Debug::CompileDebuggerScript(int index) {
isolate->bootstrapper()->NativesSourceLookup(index);
Vector<const char> name = Natives::GetScriptName(index);
Handle<String> script_name = factory->NewStringFromAscii(name);
Handle<Context> context = isolate->native_context();
// Compile the script.
Handle<SharedFunctionInfo> function_info;
function_info = Compiler::Compile(source_code,
script_name,
0, 0,
context,
NULL, NULL,
Handle<String>::null(),
NATIVES_CODE);
@ -762,13 +765,12 @@ bool Debug::CompileDebuggerScript(int index) {
}
// Execute the shared function in the debugger context.
bool caught_exception;
Handle<JSFunction> function =
factory->NewFunctionFromSharedFunctionInfo(function_info, context);
Handle<Object> exception =
Execution::TryCall(function, Handle<Object>(context->global_object()),
0, NULL, &caught_exception);
// Check for caught exceptions.
@ -829,7 +831,7 @@ bool Debug::Load() {
// Expose the builtins object in the debugger context.
Handle<String> key = isolate_->factory()->LookupAsciiSymbol("builtins");
Handle<GlobalObject> global = Handle<GlobalObject>(context->global_object());
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate_,
JSReceiver::SetProperty(global, key, Handle<Object>(global->builtins()),
@ -992,12 +994,11 @@ Object* Debug::Break(Arguments args) {
// Check that we indeed found the frame we are looking for.
CHECK(!it.done() && (it.frame()->fp() == thread_local_.last_fp_));
if (step_count > 1) {
// Save old count and action to continue stepping after StepOut.
thread_local_.queued_step_count_ = step_count - 1;
}
// Set up for StepOut to reach target frame.
step_action = StepOut;
step_count = count;
}
@ -1096,7 +1097,7 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
factory->LookupAsciiSymbol("IsBreakPointTriggered");
Handle<JSFunction> check_break_point =
Handle<JSFunction>(JSFunction::cast(
debug_context()->global_object()->GetPropertyNoExceptionThrown(
*is_break_point_triggered_symbol)));
// Get the break id as an object.
@ -1136,14 +1137,16 @@ Handle<DebugInfo> Debug::GetDebugInfo(Handle<SharedFunctionInfo> shared) {
}
void Debug::SetBreakPoint(Handle<JSFunction> function,
Handle<Object> break_point_object,
int* source_position) {
HandleScope scope(isolate_);
PrepareForBreakPoints();
// Make sure the function is compiled and has set up the debug info.
Handle<SharedFunctionInfo> shared(function->shared());
if (!EnsureDebugInfo(shared, function)) {
// Return if retrieving debug info failed.
return;
}
@ -1164,6 +1167,50 @@ void Debug::SetBreakPoint(Handle<SharedFunctionInfo> shared,
}
bool Debug::SetBreakPointForScript(Handle<Script> script,
Handle<Object> break_point_object,
int* source_position) {
HandleScope scope(isolate_);
PrepareForBreakPoints();
// Obtain shared function info for the function.
Object* result = FindSharedFunctionInfoInScript(script, *source_position);
if (result->IsUndefined()) return false;
// Make sure the function has set up the debug info.
Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result));
if (!EnsureDebugInfo(shared, Handle<JSFunction>::null())) {
// Return if retrieving debug info failed.
return false;
}
// Find position within function. The script position might be before the
// source position of the first function.
int position;
if (shared->start_position() > *source_position) {
position = 0;
} else {
position = *source_position - shared->start_position();
}
Handle<DebugInfo> debug_info = GetDebugInfo(shared);
// Source positions start at zero.
ASSERT(position >= 0);
// Find the break point and change it.
BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
it.FindBreakLocationFromPosition(position);
it.SetBreakPoint(break_point_object);
*source_position = it.position() + shared->start_position();
// At least one active break point now.
ASSERT(debug_info->GetBreakPointCount() > 0);
return true;
}
void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
HandleScope scope(isolate_);
@ -1215,10 +1262,12 @@ void Debug::ClearAllBreakPoints() {
}
void Debug::FloodWithOneShot(Handle<JSFunction> function) {
PrepareForBreakPoints();
// Make sure the function is compiled and has set up the debug info.
Handle<SharedFunctionInfo> shared(function->shared());
if (!EnsureDebugInfo(shared, function)) {
// Return if we failed to retrieve the debug info.
return;
}
@ -1238,8 +1287,8 @@ void Debug::FloodBoundFunctionWithOneShot(Handle<JSFunction> function) {
if (!bindee.is_null() && bindee->IsJSFunction() &&
!JSFunction::cast(*bindee)->IsBuiltin()) {
Handle<JSFunction> bindee_function(JSFunction::cast(*bindee));
Debug::FloodWithOneShot(bindee_function);
}
}
@ -1254,11 +1303,9 @@ void Debug::FloodHandlerWithOneShot() {
for (JavaScriptFrameIterator it(isolate_, id); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
if (frame->HasHandler()) {
JSFunction* function = JSFunction::cast(frame->function());
FloodWithOneShot(Handle<JSFunction>(function));
return;
}
}
@ -1325,14 +1372,14 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
frames_it.Advance();
// Fill the function to return to with one-shot break points.
JSFunction* function = JSFunction::cast(frames_it.frame()->function());
FloodWithOneShot(Handle<JSFunction>(function));
return;
}
// Get the debug info (create it if it does not exist).
Handle<JSFunction> function(JSFunction::cast(frame->function()));
Handle<SharedFunctionInfo> shared(function->shared());
if (!EnsureDebugInfo(shared, function)) {
// Return if ensuring debug info failed.
return;
}
@ -1402,7 +1449,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
if (!frames_it.done()) {
// Fill the function to return to with one-shot break points.
JSFunction* function = JSFunction::cast(frames_it.frame()->function());
FloodWithOneShot(Handle<JSFunction>(function));
// Set target frame pointer.
ActivateStepOut(frames_it.frame());
}
@ -1412,7 +1459,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// Step next or step min.
// Fill the current function with one-shot break points.
FloodWithOneShot(function);
// Remember source position and frame to handle step next.
thread_local_.last_statement_position_ =
@ -1424,9 +1471,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
if (is_at_restarted_function) {
Handle<JSFunction> restarted_function(
JSFunction::cast(*thread_local_.restarter_frame_function_pointer_));
FloodWithOneShot(restarted_function);
} else if (!call_function_stub.is_null()) {
// If it's CallFunction stub ensure target function is compiled and flood
// it with one shot breakpoints.
@ -1468,7 +1513,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
} else if (!js_function->IsBuiltin()) {
// Don't step into builtins.
// It will also compile target function if it's not compiled yet.
FloodWithOneShot(js_function);
}
}
}
@ -1477,7 +1522,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// a call target as the function called might be a native function for
// which step in will not stop. It also prepares for stepping in
// getters/setters.
FloodWithOneShot(function);
if (is_load_or_store) {
// Remember source position and frame to handle step in getter/setter. If
@ -1677,12 +1722,11 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
// function.
if (!holder.is_null() && holder->IsJSFunction() &&
!JSFunction::cast(*holder)->IsBuiltin()) {
Handle<JSFunction> js_function = Handle<JSFunction>::cast(holder);
Debug::FloodWithOneShot(js_function);
}
} else {
Debug::FloodWithOneShot(function);
}
}
}
@ -1762,7 +1806,7 @@ static bool CompileFullCodeForDebugging(Handle<JSFunction> function,
Handle<Code> current_code) {
ASSERT(!current_code->has_debug_break_slots());
CompilationInfoWithZone info(function);
info.MarkCompilingForDebugging(current_code);
ASSERT(!info.shared_info()->is_compiled());
ASSERT(!info.isolate()->has_pending_exception());
@ -1834,29 +1878,48 @@ static void RedirectActivationsToRecompiledCodeOnThread(
continue;
}
// Iterate over the RelocInfo in the original code to compute the sum of the
// constant pools sizes. (See Assembler::CheckConstPool())
// Note that this is only useful for architectures using constant pools.
int constpool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL);
int frame_const_pool_size = 0;
for (RelocIterator it(*frame_code, constpool_mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
if (info->pc() >= frame->pc()) break;
frame_const_pool_size += static_cast<int>(info->data());
}
intptr_t frame_offset =
frame->pc() - frame_code->instruction_start() - frame_const_pool_size;
// Iterate over the RelocInfo for new code to find the number of bytes
// generated for debug slots and constant pools.
int debug_break_slot_bytes = 0;
int new_code_const_pool_size = 0;
int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
RelocInfo::ModeMask(RelocInfo::CONST_POOL);
for (RelocIterator it(*new_code, mask); !it.done(); it.next()) {
// Check if the pc in the new code with debug break
// slots is before this slot.
RelocInfo* info = it.rinfo();
intptr_t new_offset = info->pc() - new_code->instruction_start() -
new_code_const_pool_size - debug_break_slot_bytes;
if (new_offset >= frame_offset) {
break;
}
if (RelocInfo::IsDebugBreakSlot(info->rmode())) {
debug_break_slot_bytes += Assembler::kDebugBreakSlotLength;
} else {
ASSERT(RelocInfo::IsConstPool(info->rmode()));
// The size of the constant pool is encoded in the data.
new_code_const_pool_size += static_cast<int>(info->data());
}
}
// Compute the equivalent pc in the new code.
byte* new_pc = new_code->instruction_start() + frame_offset +
debug_break_slot_bytes + new_code_const_pool_size;
if (FLAG_trace_deopt) {
PrintF("Replacing code %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
"with %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
@ -1873,14 +1936,12 @@ static void RedirectActivationsToRecompiledCodeOnThread(
new_code->instruction_size(),
new_code->instruction_size(),
reinterpret_cast<intptr_t>(frame->pc()),
reinterpret_cast<intptr_t>(new_pc));
}
frame->set_pc(new_pc);
}
}
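A worked example of the offset arithmetic above (all sizes illustrative): if the frame's pc sits 120 bytes past frame_code->instruction_start() and one 16-byte constant pool was emitted before it, frame_offset is 120 - 16 = 104. If the new code contains, say, two debug break slots of 4 bytes each and one 16-byte constant pool before the equivalent point, the patched return address becomes new_code->instruction_start() + 104 + 8 + 16.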
@ -1922,6 +1983,9 @@ void Debug::PrepareForBreakPoints() {
Handle<Code> lazy_compile =
Handle<Code>(isolate_->builtins()->builtin(Builtins::kLazyCompile));
// There will be at least one break point when we are done.
has_break_points_ = true;
// Keep the list of activated functions in a handlified list as it
// is used both in GC and non-GC code.
List<Handle<JSFunction> > active_functions(100);
@ -1999,7 +2063,6 @@ void Debug::PrepareForBreakPoints() {
// Try to compile the full code with debug break slots. If it
// fails just keep the current code.
Handle<Code> current_code(function->shared()->code());
shared->set_code(*lazy_compile);
bool prev_force_debugger_active =
isolate_->debugger()->force_debugger_active();
@ -2028,16 +2091,130 @@ void Debug::PrepareForBreakPoints() {
}
Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
int position) {
// Iterate the heap looking for SharedFunctionInfo generated from the
// script. The inner most SharedFunctionInfo containing the source position
// for the requested break point is found.
// NOTE: This might require several heap iterations. If the SharedFunctionInfo
// which is found is not compiled it is compiled and the heap is iterated
// again as the compilation might create inner functions from the newly
// compiled function and the actual requested break point might be in one of
// these functions.
// NOTE: The below fix-point iteration depends on all functions that cannot be
// compiled lazily without a context to not be compiled at all. Compilation
// will be triggered at points where we do not need a context.
bool done = false;
// The current candidate for the source position:
int target_start_position = RelocInfo::kNoPosition;
Handle<JSFunction> target_function;
Handle<SharedFunctionInfo> target;
while (!done) {
{ // Extra scope for iterator and no-allocation.
isolate_->heap()->EnsureHeapIsIterable();
AssertNoAllocation no_alloc_during_heap_iteration;
HeapIterator iterator;
for (HeapObject* obj = iterator.next();
obj != NULL; obj = iterator.next()) {
bool found_next_candidate = false;
Handle<JSFunction> function;
Handle<SharedFunctionInfo> shared;
if (obj->IsJSFunction()) {
function = Handle<JSFunction>(JSFunction::cast(obj));
shared = Handle<SharedFunctionInfo>(function->shared());
ASSERT(shared->allows_lazy_compilation() || shared->is_compiled());
found_next_candidate = true;
} else if (obj->IsSharedFunctionInfo()) {
shared = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(obj));
// Skip functions that we cannot compile lazily without a context,
// which is not available here, because there is no closure.
found_next_candidate = shared->is_compiled() ||
shared->allows_lazy_compilation_without_context();
}
if (!found_next_candidate) continue;
if (shared->script() == *script) {
// If the SharedFunctionInfo found has the requested script data and
// contains the source position it is a candidate.
int start_position = shared->function_token_position();
if (start_position == RelocInfo::kNoPosition) {
start_position = shared->start_position();
}
if (start_position <= position &&
position <= shared->end_position()) {
// If there is no candidate or this function is within the current
// candidate this is the new candidate.
if (target.is_null()) {
target_start_position = start_position;
target_function = function;
target = shared;
} else {
if (target_start_position == start_position &&
shared->end_position() == target->end_position()) {
// If a top-level function contains only one function
// declaration the source for the top-level and the function
// is the same. In that case prefer the non top-level function.
if (!shared->is_toplevel()) {
target_start_position = start_position;
target_function = function;
target = shared;
}
} else if (target_start_position <= start_position &&
shared->end_position() <= target->end_position()) {
// This containment check includes equality as a function
// inside a top-level function can share either start or end
// position with the top-level function.
target_start_position = start_position;
target_function = function;
target = shared;
}
}
}
}
} // End for loop.
} // End no-allocation scope.
if (target.is_null()) {
return isolate_->heap()->undefined_value();
}
// There will be at least one break point when we are done.
has_break_points_ = true;
// If the candidate found is compiled we are done.
done = target->is_compiled();
if (!done) {
// If the candidate is not compiled, compile it to reveal any inner
// functions which might contain the requested source position. This
// will compile all inner functions that cannot be compiled without a
// context, because Compiler::BuildFunctionInfo checks whether the
// debugger is active.
if (target_function.is_null()) {
SharedFunctionInfo::CompileLazy(target, KEEP_EXCEPTION);
} else {
JSFunction::CompileLazy(target_function, KEEP_EXCEPTION);
}
}
} // End while loop.
return *target;
}
// Ensures the debug information is present for shared.
bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
Handle<JSFunction> function) {
// Return if we already have the debug info for shared.
if (HasDebugInfo(shared)) {
ASSERT(shared->is_compiled());
return true;
}
// There will be at least one break point when we are done.
has_break_points_ = true;
// Ensure function is compiled. Return false if this failed.
if (!function.is_null() &&
!JSFunction::EnsureCompiled(function, CLEAR_EXCEPTION)) {
return false;
}
@ -2049,9 +2226,6 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared) {
node->set_next(debug_info_list_);
debug_info_list_ = node;
return true;
}
@ -2093,9 +2267,9 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
PrepareForBreakPoints();
// Get the executing function in which the debug break occurred.
Handle<JSFunction> function(JSFunction::cast(frame->function()));
Handle<SharedFunctionInfo> shared(function->shared());
if (!EnsureDebugInfo(shared, function)) {
// Return if we failed to retrieve the debug info.
return;
}
@ -2185,9 +2359,9 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
PrepareForBreakPoints();
// Get the executing function in which the debug break occurred.
Handle<JSFunction> function(JSFunction::cast(frame->function()));
Handle<SharedFunctionInfo> shared(function->shared());
if (!EnsureDebugInfo(shared, function)) {
// Return if we failed to retrieve the debug info.
return false;
}
@ -2218,7 +2392,9 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
FrameDropMode mode,
Object** restarter_frame_function_pointer) {
if (mode != CURRENTLY_SET_MODE) {
thread_local_.frame_drop_mode_ = mode;
}
thread_local_.break_frame_id_ = new_break_frame_id;
thread_local_.restarter_frame_function_pointer_ =
restarter_frame_function_pointer;
@ -2233,7 +2409,7 @@ const int Debug::FramePaddingLayout::kPaddingValue = kInitialSize + 1;
bool Debug::IsDebugGlobal(GlobalObject* global) {
return IsLoaded() && global == debug_context()->global_object();
}
@ -2245,12 +2421,13 @@ void Debug::ClearMirrorCache() {
// Clear the mirror cache.
Handle<String> function_name =
isolate_->factory()->LookupSymbol(CStrVector("ClearMirrorCache"));
Handle<Object> fun(
Isolate::Current()->global_object()->GetPropertyNoExceptionThrown(
*function_name));
ASSERT(fun->IsJSFunction());
bool caught_exception;
Execution::TryCall(Handle<JSFunction>::cast(fun),
Handle<JSObject>(Debug::debug_context()->global_object()),
0, NULL, &caught_exception);
}
@ -2337,6 +2514,7 @@ Debugger::Debugger(Isolate* isolate)
event_listener_data_(Handle<Object>()),
compiling_natives_(false),
is_loading_debugger_(false),
live_edit_enabled_(true),
never_unload_debugger_(false),
force_debugger_active_(false),
message_handler_(NULL),
@ -2372,7 +2550,8 @@ Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
Handle<String> constructor_str =
isolate_->factory()->LookupSymbol(constructor_name);
Handle<Object> constructor(
isolate_->global_object()->GetPropertyNoExceptionThrown(
*constructor_str));
ASSERT(constructor->IsJSFunction());
if (!constructor->IsJSFunction()) {
*caught_exception = true;
@ -2380,7 +2559,7 @@ Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
}
Handle<Object> js_object = Execution::TryCall(
Handle<JSFunction>::cast(constructor),
Handle<JSObject>(isolate_->debug()->debug_context()->global_object()),
argc,
argv,
caught_exception);
@ -2602,7 +2781,7 @@ void Debugger::OnAfterCompile(Handle<Script> script,
Handle<String> update_script_break_points_symbol =
isolate_->factory()->LookupAsciiSymbol("UpdateScriptBreakPoints");
Handle<Object> update_script_break_points =
Handle<Object>(debug->debug_context()->global_object()->
GetPropertyNoExceptionThrown(*update_script_break_points_symbol));
if (!update_script_break_points->IsJSFunction()) {
return;
@ -2758,7 +2937,7 @@ void Debugger::CallJSEventCallback(v8::DebugEvent event,
event_listener_data_ };
bool caught_exception;
Execution::TryCall(fun,
isolate_->global_object(),
ARRAY_SIZE(argv),
argv,
&caught_exception);

23
deps/v8/src/debug.h

@ -233,12 +233,15 @@ class Debug {
void Iterate(ObjectVisitor* v);
Object* Break(Arguments args);
void SetBreakPoint(Handle<JSFunction> function,
Handle<Object> break_point_object,
int* source_position);
bool SetBreakPointForScript(Handle<Script> script,
Handle<Object> break_point_object,
int* source_position);
void ClearBreakPoint(Handle<Object> break_point_object);
void ClearAllBreakPoints();
void FloodWithOneShot(Handle<JSFunction> function);
void FloodBoundFunctionWithOneShot(Handle<JSFunction> function);
void FloodHandlerWithOneShot();
void ChangeBreakOnException(ExceptionBreakType type, bool enable);
@ -254,8 +257,14 @@ class Debug {
void PrepareForBreakPoints();
// Returns whether the operation succeeded.
bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared);
// This function is used in FunctionNameUsing* tests.
Object* FindSharedFunctionInfoInScript(Handle<Script> script, int position);
// Returns whether the operation succeeded. Compilation can only be triggered
// if a valid closure is passed as the second argument, otherwise the shared
// function needs to be compiled already.
bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
Handle<JSFunction> function);
// Returns true if the current stub call is patched to call the debugger.
static bool IsDebugBreak(Address addr);
@ -434,7 +443,8 @@ class Debug {
// The top JS frame had been calling some C++ function. The return address
// gets patched automatically.
FRAME_DROPPED_IN_DIRECT_CALL,
FRAME_DROPPED_IN_RETURN_CALL,
CURRENTLY_SET_MODE
};
void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
@ -865,6 +875,8 @@ class Debugger {
bool compiling_natives() const { return compiling_natives_; }
void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
bool is_loading_debugger() const { return is_loading_debugger_; }
void set_live_edit_enabled(bool v) { live_edit_enabled_ = v; }
bool live_edit_enabled() const { return live_edit_enabled_; }
void set_force_debugger_active(bool force_debugger_active) {
force_debugger_active_ = force_debugger_active;
}
@ -893,6 +905,7 @@ class Debugger {
Handle<Object> event_listener_data_;
bool compiling_natives_; // Are we compiling natives?
bool is_loading_debugger_; // Are we loading the debugger?
bool live_edit_enabled_; // Enable LiveEdit.
bool never_unload_debugger_; // Can we unload the debugger?
bool force_debugger_active_; // Activate debugger without event listeners.
v8::Debug::MessageHandler2 message_handler_;

290
deps/v8/src/deoptimizer.cc

@ -268,20 +268,29 @@ void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
void Deoptimizer::VisitAllOptimizedFunctionsForContext(
Context* context, OptimizedFunctionVisitor* visitor) {
Isolate* isolate = context->GetIsolate();
ZoneScope zone_scope(isolate->runtime_zone(), DELETE_ON_EXIT);
AssertNoAllocation no_allocation;
ASSERT(context->IsNativeContext());
visitor->EnterContext(context);
// Create a snapshot of the optimized functions list. This is needed because
// visitors might remove more than one link from the list at once.
ZoneList<JSFunction*> snapshot(1, isolate->runtime_zone());
Object* element = context->OptimizedFunctionsListHead();
while (!element->IsUndefined()) {
JSFunction* element_function = JSFunction::cast(element);
snapshot.Add(element_function, isolate->runtime_zone());
element = element_function->next_function_link();
visitor->VisitFunction(element_function);
}
// Run through the snapshot of optimized functions and visit them.
for (int i = 0; i < snapshot.length(); ++i) {
visitor->VisitFunction(snapshot.at(i));
}
visitor->LeaveContext(context);
}
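The comment in the hunk above motivates the two-pass shape: deoptimizing clears next links, so a visitor can unlink more than one entry per call. A minimal sketch of the same snapshot-then-visit pattern over an intrusive list (hypothetical types, not V8's API):

#include <vector>
struct Node { Node* next; };
// Visitors may unlink arbitrary nodes, so copy the list first and only
// then invoke the callback on each saved pointer.
void VisitAll(Node* head, void (*visit)(Node*)) {
  std::vector<Node*> snapshot;
  for (Node* n = head; n != 0; n = n->next) snapshot.push_back(n);
  for (size_t i = 0; i < snapshot.size(); ++i) visit(snapshot[i]);
}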
@@ -294,10 +303,10 @@ void Deoptimizer::VisitAllOptimizedFunctionsForGlobalObject(
Object* proto = object->GetPrototype();
ASSERT(proto->IsJSGlobalObject());
VisitAllOptimizedFunctionsForContext(
GlobalObject::cast(proto)->global_context(), visitor);
GlobalObject::cast(proto)->native_context(), visitor);
} else if (object->IsGlobalObject()) {
VisitAllOptimizedFunctionsForContext(
GlobalObject::cast(object)->global_context(), visitor);
GlobalObject::cast(object)->native_context(), visitor);
}
}
@@ -306,12 +315,12 @@ void Deoptimizer::VisitAllOptimizedFunctions(
OptimizedFunctionVisitor* visitor) {
AssertNoAllocation no_allocation;
// Run through the list of all global contexts and deoptimize.
Object* context = Isolate::Current()->heap()->global_contexts_list();
// Run through the list of all native contexts and deoptimize.
Object* context = Isolate::Current()->heap()->native_contexts_list();
while (!context->IsUndefined()) {
// GC can happen when the context is not fully initialized,
// so the global field of the context can be undefined.
Object* global = Context::cast(context)->get(Context::GLOBAL_INDEX);
Object* global = Context::cast(context)->get(Context::GLOBAL_OBJECT_INDEX);
if (!global->IsUndefined()) {
VisitAllOptimizedFunctionsForGlobalObject(JSObject::cast(global),
visitor);
@@ -484,19 +493,18 @@ int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
unsigned id,
BailoutId id,
SharedFunctionInfo* shared) {
// TODO(kasperl): For now, we do a simple linear search for the PC
// offset associated with the given node id. This should probably be
// changed to a binary search.
int length = data->DeoptPoints();
Smi* smi_id = Smi::FromInt(id);
for (int i = 0; i < length; i++) {
if (data->AstId(i) == smi_id) {
if (data->AstId(i) == id) {
return data->PcAndState(i)->value();
}
}
PrintF("[couldn't find pc offset for node=%u]\n", id);
PrintF("[couldn't find pc offset for node=%d]\n", id.ToInt());
PrintF("[method: %s]\n", *shared->DebugName()->ToCString());
// Print the source code if available.
HeapStringAllocator string_allocator;
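The TODO above notes the linear scan could become a binary search. A hedged sketch of that alternative, assuming the (ast id, pc-and-state) pairs were kept sorted by id, which the current format does not promise; names here are illustrative, not V8's:

struct DeoptPoint { int ast_id; int pc_and_state; };
// Classic binary search over pairs sorted by ast_id; returns -1 if absent.
int FindPcAndState(const DeoptPoint* points, int length, int id) {
  int lo = 0;
  int hi = length - 1;
  while (lo <= hi) {
    int mid = lo + (hi - lo) / 2;
    if (points[mid].ast_id == id) return points[mid].pc_and_state;
    if (points[mid].ast_id < id) lo = mid + 1; else hi = mid - 1;
  }
  return -1;  // not found; the real code prints diagnostics instead
}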
@@ -543,7 +551,7 @@ void Deoptimizer::DoComputeOutputFrames() {
// described by the input data.
DeoptimizationInputData* input_data =
DeoptimizationInputData::cast(optimized_code_->deoptimization_data());
unsigned node_id = input_data->AstId(bailout_id_)->value();
BailoutId node_id = input_data->AstId(bailout_id_);
ByteArray* translations = input_data->TranslationByteArray();
unsigned translation_index =
input_data->TranslationIndex(bailout_id_)->value();
@@ -581,7 +589,24 @@ void Deoptimizer::DoComputeOutputFrames() {
case Translation::CONSTRUCT_STUB_FRAME:
DoComputeConstructStubFrame(&iterator, i);
break;
default:
case Translation::GETTER_STUB_FRAME:
DoComputeAccessorStubFrame(&iterator, i, false);
break;
case Translation::SETTER_STUB_FRAME:
DoComputeAccessorStubFrame(&iterator, i, true);
break;
case Translation::BEGIN:
case Translation::REGISTER:
case Translation::INT32_REGISTER:
case Translation::UINT32_REGISTER:
case Translation::DOUBLE_REGISTER:
case Translation::STACK_SLOT:
case Translation::INT32_STACK_SLOT:
case Translation::UINT32_STACK_SLOT:
case Translation::DOUBLE_STACK_SLOT:
case Translation::LITERAL:
case Translation::ARGUMENTS_OBJECT:
case Translation::DUPLICATE:
UNREACHABLE();
break;
}
@@ -595,9 +620,9 @@ void Deoptimizer::DoComputeOutputFrames() {
PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ",
reinterpret_cast<intptr_t>(function));
function->PrintName();
PrintF(" => node=%u, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
PrintF(" => node=%d, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
" took %0.3f ms]\n",
node_id,
node_id.ToInt(),
output_[index]->GetPc(),
FullCodeGenerator::State2String(
static_cast<FullCodeGenerator::State>(
@@ -700,6 +725,8 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::JS_FRAME:
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
case Translation::DUPLICATE:
UNREACHABLE();
return;
@@ -748,6 +775,34 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
return;
}
case Translation::UINT32_REGISTER: {
int input_reg = iterator->Next();
uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
if (FLAG_trace_deopt) {
PrintF(
" 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIuPTR
" ; uint %s (%s)\n",
output_[frame_index]->GetTop() + output_offset,
output_offset,
value,
converter.NameOfCPURegister(input_reg),
is_smi ? "smi" : "heap number");
}
if (is_smi) {
intptr_t tagged_value =
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
} else {
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
static_cast<double>(static_cast<uint32_t>(value)));
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
}
return;
}
case Translation::DOUBLE_REGISTER: {
int input_reg = iterator->Next();
double value = input_->GetDoubleRegister(input_reg);
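The UINT32_REGISTER case above, and the analogous UINT32_STACK_SLOT case in the next hunk, lean on the side-table trick spelled out in the comment: a value too large for a smi is parked off-frame and a GC-safe placeholder fills the slot until materialization. A minimal sketch of that pattern with hypothetical names (not the Deoptimizer API):

#include <stdint.h>
#include <map>
// slot address -> untagged value, kept off-heap until frames are final
static std::map<uintptr_t, double> deferred_values;
static const intptr_t kPlaceholderSmi = 0;  // any valid tagged word
void StoreUntagged(uintptr_t slot_addr, double value, intptr_t* frame_slot) {
  deferred_values[slot_addr] = value;  // remember the real value aside
  *frame_slot = kPlaceholderSmi;       // GC sees only a valid tagged word
}
// After all output frames exist, each saved entry is boxed as a real
// HeapNumber and written back over its placeholder.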
@@ -813,6 +868,36 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
return;
}
case Translation::UINT32_STACK_SLOT: {
int input_slot_index = iterator->Next();
unsigned input_offset =
input_->GetOffsetFromSlotIndex(input_slot_index);
uintptr_t value =
static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
PrintF("[top + %d] <- %" V8PRIuPTR " ; [sp + %d] (uint32 %s)\n",
output_offset,
value,
input_offset,
is_smi ? "smi" : "heap number");
}
if (is_smi) {
intptr_t tagged_value =
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
} else {
// We save the untagged value on the side and store a GC-safe
// temporary placeholder in the frame.
AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
static_cast<double>(static_cast<uint32_t>(value)));
output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
}
return;
}
case Translation::DOUBLE_STACK_SLOT: {
int input_slot_index = iterator->Next();
unsigned input_offset =
@@ -865,6 +950,56 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
}
static bool ObjectToInt32(Object* obj, int32_t* value) {
if (obj->IsSmi()) {
*value = Smi::cast(obj)->value();
return true;
}
if (obj->IsHeapNumber()) {
double num = HeapNumber::cast(obj)->value();
if (FastI2D(FastD2I(num)) != num) {
if (FLAG_trace_osr) {
PrintF("**** %g could not be converted to int32 ****\n",
HeapNumber::cast(obj)->value());
}
return false;
}
*value = FastD2I(num);
return true;
}
return false;
}
static bool ObjectToUint32(Object* obj, uint32_t* value) {
if (obj->IsSmi()) {
if (Smi::cast(obj)->value() < 0) return false;
*value = static_cast<uint32_t>(Smi::cast(obj)->value());
return true;
}
if (obj->IsHeapNumber()) {
double num = HeapNumber::cast(obj)->value();
if ((num < 0) || (FastUI2D(FastD2UI(num)) != num)) {
if (FLAG_trace_osr) {
PrintF("**** %g could not be converted to uint32 ****\n",
HeapNumber::cast(obj)->value());
}
return false;
}
*value = FastD2UI(num);
return true;
}
return false;
}
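These two helpers centralize the lossless-conversion checks the OSR cases below previously inlined. A worked example of the round-trip test: for a heap number 1.5, FastD2I(1.5) == 1 and FastI2D(1) == 1.0 != 1.5, so the conversion is rejected; a negative smi passes ObjectToInt32 but fails ObjectToUint32 outright. Illustrative usage, with types as in this file:

int32_t i = 0;
uint32_t u = 0;
ObjectToInt32(Smi::FromInt(-7), &i);   // true, i == -7
ObjectToUint32(Smi::FromInt(-7), &u);  // false: negative smi rejected
// A HeapNumber holding 1.5 fails both helpers via the round-trip check.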
bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
int* input_offset) {
disasm::NameConverter converter;
@@ -887,6 +1022,8 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
case Translation::JS_FRAME:
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
case Translation::DUPLICATE:
UNREACHABLE(); // Malformed input.
return false;
@@ -904,22 +1041,10 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
}
case Translation::INT32_REGISTER: {
// Abort OSR if we don't have a number.
if (!input_object->IsNumber()) return false;
int32_t int32_value = 0;
if (!ObjectToInt32(input_object, &int32_value)) return false;
int output_reg = iterator->Next();
int int32_value = input_object->IsSmi()
? Smi::cast(input_object)->value()
: FastD2I(input_object->Number());
// Abort the translation if the conversion lost information.
if (!input_object->IsSmi() &&
FastI2D(int32_value) != input_object->Number()) {
if (FLAG_trace_osr) {
PrintF("**** %g could not be converted to int32 ****\n",
input_object->Number());
}
return false;
}
if (FLAG_trace_osr) {
PrintF(" %s <- %d (int32) ; [sp + %d]\n",
converter.NameOfCPURegister(output_reg),
@@ -930,6 +1055,21 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
break;
}
case Translation::UINT32_REGISTER: {
uint32_t uint32_value = 0;
if (!ObjectToUint32(input_object, &uint32_value)) return false;
int output_reg = iterator->Next();
if (FLAG_trace_osr) {
PrintF(" %s <- %u (uint32) ; [sp + %d]\n",
converter.NameOfCPURegister(output_reg),
uint32_value,
*input_offset);
}
output->SetRegister(output_reg, static_cast<int32_t>(uint32_value));
break;
}
case Translation::DOUBLE_REGISTER: {
// Abort OSR if we don't have a number.
if (!input_object->IsNumber()) return false;
@@ -963,24 +1103,12 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
}
case Translation::INT32_STACK_SLOT: {
// Abort OSR if we don't have a number.
if (!input_object->IsNumber()) return false;
int32_t int32_value = 0;
if (!ObjectToInt32(input_object, &int32_value)) return false;
int output_index = iterator->Next();
unsigned output_offset =
output->GetOffsetFromSlotIndex(output_index);
int int32_value = input_object->IsSmi()
? Smi::cast(input_object)->value()
: DoubleToInt32(input_object->Number());
// Abort the translation if the conversion lost information.
if (!input_object->IsSmi() &&
FastI2D(int32_value) != input_object->Number()) {
if (FLAG_trace_osr) {
PrintF("**** %g could not be converted to int32 ****\n",
input_object->Number());
}
return false;
}
if (FLAG_trace_osr) {
PrintF(" [sp + %d] <- %d (int32) ; [sp + %d]\n",
output_offset,
@@ -991,6 +1119,23 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
break;
}
case Translation::UINT32_STACK_SLOT: {
uint32_t uint32_value = 0;
if (!ObjectToUint32(input_object, &uint32_value)) return false;
int output_index = iterator->Next();
unsigned output_offset =
output->GetOffsetFromSlotIndex(output_index);
if (FLAG_trace_osr) {
PrintF(" [sp + %d] <- %u (uint32) ; [sp + %d]\n",
output_offset,
uint32_value,
*input_offset);
}
output->SetFrameSlot(output_offset, static_cast<int32_t>(uint32_value));
break;
}
case Translation::DOUBLE_STACK_SLOT: {
static const int kLowerOffset = 0 * kPointerSize;
static const int kUpperOffset = 1 * kPointerSize;
@@ -1342,6 +1487,18 @@ void Translation::BeginConstructStubFrame(int literal_id, unsigned height) {
}
void Translation::BeginGetterStubFrame(int literal_id) {
buffer_->Add(GETTER_STUB_FRAME, zone());
buffer_->Add(literal_id, zone());
}
void Translation::BeginSetterStubFrame(int literal_id) {
buffer_->Add(SETTER_STUB_FRAME, zone());
buffer_->Add(literal_id, zone());
}
void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
buffer_->Add(ARGUMENTS_ADAPTOR_FRAME, zone());
buffer_->Add(literal_id, zone());
@@ -1349,9 +1506,11 @@ void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
}
void Translation::BeginJSFrame(int node_id, int literal_id, unsigned height) {
void Translation::BeginJSFrame(BailoutId node_id,
int literal_id,
unsigned height) {
buffer_->Add(JS_FRAME, zone());
buffer_->Add(node_id, zone());
buffer_->Add(node_id.ToInt(), zone());
buffer_->Add(literal_id, zone());
buffer_->Add(height, zone());
}
@@ -1369,6 +1528,12 @@ void Translation::StoreInt32Register(Register reg) {
}
void Translation::StoreUint32Register(Register reg) {
buffer_->Add(UINT32_REGISTER, zone());
buffer_->Add(reg.code(), zone());
}
void Translation::StoreDoubleRegister(DoubleRegister reg) {
buffer_->Add(DOUBLE_REGISTER, zone());
buffer_->Add(DoubleRegister::ToAllocationIndex(reg), zone());
@@ -1387,6 +1552,12 @@ void Translation::StoreInt32StackSlot(int index) {
}
void Translation::StoreUint32StackSlot(int index) {
buffer_->Add(UINT32_STACK_SLOT, zone());
buffer_->Add(index, zone());
}
void Translation::StoreDoubleStackSlot(int index) {
buffer_->Add(DOUBLE_STACK_SLOT, zone());
buffer_->Add(index, zone());
@@ -1414,11 +1585,15 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case ARGUMENTS_OBJECT:
case DUPLICATE:
return 0;
case GETTER_STUB_FRAME:
case SETTER_STUB_FRAME:
case REGISTER:
case INT32_REGISTER:
case UINT32_REGISTER:
case DOUBLE_REGISTER:
case STACK_SLOT:
case INT32_STACK_SLOT:
case UINT32_STACK_SLOT:
case DOUBLE_STACK_SLOT:
case LITERAL:
return 1;
@@ -1446,16 +1621,24 @@ const char* Translation::StringFor(Opcode opcode) {
return "ARGUMENTS_ADAPTOR_FRAME";
case CONSTRUCT_STUB_FRAME:
return "CONSTRUCT_STUB_FRAME";
case GETTER_STUB_FRAME:
return "GETTER_STUB_FRAME";
case SETTER_STUB_FRAME:
return "SETTER_STUB_FRAME";
case REGISTER:
return "REGISTER";
case INT32_REGISTER:
return "INT32_REGISTER";
case UINT32_REGISTER:
return "UINT32_REGISTER";
case DOUBLE_REGISTER:
return "DOUBLE_REGISTER";
case STACK_SLOT:
return "STACK_SLOT";
case INT32_STACK_SLOT:
return "INT32_STACK_SLOT";
case UINT32_STACK_SLOT:
return "UINT32_STACK_SLOT";
case DOUBLE_STACK_SLOT:
return "DOUBLE_STACK_SLOT";
case LITERAL:
@@ -1502,6 +1685,8 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
case Translation::JS_FRAME:
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
// Peeled off before getting here.
break;
@@ -1511,6 +1696,7 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
case Translation::REGISTER:
case Translation::INT32_REGISTER:
case Translation::UINT32_REGISTER:
case Translation::DOUBLE_REGISTER:
case Translation::DUPLICATE:
// We are at safepoint which corresponds to call. All registers are
@@ -1530,6 +1716,12 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
return SlotRef(slot_addr, SlotRef::INT32);
}
case Translation::UINT32_STACK_SLOT: {
int slot_index = iterator->Next();
Address slot_addr = SlotAddress(frame, slot_index);
return SlotRef(slot_addr, SlotRef::UINT32);
}
case Translation::DOUBLE_STACK_SLOT: {
int slot_index = iterator->Next();
Address slot_addr = SlotAddress(frame, slot_index);
@@ -1569,7 +1761,7 @@ Vector<SlotRef> SlotRef::ComputeSlotMappingForArguments(
int inlined_jsframe_index,
int formal_parameter_count) {
AssertNoAllocation no_gc;
int deopt_index = AstNode::kNoNumber;
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data =
static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
TranslationIterator it(data->TranslationByteArray(),

33 deps/v8/src/deoptimizer.h

@@ -62,13 +62,13 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
virtual ~OptimizedFunctionVisitor() {}
// Function which is called before iteration of any optimized functions
// from given global context.
// from given native context.
virtual void EnterContext(Context* context) = 0;
virtual void VisitFunction(JSFunction* function) = 0;
// Function which is called after iteration of all optimized functions
// from given global context.
// from given native context.
virtual void LeaveContext(Context* context) = 0;
};
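For illustration, a hypothetical concrete visitor against the interface above (not part of this patch): EnterContext/LeaveContext bracket each native context's list and VisitFunction fires once per optimized function.

class CountingVisitor : public OptimizedFunctionVisitor {
 public:
  CountingVisitor() : count_(0) {}
  virtual void EnterContext(Context* context) {}
  virtual void VisitFunction(JSFunction* function) { count_++; }
  virtual void LeaveContext(Context* context) {}
  int count() const { return count_; }
 private:
  int count_;  // optimized functions seen across all native contexts
};

Passing such a visitor to Deoptimizer::VisitAllOptimizedFunctions walks every native context's optimized-function list, using the snapshot pass rebuilt in the deoptimizer.cc hunks above.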
@@ -211,7 +211,7 @@ class Deoptimizer : public Malloced {
static Address GetDeoptimizationEntry(int id, BailoutType type);
static int GetDeoptimizationId(Address addr, BailoutType type);
static int GetOutputInfo(DeoptimizationOutputData* data,
unsigned node_id,
BailoutId node_id,
SharedFunctionInfo* shared);
// Code generation support.
@@ -284,6 +284,9 @@ class Deoptimizer : public Malloced {
int frame_index);
void DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index);
void DoComputeAccessorStubFrame(TranslationIterator* iterator,
int frame_index,
bool is_setter_stub_frame);
void DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset);
@@ -559,12 +562,16 @@ class Translation BASE_EMBEDDED {
BEGIN,
JS_FRAME,
CONSTRUCT_STUB_FRAME,
GETTER_STUB_FRAME,
SETTER_STUB_FRAME,
ARGUMENTS_ADAPTOR_FRAME,
REGISTER,
INT32_REGISTER,
UINT32_REGISTER,
DOUBLE_REGISTER,
STACK_SLOT,
INT32_STACK_SLOT,
UINT32_STACK_SLOT,
DOUBLE_STACK_SLOT,
LITERAL,
ARGUMENTS_OBJECT,
@@ -587,14 +594,18 @@ class Translation BASE_EMBEDDED {
int index() const { return index_; }
// Commands.
void BeginJSFrame(int node_id, int literal_id, unsigned height);
void BeginJSFrame(BailoutId node_id, int literal_id, unsigned height);
void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
void BeginConstructStubFrame(int literal_id, unsigned height);
void BeginGetterStubFrame(int literal_id);
void BeginSetterStubFrame(int literal_id);
void StoreRegister(Register reg);
void StoreInt32Register(Register reg);
void StoreUint32Register(Register reg);
void StoreDoubleRegister(DoubleRegister reg);
void StoreStackSlot(int index);
void StoreInt32StackSlot(int index);
void StoreUint32StackSlot(int index);
void StoreDoubleStackSlot(int index);
void StoreLiteral(int literal_id);
void StoreArgumentsObject();
@@ -608,6 +619,9 @@ class Translation BASE_EMBEDDED {
static const char* StringFor(Opcode opcode);
#endif
// A literal id which refers to the JSFunction itself.
static const int kSelfLiteralId = -239;
private:
TranslationBuffer* buffer_;
int index_;
@@ -641,6 +655,7 @@ class SlotRef BASE_EMBEDDED {
UNKNOWN,
TAGGED,
INT32,
UINT32,
DOUBLE,
LITERAL
};
@@ -668,6 +683,16 @@ class SlotRef BASE_EMBEDDED {
}
}
case UINT32: {
uint32_t value = Memory::uint32_at(addr_);
if (value <= static_cast<uint32_t>(Smi::kMaxValue)) {
return Handle<Object>(Smi::FromInt(static_cast<int>(value)));
} else {
return Isolate::Current()->factory()->NewNumber(
static_cast<double>(value));
}
}
case DOUBLE: {
double value = Memory::double_at(addr_);
return Isolate::Current()->factory()->NewNumber(value);

4 deps/v8/src/disassembler.cc

@@ -244,8 +244,8 @@ static int DecodeIt(FILE* f,
out.AddFormatted(" %s, %s", Code::Kind2String(kind),
Code::ICState2String(ic_state));
if (ic_state == MONOMORPHIC) {
PropertyType type = code->type();
out.AddFormatted(", %s", Code::PropertyType2String(type));
Code::StubType type = code->type();
out.AddFormatted(", %s", Code::StubType2String(type));
}
if (kind == Code::CALL_IC || kind == Code::KEYED_CALL_IC) {
out.AddFormatted(", argc = %d", code->arguments_count());

2 deps/v8/src/elements.cc

@@ -800,7 +800,7 @@ class FastElementsAccessor
}
} else {
// Otherwise, fill the unused tail with holes.
int old_length = FastD2I(array->length()->Number());
int old_length = FastD2IChecked(array->length()->Number());
for (int i = length; i < old_length; i++) {
backing_store->set_the_hole(i);
}

59 deps/v8/src/execution.cc

@@ -100,7 +100,7 @@ static Handle<Object> Invoke(bool is_construct,
// Make sure that the global object of the context we're about to
// make the current one is indeed a global object.
ASSERT(function->context()->global()->IsGlobalObject());
ASSERT(function->context()->global_object()->IsGlobalObject());
{
// Save and restore context around invocation and block the
@@ -165,10 +165,10 @@ Handle<Object> Execution::Call(Handle<Object> callable,
if (convert_receiver && !receiver->IsJSReceiver() &&
!func->shared()->native() && func->shared()->is_classic_mode()) {
if (receiver->IsUndefined() || receiver->IsNull()) {
Object* global = func->context()->global()->global_receiver();
Object* global = func->context()->global_object()->global_receiver();
// Under some circumstances, 'global' can be the JSBuiltinsObject
// In that case, don't rewrite.
// (FWIW, the same holds for GetIsolate()->global()->global_receiver().)
// In that case, don't rewrite. (FWIW, the same holds for
// GetIsolate()->global_object()->global_receiver().)
if (!global->IsJSBuiltinsObject()) receiver = Handle<Object>(global);
} else {
receiver = ToObject(receiver, pending_exception);
@@ -184,7 +184,7 @@ Handle<Object> Execution::New(Handle<JSFunction> func,
int argc,
Handle<Object> argv[],
bool* pending_exception) {
return Invoke(true, func, Isolate::Current()->global(), argc, argv,
return Invoke(true, func, Isolate::Current()->global_object(), argc, argv,
pending_exception);
}
@@ -246,7 +246,7 @@ Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
if (fun->IsHeapObject() &&
HeapObject::cast(fun)->map()->has_instance_call_handler()) {
return Handle<JSFunction>(
isolate->global_context()->call_as_function_delegate());
isolate->native_context()->call_as_function_delegate());
}
return factory->undefined_value();
@@ -270,7 +270,7 @@ Handle<Object> Execution::TryGetFunctionDelegate(Handle<Object> object,
if (fun->IsHeapObject() &&
HeapObject::cast(fun)->map()->has_instance_call_handler()) {
return Handle<JSFunction>(
isolate->global_context()->call_as_function_delegate());
isolate->native_context()->call_as_function_delegate());
}
// If the Object doesn't have an instance-call handler we should
@@ -303,7 +303,7 @@ Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
if (fun->IsHeapObject() &&
HeapObject::cast(fun)->map()->has_instance_call_handler()) {
return Handle<JSFunction>(
isolate->global_context()->call_as_constructor_delegate());
isolate->native_context()->call_as_constructor_delegate());
}
return isolate->factory()->undefined_value();
@@ -331,7 +331,7 @@ Handle<Object> Execution::TryGetConstructorDelegate(
if (fun->IsHeapObject() &&
HeapObject::cast(fun)->map()->has_instance_call_handler()) {
return Handle<JSFunction>(
isolate->global_context()->call_as_constructor_delegate());
isolate->native_context()->call_as_constructor_delegate());
}
// If the Object doesn't have an instance-call handler we should
@@ -446,6 +446,25 @@ void StackGuard::RequestRuntimeProfilerTick() {
}
void StackGuard::RequestCodeReadyEvent() {
ASSERT(FLAG_parallel_recompilation);
if (ExecutionAccess::TryLock(isolate_)) {
thread_local_.interrupt_flags_ |= CODE_READY;
if (thread_local_.postpone_interrupts_nesting_ == 0) {
thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
isolate_->heap()->SetStackLimits();
}
ExecutionAccess::Unlock(isolate_);
}
}
bool StackGuard::IsCodeReadyEvent() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & CODE_READY) != 0;
}
bool StackGuard::IsGCRequest() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0;
@@ -661,7 +680,7 @@ Handle<JSRegExp> Execution::NewJSRegExp(Handle<String> pattern,
Handle<String> flags,
bool* exc) {
Handle<JSFunction> function = Handle<JSFunction>(
pattern->GetIsolate()->global_context()->regexp_function());
pattern->GetIsolate()->native_context()->regexp_function());
Handle<Object> re_obj = RegExpImpl::CreateRegExpLiteral(
function, pattern, flags, exc);
if (*exc) return Handle<JSRegExp>();
@@ -707,7 +726,7 @@ Handle<JSFunction> Execution::InstantiateFunction(
// Fast case: see if the function has already been instantiated
int serial_number = Smi::cast(data->serial_number())->value();
Object* elm =
isolate->global_context()->function_cache()->
isolate->native_context()->function_cache()->
GetElementNoExceptionThrown(serial_number);
if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
// The function has not yet been instantiated in this context; do it.
@@ -832,6 +851,11 @@ Object* Execution::DebugBreakHelper() {
return isolate->heap()->undefined_value();
}
// Ignore debug break if debugger is not active.
if (!isolate->debugger()->IsDebuggerActive()) {
return isolate->heap()->undefined_value();
}
StackLimitCheck check(isolate);
if (check.HasOverflowed()) {
return isolate->heap()->undefined_value();
@@ -846,7 +870,7 @@ Object* Execution::DebugBreakHelper() {
if (JSFunction::cast(fun)->IsBuiltin()) {
return isolate->heap()->undefined_value();
}
GlobalObject* global = JSFunction::cast(fun)->context()->global();
GlobalObject* global = JSFunction::cast(fun)->context()->global_object();
// Don't stop in debugger functions.
if (isolate->debug()->IsDebugGlobal(global)) {
return isolate->heap()->undefined_value();
@@ -906,6 +930,17 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
stack_guard->Continue(GC_REQUEST);
}
if (stack_guard->IsCodeReadyEvent()) {
ASSERT(FLAG_parallel_recompilation);
if (FLAG_trace_parallel_recompilation) {
PrintF(" ** CODE_READY event received.\n");
}
stack_guard->Continue(CODE_READY);
}
if (!stack_guard->IsTerminateExecution()) {
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
}
isolate->counters()->stack_interrupts()->Increment();
// If FLAG_count_based_interrupts, every interrupt is a profiler interrupt.
if (FLAG_count_based_interrupts ||

5 deps/v8/src/execution.h

@@ -42,7 +42,8 @@ enum InterruptFlag {
PREEMPT = 1 << 3,
TERMINATE = 1 << 4,
RUNTIME_PROFILER_TICK = 1 << 5,
GC_REQUEST = 1 << 6
GC_REQUEST = 1 << 6,
CODE_READY = 1 << 7
};
@@ -195,6 +196,8 @@ class StackGuard {
void TerminateExecution();
bool IsRuntimeProfilerTick();
void RequestRuntimeProfilerTick();
bool IsCodeReadyEvent();
void RequestCodeReadyEvent();
#ifdef ENABLE_DEBUGGER_SUPPORT
bool IsDebugBreak();
void DebugBreak();

153 deps/v8/src/extensions/statistics-extension.cc

@@ -0,0 +1,153 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "statistics-extension.h"
namespace v8 {
namespace internal {
const char* const StatisticsExtension::kSource =
"native function getV8Statistics();";
v8::Handle<v8::FunctionTemplate> StatisticsExtension::GetNativeFunction(
v8::Handle<v8::String> str) {
ASSERT(strcmp(*v8::String::AsciiValue(str), "getV8Statistics") == 0);
return v8::FunctionTemplate::New(StatisticsExtension::GetCounters);
}
static void AddCounter(v8::Local<v8::Object> object,
StatsCounter* counter,
const char* name) {
if (counter->Enabled()) {
object->Set(v8::String::New(name),
v8::Number::New(*counter->GetInternalPointer()));
}
}
static void AddNumber(v8::Local<v8::Object> object,
intptr_t value,
const char* name) {
object->Set(v8::String::New(name),
v8::Number::New(static_cast<double>(value)));
}
v8::Handle<v8::Value> StatisticsExtension::GetCounters(
const v8::Arguments& args) {
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
if (args.Length() > 0) { // GC if first argument evaluates to true.
if (args[0]->IsBoolean() && args[0]->ToBoolean()->Value()) {
heap->CollectAllGarbage(Heap::kNoGCFlags, "counters extension");
}
}
Counters* counters = isolate->counters();
v8::Local<v8::Object> result = v8::Object::New();
#define ADD_COUNTER(name, caption) \
AddCounter(result, counters->name(), #name);
STATS_COUNTER_LIST_1(ADD_COUNTER)
STATS_COUNTER_LIST_2(ADD_COUNTER)
#undef ADD_COUNTER
#define ADD_COUNTER(name) \
AddCounter(result, counters->count_of_##name(), "count_of_" #name); \
AddCounter(result, counters->size_of_##name(), "size_of_" #name);
INSTANCE_TYPE_LIST(ADD_COUNTER)
#undef ADD_COUNTER
#define ADD_COUNTER(name) \
AddCounter(result, counters->count_of_CODE_TYPE_##name(), \
"count_of_CODE_TYPE_" #name); \
AddCounter(result, counters->size_of_CODE_TYPE_##name(), \
"size_of_CODE_TYPE_" #name);
CODE_KIND_LIST(ADD_COUNTER)
#undef ADD_COUNTER
#define ADD_COUNTER(name) \
AddCounter(result, counters->count_of_FIXED_ARRAY_##name(), \
"count_of_FIXED_ARRAY_" #name); \
AddCounter(result, counters->size_of_FIXED_ARRAY_##name(), \
"size_of_FIXED_ARRAY_" #name);
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADD_COUNTER)
#undef ADD_COUNTER
AddNumber(result, isolate->memory_allocator()->Size(),
"total_committed_bytes");
AddNumber(result, heap->new_space()->Size(),
"new_space_live_bytes");
AddNumber(result, heap->new_space()->Available(),
"new_space_available_bytes");
AddNumber(result, heap->new_space()->CommittedMemory(),
"new_space_commited_bytes");
AddNumber(result, heap->old_pointer_space()->Size(),
"old_pointer_space_live_bytes");
AddNumber(result, heap->old_pointer_space()->Available(),
"old_pointer_space_available_bytes");
AddNumber(result, heap->old_pointer_space()->CommittedMemory(),
"old_pointer_space_commited_bytes");
AddNumber(result, heap->old_data_space()->Size(),
"old_data_space_live_bytes");
AddNumber(result, heap->old_data_space()->Available(),
"old_data_space_available_bytes");
AddNumber(result, heap->old_data_space()->CommittedMemory(),
"old_data_space_commited_bytes");
AddNumber(result, heap->code_space()->Size(),
"code_space_live_bytes");
AddNumber(result, heap->code_space()->Available(),
"code_space_available_bytes");
AddNumber(result, heap->code_space()->CommittedMemory(),
"code_space_commited_bytes");
AddNumber(result, heap->cell_space()->Size(),
"cell_space_live_bytes");
AddNumber(result, heap->cell_space()->Available(),
"cell_space_available_bytes");
AddNumber(result, heap->cell_space()->CommittedMemory(),
"cell_space_commited_bytes");
AddNumber(result, heap->lo_space()->Size(),
"lo_space_live_bytes");
AddNumber(result, heap->lo_space()->Available(),
"lo_space_available_bytes");
AddNumber(result, heap->lo_space()->CommittedMemory(),
"lo_space_commited_bytes");
AddNumber(result, heap->amount_of_external_allocated_memory(),
"amount_of_external_allocated_memory");
return result;
}
void StatisticsExtension::Register() {
static StatisticsExtension statistics_extension;
static v8::DeclareExtension declaration(&statistics_extension);
}
} } // namespace v8::internal
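For context, a hedged sketch of how an embedder might surface the new builtin. It assumes the v8 3.x embedding API of this era (Register(), ExtensionConfiguration, Context::New); exact signatures may differ:

v8::internal::StatisticsExtension::Register();
const char* names[] = { "v8/statistics" };
v8::ExtensionConfiguration config(1, names);
v8::HandleScope scope;
v8::Persistent<v8::Context> context = v8::Context::New(&config);
v8::Context::Scope context_scope(context);
// getV8Statistics(true) would also force a full GC before sampling.
v8::Handle<v8::Value> stats =
    v8::Script::Compile(v8::String::New("getV8Statistics()"))->Run();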

49 deps/v8/src/extensions/statistics-extension.h

@@ -0,0 +1,49 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_EXTENSIONS_STATISTICS_EXTENSION_H_
#define V8_EXTENSIONS_STATISTICS_EXTENSION_H_
#include "v8.h"
namespace v8 {
namespace internal {
class StatisticsExtension : public v8::Extension {
public:
StatisticsExtension() : v8::Extension("v8/statistics", kSource) {}
virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
v8::Handle<v8::String> name);
static v8::Handle<v8::Value> GetCounters(const v8::Arguments& args);
static void Register();
private:
static const char* const kSource;
};
} } // namespace v8::internal
#endif // V8_EXTENSIONS_STATISTICS_EXTENSION_H_

164 deps/v8/src/factory.cc

@@ -115,8 +115,7 @@ Handle<ObjectHashTable> Factory::NewObjectHashTable(int at_least_space_for) {
Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors) {
ASSERT(0 <= number_of_descriptors);
CALL_HEAP_FUNCTION(isolate(),
DescriptorArray::Allocate(number_of_descriptors,
DescriptorArray::MAY_BE_SHARED),
DescriptorArray::Allocate(number_of_descriptors),
DescriptorArray);
}
@@ -285,19 +284,27 @@ Handle<String> Factory::NewExternalStringFromTwoByte(
}
Handle<Context> Factory::NewGlobalContext() {
Handle<Context> Factory::NewNativeContext() {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateGlobalContext(),
isolate()->heap()->AllocateNativeContext(),
Context);
}
Handle<Context> Factory::NewModuleContext(Handle<Context> previous,
Handle<Context> Factory::NewGlobalContext(Handle<JSFunction> function,
Handle<ScopeInfo> scope_info) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateModuleContext(*previous, *scope_info),
isolate()->heap()->AllocateGlobalContext(*function, *scope_info),
Context);
}
Handle<Context> Factory::NewModuleContext(Handle<ScopeInfo> scope_info) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateModuleContext(*scope_info),
Context);
}
@@ -466,14 +473,15 @@ Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
}
Handle<Map> Factory::CopyMapDropDescriptors(Handle<Map> src) {
CALL_HEAP_FUNCTION(isolate(), src->CopyDropDescriptors(), Map);
Handle<Map> Factory::CopyWithPreallocatedFieldDescriptors(Handle<Map> src) {
CALL_HEAP_FUNCTION(
isolate(), src->CopyWithPreallocatedFieldDescriptors(), Map);
}
Handle<Map> Factory::CopyMap(Handle<Map> src,
int extra_inobject_properties) {
Handle<Map> copy = CopyMapDropDescriptors(src);
Handle<Map> copy = CopyWithPreallocatedFieldDescriptors(src);
// Check that we do not overflow the instance size when adding the
// extra inobject properties.
int instance_size_delta = extra_inobject_properties * kPointerSize;
@@ -496,10 +504,8 @@ Handle<Map> Factory::CopyMap(Handle<Map> src,
}
Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) {
CALL_HEAP_FUNCTION(isolate(),
src->CopyDropTransitions(DescriptorArray::MAY_BE_SHARED),
Map);
Handle<Map> Factory::CopyMap(Handle<Map> src) {
CALL_HEAP_FUNCTION(isolate(), src->Copy(), Map);
}
@@ -554,18 +560,27 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
}
result->set_context(*context);
if (!function_info->bound()) {
int index = function_info->SearchOptimizedCodeMap(context->native_context());
if (!function_info->bound() && index < 0) {
int number_of_literals = function_info->num_literals();
Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
if (number_of_literals > 0) {
// Store the object, regexp and array functions in the literals
// array prefix. These functions will be used when creating
// object, regexp and array literals in this function.
literals->set(JSFunction::kLiteralGlobalContextIndex,
context->global_context());
// Store the native context in the literals array prefix. This
// context will be used when creating object, regexp and array
// literals in this function.
literals->set(JSFunction::kLiteralNativeContextIndex,
context->native_context());
}
result->set_literals(*literals);
}
if (index > 0) {
// Caching of optimized code enabled and optimized code found.
function_info->InstallFromOptimizedCodeMap(*result, index);
return result;
}
if (V8::UseCrankshaft() &&
FLAG_always_opt &&
result->is_compiled() &&
@@ -699,7 +714,7 @@ Handle<String> Factory::EmergencyNewError(const char* type,
MaybeObject* maybe_arg = args->GetElement(i);
Handle<String> arg_str(reinterpret_cast<String*>(maybe_arg));
const char* arg = *arg_str->ToCString();
Vector<char> v2(p, space);
Vector<char> v2(p, static_cast<int>(space));
OS::StrNCpy(v2, arg, space);
space -= Min(space, strlen(arg));
p = &buffer[kBufferSize] - space;
@@ -879,97 +894,12 @@ Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) {
}
MUST_USE_RESULT static inline MaybeObject* DoCopyInsert(
DescriptorArray* array,
String* key,
Object* value,
PropertyAttributes attributes) {
CallbacksDescriptor desc(key, value, attributes);
MaybeObject* obj = array->CopyInsert(&desc, REMOVE_TRANSITIONS);
return obj;
}
// Allocate the new array.
Handle<DescriptorArray> Factory::CopyAppendForeignDescriptor(
Handle<DescriptorArray> array,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes) {
CALL_HEAP_FUNCTION(isolate(),
DoCopyInsert(*array, *key, *value, attributes),
DescriptorArray);
}
Handle<String> Factory::SymbolFromString(Handle<String> value) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->LookupSymbol(*value), String);
}
Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors(
Handle<DescriptorArray> array,
Handle<Object> descriptors) {
v8::NeanderArray callbacks(descriptors);
int nof_callbacks = callbacks.length();
Handle<DescriptorArray> result =
NewDescriptorArray(array->number_of_descriptors() + nof_callbacks);
// Number of descriptors added to the result so far.
int descriptor_count = 0;
// Ensure that marking will not progress and change color of objects.
DescriptorArray::WhitenessWitness witness(*result);
// Copy the descriptors from the array.
for (int i = 0; i < array->number_of_descriptors(); i++) {
if (!array->IsNullDescriptor(i)) {
DescriptorArray::CopyFrom(result, descriptor_count++, array, i, witness);
}
}
// Number of duplicates detected.
int duplicates = 0;
// Fill in new callback descriptors. Process the callbacks from
// back to front so that the last callback with a given name takes
// precedence over previously added callbacks with that name.
for (int i = nof_callbacks - 1; i >= 0; i--) {
Handle<AccessorInfo> entry =
Handle<AccessorInfo>(AccessorInfo::cast(callbacks.get(i)));
// Ensure the key is a symbol before writing into the instance descriptor.
Handle<String> key =
SymbolFromString(Handle<String>(String::cast(entry->name())));
// Check if a descriptor with this name already exists before writing.
if (result->LinearSearch(EXPECT_UNSORTED, *key, descriptor_count) ==
DescriptorArray::kNotFound) {
CallbacksDescriptor desc(*key, *entry, entry->property_attributes());
result->Set(descriptor_count, &desc, witness);
descriptor_count++;
} else {
duplicates++;
}
}
// If duplicates were detected, allocate a result of the right size
// and transfer the elements.
if (duplicates > 0) {
int number_of_descriptors = result->number_of_descriptors() - duplicates;
Handle<DescriptorArray> new_result =
NewDescriptorArray(number_of_descriptors);
for (int i = 0; i < number_of_descriptors; i++) {
DescriptorArray::CopyFrom(new_result, i, result, i, witness);
}
result = new_result;
}
// Sort the result before returning.
result->Sort(witness);
return result;
}
Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
@@ -978,10 +908,11 @@ Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
}
Handle<JSModule> Factory::NewJSModule() {
Handle<JSModule> Factory::NewJSModule(Handle<Context> context,
Handle<ScopeInfo> scope_info) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateJSModule(), JSModule);
isolate()->heap()->AllocateJSModule(*context, *scope_info), JSModule);
}
@@ -1088,7 +1019,7 @@ void Factory::BecomeJSFunction(Handle<JSReceiver> object) {
}
void Factory::SetIdentityHash(Handle<JSObject> object, Object* hash) {
void Factory::SetIdentityHash(Handle<JSObject> object, Smi* hash) {
CALL_HEAP_FUNCTION_VOID(
isolate(),
object->SetIdentityHash(hash, ALLOW_CREATION));
@@ -1188,7 +1119,7 @@ Handle<JSFunction> Factory::NewFunctionHelper(Handle<String> name,
Handle<JSFunction> Factory::NewFunction(Handle<String> name,
Handle<Object> prototype) {
Handle<JSFunction> fun = NewFunctionHelper(name, prototype);
fun->set_context(isolate()->context()->global_context());
fun->set_context(isolate()->context()->native_context());
return fun;
}
@@ -1214,7 +1145,7 @@ Handle<JSFunction> Factory::NewFunctionWithoutPrototype(
LanguageMode language_mode) {
Handle<JSFunction> fun =
NewFunctionWithoutPrototypeHelper(name, language_mode);
fun->set_context(isolate()->context()->global_context());
fun->set_context(isolate()->context()->native_context());
return fun;
}
@@ -1225,8 +1156,8 @@ Handle<Object> Factory::ToObject(Handle<Object> object) {
Handle<Object> Factory::ToObject(Handle<Object> object,
Handle<Context> global_context) {
CALL_HEAP_FUNCTION(isolate(), object->ToObject(*global_context), Object);
Handle<Context> native_context) {
CALL_HEAP_FUNCTION(isolate(), object->ToObject(*native_context), Object);
}
@@ -1353,20 +1284,15 @@ Handle<JSFunction> Factory::CreateApiFunction(
result->shared()->DontAdaptArguments();
// Recursively copy parent templates' accessors, 'data' may be modified.
Handle<DescriptorArray> array =
Handle<DescriptorArray>(map->instance_descriptors());
while (true) {
Handle<Object> props = Handle<Object>(obj->property_accessors());
if (!props->IsUndefined()) {
array = CopyAppendCallbackDescriptors(array, props);
Map::CopyAppendCallbackDescriptors(map, props);
}
Handle<Object> parent = Handle<Object>(obj->parent_template());
if (parent->IsUndefined()) break;
obj = Handle<FunctionTemplateInfo>::cast(parent);
}
if (!array->IsEmpty()) {
map->set_instance_descriptors(*array);
}
ASSERT(result->shared()->IsApiFunction());
return result;
@@ -1403,7 +1329,7 @@ Handle<MapCache> Factory::AddToMapCache(Handle<Context> context,
Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
Handle<FixedArray> keys) {
if (context->map_cache()->IsUndefined()) {
// Allocate the new map cache for the global context.
// Allocate the new map cache for the native context.
Handle<MapCache> new_cache = NewMapCache(24);
context->set_map_cache(*new_cache);
}

35 deps/v8/src/factory.h

@@ -160,12 +160,15 @@ class Factory {
const ExternalTwoByteString::Resource* resource);
// Create a global (but otherwise uninitialized) context.
Handle<Context> NewGlobalContext();
Handle<Context> NewNativeContext();
// Create a module context.
Handle<Context> NewModuleContext(Handle<Context> previous,
// Create a global context.
Handle<Context> NewGlobalContext(Handle<JSFunction> function,
Handle<ScopeInfo> scope_info);
// Create a module context.
Handle<Context> NewModuleContext(Handle<ScopeInfo> scope_info);
// Create a function context.
Handle<Context> NewFunctionContext(int length, Handle<JSFunction> function);
@@ -223,13 +226,12 @@ class Factory {
Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
Handle<Map> CopyMapDropDescriptors(Handle<Map> map);
Handle<Map> CopyWithPreallocatedFieldDescriptors(Handle<Map> map);
// Copy the map adding more inobject properties if possible without
// overflowing the instance size.
Handle<Map> CopyMap(Handle<Map> map, int extra_inobject_props);
Handle<Map> CopyMapDropTransitions(Handle<Map> map);
Handle<Map> CopyMap(Handle<Map> map);
Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
ElementsKind elements_kind);
@@ -267,7 +269,8 @@ class Factory {
Handle<JSObject> NewJSObjectFromMap(Handle<Map> map);
// JS modules are pretenured.
Handle<JSModule> NewJSModule();
Handle<JSModule> NewJSModule(Handle<Context> context,
Handle<ScopeInfo> scope_info);
// JS arrays are pretenured when allocated by the parser.
Handle<JSArray> NewJSArray(
@@ -298,7 +301,7 @@ class Factory {
void BecomeJSObject(Handle<JSReceiver> object);
void BecomeJSFunction(Handle<JSReceiver> object);
void SetIdentityHash(Handle<JSObject> object, Object* hash);
void SetIdentityHash(Handle<JSObject> object, Smi* hash);
Handle<JSFunction> NewFunction(Handle<String> name,
Handle<Object> prototype);
@@ -332,7 +335,7 @@ class Factory {
Handle<Object> ToObject(Handle<Object> object);
Handle<Object> ToObject(Handle<Object> object,
Handle<Context> global_context);
Handle<Context> native_context);
// Interface for creating error objects.
@@ -386,12 +389,6 @@ class Factory {
Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name,
Handle<Code> code);
Handle<DescriptorArray> CopyAppendForeignDescriptor(
Handle<DescriptorArray> array,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes);
Handle<String> NumberToString(Handle<Object> number);
Handle<String> Uint32ToString(uint32_t value);
@@ -464,7 +461,7 @@ class Factory {
Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
#endif
// Return a map using the map cache in the global context.
// Return a map using the map cache in the native context.
// The key is an ordered set of property names.
Handle<Map> ObjectLiteralMapFromCache(Handle<Context> context,
Handle<FixedArray> keys);
@@ -503,14 +500,10 @@ class Factory {
Handle<String> name,
LanguageMode language_mode);
Handle<DescriptorArray> CopyAppendCallbackDescriptors(
Handle<DescriptorArray> array,
Handle<Object> descriptors);
// Create a new map cache.
Handle<MapCache> NewMapCache(int at_least_space_for);
// Update the map cache in the global context with (keys, map)
// Update the map cache in the native context with (keys, map)
Handle<MapCache> AddToMapCache(Handle<Context> context,
Handle<FixedArray> keys,
Handle<Map> map);

51 deps/v8/src/flag-definitions.h

@@ -132,9 +132,9 @@ public:
// Flags for language modes and experimental language features.
DEFINE_bool(use_strict, false, "enforce strict mode")
DEFINE_bool(es5_readonly, false,
DEFINE_bool(es5_readonly, true,
"activate correct semantics for inheriting readonliness")
DEFINE_bool(es52_globals, false,
DEFINE_bool(es52_globals, true,
"activate new semantics for global var declarations")
DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
@@ -152,7 +152,7 @@ DEFINE_implication(harmony_modules, harmony_scoping)
DEFINE_implication(harmony_modules, harmony_scoping)
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, false, "optimizes arrays that have no holes")
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
DEFINE_bool(clever_optimizations,
true,
@@ -206,12 +206,27 @@ DEFINE_bool(array_index_dehoisting, false,
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")
DEFINE_bool(lookup_sample_by_shared, true,
"when picking a function to optimize, watch for shared function "
"info, not JSFunction itself")
DEFINE_bool(cache_optimized_code, true,
"cache optimized code for closures")
DEFINE_bool(inline_construct, true, "inline constructor calls")
DEFINE_bool(inline_arguments, true, "inline functions with arguments object")
DEFINE_bool(inline_accessors, true, "inline JavaScript accessors")
DEFINE_int(loop_weight, 1, "loop weight for representation inference")
DEFINE_bool(optimize_for_in, true,
"optimize functions containing for-in loops")
DEFINE_bool(opt_safe_uint32_operations, true,
"allow uint32 values on optimize frames if they are used only in"
"safe operations")
DEFINE_bool(parallel_recompilation, false,
"optimizing hot functions asynchronously on a separate thread")
DEFINE_bool(trace_parallel_recompilation, false, "track parallel recompilation")
DEFINE_int(parallel_recompilation_queue_length, 2,
"the length of the parallel compilation queue")
// Experimental profiler changes.
DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
@@ -228,7 +243,8 @@ DEFINE_bool(interrupt_at_exit, false,
"insert an interrupt check at function exit")
DEFINE_bool(weighted_back_edges, false,
"weight back edges by jump distance for interrupt triggering")
DEFINE_int(interrupt_budget, 5900,
// 0x1700 fits in the immediate field of an ARM instruction.
DEFINE_int(interrupt_budget, 0x1700,
"execution budget before interrupt is triggered")
DEFINE_int(type_info_threshold, 15,
"percentage of ICs that must have type info to allow optimization")
@@ -263,7 +279,9 @@ DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)")
DEFINE_bool(enable_vfp3, true,
"enable use of VFP3 instructions if available - this implies "
"enabling ARMv7 instructions (ARM only)")
"enabling ARMv7 and VFP2 instructions (ARM only)")
DEFINE_bool(enable_vfp2, true,
"enable use of VFP2 instructions if available")
DEFINE_bool(enable_armv7, true,
"enable use of ARMv7 instructions if available (ARM only)")
DEFINE_bool(enable_fpu, true,
@@ -307,8 +325,8 @@ DEFINE_int(min_preparse_length, 1024,
"minimum length for automatic enable preparsing")
DEFINE_bool(always_full_compiler, false,
"try to use the dedicated run-once backend for all code")
DEFINE_bool(trace_bailout, false,
"print reasons for falling back to using the classic V8 backend")
DEFINE_int(max_opt_count, 10,
"maximum number of optimization attempts before giving up.")
// compilation-cache.cc
DEFINE_bool(compilation_cache, true, "enable compilation cache")
@@ -348,12 +366,17 @@ DEFINE_bool(trace_gc, false,
DEFINE_bool(trace_gc_nvp, false,
"print one detailed trace line in name=value format "
"after each garbage collection")
DEFINE_bool(trace_gc_ignore_scavenger, false,
"do not print trace line after scavenger collection")
DEFINE_bool(print_cumulative_gc_stat, false,
"print cumulative GC statistics in name=value format on exit")
DEFINE_bool(trace_gc_verbose, false,
"print more details following each garbage collection")
DEFINE_bool(trace_fragmentation, false,
"report fragmentation for old pointer and data pages")
DEFINE_bool(trace_external_memory, false,
"print amount of external allocated memory after each time "
"it is adjusted.")
DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
DEFINE_bool(flush_code, true,
@@ -362,13 +385,12 @@ DEFINE_bool(incremental_marking, true, "use incremental marking")
DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps")
DEFINE_bool(trace_incremental_marking, false,
"trace progress of the incremental marking")
DEFINE_bool(track_gc_object_stats, false,
"track object counts and memory usage")
// v8.cc
DEFINE_bool(use_idle_notification, true,
"Use idle notification to reduce memory footprint.")
DEFINE_bool(send_idle_notification, false,
"Send idle notifcation between stress runs.")
// ic.cc
DEFINE_bool(use_ic, true, "use inline caching")
@@ -402,6 +424,7 @@ DEFINE_bool(use_verbose_printer, true, "allows verbose printing")
// parser.cc
DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
DEFINE_bool(trace_parse, false, "trace parsing and preparsing")
// simulator-arm.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "Trace simulator execution")
@@ -446,6 +469,10 @@ DEFINE_string(testing_serialization_file, "/tmp/serdes",
"file in which to serialize heap")
#endif
// mksnapshot.cc
DEFINE_string(extra_code, NULL, "A filename with extra code to be included in"
" the snapshot (mksnapshot only)")
//
// Dev shell flags
//
@@ -532,6 +559,8 @@ DEFINE_bool(gc_verbose, false, "print stuff during garbage collection")
DEFINE_bool(heap_stats, false, "report heap statistics before and after GC")
DEFINE_bool(code_stats, false, "report code statistics after GC")
DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
DEFINE_bool(verify_native_context_separation, false,
"verify that code holds on to at most one native context after GC")
DEFINE_bool(print_handles, false, "report handles after GC")
DEFINE_bool(print_global_handles, false, "report global handles after GC")
@@ -604,6 +633,8 @@ DEFINE_bool(sliding_state_window, false,
"Update sliding state window counters.")
DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
DEFINE_string(gc_fake_mmap, "/tmp/__v8_gc__",
"Specify the name of the file for fake gc mmap used in ll_prof")
//
// Disassembler only flags

14 deps/v8/src/flags.cc

@@ -31,7 +31,7 @@
#include "v8.h"
#include "platform.h"
#include "smart-array-pointer.h"
#include "smart-pointers.h"
#include "string-stream.h"
@@ -343,6 +343,7 @@ static Flag* FindFlag(const char* name) {
int FlagList::SetFlagsFromCommandLine(int* argc,
char** argv,
bool remove_flags) {
int return_code = 0;
// parse arguments
for (int i = 1; i < *argc;) {
int j = i; // j > 0
@@ -368,7 +369,8 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
} else {
fprintf(stderr, "Error: unrecognized flag %s\n"
"Try --help for options\n", arg);
return j;
return_code = j;
break;
}
}
@@ -382,7 +384,8 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
fprintf(stderr, "Error: missing value for flag %s of type %s\n"
"Try --help for options\n",
arg, Type2String(flag->type()));
return j;
return_code = j;
break;
}
}
@@ -424,7 +427,8 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
fprintf(stderr, "Error: illegal value for flag %s of type %s\n"
"Try --help for options\n",
arg, Type2String(flag->type()));
return j;
return_code = j;
break;
}
// remove the flag & value from the command
@@ -451,7 +455,7 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
exit(0);
}
// parsed all flags successfully
return 0;
return return_code;
}
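With this change the parser keeps applying later flags after an error and only remembers the index of the first bad argument. An illustrative caller (hypothetical embedder code; only the shown signature is from this file):

int bad = FlagList::SetFlagsFromCommandLine(&argc, argv, true);
if (bad != 0) {
  // argv[bad] was unrecognized or ill-formed, but the remaining
  // well-formed flags were still parsed and applied.
}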

24 deps/v8/src/frames.cc

@@ -789,7 +789,7 @@ void JavaScriptFrame::PrintTop(FILE* file,
ROBUST_STRING_TRAVERSAL);
PrintF(file, " at %s:%d", *c_script_name, line);
} else {
PrintF(file, "at <unknown>:%d", line);
PrintF(file, " at <unknown>:%d", line);
}
} else {
PrintF(file, " at <unknown>:<unknown>");
@@ -832,12 +832,23 @@ void FrameSummary::Print() {
}
JSFunction* OptimizedFrame::LiteralAt(FixedArray* literal_array,
int literal_id) {
if (literal_id == Translation::kSelfLiteralId) {
return JSFunction::cast(function());
}
return JSFunction::cast(literal_array->get(literal_id));
}
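LiteralAt is the reader for kSelfLiteralId (-239, declared in deoptimizer.h above): the sentinel literal id resolves to the frame's own function instead of a literal-array slot. A sketch of the assumed writer side when a translation frame belongs to the function being compiled; the emission site is hypothetical:

// Hypothetical emission site; mirrors the sentinel's intent.
translation->BeginJSFrame(ast_id, Translation::kSelfLiteralId, height);
// The decoder (LiteralAt above) then returns function() rather than
// literal_array->get(-239), which would be out of bounds.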
void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
ASSERT(frames->length() == 0);
ASSERT(is_optimized());
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
FixedArray* literal_array = data->LiteralArray();
// BUG(3243555): Since we don't have a lazy-deopt registered at
// throw-statements, we can't use the translation at the call-site of
@@ -864,11 +875,9 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
opcode = static_cast<Translation::Opcode>(it.Next());
if (opcode == Translation::JS_FRAME) {
i--;
int ast_id = it.Next();
int function_id = it.Next();
BailoutId ast_id = BailoutId(it.Next());
JSFunction* function = LiteralAt(literal_array, it.Next());
it.Next(); // Skip height.
JSFunction* function =
JSFunction::cast(data->LiteralArray()->get(function_id));
// The translation commands are ordered and the receiver is always
// at the first position. Since we are always at a call when we need
@@ -975,6 +984,7 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
FixedArray* literal_array = data->LiteralArray();
TranslationIterator it(data->TranslationByteArray(),
data->TranslationIndex(deopt_index)->value());
@@ -990,10 +1000,8 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
if (opcode == Translation::JS_FRAME) {
jsframe_count--;
it.Next(); // Skip ast id.
int function_id = it.Next();
JSFunction* function = LiteralAt(literal_array, it.Next());
it.Next(); // Skip height.
JSFunction* function =
JSFunction::cast(data->LiteralArray()->get(function_id));
functions->Add(function);
} else {
// Skip over operands to advance to the next opcode.
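Summarize and GetFunctions above now share the LiteralAt helper, which also understands the new self-literal sentinel used when an optimized frame refers to its own function. A self-contained sketch of that lookup; the sentinel value and the types are stand-ins, not V8's:

#include <vector>

struct Fn {};  // stand-in for JSFunction

class FrameSketch {
 public:
  static const int kSelfLiteralId = -1;  // illustrative sentinel value

  explicit FrameSketch(Fn* self) : self_(self) {}

  Fn* LiteralAt(const std::vector<Fn*>& literal_array, int literal_id) const {
    if (literal_id == kSelfLiteralId) return self_;  // the frame's own function
    return literal_array[literal_id];                // ordinary literal slot
  }

 private:
  Fn* self_;
};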

2
deps/v8/src/frames.h

@@ -577,6 +577,8 @@ class OptimizedFrame : public JavaScriptFrame {
inline explicit OptimizedFrame(StackFrameIterator* iterator);
private:
JSFunction* LiteralAt(FixedArray* literal_array, int literal_id);
friend class StackFrameIterator;
};

132
deps/v8/src/full-codegen.cc

@@ -36,6 +36,7 @@
#include "prettyprinter.h"
#include "scopes.h"
#include "scopeinfo.h"
#include "snapshot.h"
#include "stub-cache.h"
namespace v8 {
@@ -303,7 +304,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
masm.positions_recorder()->StartGDBJITLineInfoRecording();
#endif
FullCodeGenerator cgen(&masm, info, isolate->zone());
FullCodeGenerator cgen(&masm, info);
cgen.Generate();
if (cgen.HasStackOverflow()) {
ASSERT(!isolate->has_pending_exception());
@@ -315,7 +316,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
code->set_optimizable(info->IsOptimizable() &&
!info->function()->flags()->Contains(kDontOptimize) &&
info->function()->scope()->AllowsLazyRecompilation());
info->function()->scope()->AllowsLazyCompilation());
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
cgen.PopulateTypeFeedbackCells(code);
@@ -352,7 +353,7 @@ unsigned FullCodeGenerator::EmitStackCheckTable() {
unsigned length = stack_checks_.length();
__ dd(length);
for (unsigned i = 0; i < length; ++i) {
__ dd(stack_checks_[i].id);
__ dd(stack_checks_[i].id.ToInt());
__ dd(stack_checks_[i].pc_and_state);
}
return offset;
@@ -367,7 +368,7 @@ void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
Handle<DeoptimizationOutputData> data = isolate()->factory()->
NewDeoptimizationOutputData(length, TENURED);
for (int i = 0; i < length; i++) {
data->SetAstId(i, Smi::FromInt(bailout_entries_[i].id));
data->SetAstId(i, bailout_entries_[i].id);
data->SetPcAndState(i, Smi::FromInt(bailout_entries_[i].pc_and_state));
}
code->set_deoptimization_data(*data);
@@ -382,6 +383,20 @@ void FullCodeGenerator::PopulateTypeFeedbackInfo(Handle<Code> code) {
}
void FullCodeGenerator::Initialize() {
// The generation of debug code must match between the snapshot code and the
// code that is generated later. This is assumed by the debugger when it is
// calculating PC offsets after generating a debug version of code. Therefore
// we disable the production of debug code in the full compiler if we are
// either generating a snapshot or we booted from a snapshot.
generate_debug_code_ = FLAG_debug_code &&
!Serializer::enabled() &&
!Snapshot::HaveASnapshotToStartFrom();
masm_->set_emit_debug_code(generate_debug_code_);
masm_->set_predictable_code_size(true);
}
void FullCodeGenerator::PopulateTypeFeedbackCells(Handle<Code> code) {
if (type_feedback_cells_.is_empty()) return;
int length = type_feedback_cells_.length();
@@ -389,7 +404,7 @@ void FullCodeGenerator::PopulateTypeFeedbackCells(Handle<Code> code) {
Handle<TypeFeedbackCells> cache = Handle<TypeFeedbackCells>::cast(
isolate()->factory()->NewFixedArray(array_size, TENURED));
for (int i = 0; i < length; i++) {
cache->SetAstId(i, Smi::FromInt(type_feedback_cells_[i].ast_id));
cache->SetAstId(i, type_feedback_cells_[i].ast_id);
cache->SetCell(i, *type_feedback_cells_[i].cell);
}
TypeFeedbackInfo::cast(code->type_feedback_info())->set_type_feedback_cells(
@@ -420,7 +435,7 @@ void FullCodeGenerator::RecordJSReturnSite(Call* call) {
}
void FullCodeGenerator::PrepareForBailoutForId(unsigned id, State state) {
void FullCodeGenerator::PrepareForBailoutForId(BailoutId id, State state) {
// There's no need to prepare this code for bailouts from already optimized
// code or code that can't be optimized.
if (!info_->HasDeoptimizationSupport()) return;
@@ -445,13 +460,13 @@ void FullCodeGenerator::PrepareForBailoutForId(unsigned id, State state) {
void FullCodeGenerator::RecordTypeFeedbackCell(
unsigned id, Handle<JSGlobalPropertyCell> cell) {
TypeFeedbackId id, Handle<JSGlobalPropertyCell> cell) {
TypeFeedbackCellEntry entry = { id, cell };
type_feedback_cells_.Add(entry, zone());
}
void FullCodeGenerator::RecordStackCheck(unsigned ast_id) {
void FullCodeGenerator::RecordStackCheck(BailoutId ast_id) {
// The pc offset does not need to be encoded and packed together with a
// state.
ASSERT(masm_->pc_offset() > 0);
@@ -589,27 +604,20 @@ void FullCodeGenerator::VisitDeclarations(
void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
Handle<JSModule> instance = module->interface()->Instance();
ASSERT(!instance.is_null());
// Allocate a module context statically.
Block* block = module->body();
Scope* saved_scope = scope();
scope_ = block->scope();
Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
Interface* interface = module->interface();
Handle<JSModule> instance = interface->Instance();
// Generate code for module creation and linking.
Comment cmnt(masm_, "[ ModuleLiteral");
SetStatementPosition(block);
if (scope_info->HasContext()) {
// Set up module context.
__ Push(scope_info);
__ Push(instance);
__ CallRuntime(Runtime::kPushModuleContext, 2);
StoreToFrameField(
StandardFrameConstants::kContextOffset, context_register());
}
// Set up module context.
__ Push(instance);
__ CallRuntime(Runtime::kPushModuleContext, 1);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
{
Comment cmnt(masm_, "[ Declarations");
@@ -617,42 +625,21 @@ void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
}
scope_ = saved_scope;
if (scope_info->HasContext()) {
// Pop module context.
LoadContextField(context_register(), Context::PREVIOUS_INDEX);
// Update local stack frame context field.
StoreToFrameField(
StandardFrameConstants::kContextOffset, context_register());
}
// Populate module instance object.
const PropertyAttributes attr =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE | DONT_ENUM);
for (Interface::Iterator it = module->interface()->iterator();
!it.done(); it.Advance()) {
if (it.interface()->IsModule()) {
Handle<Object> value = it.interface()->Instance();
ASSERT(!value.is_null());
JSReceiver::SetProperty(instance, it.name(), value, attr, kStrictMode);
} else {
// TODO(rossberg): set proper getters instead of undefined...
// instance->DefineAccessor(*it.name(), ACCESSOR_GETTER, *getter, attr);
Handle<Object> value(isolate()->heap()->undefined_value());
JSReceiver::SetProperty(instance, it.name(), value, attr, kStrictMode);
}
}
USE(instance->PreventExtensions());
// Pop module context.
LoadContextField(context_register(), Context::PREVIOUS_INDEX);
// Update local stack frame context field.
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
}
void FullCodeGenerator::VisitModuleVariable(ModuleVariable* module) {
// Noting to do.
// Nothing to do.
// The instance object is resolved statically through the module's interface.
}
void FullCodeGenerator::VisitModulePath(ModulePath* module) {
// Noting to do.
// Nothing to do.
// The instance object is resolved statically through the module's interface.
}
@@ -822,7 +809,7 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
Comment cmnt(masm_, is_logical_and ? "[ Logical AND" : "[ Logical OR");
Expression* left = expr->left();
Expression* right = expr->right();
int right_id = expr->RightId();
BailoutId right_id = expr->RightId();
Label done;
if (context()->IsTest()) {
@@ -916,25 +903,36 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
Scope* saved_scope = scope();
// Push a block context when entering a block with block scoped variables.
if (stmt->scope() != NULL) {
{ Comment cmnt(masm_, "[ Extend block context");
scope_ = stmt->scope();
Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
__ Push(scope_info);
PushFunctionArgumentForContextAllocation();
if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
FastNewBlockContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kPushBlockContext, 2);
scope_ = stmt->scope();
if (scope_->is_module_scope()) {
// If this block is a module body, then we have already allocated and
// initialized the declarations earlier. Just push the context.
ASSERT(!scope_->interface()->Instance().is_null());
__ Push(scope_->interface()->Instance());
__ CallRuntime(Runtime::kPushModuleContext, 1);
StoreToFrameField(
StandardFrameConstants::kContextOffset, context_register());
} else {
{ Comment cmnt(masm_, "[ Extend block context");
Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
int heap_slots =
scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
__ Push(scope_info);
PushFunctionArgumentForContextAllocation();
if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
FastNewBlockContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kPushBlockContext, 2);
}
// Replace the context stored in the frame.
StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register());
}
{ Comment cmnt(masm_, "[ Declarations");
VisitDeclarations(scope_->declarations());
}
// Replace the context stored in the frame.
StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register());
}
{ Comment cmnt(masm_, "[ Declarations");
VisitDeclarations(scope_->declarations());
}
}
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
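The new Initialize() above centralizes the rule that debug-code emission must agree with the snapshot. A sketch of the predicate with the three inputs passed explicitly instead of read from V8 globals:

// Debug code layout must match between snapshot code and code generated
// later, so suppress it while building a snapshot or after booting from one.
bool ShouldEmitDebugCode(bool flag_debug_code,
                         bool serializer_enabled,
                         bool booted_from_snapshot) {
  return flag_debug_code && !serializer_enabled && !booted_from_snapshot;
}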

46
deps/v8/src/full-codegen.h

@@ -77,8 +77,7 @@ class FullCodeGenerator: public AstVisitor {
TOS_REG
};
FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info,
Zone* zone)
FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info)
: masm_(masm),
info_(info),
scope_(info->scope()),
@@ -87,12 +86,18 @@ class FullCodeGenerator: public AstVisitor {
globals_(NULL),
context_(NULL),
bailout_entries_(info->HasDeoptimizationSupport()
? info->function()->ast_node_count() : 0, zone),
stack_checks_(2, zone), // There's always at least one.
? info->function()->ast_node_count() : 0,
info->zone()),
stack_checks_(2, info->zone()), // There's always at least one.
type_feedback_cells_(info->HasDeoptimizationSupport()
? info->function()->ast_node_count() : 0, zone),
? info->function()->ast_node_count() : 0,
info->zone()),
ic_total_count_(0),
zone_(zone) { }
zone_(info->zone()) {
Initialize();
}
void Initialize();
static bool MakeCode(CompilationInfo* info);
@@ -112,6 +117,21 @@ class FullCodeGenerator: public AstVisitor {
Zone* zone() const { return zone_; }
static const int kMaxBackEdgeWeight = 127;
#if V8_TARGET_ARCH_IA32
static const int kBackEdgeDistanceUnit = 100;
#elif V8_TARGET_ARCH_X64
static const int kBackEdgeDistanceUnit = 162;
#elif V8_TARGET_ARCH_ARM
static const int kBackEdgeDistanceUnit = 142;
#elif V8_TARGET_ARCH_MIPS
static const int kBackEdgeDistanceUnit = 142;
#else
#error Unsupported target architecture.
#endif
private:
class Breakable;
class Iteration;
@@ -397,11 +417,12 @@ class FullCodeGenerator: public AstVisitor {
// Bailout support.
void PrepareForBailout(Expression* node, State state);
void PrepareForBailoutForId(unsigned id, State state);
void PrepareForBailoutForId(BailoutId id, State state);
// Cache cell support. This associates AST ids with global property cells
// that will be cleared during GC and collected by the type-feedback oracle.
void RecordTypeFeedbackCell(unsigned id, Handle<JSGlobalPropertyCell> cell);
void RecordTypeFeedbackCell(TypeFeedbackId id,
Handle<JSGlobalPropertyCell> cell);
// Record a call's return site offset, used to rebuild the frame if the
// called function was inlined at the site.
@@ -428,7 +449,7 @@ class FullCodeGenerator: public AstVisitor {
// of code inside the loop.
void EmitStackCheck(IterationStatement* stmt, Label* back_edge_target);
// Record the OSR AST id corresponding to a stack check in the code.
void RecordStackCheck(unsigned osr_ast_id);
void RecordStackCheck(BailoutId osr_ast_id);
// Emit a table of stack check ids and pcs into the code stream. Return
// the offset of the start of the table.
unsigned EmitStackCheckTable();
@@ -519,7 +540,7 @@ class FullCodeGenerator: public AstVisitor {
void CallIC(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId);
TypeFeedbackId id = TypeFeedbackId::None());
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
@@ -590,12 +611,12 @@ class FullCodeGenerator: public AstVisitor {
Handle<FixedArray> handler_table() { return handler_table_; }
struct BailoutEntry {
unsigned id;
BailoutId id;
unsigned pc_and_state;
};
struct TypeFeedbackCellEntry {
unsigned ast_id;
TypeFeedbackId ast_id;
Handle<JSGlobalPropertyCell> cell;
};
@@ -790,6 +811,7 @@ class FullCodeGenerator: public AstVisitor {
int ic_total_count_;
Handle<FixedArray> handler_table_;
Handle<JSGlobalPropertyCell> profiling_counter_;
bool generate_debug_code_;
Zone* zone_;
friend class NestedStatement;
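Much of this header's churn replaces raw unsigned ids with the typed wrappers BailoutId and TypeFeedbackId, so the two id spaces can no longer be mixed up. A minimal sketch of that wrapper pattern; the None() encoding here is an assumption:

class TypedIdSketch {
 public:
  explicit TypedIdSketch(int id) : id_(id) {}
  static TypedIdSketch None() { return TypedIdSketch(kNone); }  // assumed encoding
  bool IsNone() const { return id_ == kNone; }
  int ToInt() const { return id_; }
  bool operator==(TypedIdSketch other) const { return id_ == other.id_; }

 private:
  static const int kNone = -1;
  int id_;  // raw value, no longer implicitly convertible across id spaces
};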

15
deps/v8/src/globals.h

@@ -203,6 +203,7 @@ typedef byte* Address;
#define V8PRIxPTR V8_PTR_PREFIX "x"
#define V8PRIdPTR V8_PTR_PREFIX "d"
#define V8PRIuPTR V8_PTR_PREFIX "u"
// Fix for Mac OS X defining uintptr_t as "unsigned long":
#if defined(__APPLE__) && defined(__MACH__)
@@ -360,6 +361,20 @@ F FUNCTION_CAST(Address addr) {
#define MUST_USE_RESULT
#endif
// Define DISABLE_ASAN macros.
#if defined(__has_feature)
#if __has_feature(address_sanitizer)
#define DISABLE_ASAN __attribute__((no_address_safety_analysis))
#endif
#endif
#ifndef DISABLE_ASAN
#define DISABLE_ASAN
#endif
// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
// (sorted alphabetically)
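For context, DISABLE_ASAN (defined in the hunk above) is meant to annotate functions whose deliberate out-of-bounds reads would otherwise trip AddressSanitizer. A hypothetical use, relying only on the macro just defined:

// Hypothetical example: read a full word even if the allocation ends mid-word.
DISABLE_ASAN static inline int ReadWordUnchecked(const char* p) {
  return *reinterpret_cast<const int*>(p);  // intentionally unchecked read
}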

30
deps/v8/src/handles-inl.h

@@ -149,25 +149,31 @@ T** HandleScope::CreateHandle(T* value, Isolate* isolate) {
#ifdef DEBUG
inline NoHandleAllocation::NoHandleAllocation() {
Isolate* isolate = Isolate::Current();
v8::ImplementationUtilities::HandleScopeData* current =
Isolate::Current()->handle_scope_data();
isolate->handle_scope_data();
// Shrink the current handle scope to make it impossible to do
// handle allocations without an explicit handle scope.
current->limit = current->next;
active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
if (active_) {
// Shrink the current handle scope to make it impossible to do
// handle allocations without an explicit handle scope.
current->limit = current->next;
level_ = current->level;
current->level = 0;
level_ = current->level;
current->level = 0;
}
}
inline NoHandleAllocation::~NoHandleAllocation() {
// Restore state in current handle scope to re-enable handle
// allocations.
v8::ImplementationUtilities::HandleScopeData* data =
Isolate::Current()->handle_scope_data();
ASSERT_EQ(0, data->level);
data->level = level_;
if (active_) {
// Restore state in current handle scope to re-enable handle
// allocations.
v8::ImplementationUtilities::HandleScopeData* data =
Isolate::Current()->handle_scope_data();
ASSERT_EQ(0, data->level);
data->level = level_;
}
}
#endif
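The handles-inl.h change turns NoHandleAllocation into a conditional guard that is inert on the optimizer thread. A self-contained model of that shape; the thread query and the enter/leave helpers are stand-ins:

class ScopedGuardSketch {
 public:
  explicit ScopedGuardSketch(bool on_optimizer_thread)
      : active_(!on_optimizer_thread), saved_level_(0) {
    if (active_) saved_level_ = Enter();  // guard only on ordinary threads
  }
  ~ScopedGuardSketch() {
    if (active_) Leave(saved_level_);     // restore exactly what was saved
  }

 private:
  static int Enter() { return 0; }        // stand-in: shrink the handle scope
  static void Leave(int /*level*/) {}     // stand-in: restore it
  bool active_;
  int saved_level_;
};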

156
deps/v8/src/handles.cc

@@ -165,7 +165,7 @@ void SetExpectedNofProperties(Handle<JSFunction> func, int nof) {
func->shared()->set_expected_nof_properties(nof);
if (func->has_initial_map()) {
Handle<Map> new_initial_map =
func->GetIsolate()->factory()->CopyMapDropTransitions(
func->GetIsolate()->factory()->CopyMap(
Handle<Map>(func->initial_map()));
new_initial_map->set_unused_property_fields(nof);
func->set_initial_map(*new_initial_map);
@@ -561,6 +561,9 @@ v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
result = enum_fun(info);
}
}
#if ENABLE_EXTRA_CHECKS
CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
#endif
return result;
}
@@ -581,6 +584,9 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
// Leaving JavaScript.
VMState state(isolate, EXTERNAL);
result = enum_fun(info);
#if ENABLE_EXTRA_CHECKS
CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
#endif
}
}
return result;
@@ -604,7 +610,7 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
Isolate* isolate = object->GetIsolate();
Handle<FixedArray> content = isolate->factory()->empty_fixed_array();
Handle<JSObject> arguments_boilerplate = Handle<JSObject>(
isolate->context()->global_context()->arguments_boilerplate(),
isolate->context()->native_context()->arguments_boilerplate(),
isolate);
Handle<JSFunction> arguments_function = Handle<JSFunction>(
JSFunction::cast(arguments_boilerplate->map()->constructor()),
@@ -701,75 +707,106 @@ Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw) {
Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
bool cache_result) {
int index = 0;
Isolate* isolate = object->GetIsolate();
if (object->HasFastProperties()) {
if (object->map()->instance_descriptors()->HasEnumCache()) {
isolate->counters()->enum_cache_hits()->Increment();
int own_property_count = object->map()->EnumLength();
// Mark that we have an enum cache if we are allowed to cache it.
if (cache_result && own_property_count == Map::kInvalidEnumCache) {
int num_enum = object->map()->NumberOfDescribedProperties(DONT_ENUM);
object->map()->SetEnumLength(num_enum);
}
DescriptorArray* desc = object->map()->instance_descriptors();
return Handle<FixedArray>(FixedArray::cast(desc->GetEnumCache()),
isolate);
Handle<FixedArray> keys(FixedArray::cast(desc->GetEnumCache()), isolate);
isolate->counters()->enum_cache_hits()->Increment();
return keys;
}
isolate->counters()->enum_cache_misses()->Increment();
Handle<Map> map(object->map());
int num_enum = object->NumberOfLocalProperties(DONT_ENUM);
Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
Handle<FixedArray> sort_array = isolate->factory()->NewFixedArray(num_enum);
if (map->instance_descriptors()->IsEmpty()) {
isolate->counters()->enum_cache_hits()->Increment();
if (cache_result) map->SetEnumLength(0);
return isolate->factory()->empty_fixed_array();
}
Handle<FixedArray> indices;
Handle<FixedArray> sort_array2;
isolate->counters()->enum_cache_misses()->Increment();
if (cache_result) {
indices = isolate->factory()->NewFixedArray(num_enum);
sort_array2 = isolate->factory()->NewFixedArray(num_enum);
}
int num_enum = map->NumberOfDescribedProperties(DONT_ENUM);
Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
Handle<FixedArray> indices = isolate->factory()->NewFixedArray(num_enum);
Handle<DescriptorArray> descs =
Handle<DescriptorArray>(object->map()->instance_descriptors(), isolate);
int index = 0;
for (int i = 0; i < descs->number_of_descriptors(); i++) {
if (descs->IsProperty(i) && !descs->GetDetails(i).IsDontEnum()) {
PropertyDetails details = descs->GetDetails(i);
if (!details.IsDontEnum()) {
storage->set(index, descs->GetKey(i));
PropertyDetails details = descs->GetDetails(i);
sort_array->set(index, Smi::FromInt(details.index()));
if (!indices.is_null()) {
if (details.type() != FIELD) {
indices = Handle<FixedArray>();
sort_array2 = Handle<FixedArray>();
} else {
int field_index = Descriptor::IndexFromValue(descs->GetValue(i));
if (field_index >= map->inobject_properties()) {
field_index = -(field_index - map->inobject_properties() + 1);
}
indices->set(index, Smi::FromInt(field_index));
sort_array2->set(index, Smi::FromInt(details.index()));
}
}
index++;
}
}
storage->SortPairs(*sort_array, sort_array->length());
if (!indices.is_null()) {
indices->SortPairs(*sort_array2, sort_array2->length());
}
ASSERT(index == storage->length());
Handle<FixedArray> bridge_storage =
isolate->factory()->NewFixedArray(
DescriptorArray::kEnumCacheBridgeLength);
DescriptorArray* desc = object->map()->instance_descriptors();
desc->SetEnumCache(*bridge_storage,
*storage,
indices.is_null() ? Object::cast(Smi::FromInt(0))
: Object::cast(*indices));
if (cache_result) {
Handle<FixedArray> bridge_storage =
isolate->factory()->NewFixedArray(
DescriptorArray::kEnumCacheBridgeLength);
DescriptorArray* desc = object->map()->instance_descriptors();
desc->SetEnumCache(*bridge_storage,
*storage,
indices.is_null() ? Object::cast(Smi::FromInt(0))
: Object::cast(*indices));
object->map()->SetEnumLength(index);
}
ASSERT(storage->length() == index);
return storage;
} else {
int num_enum = object->NumberOfLocalProperties(DONT_ENUM);
Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
Handle<FixedArray> sort_array = isolate->factory()->NewFixedArray(num_enum);
object->property_dictionary()->CopyEnumKeysTo(*storage, *sort_array);
Handle<StringDictionary> dictionary(object->property_dictionary());
int length = dictionary->NumberOfElements();
if (length == 0) {
return Handle<FixedArray>(isolate->heap()->empty_fixed_array());
}
// The enumeration array is generated by allocating an array big enough to
// hold all properties that have been seen, whether they are deleted or
// not. Subsequently all visible properties are added to the array. If some
// properties were not visible, the array is trimmed so it only contains
// visible properties. This improves over adding elements and sorting by
// index by having linear complexity rather than n*log(n).
// By comparing the monotonic NextEnumerationIndex to the NumberOfElements,
// we can predict the number of holes in the final array. If there will be
// more than 50% holes, regenerate the enumeration indices to reduce the
// number of holes to a minimum. This avoids allocating a large array if
// many properties were added but subsequently deleted.
int next_enumeration = dictionary->NextEnumerationIndex();
if (!object->IsGlobalObject() && next_enumeration > (length * 3) / 2) {
StringDictionary::DoGenerateNewEnumerationIndices(dictionary);
next_enumeration = dictionary->NextEnumerationIndex();
}
Handle<FixedArray> storage =
isolate->factory()->NewFixedArray(next_enumeration);
storage = Handle<FixedArray>(dictionary->CopyEnumKeysTo(*storage));
ASSERT(storage->length() == object->NumberOfLocalProperties(DONT_ENUM));
return storage;
}
}
@@ -958,4 +995,47 @@ int Utf8Length(Handle<String> str) {
return len;
}
DeferredHandleScope::DeferredHandleScope(Isolate* isolate)
: impl_(isolate->handle_scope_implementer()) {
ASSERT(impl_->isolate() == Isolate::Current());
impl_->BeginDeferredScope();
v8::ImplementationUtilities::HandleScopeData* data =
impl_->isolate()->handle_scope_data();
Object** new_next = impl_->GetSpareOrNewBlock();
Object** new_limit = &new_next[kHandleBlockSize];
ASSERT(data->limit == &impl_->blocks()->last()[kHandleBlockSize]);
impl_->blocks()->Add(new_next);
#ifdef DEBUG
prev_level_ = data->level;
#endif
data->level++;
prev_limit_ = data->limit;
prev_next_ = data->next;
data->next = new_next;
data->limit = new_limit;
}
DeferredHandleScope::~DeferredHandleScope() {
impl_->isolate()->handle_scope_data()->level--;
ASSERT(handles_detached_);
ASSERT(impl_->isolate()->handle_scope_data()->level == prev_level_);
}
DeferredHandles* DeferredHandleScope::Detach() {
DeferredHandles* deferred = impl_->Detach(prev_limit_);
v8::ImplementationUtilities::HandleScopeData* data =
impl_->isolate()->handle_scope_data();
data->next = prev_next_;
data->limit = prev_limit_;
#ifdef DEBUG
handles_detached_ = true;
#endif
return deferred;
}
} } // namespace v8::internal
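The DeferredHandleScope code above lets handles created inside the scope outlive it once Detach() hands them to a DeferredHandles object. A self-contained model of that ownership handoff, with a plain container standing in for the handle blocks:

#include <memory>
#include <vector>

struct DeferredBlockSketch {
  std::vector<int> handles;  // stands in for the detached handle blocks
};

class DeferredScopeSketch {
 public:
  DeferredScopeSketch() : block_(new DeferredBlockSketch()) {}
  void Create(int v) { block_->handles.push_back(v); }  // "allocate" a handle
  std::unique_ptr<DeferredBlockSketch> Detach() {       // caller takes ownership
    return std::move(block_);
  }

 private:
  std::unique_ptr<DeferredBlockSketch> block_;
};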

36
deps/v8/src/handles.h

@@ -95,6 +95,10 @@ class Handle {
};
class DeferredHandles;
class HandleScopeImplementer;
// A stack-allocated class that governs a number of local handles.
// After a handle scope has been created, all local handles will be
// allocated within that handle scope until either the handle scope is
@@ -156,8 +160,37 @@ class HandleScope {
// Zaps the handles in the half-open interval [start, end).
static void ZapRange(internal::Object** start, internal::Object** end);
friend class v8::internal::DeferredHandles;
friend class v8::HandleScope;
friend class v8::internal::HandleScopeImplementer;
friend class v8::ImplementationUtilities;
friend class v8::internal::Isolate;
};
class DeferredHandles;
class DeferredHandleScope {
public:
explicit DeferredHandleScope(Isolate* isolate);
// The DeferredHandles object returned stores the Handles created
// since the creation of this DeferredHandleScope. The Handles are
// alive as long as the DeferredHandles object is alive.
DeferredHandles* Detach();
~DeferredHandleScope();
private:
Object** prev_limit_;
Object** prev_next_;
HandleScopeImplementer* impl_;
#ifdef DEBUG
bool handles_detached_;
int prev_level_;
#endif
friend class HandleScopeImplementer;
};
@@ -216,7 +249,7 @@ Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,
// if none exists.
Handle<JSValue> GetScriptWrapper(Handle<Script> script);
// Script line number computations.
// Script line number computations. Note that the line number is zero-based.
void InitScriptLineEnds(Handle<Script> script);
// For string calculates an array of line end positions. If the string
// does not end with a new line character, this character may optionally be
@@ -294,6 +327,7 @@ class NoHandleAllocation BASE_EMBEDDED {
inline ~NoHandleAllocation();
private:
int level_;
bool active_;
#endif
};
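A hypothetical usage shape for the class declared above; the names follow the declaration, but the body is elided and illustrative. Note that the implementation asserts the handles were detached before destruction, so Detach() is not optional on this path:

// Hypothetical: produce handles in a nested scope and hand them back.
DeferredHandles* CollectResults(Isolate* isolate) {
  DeferredHandleScope scope(isolate);
  // ... allocate Handle<T>s here; the scope records them ...
  return scope.Detach();  // must run before the scope is destroyed
}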

8
deps/v8/src/hashmap.h

@@ -59,7 +59,8 @@ class TemplateHashMapImpl {
struct Entry {
void* key;
void* value;
uint32_t hash; // the full hash value for key
uint32_t hash; // The full hash value for key
int order; // If you never remove entries this is the insertion order.
};
// If an entry with matching key is found, Lookup()
@@ -140,6 +141,7 @@ TemplateHashMapImpl<AllocationPolicy>::Lookup(
p->key = key;
p->value = NULL;
p->hash = hash;
p->order = occupancy_;
occupancy_++;
// Grow the map if we reached >= 80% occupancy.
@@ -297,7 +299,9 @@ void TemplateHashMapImpl<AllocationPolicy>::Resize(AllocationPolicy allocator) {
// Rehash all current entries.
for (Entry* p = map; n > 0; p++) {
if (p->key != NULL) {
Lookup(p->key, p->hash, true, allocator)->value = p->value;
Entry* entry = Lookup(p->key, p->hash, true, allocator);
entry->value = p->value;
entry->order = p->order;
n--;
}
}
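The hashmap hunks add an insertion-order stamp and, crucially, copy it during Resize(), since rehashing creates fresh entries that would otherwise lose it. A compact sketch of the field and the copy step:

#include <stdint.h>

struct EntrySketch {
  void* key;
  void* value;
  uint32_t hash;  // full hash value for key
  int order;      // insertion order; stable only while nothing is removed
};

// On resize, every field not re-derived from the key must be copied by hand.
inline void MoveEntrySketch(const EntrySketch* from, EntrySketch* to) {
  to->value = from->value;
  to->order = from->order;  // the line the diff adds, in spirit
}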

41
deps/v8/src/heap-inl.h

@@ -468,10 +468,12 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
// Avoid overflow.
if (amount > amount_of_external_allocated_memory_) {
amount_of_external_allocated_memory_ = amount;
} else {
// Give up and reset the counters in case of an overflow.
amount_of_external_allocated_memory_ = 0;
amount_of_external_allocated_memory_at_last_global_gc_ = 0;
}
intptr_t amount_since_last_global_gc =
amount_of_external_allocated_memory_ -
amount_of_external_allocated_memory_at_last_global_gc_;
intptr_t amount_since_last_global_gc = PromotedExternalMemorySize();
if (amount_since_last_global_gc > external_allocation_limit_) {
CollectAllGarbage(kNoGCFlags, "external memory allocation limit reached");
}
@@ -479,8 +481,19 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
// Avoid underflow.
if (amount >= 0) {
amount_of_external_allocated_memory_ = amount;
} else {
// Give up and reset the counters in case of an overflow.
amount_of_external_allocated_memory_ = 0;
amount_of_external_allocated_memory_at_last_global_gc_ = 0;
}
}
if (FLAG_trace_external_memory) {
PrintPID("%8.0f ms: ", isolate()->time_millis_since_init());
PrintF("Adjust amount of external memory: delta=%6" V8_PTR_PREFIX "d KB, "
" amount=%6" V8_PTR_PREFIX "d KB, isolate=0x%08" V8PRIxPTR ".\n",
change_in_bytes / 1024, amount_of_external_allocated_memory_ / 1024,
reinterpret_cast<intptr_t>(isolate()));
}
ASSERT(amount_of_external_allocated_memory_ >= 0);
return amount_of_external_allocated_memory_;
}
@@ -757,37 +770,47 @@ double GCTracer::SizeOfHeapObjects() {
}
#ifdef DEBUG
DisallowAllocationFailure::DisallowAllocationFailure() {
#ifdef DEBUG
old_state_ = HEAP->disallow_allocation_failure_;
HEAP->disallow_allocation_failure_ = true;
#endif
}
DisallowAllocationFailure::~DisallowAllocationFailure() {
#ifdef DEBUG
HEAP->disallow_allocation_failure_ = old_state_;
}
#endif
}
#ifdef DEBUG
AssertNoAllocation::AssertNoAllocation() {
old_state_ = HEAP->allow_allocation(false);
Isolate* isolate = ISOLATE;
active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
if (active_) {
old_state_ = isolate->heap()->allow_allocation(false);
}
}
AssertNoAllocation::~AssertNoAllocation() {
HEAP->allow_allocation(old_state_);
if (active_) HEAP->allow_allocation(old_state_);
}
DisableAssertNoAllocation::DisableAssertNoAllocation() {
old_state_ = HEAP->allow_allocation(true);
Isolate* isolate = ISOLATE;
active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
if (active_) {
old_state_ = isolate->heap()->allow_allocation(true);
}
}
DisableAssertNoAllocation::~DisableAssertNoAllocation() {
HEAP->allow_allocation(old_state_);
if (active_) HEAP->allow_allocation(old_state_);
}
#else
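The accounting hunks above clamp the external-memory counters on wraparound instead of letting a corrupted value propagate. A standalone sketch of that policy with simplified signatures; unlike the post-hoc comparison in the real code, the wrap detection here is written to avoid signed overflow:

#include <stdint.h>

void AdjustExternalMemorySketch(intptr_t delta,
                                intptr_t* amount,
                                intptr_t* amount_at_last_gc) {
  bool overflow = delta > 0 && *amount > INTPTR_MAX - delta;
  bool underflow = delta < 0 && *amount < -delta;  // assumes delta > INTPTR_MIN
  if (overflow || underflow) {
    *amount = 0;             // give up and reset both counters
    *amount_at_last_gc = 0;  // so "since last GC" stays consistent
  } else {
    *amount += delta;
  }
}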

653
deps/v8/src/heap.cc

File diff suppressed because it is too large

198
deps/v8/src/heap.h

@@ -64,7 +64,7 @@ namespace internal {
V(Map, ascii_symbol_map, AsciiSymbolMap) \
V(Map, ascii_string_map, AsciiStringMap) \
V(Map, heap_number_map, HeapNumberMap) \
V(Map, global_context_map, GlobalContextMap) \
V(Map, native_context_map, NativeContextMap) \
V(Map, fixed_array_map, FixedArrayMap) \
V(Map, code_map, CodeMap) \
V(Map, scope_info_map, ScopeInfoMap) \
@@ -87,6 +87,7 @@ namespace internal {
V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, string_split_cache, StringSplitCache) \
V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
V(Object, termination_exception, TerminationException) \
V(Smi, hash_seed, HashSeed) \
V(Map, string_map, StringMap) \
@@ -130,6 +131,7 @@ namespace internal {
V(Map, with_context_map, WithContextMap) \
V(Map, block_context_map, BlockContextMap) \
V(Map, module_context_map, ModuleContextMap) \
V(Map, global_context_map, GlobalContextMap) \
V(Map, oddball_map, OddballMap) \
V(Map, message_object_map, JSMessageObjectMap) \
V(Map, foreign_map, ForeignMap) \
@@ -150,7 +152,9 @@ namespace internal {
V(Smi, real_stack_limit, RealStackLimit) \
V(StringDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)
V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
@@ -531,7 +535,8 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateJSObject(
JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT MaybeObject* AllocateJSModule();
MUST_USE_RESULT MaybeObject* AllocateJSModule(Context* context,
ScopeInfo* scope_info);
// Allocate a JSArray with no elements
MUST_USE_RESULT MaybeObject* AllocateEmptyJSArray(
@@ -821,13 +826,16 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateHashTable(
int length, PretenureFlag pretenure = NOT_TENURED);
// Allocate a global (but otherwise uninitialized) context.
MUST_USE_RESULT MaybeObject* AllocateGlobalContext();
// Allocate a native (but otherwise uninitialized) context.
MUST_USE_RESULT MaybeObject* AllocateNativeContext();
// Allocate a module context.
MUST_USE_RESULT MaybeObject* AllocateModuleContext(Context* previous,
// Allocate a global context.
MUST_USE_RESULT MaybeObject* AllocateGlobalContext(JSFunction* function,
ScopeInfo* scope_info);
// Allocate a module context.
MUST_USE_RESULT MaybeObject* AllocateModuleContext(ScopeInfo* scope_info);
// Allocate a function context.
MUST_USE_RESULT MaybeObject* AllocateFunctionContext(int length,
JSFunction* function);
@@ -1101,8 +1109,8 @@ class Heap {
#endif
void AddGCPrologueCallback(
GCEpilogueCallback callback, GCType gc_type_filter);
void RemoveGCPrologueCallback(GCEpilogueCallback callback);
GCPrologueCallback callback, GCType gc_type_filter);
void RemoveGCPrologueCallback(GCPrologueCallback callback);
void AddGCEpilogueCallback(
GCEpilogueCallback callback, GCType gc_type_filter);
@@ -1149,13 +1157,13 @@ class Heap {
// not match the empty string.
String* hidden_symbol() { return hidden_symbol_; }
void set_global_contexts_list(Object* object) {
global_contexts_list_ = object;
void set_native_contexts_list(Object* object) {
native_contexts_list_ = object;
}
Object* global_contexts_list() { return global_contexts_list_; }
Object* native_contexts_list() { return native_contexts_list_; }
// Number of mark-sweeps.
int ms_count() { return ms_count_; }
unsigned int ms_count() { return ms_count_; }
// Iterates over all roots in the heap.
void IterateRoots(ObjectVisitor* v, VisitMode mode);
@@ -1226,9 +1234,9 @@ class Heap {
return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
}
// Get address of global contexts list for serialization support.
Object** global_contexts_list_address() {
return &global_contexts_list_;
// Get address of native contexts list for serialization support.
Object** native_contexts_list_address() {
return &native_contexts_list_;
}
#ifdef DEBUG
@@ -1238,10 +1246,6 @@ class Heap {
// Verify the heap is in its normal state before or after a GC.
void Verify();
// Verify that AccessorPairs are not shared, i.e. make sure that they have
// exactly one pointer to them.
void VerifyNoAccessorPairSharing();
void OldPointerSpaceCheckStoreBuffer();
void MapSpaceCheckStoreBuffer();
void LargeObjectSpaceCheckStoreBuffer();
@@ -1290,6 +1294,7 @@ class Heap {
return disallow_allocation_failure_;
}
void TracePathToObjectFrom(Object* target, Object* root);
void TracePathToObject(Object* target);
void TracePathToGlobal();
#endif
@@ -1393,15 +1398,15 @@ class Heap {
STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION
// Utility type maps
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP
#define SYMBOL_INDEX_DECLARATION(name, str) k##name##RootIndex,
SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_DECLARATION
// Utility type maps
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP
kSymbolTableRootIndex,
kStrongRootListLength = kSymbolTableRootIndex,
kRootListLength
@@ -1588,6 +1593,16 @@ class Heap {
set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}
void SetGetterStubDeoptPCOffset(int pc_offset) {
ASSERT(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}
void SetSetterStubDeoptPCOffset(int pc_offset) {
ASSERT(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}
// For post mortem debugging.
void RememberUnmappedPage(Address page, bool compacted);
@@ -1601,6 +1616,60 @@ class Heap {
global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
}
intptr_t amount_of_external_allocated_memory() {
return amount_of_external_allocated_memory_;
}
// ObjectStats are kept in two arrays, counts and sizes. Related stats are
// stored in a contiguous linear buffer. Stats groups are stored one after
// another.
enum {
FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
FIRST_FIXED_ARRAY_SUB_TYPE =
FIRST_CODE_KIND_SUB_TYPE + Code::LAST_CODE_KIND + 1,
OBJECT_STATS_COUNT =
FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1
};
void RecordObjectStats(InstanceType type, int sub_type, size_t size) {
ASSERT(type <= LAST_TYPE);
if (sub_type < 0) {
object_counts_[type]++;
object_sizes_[type] += size;
} else {
if (type == CODE_TYPE) {
ASSERT(sub_type <= Code::LAST_CODE_KIND);
object_counts_[FIRST_CODE_KIND_SUB_TYPE + sub_type]++;
object_sizes_[FIRST_CODE_KIND_SUB_TYPE + sub_type] += size;
} else if (type == FIXED_ARRAY_TYPE) {
ASSERT(sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + sub_type]++;
object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + sub_type] += size;
}
}
}
void CheckpointObjectStats();
// We don't use a ScopedLock here since we want to lock the heap
// only when FLAG_parallel_recompilation is true.
class RelocationLock {
public:
explicit RelocationLock(Heap* heap) : heap_(heap) {
if (FLAG_parallel_recompilation) {
heap_->relocation_mutex_->Lock();
}
}
~RelocationLock() {
if (FLAG_parallel_recompilation) {
heap_->relocation_mutex_->Unlock();
}
}
private:
Heap* heap_;
};
private:
Heap();
@@ -1653,7 +1722,7 @@ class Heap {
// Returns the amount of external memory registered since last global gc.
intptr_t PromotedExternalMemorySize();
int ms_count_; // how many mark-sweep collections happened
unsigned int ms_count_; // how many mark-sweep collections happened
unsigned int gc_count_; // how many gc happened
// For post mortem debugging.
@@ -1725,7 +1794,7 @@ class Heap {
// last GC.
int old_gen_exhausted_;
Object* global_contexts_list_;
Object* native_contexts_list_;
StoreBufferRebuilder store_buffer_rebuilder_;
@@ -1994,14 +2063,24 @@ class Heap {
void AdvanceIdleIncrementalMarking(intptr_t step_size);
void ClearObjectStats(bool clear_last_time_stats = false);
static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
static const int kInitialNumberStringCacheSize = 256;
// Object counts and used memory by InstanceType
size_t object_counts_[OBJECT_STATS_COUNT];
size_t object_counts_last_time_[OBJECT_STATS_COUNT];
size_t object_sizes_[OBJECT_STATS_COUNT];
size_t object_sizes_last_time_[OBJECT_STATS_COUNT];
// Maximum GC pause.
int max_gc_pause_;
// Total time spent in GC.
int total_gc_time_ms_;
// Maximum size of objects alive after GC.
intptr_t max_alive_after_gc_;
@@ -2046,6 +2125,8 @@ class Heap {
MemoryChunk* chunks_queued_for_free_;
Mutex* relocation_mutex_;
friend class Factory;
friend class GCTracer;
friend class DisallowAllocationFailure;
@@ -2054,7 +2135,7 @@ class Heap {
friend class Page;
friend class Isolate;
friend class MarkCompactCollector;
friend class StaticMarkingVisitor;
friend class MarkCompactMarkingVisitor;
friend class MapCompact;
DISALLOW_COPY_AND_ASSIGN(Heap);
@@ -2094,10 +2175,26 @@ class HeapStats {
};
class DisallowAllocationFailure {
public:
inline DisallowAllocationFailure();
inline ~DisallowAllocationFailure();
#ifdef DEBUG
private:
bool old_state_;
#endif
};
class AlwaysAllocateScope {
public:
inline AlwaysAllocateScope();
inline ~AlwaysAllocateScope();
private:
// Implicitly disable artificial allocation failures.
DisallowAllocationFailure disallow_allocation_failure_;
};
@@ -2322,9 +2419,11 @@ class DescriptorLookupCache {
static int Hash(DescriptorArray* array, String* name) {
// Uses only lower 32 bits if pointers are larger.
uint32_t array_hash =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(array)) >> 2;
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(array))
>> kPointerSizeLog2;
uint32_t name_hash =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >> 2;
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name))
>> kPointerSizeLog2;
return (array_hash ^ name_hash) % kLength;
}
@@ -2342,18 +2441,6 @@ class DescriptorLookupCache {
};
#ifdef DEBUG
class DisallowAllocationFailure {
public:
inline DisallowAllocationFailure();
inline ~DisallowAllocationFailure();
private:
bool old_state_;
};
#endif
// A helper class to document/test C++ scopes where we do not
// expect a GC. Usage:
//
@@ -2369,6 +2456,7 @@ class AssertNoAllocation {
#ifdef DEBUG
private:
bool old_state_;
bool active_;
#endif
};
@@ -2381,6 +2469,7 @@ class DisableAssertNoAllocation {
#ifdef DEBUG
private:
bool old_state_;
bool active_;
#endif
};
@@ -2500,24 +2589,31 @@ class GCTracer BASE_EMBEDDED {
};
class StringSplitCache {
class RegExpResultsCache {
public:
static Object* Lookup(FixedArray* cache, String* string, String* pattern);
enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS };
// Attempt to retrieve a cached result. On failure, 0 is returned as a Smi.
// On success, the returned result is guaranteed to be a COW-array.
static Object* Lookup(Heap* heap,
String* key_string,
Object* key_pattern,
ResultsCacheType type);
// Attempt to add value_array to the cache specified by type. On success,
// value_array is turned into a COW-array.
static void Enter(Heap* heap,
FixedArray* cache,
String* string,
String* pattern,
FixedArray* array);
String* key_string,
Object* key_pattern,
FixedArray* value_array,
ResultsCacheType type);
static void Clear(FixedArray* cache);
static const int kStringSplitCacheSize = 0x100;
static const int kRegExpResultsCacheSize = 0x100;
private:
static const int kArrayEntriesPerCacheEntry = 4;
static const int kStringOffset = 0;
static const int kPatternOffset = 1;
static const int kArrayOffset = 2;
static MaybeObject* WrapFixedArrayInJSArray(Object* fixed_array);
};
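RelocationLock above takes the mutex only when parallel recompilation is enabled, keeping the common path lock-free. The same shape in standard C++, with the flag passed in rather than read from a global:

#include <mutex>

class ConditionalLockSketch {
 public:
  ConditionalLockSketch(std::mutex* m, bool enabled)
      : mutex_(enabled ? m : nullptr) {
    if (mutex_ != nullptr) mutex_->lock();    // lock only when needed
  }
  ~ConditionalLockSketch() {
    if (mutex_ != nullptr) mutex_->unlock();  // matching unlock on scope exit
  }

 private:
  std::mutex* mutex_;
};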

196
deps/v8/src/hydrogen-instructions.cc

@@ -156,6 +156,20 @@ void Range::Union(Range* other) {
}
void Range::CombinedMax(Range* other) {
upper_ = Max(upper_, other->upper_);
lower_ = Max(lower_, other->lower_);
set_can_be_minus_zero(CanBeMinusZero() || other->CanBeMinusZero());
}
void Range::CombinedMin(Range* other) {
upper_ = Min(upper_, other->upper_);
lower_ = Min(lower_, other->lower_);
set_can_be_minus_zero(CanBeMinusZero() || other->CanBeMinusZero());
}
void Range::Sar(int32_t value) {
int32_t bits = value & 0x1F;
lower_ = lower_ >> bits;
@@ -861,12 +875,14 @@ HValue* HBitwise::Canonicalize() {
int32_t nop_constant = (op() == Token::BIT_AND) ? -1 : 0;
if (left()->IsConstant() &&
HConstant::cast(left())->HasInteger32Value() &&
HConstant::cast(left())->Integer32Value() == nop_constant) {
HConstant::cast(left())->Integer32Value() == nop_constant &&
!right()->CheckFlag(kUint32)) {
return right();
}
if (right()->IsConstant() &&
HConstant::cast(right())->HasInteger32Value() &&
HConstant::cast(right())->Integer32Value() == nop_constant) {
HConstant::cast(right())->Integer32Value() == nop_constant &&
!left()->CheckFlag(kUint32)) {
return left();
}
return this;
@@ -878,7 +894,9 @@ HValue* HBitNot::Canonicalize() {
if (value()->IsBitNot()) {
HValue* result = HBitNot::cast(value())->value();
ASSERT(result->representation().IsInteger32());
return result;
if (!result->CheckFlag(kUint32)) {
return result;
}
}
return this;
}
@@ -941,7 +959,8 @@ HValue* HUnaryMathOperation::Canonicalize() {
// introduced.
if (value()->representation().IsInteger32()) return value();
#ifdef V8_TARGET_ARCH_ARM
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_IA32) || \
defined(V8_TARGET_ARCH_X64)
if (value()->IsDiv() && (value()->UseCount() == 1)) {
// TODO(2038): Implement this optimization for non ARM architectures.
HDiv* hdiv = HDiv::cast(value());
@@ -1072,6 +1091,11 @@ void HCheckInstanceType::PrintDataTo(StringStream* stream) {
}
void HCheckPrototypeMaps::PrintDataTo(StringStream* stream) {
stream->Add("[receiver_prototype=%p,holder=%p]", *prototype(), *holder());
}
void HCallStub::PrintDataTo(StringStream* stream) {
stream->Add("%s ",
CodeStub::MajorName(major_key_, false));
@@ -1100,6 +1124,7 @@ Range* HChange::InferRange(Zone* zone) {
Range* input_range = value()->range();
if (from().IsInteger32() &&
to().IsTagged() &&
!value()->CheckFlag(HInstruction::kUint32) &&
input_range != NULL && input_range->IsInSmiRange()) {
set_type(HType::Smi());
}
@@ -1232,6 +1257,24 @@ Range* HMod::InferRange(Zone* zone) {
}
Range* HMathMinMax::InferRange(Zone* zone) {
if (representation().IsInteger32()) {
Range* a = left()->range();
Range* b = right()->range();
Range* res = a->Copy(zone);
if (operation_ == kMathMax) {
res->CombinedMax(b);
} else {
ASSERT(operation_ == kMathMin);
res->CombinedMin(b);
}
return res;
} else {
return HValue::InferRange(zone);
}
}
void HPhi::PrintTo(StringStream* stream) {
stream->Add("[");
for (int i = 0; i < OperandCount(); ++i) {
@@ -1346,7 +1389,7 @@ void HPhi::ResetInteger32Uses() {
void HSimulate::PrintDataTo(StringStream* stream) {
stream->Add("id=%d", ast_id());
stream->Add("id=%d", ast_id().ToInt());
if (pop_count_ > 0) stream->Add(" pop %d", pop_count_);
if (values_.length() > 0) {
if (pop_count_ > 0) stream->Add(" /");
@@ -1375,45 +1418,82 @@ void HDeoptimize::PrintDataTo(StringStream* stream) {
void HEnterInlined::PrintDataTo(StringStream* stream) {
SmartArrayPointer<char> name = function()->debug_name()->ToCString();
stream->Add("%s, id=%d", *name, function()->id());
stream->Add("%s, id=%d", *name, function()->id().ToInt());
}
static bool IsInteger32(double value) {
double roundtrip_value = static_cast<double>(static_cast<int32_t>(value));
return BitCast<int64_t>(roundtrip_value) == BitCast<int64_t>(value);
}
HConstant::HConstant(Handle<Object> handle, Representation r)
: handle_(handle),
has_int32_value_(false),
has_double_value_(false),
int32_value_(0),
double_value_(0) {
has_double_value_(false) {
set_representation(r);
SetFlag(kUseGVN);
if (handle_->IsNumber()) {
double n = handle_->Number();
double roundtrip_value = static_cast<double>(static_cast<int32_t>(n));
has_int32_value_ = BitCast<int64_t>(roundtrip_value) == BitCast<int64_t>(n);
if (has_int32_value_) int32_value_ = static_cast<int32_t>(n);
has_int32_value_ = IsInteger32(n);
int32_value_ = DoubleToInt32(n);
double_value_ = n;
has_double_value_ = true;
}
}
HConstant::HConstant(int32_t integer_value, Representation r)
: has_int32_value_(true),
has_double_value_(true),
int32_value_(integer_value),
double_value_(FastI2D(integer_value)) {
set_representation(r);
SetFlag(kUseGVN);
}
HConstant::HConstant(double double_value, Representation r)
: has_int32_value_(IsInteger32(double_value)),
has_double_value_(true),
int32_value_(DoubleToInt32(double_value)),
double_value_(double_value) {
set_representation(r);
SetFlag(kUseGVN);
}
HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
if (r.IsInteger32() && !has_int32_value_) return NULL;
if (r.IsDouble() && !has_double_value_) return NULL;
if (handle_.is_null()) {
ASSERT(has_int32_value_ || has_double_value_);
if (has_int32_value_) return new(zone) HConstant(int32_value_, r);
return new(zone) HConstant(double_value_, r);
}
return new(zone) HConstant(handle_, r);
}
HConstant* HConstant::CopyToTruncatedInt32(Zone* zone) const {
if (!has_double_value_) return NULL;
int32_t truncated = NumberToInt32(*handle_);
return new(zone) HConstant(FACTORY->NewNumberFromInt(truncated),
Representation::Integer32());
if (has_int32_value_) {
if (handle_.is_null()) {
return new(zone) HConstant(int32_value_, Representation::Integer32());
} else {
// Re-use the existing Handle if possible.
return new(zone) HConstant(handle_, Representation::Integer32());
}
} else if (has_double_value_) {
return new(zone) HConstant(DoubleToInt32(double_value_),
Representation::Integer32());
} else {
return NULL;
}
}
bool HConstant::ToBoolean() const {
bool HConstant::ToBoolean() {
// Converts the constant's boolean value according to
// ECMAScript section 9.2 ToBoolean conversion.
if (HasInteger32Value()) return Integer32Value() != 0;
@@ -1421,17 +1501,25 @@ bool HConstant::ToBoolean() const {
double v = DoubleValue();
return v != 0 && !isnan(v);
}
if (handle()->IsTrue()) return true;
if (handle()->IsFalse()) return false;
if (handle()->IsUndefined()) return false;
if (handle()->IsNull()) return false;
if (handle()->IsString() &&
String::cast(*handle())->length() == 0) return false;
Handle<Object> literal = handle();
if (literal->IsTrue()) return true;
if (literal->IsFalse()) return false;
if (literal->IsUndefined()) return false;
if (literal->IsNull()) return false;
if (literal->IsString() && String::cast(*literal)->length() == 0) {
return false;
}
return true;
}
void HConstant::PrintDataTo(StringStream* stream) {
handle()->ShortPrint(stream);
if (has_int32_value_) {
stream->Add("%d ", int32_value_);
} else if (has_double_value_) {
stream->Add("%f ", FmtElm(double_value_));
} else {
handle()->ShortPrint(stream);
}
}
@@ -1638,13 +1726,10 @@ static bool PrototypeChainCanNeverResolve(
}
LookupResult lookup(isolate);
JSObject::cast(current)->map()->LookupInDescriptors(NULL, *name, &lookup);
if (lookup.IsFound()) {
if (lookup.type() != MAP_TRANSITION) return false;
} else if (!lookup.IsCacheable()) {
return false;
}
Map* map = JSObject::cast(current)->map();
map->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsFound()) return false;
if (!lookup.IsCacheable()) return false;
current = JSObject::cast(current)->GetPrototype();
}
return true;
@@ -1669,7 +1754,7 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
++i) {
Handle<Map> map = types->at(i);
LookupResult lookup(map->GetIsolate());
map->LookupInDescriptors(NULL, *name, &lookup);
map->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsFound()) {
switch (lookup.type()) {
case FIELD: {
@@ -1685,20 +1770,24 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
case CONSTANT_FUNCTION:
types_.Add(types->at(i), zone);
break;
case MAP_TRANSITION:
if (!map->has_named_interceptor() &&
PrototypeChainCanNeverResolve(map, name)) {
negative_lookups.Add(types->at(i), zone);
}
case CALLBACKS:
break;
default:
case TRANSITION:
case INTERCEPTOR:
case NONEXISTENT:
case NORMAL:
case HANDLER:
UNREACHABLE();
break;
}
} else if (lookup.IsCacheable()) {
if (!map->has_named_interceptor() &&
PrototypeChainCanNeverResolve(map, name)) {
negative_lookups.Add(types->at(i), zone);
}
} else if (lookup.IsCacheable() &&
// For dicts the lookup on the map will fail, but the object may
// contain the property so we cannot generate a negative lookup
// (which would just be a map check and return undefined).
!map->is_dictionary_map() &&
!map->has_named_interceptor() &&
PrototypeChainCanNeverResolve(map, name)) {
negative_lookups.Add(types->at(i), zone);
}
}
@@ -1757,7 +1846,8 @@ void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("]");
stream->Add("] ");
dependency()->PrintNameTo(stream);
if (RequiresHoleCheck()) {
stream->Add(" check_hole");
}
@@ -1782,7 +1872,8 @@ void HLoadKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
elements()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("]");
stream->Add("] ");
dependency()->PrintNameTo(stream);
}
@@ -1811,6 +1902,7 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
new(block()->zone()) HCheckMapValue(object(), names_cache->map());
HInstruction* index = new(block()->zone()) HLoadKeyedFastElement(
index_cache,
key_load->key(),
key_load->key());
map_check->InsertBefore(this);
index->InsertBefore(this);
@@ -1871,7 +1963,8 @@ void HLoadKeyedSpecializedArrayElement::PrintDataTo(
}
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("]");
stream->Add("] ");
dependency()->PrintNameTo(stream);
}
@@ -2079,6 +2172,10 @@ HType HPhi::CalculateInferredType() {
HType HConstant::CalculateInferredType() {
if (has_int32_value_) {
return Smi::IsValid(int32_value_) ? HType::Smi() : HType::HeapNumber();
}
if (has_double_value_) return HType::HeapNumber();
return HType::TypeFromValue(handle_);
}
@@ -2226,6 +2323,13 @@ HValue* HDiv::EnsureAndPropagateNotMinusZero(BitVector* visited) {
}
HValue* HMathFloorOfDiv::EnsureAndPropagateNotMinusZero(BitVector* visited) {
visited->Add(id());
SetFlag(kBailoutOnMinusZero);
return NULL;
}
HValue* HMul::EnsureAndPropagateNotMinusZero(BitVector* visited) {
visited->Add(id());
if (range() == NULL || range()->CanBeMinusZero()) {
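Among the hunks above, CombinedMax and CombinedMin give HMathMinMax interval arithmetic: the max of values drawn from [la, ua] and [lb, ub] lies in [max(la, lb), max(ua, ub)], and dually for min. A standalone check of that rule:

#include <algorithm>
#include <cassert>

struct RangeSketch {
  int lower, upper;
  void CombinedMax(const RangeSketch& o) {
    lower = std::max(lower, o.lower);
    upper = std::max(upper, o.upper);
  }
  void CombinedMin(const RangeSketch& o) {
    lower = std::min(lower, o.lower);
    upper = std::min(upper, o.upper);
  }
};

int main() {
  RangeSketch a = {-5, 3}, b = {0, 10};
  a.CombinedMax(b);  // max of the two intervals
  assert(a.lower == 0 && a.upper == 10);
  return 0;
}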

323
deps/v8/src/hydrogen-instructions.h

@@ -124,7 +124,6 @@ class LChunkBuilder;
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
V(StringCompareAndBranch) \
V(JSArrayLength) \
V(LeaveInlined) \
V(LoadContextSlot) \
@@ -140,7 +139,9 @@ class LChunkBuilder;
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathFloorOfDiv) \
V(MathMinMax) \
V(Mod) \
V(Mul) \
V(ObjectLiteral) \
@@ -170,6 +171,7 @@ class LChunkBuilder;
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
V(StringCompareAndBranch) \
V(StringLength) \
V(Sub) \
V(ThisFunction) \
@@ -224,6 +226,16 @@
virtual Opcode opcode() const { return HValue::k##type; }
#ifdef DEBUG
#define ASSERT_ALLOCATION_DISABLED do { \
OptimizingCompilerThread* thread = \
ISOLATE->optimizing_compiler_thread(); \
ASSERT(thread->IsOptimizerThread() || !HEAP->IsAllocationAllowed()); \
} while (0)
#else
#define ASSERT_ALLOCATION_DISABLED do {} while (0)
#endif
class Range: public ZoneObject {
public:
Range()
@@ -276,6 +288,8 @@ class Range: public ZoneObject {
void Intersect(Range* other);
void Union(Range* other);
void CombinedMax(Range* other);
void CombinedMin(Range* other);
void AddConstant(int32_t value);
void Sar(int32_t value);
@@ -549,7 +563,14 @@ class HValue: public ZoneObject {
kIsArguments,
kTruncatingToInt32,
kIsDead,
kLastFlag = kIsDead
// Instructions that are allowed to produce full range unsigned integer
// values are marked with kUint32 flag. If arithmetic shift or a load from
// EXTERNAL_UNSIGNED_INT_ELEMENTS array is not marked with this flag
// it will deoptimize if result does not fit into signed integer range.
// HGraph::ComputeSafeUint32Operations is responsible for setting this
// flag.
kUint32,
kLastFlag = kUint32
};
STATIC_ASSERT(kLastFlag < kBitsPerInt);
@@ -857,9 +878,14 @@ class HInstruction: public HValue {
void InsertBefore(HInstruction* next);
void InsertAfter(HInstruction* previous);
// The position is a write-once variable.
int position() const { return position_; }
bool has_position() const { return position_ != RelocInfo::kNoPosition; }
void set_position(int position) { position_ = position; }
void set_position(int position) {
ASSERT(!has_position());
ASSERT(position != RelocInfo::kNoPosition);
position_ = position;
}
bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); }
@@ -1281,7 +1307,7 @@ class HClampToUint8: public HUnaryOperation {
class HSimulate: public HInstruction {
public:
HSimulate(int ast_id, int pop_count, Zone* zone)
HSimulate(BailoutId ast_id, int pop_count, Zone* zone)
: ast_id_(ast_id),
pop_count_(pop_count),
values_(2, zone),
@@ -1291,9 +1317,9 @@ class HSimulate: public HInstruction {
virtual void PrintDataTo(StringStream* stream);
bool HasAstId() const { return ast_id_ != AstNode::kNoNumber; }
int ast_id() const { return ast_id_; }
void set_ast_id(int id) {
bool HasAstId() const { return !ast_id_.IsNone(); }
BailoutId ast_id() const { return ast_id_; }
void set_ast_id(BailoutId id) {
ASSERT(!HasAstId());
ast_id_ = id;
}
@@ -1341,7 +1367,7 @@ class HSimulate: public HInstruction {
// use lists are correctly updated.
SetOperandAt(values_.length() - 1, value);
}
int ast_id_;
BailoutId ast_id_;
int pop_count_;
ZoneList<HValue*> values_;
ZoneList<int> assigned_indexes_;
@@ -1385,20 +1411,29 @@ class HStackCheck: public HTemplateInstruction<1> {
};
enum InliningKind {
NORMAL_RETURN, // Normal function/method call and return.
DROP_EXTRA_ON_RETURN, // Drop an extra value from the environment on return.
CONSTRUCT_CALL_RETURN, // Either use allocated receiver or return value.
GETTER_CALL_RETURN, // Returning from a getter, need to restore context.
SETTER_CALL_RETURN // Use the RHS of the assignment as the return value.
};
class HEnterInlined: public HTemplateInstruction<0> {
public:
HEnterInlined(Handle<JSFunction> closure,
int arguments_count,
FunctionLiteral* function,
CallKind call_kind,
bool is_construct,
InliningKind inlining_kind,
Variable* arguments_var,
ZoneList<HValue*>* arguments_values)
: closure_(closure),
arguments_count_(arguments_count),
function_(function),
call_kind_(call_kind),
is_construct_(is_construct),
inlining_kind_(inlining_kind),
arguments_var_(arguments_var),
arguments_values_(arguments_values) {
}
@@ -1409,7 +1444,7 @@ class HEnterInlined: public HTemplateInstruction<0> {
int arguments_count() const { return arguments_count_; }
FunctionLiteral* function() const { return function_; }
CallKind call_kind() const { return call_kind_; }
bool is_construct() const { return is_construct_; }
InliningKind inlining_kind() const { return inlining_kind_; }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
@@ -1425,7 +1460,7 @@ class HEnterInlined: public HTemplateInstruction<0> {
int arguments_count_;
FunctionLiteral* function_;
CallKind call_kind_;
bool is_construct_;
InliningKind inlining_kind_;
Variable* arguments_var_;
ZoneList<HValue*>* arguments_values_;
};
@@ -1469,7 +1504,7 @@ class HPushArgument: public HUnaryOperation {
class HThisFunction: public HTemplateInstruction<0> {
public:
explicit HThisFunction(Handle<JSFunction> closure) : closure_(closure) {
HThisFunction() {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
@@ -1478,18 +1513,10 @@ class HThisFunction: public HTemplateInstruction<0> {
return Representation::None();
}
Handle<JSFunction> closure() const { return closure_; }
DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
protected:
virtual bool DataEquals(HValue* other) {
HThisFunction* b = HThisFunction::cast(other);
return *closure() == *b->closure();
}
private:
Handle<JSFunction> closure_;
virtual bool DataEquals(HValue* other) { return true; }
};
@@ -1881,6 +1908,7 @@ class HJSArrayLength: public HTemplateInstruction<2> {
class HFixedArrayBaseLength: public HUnaryOperation {
public:
explicit HFixedArrayBaseLength(HValue* value) : HUnaryOperation(value) {
set_type(HType::Smi());
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnArrayLengths);
@@ -1897,6 +1925,26 @@ class HFixedArrayBaseLength: public HUnaryOperation {
};
class HMapEnumLength: public HUnaryOperation {
public:
explicit HMapEnumLength(HValue* value) : HUnaryOperation(value) {
set_type(HType::Smi());
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
}
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(MapEnumLength)
protected:
virtual bool DataEquals(HValue* other) { return true; }
};
class HElementsKind: public HUnaryOperation {
public:
explicit HElementsKind(HValue* value) : HUnaryOperation(value) {
@@ -2021,14 +2069,18 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
};
class HLoadElements: public HUnaryOperation {
class HLoadElements: public HTemplateInstruction<2> {
public:
explicit HLoadElements(HValue* value) : HUnaryOperation(value) {
HLoadElements(HValue* value, HValue* typecheck) {
SetOperandAt(0, value);
SetOperandAt(1, typecheck);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnElementsPointer);
}
HValue* value() { return OperandAt(0); }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -2289,8 +2341,10 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
return Representation::None();
}
virtual void PrintDataTo(StringStream* stream);
virtual intptr_t Hashcode() {
ASSERT(!HEAP->IsAllocationAllowed());
ASSERT_ALLOCATION_DISABLED;
intptr_t hash = reinterpret_cast<intptr_t>(*prototype());
hash = 17 * hash + reinterpret_cast<intptr_t>(*holder());
return hash;
@@ -2459,20 +2513,42 @@ class HArgumentsObject: public HTemplateInstruction<0> {
class HConstant: public HTemplateInstruction<0> {
public:
HConstant(Handle<Object> handle, Representation r);
HConstant(int32_t value, Representation r);
HConstant(double value, Representation r);
Handle<Object> handle() const { return handle_; }
Handle<Object> handle() {
if (handle_.is_null()) {
handle_ = FACTORY->NewNumber(double_value_, TENURED);
}
ASSERT(has_int32_value_ || !handle_->IsSmi());
return handle_;
}
bool InOldSpace() const { return !HEAP->InNewSpace(*handle_); }
bool ImmortalImmovable() const {
if (has_int32_value_) {
return false;
}
if (has_double_value_) {
if (BitCast<int64_t>(double_value_) == BitCast<int64_t>(-0.0) ||
isnan(double_value_)) {
return true;
}
return false;
}
ASSERT(!handle_.is_null());
Heap* heap = HEAP;
// We should have handled minus_zero_value and nan_value in the
// has_double_value_ clause above.
ASSERT(*handle_ != heap->minus_zero_value());
ASSERT(*handle_ != heap->nan_value());
if (*handle_ == heap->undefined_value()) return true;
if (*handle_ == heap->null_value()) return true;
if (*handle_ == heap->true_value()) return true;
if (*handle_ == heap->false_value()) return true;
if (*handle_ == heap->the_hole_value()) return true;
if (*handle_ == heap->minus_zero_value()) return true;
if (*handle_ == heap->nan_value()) return true;
if (*handle_ == heap->empty_string()) return true;
return false;
}
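Note: handle() above is now lazy; purely numeric constants are created from raw bits and only allocate a HeapNumber on first request. The same pattern in a self-contained toy, where std::unique_ptr stands in for Handle<Object> and the factory call:

  #include <memory>

  class LazyBoxedNumber {
   public:
    explicit LazyBoxedNumber(double value) : value_(value) {}
    const double* box() {
      // Allocate the boxed representation once, on demand, mirroring
      // the FACTORY->NewNumber(double_value_, TENURED) call above.
      if (!box_) box_.reset(new double(value_));
      return box_.get();
    }
   private:
    double value_;
    std::unique_ptr<double> box_;
  };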
@@ -2482,18 +2558,14 @@ class HConstant: public HTemplateInstruction<0> {
}
virtual bool IsConvertibleToInteger() const {
if (handle_->IsSmi()) return true;
if (handle_->IsHeapNumber() &&
(HeapNumber::cast(*handle_)->value() ==
static_cast<double>(NumberToInt32(*handle_)))) return true;
return false;
return has_int32_value_;
}
virtual bool EmitAtUses() { return !representation().IsDouble(); }
virtual HValue* Canonicalize();
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
bool IsInteger() const { return handle_->IsSmi(); }
bool IsInteger() { return handle()->IsSmi(); }
HConstant* CopyToRepresentation(Representation r, Zone* zone) const;
HConstant* CopyToTruncatedInt32(Zone* zone) const;
bool HasInteger32Value() const { return has_int32_value_; }
@@ -2506,24 +2578,35 @@ class HConstant: public HTemplateInstruction<0> {
ASSERT(HasDoubleValue());
return double_value_;
}
bool HasNumberValue() const { return has_int32_value_ || has_double_value_; }
bool HasNumberValue() const { return has_double_value_; }
int32_t NumberValueAsInteger32() const {
ASSERT(HasNumberValue());
if (has_int32_value_) return int32_value_;
return DoubleToInt32(double_value_);
// Irrespective of whether a numeric HConstant can be safely
// represented as an int32, we store the (in some cases lossy)
// representation of the number in int32_value_.
return int32_value_;
}
bool HasStringValue() const { return handle_->IsString(); }
bool ToBoolean() const;
bool ToBoolean();
bool IsUint32() {
return HasInteger32Value() && (Integer32Value() >= 0);
}
virtual intptr_t Hashcode() {
ASSERT(!HEAP->allow_allocation(false));
intptr_t hash = reinterpret_cast<intptr_t>(*handle());
// Prevent smis from having fewer hash values when truncated to
// the least significant bits.
const int kShiftSize = kSmiShiftSize + kSmiTagSize;
STATIC_ASSERT(kShiftSize != 0);
return hash ^ (hash >> kShiftSize);
ASSERT_ALLOCATION_DISABLED;
intptr_t hash;
if (has_int32_value_) {
hash = static_cast<intptr_t>(int32_value_);
} else if (has_double_value_) {
hash = static_cast<intptr_t>(BitCast<int64_t>(double_value_));
} else {
ASSERT(!handle_.is_null());
hash = reinterpret_cast<intptr_t>(*handle_);
}
return hash;
}
#ifdef DEBUG
@@ -2537,15 +2620,32 @@ class HConstant: public HTemplateInstruction<0> {
virtual bool DataEquals(HValue* other) {
HConstant* other_constant = HConstant::cast(other);
return handle().is_identical_to(other_constant->handle());
if (has_int32_value_) {
return other_constant->has_int32_value_ &&
int32_value_ == other_constant->int32_value_;
} else if (has_double_value_) {
return other_constant->has_double_value_ &&
BitCast<int64_t>(double_value_) ==
BitCast<int64_t>(other_constant->double_value_);
} else {
ASSERT(!handle_.is_null());
return !other_constant->handle_.is_null() &&
*handle_ == *other_constant->handle_;
}
}
private:
// If this is a numerical constant, handle_ either points to the
// HeapObject the constant originated from or is null. If the
// constant is non-numeric, handle_ always points to a valid
// constant HeapObject.
Handle<Object> handle_;
// The following two values represent the int32 and the double value of the
// given constant if there is a lossless conversion between the constant
// and the specific representation.
// We store the HConstant in the most specific form safely possible.
// The two flags, has_int32_value_ and has_double_value_ tell us if
// int32_value_ and double_value_ hold valid, safe representations
// of the constant. has_int32_value_ implies has_double_value_ but
// not the converse.
bool has_int32_value_ : 1;
bool has_double_value_ : 1;
int32_t int32_value_;
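Note: both Hashcode() and DataEquals() above compare doubles through BitCast<int64_t> rather than ==. Bit-exact comparison keeps -0.0 distinct from +0.0 and lets a NaN constant equal itself, which is what value numbering needs; plain == gets both cases wrong for this purpose. A self-contained illustration, where memcpy plays the role of v8's BitCast (assumes IEEE-754 doubles):

  #include <cstdint>
  #include <cstring>

  static int64_t DoubleBits(double v) {
    int64_t bits;
    memcpy(&bits, &v, sizeof bits);  // reinterpret the bit pattern safely
    return bits;
  }
  // -0.0 == 0.0 is true, but DoubleBits(-0.0) != DoubleBits(0.0).
  // nan != nan is true, but DoubleBits(nan) == DoubleBits(nan)
  // whenever the two NaNs carry the same bit pattern.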
@@ -2706,16 +2806,42 @@ class HAccessArgumentsAt: public HTemplateInstruction<3> {
};
enum BoundsCheckKeyMode {
DONT_ALLOW_SMI_KEY,
ALLOW_SMI_KEY
};
class HBoundsCheck: public HTemplateInstruction<2> {
public:
HBoundsCheck(HValue* index, HValue* length) {
HBoundsCheck(HValue* index, HValue* length,
BoundsCheckKeyMode key_mode = DONT_ALLOW_SMI_KEY)
: key_mode_(key_mode) {
SetOperandAt(0, index);
SetOperandAt(1, length);
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
}
virtual Representation RequiredInputRepresentation(int index) {
virtual Representation RequiredInputRepresentation(int arg_index) {
if (key_mode_ == DONT_ALLOW_SMI_KEY ||
!length()->representation().IsTagged()) {
return Representation::Integer32();
}
// If the index is tagged and isn't constant, then allow the length
// to be tagged, since it is usually already tagged from loading it out of
// the length field of a JSArray. This allows for direct comparison without
// untagging.
if (index()->representation().IsTagged() && !index()->IsConstant()) {
return Representation::Tagged();
}
// Also allow the length to be tagged if the index is constant, because
// it can be tagged to allow direct comparison.
if (index()->IsConstant() &&
index()->representation().IsInteger32() &&
arg_index == 1) {
return Representation::Tagged();
}
return Representation::Integer32();
}
@@ -2728,6 +2854,7 @@ class HBoundsCheck: public HTemplateInstruction<2> {
protected:
virtual bool DataEquals(HValue* other) { return true; }
BoundsCheckKeyMode key_mode_;
};
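Note: ALLOW_SMI_KEY works because smi tagging is order-preserving, so two tagged smis compare the same way their untagged values do and the JSArray length need never be untagged before the bounds comparison. In miniature (assumes the usual smi encoding, value shifted left with a zero tag bit):

  static bool TaggedSmiLess(intptr_t tagged_a, intptr_t tagged_b) {
    // For smis a, b: (a << shift) < (b << shift) iff a < b, since the smi
    // range rules out overflow. Comparing the tagged words is enough.
    return tagged_a < tagged_b;
  }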
@@ -2782,8 +2909,11 @@ class HMathFloorOfDiv: public HBinaryOperation {
: HBinaryOperation(context, left, right) {
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
SetFlag(kCanOverflow);
}
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Integer32();
}
@@ -3388,6 +3518,47 @@ class HDiv: public HArithmeticBinaryOperation {
};
class HMathMinMax: public HArithmeticBinaryOperation {
public:
enum Operation { kMathMin, kMathMax };
HMathMinMax(HValue* context, HValue* left, HValue* right, Operation op)
: HArithmeticBinaryOperation(context, left, right),
operation_(op) { }
virtual Representation RequiredInputRepresentation(int index) {
return index == 0
? Representation::Tagged()
: representation();
}
virtual Representation InferredRepresentation() {
if (left()->representation().IsInteger32() &&
right()->representation().IsInteger32()) {
return Representation::Integer32();
}
return Representation::Double();
}
virtual bool IsCommutative() const { return true; }
Operation operation() { return operation_; }
DECLARE_CONCRETE_INSTRUCTION(MathMinMax)
protected:
virtual bool DataEquals(HValue* other) {
return other->IsMathMinMax() &&
HMathMinMax::cast(other)->operation_ == operation_;
}
virtual Range* InferRange(Zone* zone);
private:
Operation operation_;
};
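Note: InferredRepresentation() above keeps min/max in integer form only when both inputs are already Integer32; every other mix falls back to Double, which represents any int32 exactly, so no precision is lost. The rule as a standalone helper (names are illustrative):

  enum RepSketch { kRepInt32, kRepDouble, kRepTagged };

  static RepSketch InferMinMaxRep(RepSketch left, RepSketch right) {
    if (left == kRepInt32 && right == kRepInt32) return kRepInt32;
    return kRepDouble;  // safe superset of int32; exact for all int32 values
  }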
class HBitwise: public HBitwiseBinaryOperation {
public:
HBitwise(Token::Value op, HValue* context, HValue* left, HValue* right)
@@ -3484,11 +3655,11 @@ class HSar: public HBitwiseBinaryOperation {
class HOsrEntry: public HTemplateInstruction<0> {
public:
explicit HOsrEntry(int ast_id) : ast_id_(ast_id) {
explicit HOsrEntry(BailoutId ast_id) : ast_id_(ast_id) {
SetGVNFlag(kChangesOsrEntries);
}
int ast_id() const { return ast_id_; }
BailoutId ast_id() const { return ast_id_; }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
@@ -3497,7 +3668,7 @@ class HOsrEntry: public HTemplateInstruction<0> {
DECLARE_CONCRETE_INSTRUCTION(OsrEntry)
private:
int ast_id_;
BailoutId ast_id_;
};
@@ -3596,7 +3767,7 @@ class HLoadGlobalCell: public HTemplateInstruction<0> {
virtual void PrintDataTo(StringStream* stream);
virtual intptr_t Hashcode() {
ASSERT(!HEAP->allow_allocation(false));
ASSERT_ALLOCATION_DISABLED;
return reinterpret_cast<intptr_t>(*cell_);
}
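Note: the ad-hoc assertions this commit retires (ASSERT(!HEAP->IsAllocationAllowed()), ASSERT(!HEAP->allow_allocation(false))) are unified behind ASSERT_ALLOCATION_DISABLED. The intent is that computing a hash must never allocate, since an allocation could trigger GC and move the very object being hashed. A plausible expansion, offered purely as an assumption since the actual definition lives outside this file:

  #ifdef DEBUG
  #define ASSERT_ALLOCATION_DISABLED_SKETCH ASSERT(!HEAP->IsAllocationAllowed())
  #else
  #define ASSERT_ALLOCATION_DISABLED_SKETCH ((void) 0)
  #endif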
@@ -3983,10 +4154,11 @@ class ArrayInstructionInterface {
};
class HLoadKeyedFastElement
: public HTemplateInstruction<2>, public ArrayInstructionInterface {
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
HLoadKeyedFastElement(HValue* obj,
HValue* key,
HValue* dependency,
ElementsKind elements_kind = FAST_ELEMENTS)
: bit_field_(0) {
ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
@@ -3997,6 +4169,7 @@
}
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, dependency);
set_representation(Representation::Tagged());
SetGVNFlag(kDependsOnArrayElements);
SetFlag(kUseGVN);
@@ -4004,6 +4177,7 @@
HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* dependency() { return OperandAt(2); }
uint32_t index_offset() { return IndexOffsetField::decode(bit_field_); }
void SetIndexOffset(uint32_t index_offset) {
bit_field_ = IndexOffsetField::update(bit_field_, index_offset);
@@ -4020,9 +4194,9 @@ class HLoadKeyedFastElement
virtual Representation RequiredInputRepresentation(int index) {
// The key is supposed to be Integer32.
return index == 0
? Representation::Tagged()
: Representation::Integer32();
if (index == 0) return Representation::Tagged();
if (index == 1) return Representation::Integer32();
return Representation::None();
}
virtual void PrintDataTo(StringStream* stream);
@@ -4052,17 +4226,19 @@ enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
class HLoadKeyedFastDoubleElement
: public HTemplateInstruction<2>, public ArrayInstructionInterface {
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
HLoadKeyedFastDoubleElement(
HValue* elements,
HValue* key,
HValue* dependency,
HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
: index_offset_(0),
is_dehoisted_(false),
hole_check_mode_(hole_check_mode) {
SetOperandAt(0, elements);
SetOperandAt(1, key);
SetOperandAt(2, dependency);
set_representation(Representation::Double());
SetGVNFlag(kDependsOnDoubleArrayElements);
SetFlag(kUseGVN);
@@ -4070,6 +4246,7 @@ class HLoadKeyedFastDoubleElement
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* dependency() { return OperandAt(2); }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
@@ -4079,9 +4256,9 @@ class HLoadKeyedFastDoubleElement
virtual Representation RequiredInputRepresentation(int index) {
// The key is supposed to be Integer32.
return index == 0
? Representation::Tagged()
: Representation::Integer32();
if (index == 0) return Representation::Tagged();
if (index == 1) return Representation::Integer32();
return Representation::None();
}
bool RequiresHoleCheck() {
@@ -4108,16 +4285,18 @@ class HLoadKeyedFastDoubleElement
class HLoadKeyedSpecializedArrayElement
: public HTemplateInstruction<2>, public ArrayInstructionInterface {
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
HLoadKeyedSpecializedArrayElement(HValue* external_elements,
HValue* key,
HValue* dependency,
ElementsKind elements_kind)
: elements_kind_(elements_kind),
index_offset_(0),
is_dehoisted_(false) {
SetOperandAt(0, external_elements);
SetOperandAt(1, key);
SetOperandAt(2, dependency);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
set_representation(Representation::Double());
@@ -4133,15 +4312,15 @@ class HLoadKeyedSpecializedArrayElement
virtual void PrintDataTo(StringStream* stream);
virtual Representation RequiredInputRepresentation(int index) {
// The key is supposed to be Integer32, but the base pointer
// for the element load is a naked pointer.
return index == 0
? Representation::External()
: Representation::Integer32();
// The key is supposed to be Integer32.
if (index == 0) return Representation::External();
if (index == 1) return Representation::Integer32();
return Representation::None();
}
HValue* external_pointer() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* dependency() { return OperandAt(2); }
ElementsKind elements_kind() const { return elements_kind_; }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
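Note: all three keyed-load classes in this region grow a third "dependency" operand whose required representation is None. The extra operand is a pure ordering edge: GVN and scheduling see the guarding check as an input, so the load cannot float above it, yet no register is ever allocated for it. The per-index rule the three classes now share, as a standalone toy:

  enum RepSketch2 { kNone, kTagged, kInteger32, kExternal };

  static RepSketch2 KeyedLoadInputRep(bool external_array, int index) {
    if (index == 0) return external_array ? kExternal : kTagged;  // base
    if (index == 1) return kInteger32;                            // key
    return kNone;  // index 2: dependency only, never materialized
  }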
@@ -4797,10 +4976,12 @@ class HObjectLiteral: public HMaterializedLiteral<1> {
class HRegExpLiteral: public HMaterializedLiteral<1> {
public:
HRegExpLiteral(HValue* context,
Handle<FixedArray> literals,
Handle<String> pattern,
Handle<String> flags,
int literal_index)
: HMaterializedLiteral<1>(literal_index, 0),
literals_(literals),
pattern_(pattern),
flags_(flags) {
SetOperandAt(0, context);
@@ -4808,6 +4989,7 @@ class HRegExpLiteral: public HMaterializedLiteral<1> {
}
HValue* context() { return OperandAt(0); }
Handle<FixedArray> literals() { return literals_; }
Handle<String> pattern() { return pattern_; }
Handle<String> flags() { return flags_; }
@@ -4819,6 +5001,7 @@ class HRegExpLiteral: public HMaterializedLiteral<1> {
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
private:
Handle<FixedArray> literals_;
Handle<String> pattern_;
Handle<String> flags_;
};

1933
deps/v8/src/hydrogen.cc

File diff suppressed because it is too large

Some files were not shown because too many files changed in this diff
