diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore index 77f38dd9c6..fe8425f021 100644 --- a/deps/v8/.gitignore +++ b/deps/v8/.gitignore @@ -18,6 +18,7 @@ #*# *~ .cpplint-cache +.d8_history d8 d8_g shell @@ -25,17 +26,32 @@ shell_g /build/Debug /build/gyp /build/Release -/obj/ -/out/ +/obj +/out +/test/cctest/cctest.status2 /test/es5conform/data +/test/message/message.status2 +/test/mjsunit/mjsunit.status2 +/test/mozilla/CHECKED_OUT_VERSION /test/mozilla/data +/test/mozilla/downloaded_* +/test/mozilla/mozilla.status2 +/test/preparser/preparser.status2 /test/sputnik/sputniktests /test/test262/data +/test/test262/test262-* +/test/test262/test262.status2 /third_party +/tools/jsfunfuzz +/tools/jsfunfuzz.zip /tools/oom_dump/oom_dump /tools/oom_dump/oom_dump.o /tools/visual_studio/Debug /tools/visual_studio/Release -/xcodebuild/ +/xcodebuild TAGS *.Makefile +GTAGS +GRTAGS +GSYMS +GPATH diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index 1156d94958..c279e7c2d9 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -20,6 +20,7 @@ Burcu Dogan Craig Schlenter Daniel Andersson Daniel James +Derek J Conrod Dineel D Sule Erich Ocean Fedor Indutny @@ -44,6 +45,7 @@ Paolo Giarrusso Patrick Gansterer Peter Varga Rafal Krypa +Rajeev R Krithivasan Rene Rebe Robert Mustacchi Rodolph Perfetta @@ -53,6 +55,7 @@ Sanjoy Das Subrato K De Tobias Burnus Vlad Burlik +Xi Qian Yuqiang Xian Zaheer Ahmad Zhongping Wang diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index 7110aa83e3..52601a467e 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,310 @@ +2012-12-10: Version 3.15.11 + + Define CAN_USE_VFP2/3_INSTRUCTIONS based on arm_neon and arm_fpu GYP + flags. + + Performance and stability improvements on all platforms. + + +2012-12-07: Version 3.15.10 + + Enabled optimisation of functions inside eval. (issue 2315) + + Fixed spec violations in methods of Number.prototype. (issue 2443) + + Added GCTracer metrics for a scavenger GC for DOM wrappers. + + Performance and stability improvements on all platforms. + + +2012-12-06: Version 3.15.9 + + Fixed candidate eviction in code flusher. + (Chromium issue 159140) + + Iterate through all arguments for side effects in Math.min/max. + (issue 2444) + + Fixed spec violations related to regexp.lastIndex + (issue 2437, issue 2438) + + Performance and stability improvements on all platforms. + + +2012-12-04: Version 3.15.8 + + Enforced stack allocation of TryCatch blocks. + (issue 2166,chromium:152389) + + Fixed external exceptions in external try-catch handlers. + (issue 2166) + + Activated incremental code flushing by default. + + Performance and stability improvements on all platforms. + + +2012-11-30: Version 3.15.7 + + Activated code aging by default. + + Included more information in --prof log. + + Removed eager sweeping for lazy swept spaces. Try to find in + SlowAllocateRaw a bounded number of times a big enough memory slot. + (issue 2194) + + Performance and stability improvements on all platforms. + + +2012-11-26: Version 3.15.6 + + Ensure double arrays are filled with holes when extended from + variations of empty arrays. (Chromium issue 162085) + + Performance and stability improvements on all platforms. + + +2012-11-23: Version 3.15.5 + + Fixed JSON.stringify for objects with interceptor handlers. + (Chromium issue 161028) + + Fixed corner case in x64 compare stubs. (issue 2416) + + Performance and stability improvements on all platforms. + + +2012-11-16: Version 3.15.4 + + Fixed Array.prototype.join evaluation order. 
(issue 2263) + + Perform CPU sampling by CPU sampling thread only iff processing thread + is not running. (issue 2364) + + When using an Object as a set in Object.getOwnPropertyNames, null out + the proto. (issue 2410) + + Disabled EXTRA_CHECKS in Release build. + + Heap explorer: Show representation of strings. + + Removed 'type' and 'arguments' properties from Error object. + (issue 2397) + + Added atomics implementation for ThreadSanitizer v2. + (Chromium issue 128314) + + Fixed LiveEdit crashes when object/array literal is added. (issue 2368) + + Performance and stability improvements on all platforms. + + +2012-11-13: Version 3.15.3 + + Changed sample shell to send non-JS output (e.g. errors) to stderr + instead of stdout. + + Correctly check for stack overflow even when interrupt is pending. + (issue 214) + + Collect stack trace on stack overflow. (issue 2394) + + Performance and stability improvements on all platforms. + + +2012-11-12: Version 3.15.2 + + Function::GetScriptOrigin supplies sourceURL when script name is + not available. (Chromium issue 159413) + + Made formatting error message side-effect-free. (issue 2398) + + Fixed length check in JSON.stringify. (Chromium issue 160010) + + ES6: Added support for Set and Map clear method (issue 2400) + + Fixed slack tracking when instance prototype changes. + (Chromium issue 157019) + + Fixed disabling of code flusher while marking. (Chromium issue 159140) + + Added a test case for object grouping in a scavenger GC (issue 2077) + + Support shared library build of Android for v8. + (Chromium issue 158821) + + ES6: Added support for size to Set and Map (issue 2395) + + Performance and stability improvements on all platforms. + + +2012-11-06: Version 3.15.1 + + Put incremental code flushing behind a flag. (Chromium issue 159140) + + Performance and stability improvements on all platforms. + + +2012-10-31: Version 3.15.0 + + Loosened aligned code target requirement on ARM (issue 2380) + + Fixed JSON.parse to treat leading zeros correctly. + (Chromium issue 158185) + + Performance and stability improvements on all platforms. + + +2012-10-22: Version 3.14.5 + + Killed off the SCons based build. + + Added a faster API for creating v8::Integer objects. + + Speeded up function deoptimization by avoiding quadratic pass over + optimized function list. (Chromium issue 155270) + + Always invoke the default Array.sort functions from builtin functions. + (issue 2372) + + Reverted recent CPU profiler changes because they broke --prof. + (issue 2364) + + Switched code flushing to use different JSFunction field. + (issue 1609) + + Performance and stability improvements on all platforms. + + +2012-10-15: Version 3.14.4 + + Allow evals for debugger even if they are prohibited in the debugee + context. (Chromium issue 154733) + + Enabled --verify-heap in release mode (issue 2120) + + Performance and stability improvements on all platforms. + + +2012-10-11: Version 3.14.3 + + Use native context to retrieve ErrorMessageForCodeGenerationFromStrings + (Chromium issue 155076). + + Bumped variable limit further to 2^17 (Chromium issue 151625). + + Performance and stability improvements on all platforms. + + +2012-10-10: Version 3.14.2 + + ARM: allowed VFP3 instructions when hardfloat is enabled. + (Chromium issue 152506) + + Fixed instance_descriptors() and PushStackTraceAndDie regressions. + (Chromium issue 151749) + + Made GDBJIT interface compile again. (issue 1804) + + Fixed Accessors::FunctionGetPrototype's proto chain traversal. 
+ (Chromium issue 143967) + + Made sure that names of temporaries do not clash with real variables. + (issue 2322) + + Rejected local module declarations. (Chromium issue 150628) + + Rejected uses of lexical for-loop variable on the RHS. (issue 2322) + + Fixed slot recording of code target patches. + (Chromium issue 152615,chromium:144230) + + Changed the Android makefile to use GCC 4.6 instead of GCC 4.4.3. + + Performance and stability improvements on all platforms. + + +2012-10-01: Version 3.14.1 + + Don't set -m32 flag when compiling with Android ARM compiler. + (Chromium issue 143889) + + Restore the descriptor array before returning allocation failure. + (Chromium issue 151750) + + Lowered kMaxVirtualRegisters (v8 issue 2139, Chromium issues 123822 and + 128252). + + Pull more recent gyp in 'make dependencies'. + + Made sure that the generic KeyedStoreIC changes length and element_kind + atomically (issue 2346). + + Bumped number of allowed variables per scope to 65535, to address GWT. + (Chromium issue 151625) + + Support sourceURL for dynamically inserted scripts (issue 2342). + + Performance and stability improvements on all platforms. + + +2012-09-20: Version 3.14.0 + + Fixed missing slot recording during clearing of CallICs. + (Chromium issue 144230) + + Fixed LBoundsCheck on x64 to handle (stack slot + constant) correctly. + (Chromium issue 150729) + + Fixed minus zero test. (Issue 2133) + + Fixed setting array length to zero for slow elements. + (Chromium issue 146910) + + Fixed lost arguments dropping in HLeaveInlined. + (Chromium issue 150545) + + Fixed casting error for receiver of interceptors. + (Chromium issue 149912) + + Throw a more descriptive exception when blocking 'eval' via CSP. + (Chromium issue 140191) + + Fixed debugger's eval when close to stack overflow. (issue 2318) + + Added checks to live edit. (issue 2297) + + Switched on code compaction on incremental GCs. + + Fixed caching of optimized code for OSR. (issue 2326) + + Not mask exception thrown by toString in String::UtfValue etc. + (issue 2317) + + Fixed API check for length of external arrays. (Chromium issue 148896) + + Ensure correct enumeration indices in the dict (Chromium issue 148376) + + Correctly initialize regexp global cache. (Chromium issue 148378) + + Fixed arguments object materialization during deopt. (issue 2261) + + Introduced new API to expose external string resource regardless of + encoding. + + Fixed CHECK failure in LCodeGen::DoWrapReceiver when + --deopt-every-n-times flag is present + (Chromium issue 148389) + + Fixed edge case of extension with NULL as source string. + (Chromium issue 144649) + + Fixed array index dehoisting. (Chromium issue 141395) + + Performance and stability improvements on all platforms. + + 2012-09-11: Version 3.13.7 Enable/disable LiveEdit using the (C++) debug API. diff --git a/deps/v8/DEPS b/deps/v8/DEPS index e50d1d20f6..8d66960f2d 100644 --- a/deps/v8/DEPS +++ b/deps/v8/DEPS @@ -5,7 +5,7 @@ deps = { # Remember to keep the revision in sync with the Makefile. "v8/build/gyp": - "http://gyp.googlecode.com/svn/trunk@1282", + "http://gyp.googlecode.com/svn/trunk@1501", } deps_os = { diff --git a/deps/v8/Makefile b/deps/v8/Makefile index f688f18b7c..b65ea4c9f9 100644 --- a/deps/v8/Makefile +++ b/deps/v8/Makefile @@ -24,14 +24,13 @@ # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-PYTHON ?= python # Variable default definitions. Override them by exporting them in your shell. CXX ?= g++ LINK ?= g++ OUTDIR ?= out -TESTJOBS ?= -j16 +TESTJOBS ?= GYPFLAGS ?= TESTFLAGS ?= ANDROID_NDK_ROOT ?= @@ -59,6 +58,10 @@ endif ifeq ($(objectprint), on) GYPFLAGS += -Dv8_object_print=1 endif +# verifyheap=on +ifeq ($(verifyheap), on) + GYPFLAGS += -Dv8_enable_verify_heap=1 +endif # snapshot=off ifeq ($(snapshot), off) GYPFLAGS += -Dv8_use_snapshot='false' @@ -80,9 +83,9 @@ ifeq ($(liveobjectlist), on) endif # vfp3=off ifeq ($(vfp3), off) - GYPFLAGS += -Dv8_can_use_vfp_instructions=false + GYPFLAGS += -Dv8_can_use_vfp3_instructions=false else - GYPFLAGS += -Dv8_can_use_vfp_instructions=true + GYPFLAGS += -Dv8_can_use_vfp3_instructions=true endif # debuggersupport=off ifeq ($(debuggersupport), off) @@ -113,8 +116,6 @@ ifeq ($(hardfp), on) GYPFLAGS += -Dv8_use_arm_eabi_hardfloat=true endif -GYPFLAGS += "-Dpython=$(PYTHON)" - # ----------------- available targets: -------------------- # - "dependencies": pulls in external dependencies (currently: GYP) # - any arch listed in ARCHES (see below) @@ -182,7 +183,7 @@ $(BUILDS): $(OUTDIR)/Makefile.$$(basename $$@) @$(MAKE) -C "$(OUTDIR)" -f Makefile.$(basename $@) \ CXX="$(CXX)" LINK="$(LINK)" \ BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \ - $(PYTHON) -c "print raw_input().capitalize()") \ + python -c "print raw_input().capitalize()") \ builddir="$(shell pwd)/$(OUTDIR)/$@" native: $(OUTDIR)/Makefile.native @@ -202,20 +203,20 @@ $(ANDROID_BUILDS): $(GYPFILES) $(ENVFILE) build/android.gypi \ # Test targets. check: all - @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \ + @tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \ --arch=$(shell echo $(DEFAULT_ARCHES) | sed -e 's/ /,/g') \ $(TESTFLAGS) $(addsuffix .check,$(MODES)): $$(basename $$@) - @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \ + @tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \ --mode=$(basename $@) $(TESTFLAGS) $(addsuffix .check,$(ARCHES)): $$(basename $$@) - @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \ + @tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \ --arch=$(basename $@) $(TESTFLAGS) $(CHECKS): $$(basename $$@) - @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \ + @tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \ --arch-and-mode=$(basename $@) $(TESTFLAGS) $(addsuffix .sync, $(ANDROID_BUILDS)): $$(basename $$@) @@ -223,16 +224,16 @@ $(addsuffix .sync, $(ANDROID_BUILDS)): $$(basename $$@) $(shell pwd) $(ANDROID_V8) $(addsuffix .check, $(ANDROID_BUILDS)): $$(basename $$@).sync - @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \ + @tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \ --arch-and-mode=$(basename $@) \ --timeout=600 \ - --special-command="tools/android-run.py @" + --command-prefix="tools/android-run.py" $(addsuffix .check, $(ANDROID_ARCHES)): \ $(addprefix $$(basename $$@).,$(MODES)).check native.check: native - @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)/native \ + @tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR)/native \ --arch-and-mode=. $(TESTFLAGS) # Clean targets. You can clean each architecture individually, or everything. 
@@ -253,14 +254,14 @@ clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES)) native.clean OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ARCHES)) $(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE) GYP_GENERATORS=make \ - $(PYTHON) build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \ + build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \ -Ibuild/standalone.gypi --depth=. \ -Dv8_target_arch=$(subst .,,$(suffix $@)) \ -S.$(subst .,,$(suffix $@)) $(GYPFLAGS) $(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE) GYP_GENERATORS=make \ - $(PYTHON) build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \ + build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \ -Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS) must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN: @@ -283,6 +284,7 @@ $(ENVFILE).new: echo "CXX=$(CXX)" >> $(ENVFILE).new # Dependencies. +# Remember to keep these in sync with the DEPS file. dependencies: svn checkout --force http://gyp.googlecode.com/svn/trunk build/gyp \ - --revision 1282 + --revision 1501 diff --git a/deps/v8/Makefile.android b/deps/v8/Makefile.android index a8d7fe148e..8e4ce0814a 100644 --- a/deps/v8/Makefile.android +++ b/deps/v8/Makefile.android @@ -48,11 +48,11 @@ endif ifeq ($(ARCH), android_arm) DEFINES = target_arch=arm v8_target_arch=arm android_target_arch=arm DEFINES += arm_neon=0 armv7=1 - TOOLCHAIN_ARCH = arm-linux-androideabi-4.4.3 + TOOLCHAIN_ARCH = arm-linux-androideabi-4.6 else ifeq ($(ARCH), android_ia32) DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86 - TOOLCHAIN_ARCH = x86-4.4.3 + TOOLCHAIN_ARCH = x86-4.6 else $(error Target architecture "${ARCH}" is not supported) endif diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS new file mode 100644 index 0000000000..941e5fe07d --- /dev/null +++ b/deps/v8/OWNERS @@ -0,0 +1,11 @@ +danno@chromium.org +jkummerow@chromium.org +mmassi@chromium.org +mstarzinger@chromium.org +mvstanton@chromium.org +rossberg@chromium.org +svenpanne@chromium.org +ulan@chromium.org +vegorov@chromium.org +verwaest@chromium.org +yangguo@chromium.org diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py new file mode 100644 index 0000000000..0077be941a --- /dev/null +++ b/deps/v8/PRESUBMIT.py @@ -0,0 +1,71 @@ +# Copyright 2012 the V8 project authors. All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Top-level presubmit script for V8. + +See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts +for more details about the presubmit API built into gcl. +""" + +def _V8PresubmitChecks(input_api, output_api): + """Runs the V8 presubmit checks.""" + import sys + sys.path.append(input_api.os_path.join( + input_api.PresubmitLocalPath(), 'tools')) + from presubmit import CppLintProcessor + from presubmit import SourceProcessor + + results = [] + if not CppLintProcessor().Run(input_api.PresubmitLocalPath()): + results.append(output_api.PresubmitError("C++ lint check failed")) + if not SourceProcessor().Run(input_api.PresubmitLocalPath()): + results.append(output_api.PresubmitError( + "Copyright header and trailing whitespaces check failed")) + return results + + +def _CommonChecks(input_api, output_api): + """Checks common to both upload and commit.""" + results = [] + results.extend(input_api.canned_checks.CheckOwners( + input_api, output_api, source_file_filter=None)) + return results + + +def CheckChangeOnUpload(input_api, output_api): + results = [] + results.extend(_CommonChecks(input_api, output_api)) + return results + + +def CheckChangeOnCommit(input_api, output_api): + results = [] + results.extend(_CommonChecks(input_api, output_api)) + results.extend(input_api.canned_checks.CheckChangeHasDescription( + input_api, output_api)) + results.extend(_V8PresubmitChecks(input_api, output_api)) + return results diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct index ebce7ff892..5f8616a6b8 100644 --- a/deps/v8/SConstruct +++ b/deps/v8/SConstruct @@ -59,7 +59,7 @@ LIBRARY_FLAGS = { 'CPPDEFINES': ['V8_INTERPRETED_REGEXP'] }, 'mode:debug': { - 'CPPDEFINES': ['V8_ENABLE_CHECKS', 'OBJECT_PRINT'] + 'CPPDEFINES': ['V8_ENABLE_CHECKS', 'OBJECT_PRINT', 'VERIFY_HEAP'] }, 'objectprint:on': { 'CPPDEFINES': ['OBJECT_PRINT'], @@ -1157,6 +1157,11 @@ SIMPLE_OPTIONS = { 'default': 'on', 'help': 'use fpu instructions when building the snapshot [MIPS only]' }, + 'I_know_I_should_build_with_GYP': { + 'values': ['yes', 'no'], + 'default': 'no', + 'help': 'grace period: temporarily override SCons deprecation' + } } @@ -1257,7 +1262,35 @@ def IsLegal(env, option, values): return True +def WarnAboutDeprecation(): + print """ + ##################################################################### + # # + # LAST WARNING: Building V8 with SCons is deprecated. # + # # + # This only works because you have overridden the kill switch. # + # # + # MIGRATE TO THE GYP-BASED BUILD NOW! # + # # + # Instructions: http://code.google.com/p/v8/wiki/BuildingWithGYP. # + # # + ##################################################################### + """ + + def VerifyOptions(env): + if env['I_know_I_should_build_with_GYP'] != 'yes': + Abort("Building V8 with SCons is no longer supported. 
Please use GYP " + "instead; you can find instructions are at " + "http://code.google.com/p/v8/wiki/BuildingWithGYP.\n\n" + "Quitting.\n\n" + "For a limited grace period, you can specify " + "\"I_know_I_should_build_with_GYP=yes\" to override.") + else: + WarnAboutDeprecation() + import atexit + atexit.register(WarnAboutDeprecation) + if not IsLegal(env, 'mode', ['debug', 'release']): return False if not IsLegal(env, 'sample', ["shell", "process", "lineprocessor"]): @@ -1600,18 +1633,4 @@ try: except: pass - -def WarnAboutDeprecation(): - print """ -####################################################### -# WARNING: Building V8 with SCons is deprecated and # -# will not work much longer. Please switch to using # -# the GYP-based build now. Instructions are at # -# http://code.google.com/p/v8/wiki/BuildingWithGYP. # -####################################################### - """ - -WarnAboutDeprecation() -import atexit -atexit.register(WarnAboutDeprecation) Build() diff --git a/deps/v8/build/android.gypi b/deps/v8/build/android.gypi index d2d1a35726..67a9d35820 100644 --- a/deps/v8/build/android.gypi +++ b/deps/v8/build/android.gypi @@ -122,8 +122,6 @@ 'ldflags': [ '-nostdlib', '-Wl,--no-undefined', - # Don't export symbols from statically linked libraries. - '-Wl,--exclude-libs=ALL', ], 'libraries!': [ '-lrt', # librt is built into Bionic. @@ -219,6 +217,13 @@ ['_type=="shared_library"', { 'ldflags': [ '-Wl,-shared,-Bsymbolic', + '<(android_lib)/crtbegin_so.o', + ], + }], + ['_type=="static_library"', { + 'ldflags': [ + # Don't export symbols from statically linked libraries. + '-Wl,--exclude-libs=ALL', ], }], ], diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi index 44cab4d0cb..e68ee15fde 100644 --- a/deps/v8/build/common.gypi +++ b/deps/v8/build/common.gypi @@ -43,7 +43,7 @@ # access is allowed for all CPUs. 'v8_can_use_unaligned_accesses%': 'default', - # Setting 'v8_can_use_vfp_instructions' to 'true' will enable use of ARM VFP + # Setting 'v8_can_use_vfp2_instructions' to 'true' will enable use of ARM VFP # instructions in the V8 generated code. VFP instructions will be enabled # both for the snapshot and for the ARM target. Leaving the default value # of 'false' will avoid VFP instructions in the snapshot and use CPU feature @@ -70,16 +70,15 @@ 'v8_enable_disassembler%': 0, - # Enable extra checks in API functions and other strategic places. - 'v8_enable_extra_checks%': 1, + 'v8_enable_gdbjit%': 0, 'v8_object_print%': 0, - 'v8_enable_gdbjit%': 0, - # Enable profiling support. Only required on Windows. 'v8_enable_prof%': 0, + 'v8_enable_verify_heap%': 0, + # Some versions of GCC 4.5 seem to need -fno-strict-aliasing. 'v8_no_strict_aliasing%': 0, @@ -103,9 +102,6 @@ # Interpreted regexp engine exists as platform-independent alternative # based where the regular expression is compiled to a bytecode. 'v8_interpreted_regexp%': 0, - - # Name of the python executable. 
- 'python%': 'python', }, 'target_defaults': { 'conditions': [ @@ -115,14 +111,14 @@ ['v8_enable_disassembler==1', { 'defines': ['ENABLE_DISASSEMBLER',], }], - ['v8_enable_extra_checks==1', { - 'defines': ['ENABLE_EXTRA_CHECKS',], + ['v8_enable_gdbjit==1', { + 'defines': ['ENABLE_GDB_JIT_INTERFACE',], }], ['v8_object_print==1', { 'defines': ['OBJECT_PRINT',], }], - ['v8_enable_gdbjit==1', { - 'defines': ['ENABLE_GDB_JIT_INTERFACE',], + ['v8_enable_verify_heap==1', { + 'defines': ['VERIFY_HEAP',], }], ['v8_interpreted_regexp==1', { 'defines': ['V8_INTERPRETED_REGEXP',], @@ -132,6 +128,11 @@ 'V8_TARGET_ARCH_ARM', ], 'conditions': [ + ['armv7==1', { + 'defines': [ + 'CAN_USE_ARMV7_INSTRUCTIONS=1', + ], + }], [ 'v8_can_use_unaligned_accesses=="true"', { 'defines': [ 'CAN_USE_UNALIGNED_ACCESSES=1', @@ -142,12 +143,16 @@ 'CAN_USE_UNALIGNED_ACCESSES=0', ], }], - [ 'v8_can_use_vfp2_instructions=="true"', { + # NEON implies VFP3 and VFP3 implies VFP2. + [ 'v8_can_use_vfp2_instructions=="true" or arm_neon==1 or \ + arm_fpu=="vfpv3" or arm_fpu=="vfpv3-d16"', { 'defines': [ 'CAN_USE_VFP2_INSTRUCTIONS', ], }], - [ 'v8_can_use_vfp3_instructions=="true"', { + # NEON implies VFP3. + [ 'v8_can_use_vfp3_instructions=="true" or arm_neon==1 or \ + arm_fpu=="vfpv3" or arm_fpu=="vfpv3-d16"', { 'defines': [ 'CAN_USE_VFP3_INSTRUCTIONS', ], @@ -198,10 +203,11 @@ ['mips_arch_variant=="mips32r2"', { 'cflags': ['-mips32r2', '-Wa,-mips32r2'], }], + ['mips_arch_variant=="mips32r1"', { + 'cflags': ['-mips32', '-Wa,-mips32'], + }], ['mips_arch_variant=="loongson"', { 'cflags': ['-mips3', '-Wa,-mips3'], - }, { - 'cflags': ['-mips32', '-Wa,-mips32'], }], ], }], @@ -274,7 +280,8 @@ }, }, }], - ['OS in "linux freebsd dragonflybsd openbsd solaris netbsd".split()', { + ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \ + or OS=="netbsd"', { 'conditions': [ [ 'v8_no_strict_aliasing==1', { 'cflags': [ '-fno-strict-aliasing' ], @@ -284,8 +291,8 @@ ['OS=="solaris"', { 'defines': [ '__C99FEATURES__=1' ], # isinf() etc. 
}], - ['(OS=="linux" or OS=="freebsd" or OS=="dragonflybsd" or OS=="openbsd" \ - or OS=="solaris" or OS=="netbsd" or OS=="mac" or OS=="android") and \ + ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \ + or OS=="netbsd" or OS=="mac" or OS=="android") and \ (v8_target_arch=="arm" or v8_target_arch=="ia32" or \ v8_target_arch=="mipsel")', { # Check whether the host compiler and target compiler support the @@ -304,16 +311,21 @@ ['_toolset=="target"', { 'variables': { 'm32flag': ' /dev/null 2>&1) && echo "-m32" || true)', + 'clang%': 0, }, - 'cflags': [ '<(m32flag)' ], - 'ldflags': [ '<(m32flag)' ], + 'conditions': [ + ['OS!="android" or clang==1', { + 'cflags': [ '<(m32flag)' ], + 'ldflags': [ '<(m32flag)' ], + }], + ], 'xcode_settings': { 'ARCHS': [ 'i386' ], }, }], ], }], - ['OS=="freebsd" or OS=="dragonflybsd" or OS=="openbsd"', { + ['OS=="freebsd" or OS=="openbsd"', { 'cflags': [ '-I/usr/local/include' ], }], ['OS=="netbsd"', { @@ -322,11 +334,15 @@ ], # conditions 'configurations': { 'Debug': { + 'variables': { + 'v8_enable_extra_checks%': 1, + }, 'defines': [ 'DEBUG', 'ENABLE_DISASSEMBLER', 'V8_ENABLE_CHECKS', 'OBJECT_PRINT', + 'VERIFY_HEAP', ], 'msvs_settings': { 'VCCLCompilerTool': { @@ -345,7 +361,10 @@ }, }, 'conditions': [ - ['OS in "linux freebsd dragonflybsd openbsd netbsd".split()', { + ['v8_enable_extra_checks==1', { + 'defines': ['ENABLE_EXTRA_CHECKS',], + }], + ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', { 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter', '-Wnon-virtual-dtor', '-Woverloaded-virtual' ], }], @@ -363,12 +382,32 @@ }], ], }], + ['OS=="mac"', { + 'xcode_settings': { + 'GCC_OPTIMIZATION_LEVEL': '0', # -O0 + }, + }], ], }, # Debug 'Release': { + 'variables': { + 'v8_enable_extra_checks%': 0, + }, 'conditions': [ - ['OS=="linux" or OS=="freebsd" or OS=="dragonflybsd" \ - or OS=="openbsd" or OS=="netbsd" or OS=="android"', { + ['v8_enable_extra_checks==1', { + 'defines': ['ENABLE_EXTRA_CHECKS',], + }], + ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \ + or OS=="android"', { + 'cflags!': [ + '-O2', + '-Os', + ], + 'cflags': [ + '-fdata-sections', + '-ffunction-sections', + '-O3', + ], 'conditions': [ [ 'gcc_version==44 and clang==0', { 'cflags': [ diff --git a/deps/v8/build/standalone.gypi b/deps/v8/build/standalone.gypi index e6c7088997..7145a16e0c 100644 --- a/deps/v8/build/standalone.gypi +++ b/deps/v8/build/standalone.gypi @@ -38,7 +38,8 @@ 'variables': { 'variables': { 'conditions': [ - ['OS!="win"', { + ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or \ + OS=="netbsd" or OS=="mac"', { # This handles the Unix platforms we generally deal with. # Anything else gets passed through, which probably won't work # very well; such hosts should pass an explicit target_arch @@ -46,8 +47,9 @@ 'host_arch%': ' object) = 0; + protected: + virtual ~ObjectNameResolver() {} + }; + /** * Takes a heap snapshot and returns it. Title may be an empty string. * See HeapSnapshot::Type for types description. @@ -413,7 +427,8 @@ class V8EXPORT HeapProfiler { static const HeapSnapshot* TakeSnapshot( Handle title, HeapSnapshot::Type type = HeapSnapshot::kFull, - ActivityControl* control = NULL); + ActivityControl* control = NULL, + ObjectNameResolver* global_object_name_resolver = NULL); /** * Starts tracking of heap objects population statistics. 
After calling diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index ddde388cd4..f577e937a5 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -76,6 +76,22 @@ #endif // _WIN32 +#if defined(__GNUC__) && !defined(DEBUG) +#define V8_INLINE(declarator) inline __attribute__((always_inline)) declarator +#elif defined(_MSC_VER) && !defined(DEBUG) +#define V8_INLINE(declarator) __forceinline declarator +#else +#define V8_INLINE(declarator) inline declarator +#endif + +#if defined(__GNUC__) && !V8_DISABLE_DEPRECATIONS +#define V8_DEPRECATED(declarator) declarator __attribute__ ((deprecated)) +#elif defined(_MSC_VER) && !V8_DISABLE_DEPRECATIONS +#define V8_DEPRECATED(declarator) __declspec(deprecated) declarator +#else +#define V8_DEPRECATED(declarator) declarator +#endif + /** * The v8 JavaScript engine. */ @@ -176,12 +192,12 @@ template class Handle { /** * Creates an empty handle. */ - inline Handle() : val_(0) {} + V8_INLINE(Handle()) : val_(0) {} /** * Creates a new handle for the specified value. */ - inline explicit Handle(T* val) : val_(val) {} + V8_INLINE(explicit Handle(T* val)) : val_(val) {} /** * Creates a handle for the contents of the specified handle. This @@ -193,7 +209,7 @@ template class Handle { * Handle to a variable declared as Handle, is legal * because String is a subclass of Value. */ - template inline Handle(Handle that) + template V8_INLINE(Handle(Handle that)) : val_(reinterpret_cast(*that)) { /** * This check fails when trying to convert between incompatible @@ -206,16 +222,16 @@ template class Handle { /** * Returns true if the handle is empty. */ - inline bool IsEmpty() const { return val_ == 0; } + V8_INLINE(bool IsEmpty() const) { return val_ == 0; } /** * Sets the handle to be empty. IsEmpty() will then return true. */ - inline void Clear() { val_ = 0; } + V8_INLINE(void Clear()) { val_ = 0; } - inline T* operator->() const { return val_; } + V8_INLINE(T* operator->() const) { return val_; } - inline T* operator*() const { return val_; } + V8_INLINE(T* operator*() const) { return val_; } /** * Checks whether two handles are the same. @@ -223,7 +239,7 @@ template class Handle { * to which they refer are identical. * The handles' references are not checked. */ - template inline bool operator==(Handle that) const { + template V8_INLINE(bool operator==(Handle that) const) { internal::Object** a = reinterpret_cast(**this); internal::Object** b = reinterpret_cast(*that); if (a == 0) return b == 0; @@ -237,11 +253,11 @@ template class Handle { * the objects to which they refer are different. * The handles' references are not checked. */ - template inline bool operator!=(Handle that) const { + template V8_INLINE(bool operator!=(Handle that) const) { return !operator==(that); } - template static inline Handle Cast(Handle that) { + template V8_INLINE(static Handle Cast(Handle that)) { #ifdef V8_ENABLE_CHECKS // If we're going to perform the type check then we have to check // that the handle isn't empty before doing the checked cast. 
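The V8_INLINE and V8_DEPRECATED macros introduced above take a whole declarator as their argument, which is why the rest of this header wraps complete declarations in them. A minimal sketch of how they apply to ordinary declarations (Twice and OldHelper are hypothetical names, not part of the API):

    V8_INLINE(int Twice(int x)) { return 2 * x; }   // always-inlined in non-DEBUG GCC/MSVC builds
    V8_DEPRECATED(void OldHelper());                // declaration carries the compiler's deprecation attribute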
@@ -250,7 +266,7 @@ template class Handle { return Handle(T::Cast(*that)); } - template inline Handle As() { + template V8_INLINE(Handle As()) { return Handle::Cast(*this); } @@ -268,8 +284,8 @@ template class Handle { */ template class Local : public Handle { public: - inline Local(); - template inline Local(Local that) + V8_INLINE(Local()); + template V8_INLINE(Local(Local that)) : Handle(reinterpret_cast(*that)) { /** * This check fails when trying to convert between incompatible @@ -278,8 +294,8 @@ template class Local : public Handle { */ TYPE_CHECK(T, S); } - template inline Local(S* that) : Handle(that) { } - template static inline Local Cast(Local that) { + template V8_INLINE(Local(S* that) : Handle(that)) { } + template V8_INLINE(static Local Cast(Local that)) { #ifdef V8_ENABLE_CHECKS // If we're going to perform the type check then we have to check // that the handle isn't empty before doing the checked cast. @@ -288,15 +304,17 @@ template class Local : public Handle { return Local(T::Cast(*that)); } - template inline Local As() { + template V8_INLINE(Local As()) { return Local::Cast(*this); } - /** Create a local handle for the content of another handle. - * The referee is kept alive by the local handle even when - * the original handle is destroyed/disposed. + /** + * Create a local handle for the content of another handle. + * The referee is kept alive by the local handle even when + * the original handle is destroyed/disposed. */ - inline static Local New(Handle that); + V8_INLINE(static Local New(Handle that)); + V8_INLINE(static Local New(Isolate* isolate, Handle that)); }; @@ -323,7 +341,7 @@ template class Persistent : public Handle { * Creates an empty persistent handle that doesn't point to any * storage cell. */ - inline Persistent(); + V8_INLINE(Persistent()); /** * Creates a persistent handle for the same storage cell as the @@ -336,7 +354,7 @@ template class Persistent : public Handle { * Persistent to a variable declared as Persistent, * is allowed as String is a subclass of Value. */ - template inline Persistent(Persistent that) + template V8_INLINE(Persistent(Persistent that)) : Handle(reinterpret_cast(*that)) { /** * This check fails when trying to convert between incompatible @@ -346,16 +364,16 @@ template class Persistent : public Handle { TYPE_CHECK(T, S); } - template inline Persistent(S* that) : Handle(that) { } + template V8_INLINE(Persistent(S* that)) : Handle(that) { } /** * "Casts" a plain handle which is known to be a persistent handle * to a persistent handle. */ - template explicit inline Persistent(Handle that) + template explicit V8_INLINE(Persistent(Handle that)) : Handle(*that) { } - template static inline Persistent Cast(Persistent that) { + template V8_INLINE(static Persistent Cast(Persistent that)) { #ifdef V8_ENABLE_CHECKS // If we're going to perform the type check then we have to check // that the handle isn't empty before doing the checked cast. @@ -364,7 +382,7 @@ template class Persistent : public Handle { return Persistent(T::Cast(*that)); } - template inline Persistent As() { + template V8_INLINE(Persistent As()) { return Persistent::Cast(*this); } @@ -372,7 +390,7 @@ template class Persistent : public Handle { * Creates a new persistent handle for an existing local or * persistent handle. */ - inline static Persistent New(Handle that); + V8_INLINE(static Persistent New(Handle that)); /** * Releases the storage cell referenced by this persistent handle. 
@@ -380,7 +398,8 @@ template class Persistent : public Handle { * This handle's reference, and any other references to the storage * cell remain and IsEmpty will still return false. */ - inline void Dispose(); + V8_INLINE(void Dispose()); + V8_INLINE(void Dispose(Isolate* isolate)); /** * Make the reference to this object weak. When only weak handles @@ -388,10 +407,13 @@ template class Persistent : public Handle { * callback to the given V8::WeakReferenceCallback function, passing * it the object reference and the given parameters. */ - inline void MakeWeak(void* parameters, WeakReferenceCallback callback); + V8_INLINE(void MakeWeak(void* parameters, WeakReferenceCallback callback)); + V8_INLINE(void MakeWeak(Isolate* isolate, + void* parameters, + WeakReferenceCallback callback)); - /** Clears the weak reference to this object.*/ - inline void ClearWeak(); + /** Clears the weak reference to this object. */ + V8_INLINE(void ClearWeak()); /** * Marks the reference to this object independent. Garbage collector @@ -400,23 +422,42 @@ template class Persistent : public Handle { * assume that it will be preceded by a global GC prologue callback * or followed by a global GC epilogue callback. */ - inline void MarkIndependent(); + V8_INLINE(void MarkIndependent()); + V8_INLINE(void MarkIndependent(Isolate* isolate)); /** - *Checks if the handle holds the only reference to an object. + * Marks the reference to this object partially dependent. Partially + * dependent handles only depend on other partially dependent handles and + * these dependencies are provided through object groups. It provides a way + * to build smaller object groups for young objects that represent only a + * subset of all external dependencies. This mark is automatically cleared + * after each garbage collection. */ - inline bool IsNearDeath() const; + V8_INLINE(void MarkPartiallyDependent()); + V8_INLINE(void MarkPartiallyDependent(Isolate* isolate)); - /** - * Returns true if the handle's reference is weak. - */ - inline bool IsWeak() const; + /** Returns true if this handle was previously marked as independent. */ + V8_INLINE(bool IsIndependent() const); + V8_INLINE(bool IsIndependent(Isolate* isolate) const); + + /** Checks if the handle holds the only reference to an object. */ + V8_INLINE(bool IsNearDeath() const); + + /** Returns true if the handle's reference is weak. */ + V8_INLINE(bool IsWeak() const); + V8_INLINE(bool IsWeak(Isolate* isolate) const); /** * Assigns a wrapper class ID to the handle. See RetainedObjectInfo * interface description in v8-profiler.h for details. */ - inline void SetWrapperClassId(uint16_t class_id); + V8_INLINE(void SetWrapperClassId(uint16_t class_id)); + + /** + * Returns the class ID previously assigned to this handle or 0 if no class + * ID was previously assigned. + */ + V8_INLINE(uint16_t WrapperClassId() const); private: friend class ImplementationUtilities; @@ -459,12 +500,14 @@ class V8EXPORT HandleScope { * Creates a new handle with the given value. */ static internal::Object** CreateHandle(internal::Object* value); + static internal::Object** CreateHandle(internal::Isolate* isolate, + internal::Object* value); // Faster version, uses HeapObject to obtain the current Isolate. static internal::Object** CreateHandle(internal::HeapObject* value); private: - // Make it impossible to create heap-allocated or illegal handle - // scopes by disallowing certain operations. + // Make it hard to create heap-allocated or illegal handle scopes by + // disallowing certain operations. 
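As a usage sketch of the Persistent handle API documented above, assuming an existing local handle obj (OnWeak and RetainWrapper are hypothetical embedder functions; the callback type is the header's WeakReferenceCallback):

    void OnWeak(v8::Persistent<v8::Value> object, void* parameter) {
      object.Dispose();   // release the last reference so the object can be collected
      object.Clear();
    }

    void RetainWrapper(v8::Handle<v8::Object> obj) {
      v8::Persistent<v8::Object> wrapper = v8::Persistent<v8::Object>::New(obj);
      wrapper.MakeWeak(NULL, OnWeak);   // OnWeak fires once only weak references remain
      wrapper.MarkIndependent();        // may be reclaimed by a scavenge on its own
    }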
HandleScope(const HandleScope&); void operator=(const HandleScope&); void* operator new(size_t size); @@ -477,7 +520,7 @@ class V8EXPORT HandleScope { internal::Object** next; internal::Object** limit; int level; - inline void Initialize() { + V8_INLINE(void Initialize()) { next = limit = NULL; level = 0; } @@ -570,16 +613,16 @@ class V8EXPORT ScriptData { // NOLINT */ class ScriptOrigin { public: - inline ScriptOrigin( + V8_INLINE(ScriptOrigin( Handle resource_name, Handle resource_line_offset = Handle(), - Handle resource_column_offset = Handle()) + Handle resource_column_offset = Handle())) : resource_name_(resource_name), resource_line_offset_(resource_line_offset), resource_column_offset_(resource_column_offset) { } - inline Handle ResourceName() const; - inline Handle ResourceLineOffset() const; - inline Handle ResourceColumnOffset() const; + V8_INLINE(Handle ResourceName() const); + V8_INLINE(Handle ResourceLineOffset() const); + V8_INLINE(Handle ResourceColumnOffset() const); private: Handle resource_name_; Handle resource_line_offset_; @@ -867,13 +910,13 @@ class Value : public Data { * Returns true if this value is the undefined value. See ECMA-262 * 4.3.10. */ - inline bool IsUndefined() const; + V8_INLINE(bool IsUndefined() const); /** * Returns true if this value is the null value. See ECMA-262 * 4.3.11. */ - inline bool IsNull() const; + V8_INLINE(bool IsNull() const); /** * Returns true if this value is true. @@ -889,7 +932,7 @@ class Value : public Data { * Returns true if this value is an instance of the String type. * See ECMA-262 8.4. */ - inline bool IsString() const; + V8_INLINE(bool IsString() const); /** * Returns true if this value is a function. @@ -987,9 +1030,9 @@ class Value : public Data { V8EXPORT bool StrictEquals(Handle that) const; private: - inline bool QuickIsUndefined() const; - inline bool QuickIsNull() const; - inline bool QuickIsString() const; + V8_INLINE(bool QuickIsUndefined() const); + V8_INLINE(bool QuickIsNull() const); + V8_INLINE(bool QuickIsString() const); V8EXPORT bool FullIsUndefined() const; V8EXPORT bool FullIsNull() const; V8EXPORT bool FullIsString() const; @@ -1009,7 +1052,7 @@ class Primitive : public Value { }; class Boolean : public Primitive { public: V8EXPORT bool Value() const; - static inline Handle New(bool value); + V8_INLINE(static Handle New(bool value)); }; @@ -1018,6 +1061,11 @@ class Boolean : public Primitive { */ class String : public Primitive { public: + enum Encoding { + UNKNOWN_ENCODING = 0x1, + TWO_BYTE_ENCODING = 0x0, + ASCII_ENCODING = 0x4 + }; /** * Returns the number of characters in this string. */ @@ -1089,7 +1137,7 @@ class String : public Primitive { * A zero length string. */ V8EXPORT static v8::Local Empty(); - inline static v8::Local Empty(Isolate* isolate); + V8_INLINE(static v8::Local Empty(Isolate* isolate)); /** * Returns true if the string is external @@ -1180,11 +1228,19 @@ class String : public Primitive { ExternalAsciiStringResource() {} }; + /** + * If the string is an external string, return the ExternalStringResourceBase + * regardless of the encoding, otherwise return NULL. The encoding of the + * string is returned in encoding_out. + */ + V8_INLINE(ExternalStringResourceBase* GetExternalStringResourceBase( + Encoding* encoding_out) const); + /** * Get the ExternalStringResource for an external string. Returns * NULL if IsExternal() doesn't return true. 
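A small sketch of the encoding-aware external-resource accessor declared above (InspectExternal is a hypothetical helper):

    void InspectExternal(v8::Handle<v8::String> str) {
      v8::String::Encoding encoding;
      v8::String::ExternalStringResourceBase* resource =
          str->GetExternalStringResourceBase(&encoding);
      if (resource != NULL && encoding == v8::String::ASCII_ENCODING) {
        // str is backed by an external ASCII resource and can be read without copying.
      }
    }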
*/ - inline ExternalStringResource* GetExternalStringResource() const; + V8_INLINE(ExternalStringResource* GetExternalStringResource() const); /** * Get the ExternalAsciiStringResource for an external ASCII string. @@ -1193,7 +1249,7 @@ class String : public Primitive { V8EXPORT const ExternalAsciiStringResource* GetExternalAsciiStringResource() const; - static inline String* Cast(v8::Value* obj); + V8_INLINE(static String* Cast(v8::Value* obj)); /** * Allocates a new string from either UTF-8 encoded or ASCII data. @@ -1343,6 +1399,8 @@ class String : public Primitive { }; private: + V8EXPORT void VerifyExternalStringResourceBase(ExternalStringResourceBase* v, + Encoding encoding) const; V8EXPORT void VerifyExternalStringResource(ExternalStringResource* val) const; V8EXPORT static void CheckCast(v8::Value* obj); }; @@ -1355,7 +1413,7 @@ class Number : public Primitive { public: V8EXPORT double Value() const; V8EXPORT static Local New(double value); - static inline Number* Cast(v8::Value* obj); + V8_INLINE(static Number* Cast(v8::Value* obj)); private: V8EXPORT Number(); V8EXPORT static void CheckCast(v8::Value* obj); @@ -1369,8 +1427,10 @@ class Integer : public Number { public: V8EXPORT static Local New(int32_t value); V8EXPORT static Local NewFromUnsigned(uint32_t value); + V8EXPORT static Local New(int32_t value, Isolate*); + V8EXPORT static Local NewFromUnsigned(uint32_t value, Isolate*); V8EXPORT int64_t Value() const; - static inline Integer* Cast(v8::Value* obj); + V8_INLINE(static Integer* Cast(v8::Value* obj)); private: V8EXPORT Integer(); V8EXPORT static void CheckCast(v8::Value* obj); @@ -1565,16 +1625,42 @@ class Object : public Value { /** Gets the number of internal fields for this Object. */ V8EXPORT int InternalFieldCount(); - /** Gets the value in an internal field. */ - inline Local GetInternalField(int index); + + /** Gets the value from an internal field. */ + V8_INLINE(Local GetInternalField(int index)); + /** Sets the value in an internal field. */ V8EXPORT void SetInternalField(int index, Handle value); - /** Gets a native pointer from an internal field. */ - inline void* GetPointerFromInternalField(int index); + /** + * Gets a native pointer from an internal field. Deprecated. If the pointer is + * always 2-byte-aligned, use GetAlignedPointerFromInternalField instead, + * otherwise use a combination of GetInternalField, External::Cast and + * External::Value. + */ + V8EXPORT V8_DEPRECATED(void* GetPointerFromInternalField(int index)); + + /** + * Sets a native pointer in an internal field. Deprecated. If the pointer is + * always 2-byte aligned, use SetAlignedPointerInInternalField instead, + * otherwise use a combination of External::New and SetInternalField. + */ + V8_DEPRECATED(V8_INLINE(void SetPointerInInternalField(int index, + void* value))); + + /** + * Gets a 2-byte-aligned native pointer from an internal field. This field + * must have been set by SetAlignedPointerInInternalField, everything else + * leads to undefined behavior. + */ + V8_INLINE(void* GetAlignedPointerFromInternalField(int index)); - /** Sets a native pointer in an internal field. */ - V8EXPORT void SetPointerInInternalField(int index, void* value); + /** + * Sets a 2-byte-aligned native pointer in an internal field. To retrieve such + * a field, GetAlignedPointerFromInternalField must be used, everything else + * leads to undefined behavior. + */ + V8EXPORT void SetAlignedPointerInInternalField(int index, void* value); // Testers for local properties. 
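A minimal sketch of the aligned-pointer internal-field accessors declared above; MyWrapper and AttachWrapper are hypothetical, obj must have been created with at least one internal field, and the stored pointer must be at least 2-byte aligned:

    struct MyWrapper { int refcount; };   // hypothetical embedder-side type

    void AttachWrapper(v8::Handle<v8::Object> obj) {
      obj->SetAlignedPointerInInternalField(0, new MyWrapper());
      MyWrapper* back = static_cast<MyWrapper*>(
          obj->GetAlignedPointerFromInternalField(0));
      // back now points at the same MyWrapper instance.
    }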
V8EXPORT bool HasOwnProperty(Handle key); @@ -1700,19 +1786,13 @@ class Object : public Value { Handle argv[]); V8EXPORT static Local New(); - static inline Object* Cast(Value* obj); + V8_INLINE(static Object* Cast(Value* obj)); private: V8EXPORT Object(); V8EXPORT static void CheckCast(Value* obj); - V8EXPORT Local CheckedGetInternalField(int index); - V8EXPORT void* SlowGetPointerFromInternalField(int index); - - /** - * If quick access to the internal field is possible this method - * returns the value. Otherwise an empty handle is returned. - */ - inline Local UncheckedGetInternalField(int index); + V8EXPORT Local SlowGetInternalField(int index); + V8EXPORT void* SlowGetAlignedPointerFromInternalField(int index); }; @@ -1735,7 +1815,7 @@ class Array : public Object { */ V8EXPORT static Local New(int length = 0); - static inline Array* Cast(Value* obj); + V8_INLINE(static Array* Cast(Value* obj)); private: V8EXPORT Array(); V8EXPORT static void CheckCast(Value* obj); @@ -1775,7 +1855,7 @@ class Function : public Object { V8EXPORT int GetScriptColumnNumber() const; V8EXPORT Handle GetScriptId() const; V8EXPORT ScriptOrigin GetScriptOrigin() const; - static inline Function* Cast(Value* obj); + V8_INLINE(static Function* Cast(Value* obj)); V8EXPORT static const int kLineOffsetNotFound; private: @@ -1797,7 +1877,7 @@ class Date : public Object { */ V8EXPORT double NumberValue() const; - static inline Date* Cast(v8::Value* obj); + V8_INLINE(static Date* Cast(v8::Value* obj)); /** * Notification that the embedder has changed the time zone, @@ -1830,7 +1910,7 @@ class NumberObject : public Object { */ V8EXPORT double NumberValue() const; - static inline NumberObject* Cast(v8::Value* obj); + V8_INLINE(static NumberObject* Cast(v8::Value* obj)); private: V8EXPORT static void CheckCast(v8::Value* obj); @@ -1849,7 +1929,7 @@ class BooleanObject : public Object { */ V8EXPORT bool BooleanValue() const; - static inline BooleanObject* Cast(v8::Value* obj); + V8_INLINE(static BooleanObject* Cast(v8::Value* obj)); private: V8EXPORT static void CheckCast(v8::Value* obj); @@ -1868,7 +1948,7 @@ class StringObject : public Object { */ V8EXPORT Local StringValue() const; - static inline StringObject* Cast(v8::Value* obj); + V8_INLINE(static StringObject* Cast(v8::Value* obj)); private: V8EXPORT static void CheckCast(v8::Value* obj); @@ -1915,7 +1995,7 @@ class RegExp : public Object { */ V8EXPORT Flags GetFlags() const; - static inline RegExp* Cast(v8::Value* obj); + V8_INLINE(static RegExp* Cast(v8::Value* obj)); private: V8EXPORT static void CheckCast(v8::Value* obj); @@ -1923,29 +2003,22 @@ class RegExp : public Object { /** - * A JavaScript value that wraps a C++ void*. This type of value is - * mainly used to associate C++ data structures with JavaScript - * objects. - * - * The Wrap function V8 will return the most optimal Value object wrapping the - * C++ void*. The type of the value is not guaranteed to be an External object - * and no assumptions about its type should be made. To access the wrapped - * value Unwrap should be used, all other operations on that object will lead - * to unpredictable results. + * A JavaScript value that wraps a C++ void*. This type of value is mainly used + * to associate C++ data structures with JavaScript objects. */ class External : public Value { public: - V8EXPORT static Local Wrap(void* data); - static inline void* Unwrap(Handle obj); + /** Deprecated, use New instead. 
*/ + V8_DEPRECATED(V8_INLINE(static Local Wrap(void* value))); + + /** Deprecated, use a combination of Cast and Value instead. */ + V8_DEPRECATED(V8_INLINE(static void* Unwrap(Handle obj))); V8EXPORT static Local New(void* value); - static inline External* Cast(Value* obj); + V8_INLINE(static External* Cast(Value* obj)); V8EXPORT void* Value() const; private: - V8EXPORT External(); V8EXPORT static void CheckCast(v8::Value* obj); - static inline void* QuickUnwrap(Handle obj); - V8EXPORT static void* FullUnwrap(Handle obj); }; @@ -1960,7 +2033,7 @@ class V8EXPORT Template : public Data { /** Adds a property to each instance created by this template.*/ void Set(Handle name, Handle value, PropertyAttribute attributes = None); - inline void Set(const char* name, Handle value); + V8_INLINE(void Set(const char* name, Handle value)); private: Template(); @@ -1977,14 +2050,14 @@ class V8EXPORT Template : public Data { */ class Arguments { public: - inline int Length() const; - inline Local operator[](int i) const; - inline Local Callee() const; - inline Local This() const; - inline Local Holder() const; - inline bool IsConstructCall() const; - inline Local Data() const; - inline Isolate* GetIsolate() const; + V8_INLINE(int Length() const); + V8_INLINE(Local operator[](int i) const); + V8_INLINE(Local Callee() const); + V8_INLINE(Local This() const); + V8_INLINE(Local Holder() const); + V8_INLINE(bool IsConstructCall() const); + V8_INLINE(Local Data() const); + V8_INLINE(Isolate* GetIsolate() const); private: static const int kIsolateIndex = 0; @@ -1993,10 +2066,10 @@ class Arguments { static const int kHolderIndex = -3; friend class ImplementationUtilities; - inline Arguments(internal::Object** implicit_args, + V8_INLINE(Arguments(internal::Object** implicit_args, internal::Object** values, int length, - bool is_construct_call); + bool is_construct_call)); internal::Object** implicit_args_; internal::Object** values_; int length_; @@ -2010,12 +2083,12 @@ class Arguments { */ class V8EXPORT AccessorInfo { public: - inline AccessorInfo(internal::Object** args) + V8_INLINE(AccessorInfo(internal::Object** args)) : args_(args) { } - inline Isolate* GetIsolate() const; - inline Local Data() const; - inline Local This() const; - inline Local Holder() const; + V8_INLINE(Isolate* GetIsolate() const); + V8_INLINE(Local Data() const); + V8_INLINE(Local This() const); + V8_INLINE(Local Holder() const); private: internal::Object** args_; @@ -2580,7 +2653,7 @@ void V8EXPORT RegisterExtension(Extension* extension); */ class V8EXPORT DeclareExtension { public: - inline DeclareExtension(Extension* extension) { + V8_INLINE(DeclareExtension(Extension* extension)) { RegisterExtension(extension); } }; @@ -2594,10 +2667,10 @@ Handle V8EXPORT Null(); Handle V8EXPORT True(); Handle V8EXPORT False(); -inline Handle Undefined(Isolate* isolate); -inline Handle Null(Isolate* isolate); -inline Handle True(Isolate* isolate); -inline Handle False(Isolate* isolate); +V8_INLINE(Handle Undefined(Isolate* isolate)); +V8_INLINE(Handle Null(Isolate* isolate)); +V8_INLINE(Handle True(Isolate* isolate)); +V8_INLINE(Handle False(Isolate* isolate)); /** @@ -2638,7 +2711,7 @@ bool V8EXPORT SetResourceConstraints(ResourceConstraints* constraints); typedef void (*FatalErrorCallback)(const char* location, const char* message); -typedef void (*MessageCallback)(Handle message, Handle data); +typedef void (*MessageCallback)(Handle message, Handle error); /** @@ -2751,6 +2824,7 @@ class V8EXPORT HeapStatistics { HeapStatistics(); size_t 
total_heap_size() { return total_heap_size_; } size_t total_heap_size_executable() { return total_heap_size_executable_; } + size_t total_physical_size() { return total_physical_size_; } size_t used_heap_size() { return used_heap_size_; } size_t heap_size_limit() { return heap_size_limit_; } @@ -2759,11 +2833,15 @@ class V8EXPORT HeapStatistics { void set_total_heap_size_executable(size_t size) { total_heap_size_executable_ = size; } + void set_total_physical_size(size_t size) { + total_physical_size_ = size; + } void set_used_heap_size(size_t size) { used_heap_size_ = size; } void set_heap_size_limit(size_t size) { heap_size_limit_ = size; } size_t total_heap_size_; size_t total_heap_size_executable_; + size_t total_physical_size_; size_t used_heap_size_; size_t heap_size_limit_; @@ -2849,13 +2927,13 @@ class V8EXPORT Isolate { /** * Associate embedder-specific data with the isolate */ - inline void SetData(void* data); + V8_INLINE(void SetData(void* data)); /** * Retrieve embedder-specific data from the isolate. * Returns NULL if SetData has never been called. */ - inline void* GetData(); + V8_INLINE(void* GetData()); private: Isolate(); @@ -2996,7 +3074,7 @@ typedef void (*JitCodeEventHandler)(const JitCodeEvent* event); /** - * Interface for iterating though all external resources in the heap. + * Interface for iterating through all external resources in the heap. */ class V8EXPORT ExternalResourceVisitor { // NOLINT public: @@ -3005,6 +3083,17 @@ class V8EXPORT ExternalResourceVisitor { // NOLINT }; +/** + * Interface for iterating through all the persistent handles in the heap. + */ +class V8EXPORT PersistentHandleVisitor { // NOLINT + public: + virtual ~PersistentHandleVisitor() {} + virtual void VisitPersistentHandle(Persistent value, + uint16_t class_id) {} +}; + + /** * Container class for static utility functions. */ @@ -3070,8 +3159,7 @@ class V8EXPORT V8 { * The same message listener can be added more than once and in that * case it will be called more than once for each message. */ - static bool AddMessageListener(MessageCallback that, - Handle data = Handle()); + static bool AddMessageListener(MessageCallback that); /** * Remove all message listeners from the specified callback function. @@ -3117,12 +3205,6 @@ class V8EXPORT V8 { static void SetCreateHistogramFunction(CreateHistogramCallback); static void SetAddHistogramSampleFunction(AddHistogramSampleCallback); - /** - * Enables the computation of a sliding window of states. The sliding - * window information is recorded in statistics counters. - */ - static void EnableSlidingStateWindow(); - /** Callback function for reporting failed access checks.*/ static void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback); @@ -3217,12 +3299,19 @@ class V8EXPORT V8 { * After each garbage collection, object groups are removed. It is * intended to be used in the before-garbage-collection callback * function, for instance to simulate DOM tree connections among JS - * wrapper objects. + * wrapper objects. Object groups for all dependent handles need to + * be provided for kGCTypeMarkSweepCompact collections, for all other + * garbage collection types it is sufficient to provide object groups + * for partially dependent handles only. * See v8-profiler.h for RetainedObjectInfo interface description. 
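As a sketch of the per-isolate embedder-data accessors declared above (MyEmbedderState and InstallState are hypothetical):

    struct MyEmbedderState { int counter; };   // hypothetical embedder-side type

    void InstallState() {
      v8::Isolate* isolate = v8::Isolate::GetCurrent();
      isolate->SetData(new MyEmbedderState());
      MyEmbedderState* state = static_cast<MyEmbedderState*>(isolate->GetData());
    }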
*/ static void AddObjectGroup(Persistent* objects, size_t length, RetainedObjectInfo* info = NULL); + static void AddObjectGroup(Isolate* isolate, + Persistent* objects, + size_t length, + RetainedObjectInfo* info = NULL); /** * Allows the host application to declare implicit references between @@ -3407,11 +3496,17 @@ class V8EXPORT V8 { /** * Iterates through all external resources referenced from current isolate - * heap. This method is not expected to be used except for debugging purposes - * and may be quite slow. + * heap. GC is not invoked prior to iterating, therefore there is no + * guarantee that visited objects are still alive. */ static void VisitExternalResources(ExternalResourceVisitor* visitor); + /** + * Iterates through all the persistent handles in the current isolate's heap + * that have class_ids. + */ + static void VisitHandlesWithClassIds(PersistentHandleVisitor* visitor); + /** * Optional notification that the embedder is idle. * V8 uses the notification to reduce memory footprint. @@ -3445,15 +3540,32 @@ class V8EXPORT V8 { static internal::Object** GlobalizeReference(internal::Object** handle); static void DisposeGlobal(internal::Object** global_handle); + static void DisposeGlobal(internal::Isolate* isolate, + internal::Object** global_handle); static void MakeWeak(internal::Object** global_handle, void* data, WeakReferenceCallback); + static void MakeWeak(internal::Isolate* isolate, + internal::Object** global_handle, + void* data, + WeakReferenceCallback); static void ClearWeak(internal::Object** global_handle); static void MarkIndependent(internal::Object** global_handle); + static void MarkIndependent(internal::Isolate* isolate, + internal::Object** global_handle); + static void MarkPartiallyDependent(internal::Object** global_handle); + static void MarkPartiallyDependent(internal::Isolate* isolate, + internal::Object** global_handle); + static bool IsGlobalIndependent(internal::Object** global_handle); + static bool IsGlobalIndependent(internal::Isolate* isolate, + internal::Object** global_handle); static bool IsGlobalNearDeath(internal::Object** global_handle); static bool IsGlobalWeak(internal::Object** global_handle); + static bool IsGlobalWeak(internal::Isolate* isolate, + internal::Object** global_handle); static void SetWrapperClassId(internal::Object** global_handle, uint16_t class_id); + static uint16_t GetWrapperClassId(internal::Object** global_handle); template friend class Handle; template friend class Local; @@ -3468,7 +3580,9 @@ class V8EXPORT V8 { class V8EXPORT TryCatch { public: /** - * Creates a new try/catch block and registers it with v8. + * Creates a new try/catch block and registers it with v8. Note that + * all TryCatch blocks should be stack allocated because the memory + * location itself is compared against JavaScript try/catch blocks. */ TryCatch(); @@ -3558,6 +3672,12 @@ class V8EXPORT TryCatch { void SetCaptureMessage(bool value); private: + // Make it hard to create heap-allocated TryCatch blocks. + TryCatch(const TryCatch&); + void operator=(const TryCatch&); + void* operator new(size_t size); + void operator delete(void*, size_t); + v8::internal::Isolate* isolate_; void* next_; void* exception_; @@ -3699,12 +3819,45 @@ class V8EXPORT Context { static bool InContext(); /** - * Associate an additional data object with the context. This is mainly used - * with the debugger to provide additional information on the context through - * the debugger API. + * Gets embedder data with index 0. 
Deprecated, use GetEmbedderData with index + * 0 instead. */ - void SetData(Handle data); - Local GetData(); + V8_DEPRECATED(V8_INLINE(Local GetData())); + + /** + * Sets embedder data with index 0. Deprecated, use SetEmbedderData with index + * 0 instead. + */ + V8_DEPRECATED(V8_INLINE(void SetData(Handle value))); + + /** + * Gets the embedder data with the given index, which must have been set by a + * previous call to SetEmbedderData with the same index. Note that index 0 + * currently has a special meaning for Chrome's debugger. + */ + V8_INLINE(Local GetEmbedderData(int index)); + + /** + * Sets the embedder data with the given index, growing the data as + * needed. Note that index 0 currently has a special meaning for Chrome's + * debugger. + */ + void SetEmbedderData(int index, Handle value); + + /** + * Gets a 2-byte-aligned native pointer from the embedder data with the given + * index, which must have bees set by a previous call to + * SetAlignedPointerInEmbedderData with the same index. Note that index 0 + * currently has a special meaning for Chrome's debugger. + */ + V8_INLINE(void* GetAlignedPointerFromEmbedderData(int index)); + + /** + * Sets a 2-byte-aligned native pointer in the embedder data with the given + * index, growing the data as needed. Note that index 0 currently has a + * special meaning for Chrome's debugger. + */ + void SetAlignedPointerInEmbedderData(int index, void* value); /** * Control whether code generation from strings is allowed. Calling @@ -3727,16 +3880,23 @@ class V8EXPORT Context { */ bool IsCodeGenerationFromStringsAllowed(); + /** + * Sets the error description for the exception that is thrown when + * code generation from strings is not allowed and 'eval' or the 'Function' + * constructor are called. + */ + void SetErrorMessageForCodeGenerationFromStrings(Handle message); + /** * Stack-allocated class which sets the execution context for all * operations executed within a local scope. */ class Scope { public: - explicit inline Scope(Handle context) : context_(context) { + explicit V8_INLINE(Scope(Handle context)) : context_(context) { context_->Enter(); } - inline ~Scope() { context_->Exit(); } + V8_INLINE(~Scope()) { context_->Exit(); } private: Handle context_; }; @@ -3746,6 +3906,9 @@ class V8EXPORT Context { friend class Script; friend class Object; friend class Function; + + Local SlowGetEmbedderData(int index); + void* SlowGetAlignedPointerFromEmbedderData(int index); }; @@ -3974,47 +4137,27 @@ template struct SmiTagging; template <> struct SmiTagging<4> { static const int kSmiShiftSize = 0; static const int kSmiValueSize = 31; - static inline int SmiToInt(internal::Object* value) { + V8_INLINE(static int SmiToInt(internal::Object* value)) { int shift_bits = kSmiTagSize + kSmiShiftSize; // Throw away top 32 bits and shift down (requires >> to be sign extending). return static_cast(reinterpret_cast(value)) >> shift_bits; } - - // For 32-bit systems any 2 bytes aligned pointer can be encoded as smi - // with a plain reinterpret_cast. - static const uintptr_t kEncodablePointerMask = 0x1; - static const int kPointerToSmiShift = 0; }; // Smi constants for 64-bit systems. template <> struct SmiTagging<8> { static const int kSmiShiftSize = 31; static const int kSmiValueSize = 32; - static inline int SmiToInt(internal::Object* value) { + V8_INLINE(static int SmiToInt(internal::Object* value)) { int shift_bits = kSmiTagSize + kSmiShiftSize; // Shift down and throw away top 32 bits. 
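To make the index-based replacement for Context::SetData/GetData concrete, a small sketch (not from the patch; index 0 is avoided because of its special meaning for the debugger, and the raw pointer must be at least 2-byte aligned):

    void UseEmbedderData(v8::Handle<v8::Context> context) {
      v8::HandleScope handle_scope;
      v8::Context::Scope context_scope(context);
      context->SetEmbedderData(1, v8::String::New("per-context state"));
      v8::Local<v8::Value> state = context->GetEmbedderData(1);

      static int native_state = 42;  // static storage is suitably aligned
      context->SetAlignedPointerInEmbedderData(2, &native_state);
      int* raw = static_cast<int*>(context->GetAlignedPointerFromEmbedderData(2));
      (void)state; (void)raw;
    }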
return static_cast(reinterpret_cast(value) >> shift_bits); } - - // To maximize the range of pointers that can be encoded - // in the available 32 bits, we require them to be 8 bytes aligned. - // This gives 2 ^ (32 + 3) = 32G address space covered. - // It might be not enough to cover stack allocated objects on some platforms. - static const int kPointerAlignment = 3; - - static const uintptr_t kEncodablePointerMask = - ~(uintptr_t(0xffffffff) << kPointerAlignment); - - static const int kPointerToSmiShift = - kSmiTagSize + kSmiShiftSize - kPointerAlignment; }; typedef SmiTagging PlatformSmiTagging; const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize; const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize; -const uintptr_t kEncodablePointerMask = - PlatformSmiTagging::kEncodablePointerMask; -const int kPointerToSmiShift = PlatformSmiTagging::kPointerToSmiShift; /** * This class exports constants and functionality from within v8 that @@ -4032,8 +4175,13 @@ class Internals { static const int kOddballKindOffset = 3 * kApiPointerSize; static const int kForeignAddressOffset = kApiPointerSize; static const int kJSObjectHeaderSize = 3 * kApiPointerSize; + static const int kFixedArrayHeaderSize = 2 * kApiPointerSize; + static const int kContextHeaderSize = 2 * kApiPointerSize; + static const int kContextEmbedderDataIndex = 54; static const int kFullStringRepresentationMask = 0x07; + static const int kStringEncodingMask = 0x4; static const int kExternalTwoByteRepresentationTag = 0x02; + static const int kExternalAsciiRepresentationTag = 0x06; static const int kIsolateStateOffset = 0; static const int kIsolateEmbedderDataOffset = 1 * kApiPointerSize; @@ -4042,7 +4190,7 @@ class Internals { static const int kNullValueRootIndex = 7; static const int kTrueValueRootIndex = 8; static const int kFalseValueRootIndex = 9; - static const int kEmptySymbolRootIndex = 116; + static const int kEmptySymbolRootIndex = 119; static const int kJSObjectType = 0xaa; static const int kFirstNonstringType = 0x80; @@ -4052,85 +4200,80 @@ class Internals { static const int kUndefinedOddballKind = 5; static const int kNullOddballKind = 3; - static inline bool HasHeapObjectTag(internal::Object* value) { + V8_INLINE(static bool HasHeapObjectTag(internal::Object* value)) { return ((reinterpret_cast(value) & kHeapObjectTagMask) == kHeapObjectTag); } - static inline bool HasSmiTag(internal::Object* value) { - return ((reinterpret_cast(value) & kSmiTagMask) == kSmiTag); - } - - static inline int SmiValue(internal::Object* value) { + V8_INLINE(static int SmiValue(internal::Object* value)) { return PlatformSmiTagging::SmiToInt(value); } - static inline int GetInstanceType(internal::Object* obj) { + V8_INLINE(static int GetInstanceType(internal::Object* obj)) { typedef internal::Object O; O* map = ReadField(obj, kHeapObjectMapOffset); return ReadField(map, kMapInstanceTypeOffset); } - static inline int GetOddballKind(internal::Object* obj) { + V8_INLINE(static int GetOddballKind(internal::Object* obj)) { typedef internal::Object O; return SmiValue(ReadField(obj, kOddballKindOffset)); } - static inline void* GetExternalPointerFromSmi(internal::Object* value) { - const uintptr_t address = reinterpret_cast(value); - return reinterpret_cast(address >> kPointerToSmiShift); - } - - static inline void* GetExternalPointer(internal::Object* obj) { - if (HasSmiTag(obj)) { - return GetExternalPointerFromSmi(obj); - } else if (GetInstanceType(obj) == kForeignType) { - return ReadField(obj, kForeignAddressOffset); - } else { - return 
NULL; - } - } - - static inline bool IsExternalTwoByteString(int instance_type) { + V8_INLINE(static bool IsExternalTwoByteString(int instance_type)) { int representation = (instance_type & kFullStringRepresentationMask); return representation == kExternalTwoByteRepresentationTag; } - static inline bool IsInitialized(v8::Isolate* isolate) { + V8_INLINE(static bool IsInitialized(v8::Isolate* isolate)) { uint8_t* addr = reinterpret_cast(isolate) + kIsolateStateOffset; return *reinterpret_cast(addr) == 1; } - static inline void SetEmbedderData(v8::Isolate* isolate, void* data) { + V8_INLINE(static void SetEmbedderData(v8::Isolate* isolate, void* data)) { uint8_t* addr = reinterpret_cast(isolate) + kIsolateEmbedderDataOffset; *reinterpret_cast(addr) = data; } - static inline void* GetEmbedderData(v8::Isolate* isolate) { + V8_INLINE(static void* GetEmbedderData(v8::Isolate* isolate)) { uint8_t* addr = reinterpret_cast(isolate) + kIsolateEmbedderDataOffset; return *reinterpret_cast(addr); } - static inline internal::Object** GetRoot(v8::Isolate* isolate, int index) { + V8_INLINE(static internal::Object** GetRoot(v8::Isolate* isolate, + int index)) { uint8_t* addr = reinterpret_cast(isolate) + kIsolateRootsOffset; return reinterpret_cast(addr + index * kApiPointerSize); } template - static inline T ReadField(Object* ptr, int offset) { + V8_INLINE(static T ReadField(Object* ptr, int offset)) { uint8_t* addr = reinterpret_cast(ptr) + offset - kHeapObjectTag; return *reinterpret_cast(addr); } - static inline bool CanCastToHeapObject(void* o) { return false; } - static inline bool CanCastToHeapObject(Context* o) { return true; } - static inline bool CanCastToHeapObject(String* o) { return true; } - static inline bool CanCastToHeapObject(Object* o) { return true; } - static inline bool CanCastToHeapObject(Message* o) { return true; } - static inline bool CanCastToHeapObject(StackTrace* o) { return true; } - static inline bool CanCastToHeapObject(StackFrame* o) { return true; } + template + V8_INLINE(static T ReadEmbedderData(Context* context, int index)) { + typedef internal::Object O; + typedef internal::Internals I; + O* ctx = *reinterpret_cast(context); + int embedder_data_offset = I::kContextHeaderSize + + (internal::kApiPointerSize * I::kContextEmbedderDataIndex); + O* embedder_data = I::ReadField(ctx, embedder_data_offset); + int value_offset = + I::kFixedArrayHeaderSize + (internal::kApiPointerSize * index); + return I::ReadField(embedder_data, value_offset); + } + + V8_INLINE(static bool CanCastToHeapObject(void* o)) { return false; } + V8_INLINE(static bool CanCastToHeapObject(Context* o)) { return true; } + V8_INLINE(static bool CanCastToHeapObject(String* o)) { return true; } + V8_INLINE(static bool CanCastToHeapObject(Object* o)) { return true; } + V8_INLINE(static bool CanCastToHeapObject(Message* o)) { return true; } + V8_INLINE(static bool CanCastToHeapObject(StackTrace* o)) { return true; } + V8_INLINE(static bool CanCastToHeapObject(StackFrame* o)) { return true; } }; } // namespace internal @@ -4153,6 +4296,16 @@ Local Local::New(Handle that) { } +template + Local Local::New(Isolate* isolate, Handle that) { + if (that.IsEmpty()) return Local(); + T* that_ptr = *that; + internal::Object** p = reinterpret_cast(that_ptr); + return Local(reinterpret_cast(HandleScope::CreateHandle( + reinterpret_cast(isolate), *p))); +} + + template Persistent Persistent::New(Handle that) { if (that.IsEmpty()) return Persistent(); @@ -4161,6 +4314,21 @@ Persistent Persistent::New(Handle that) { } 
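The Isolate-taking overloads introduced in this hunk (Local<T>::New(isolate, handle) above, plus the Persistent<T> variants that follow) let an embedder skip the implicit Isolate::Current() lookup. A rough sketch, with OnWeak and the calling context purely illustrative and an entered context assumed:

    static void OnWeak(v8::Persistent<v8::Value> object, void* parameter) {
      object.Dispose();  // typical cleanup once the wrapper is only weakly reachable
    }

    void TrackObject(v8::Isolate* isolate) {
      v8::HandleScope handle_scope;
      v8::Persistent<v8::Object> wrapper =
          v8::Persistent<v8::Object>::New(v8::Object::New());
      wrapper.MakeWeak(isolate, NULL, OnWeak);
      wrapper.MarkIndependent(isolate);
      // Re-materialize a Local without an Isolate::Current() call:
      v8::Local<v8::Object> local = v8::Local<v8::Object>::New(isolate, wrapper);
      (void)local;
    }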
+template +bool Persistent::IsIndependent() const { + if (this->IsEmpty()) return false; + return V8::IsGlobalIndependent(reinterpret_cast(**this)); +} + + +template +bool Persistent::IsIndependent(Isolate* isolate) const { + if (this->IsEmpty()) return false; + return V8::IsGlobalIndependent(reinterpret_cast(isolate), + reinterpret_cast(**this)); +} + + template bool Persistent::IsNearDeath() const { if (this->IsEmpty()) return false; @@ -4175,6 +4343,14 @@ bool Persistent::IsWeak() const { } +template +bool Persistent::IsWeak(Isolate* isolate) const { + if (this->IsEmpty()) return false; + return V8::IsGlobalWeak(reinterpret_cast(isolate), + reinterpret_cast(**this)); +} + + template void Persistent::Dispose() { if (this->IsEmpty()) return; @@ -4182,6 +4358,14 @@ void Persistent::Dispose() { } +template +void Persistent::Dispose(Isolate* isolate) { + if (this->IsEmpty()) return; + V8::DisposeGlobal(reinterpret_cast(isolate), + reinterpret_cast(**this)); +} + + template Persistent::Persistent() : Handle() { } @@ -4192,6 +4376,15 @@ void Persistent::MakeWeak(void* parameters, WeakReferenceCallback callback) { callback); } +template +void Persistent::MakeWeak(Isolate* isolate, void* parameters, + WeakReferenceCallback callback) { + V8::MakeWeak(reinterpret_cast(isolate), + reinterpret_cast(**this), + parameters, + callback); +} + template void Persistent::ClearWeak() { V8::ClearWeak(reinterpret_cast(**this)); @@ -4202,11 +4395,33 @@ void Persistent::MarkIndependent() { V8::MarkIndependent(reinterpret_cast(**this)); } +template +void Persistent::MarkIndependent(Isolate* isolate) { + V8::MarkIndependent(reinterpret_cast(isolate), + reinterpret_cast(**this)); +} + +template +void Persistent::MarkPartiallyDependent() { + V8::MarkPartiallyDependent(reinterpret_cast(**this)); +} + +template +void Persistent::MarkPartiallyDependent(Isolate* isolate) { + V8::MarkPartiallyDependent(reinterpret_cast(isolate), + reinterpret_cast(**this)); +} + template void Persistent::SetWrapperClassId(uint16_t class_id) { V8::SetWrapperClassId(reinterpret_cast(**this), class_id); } +template +uint16_t Persistent::WrapperClassId() const { + return V8::GetWrapperClassId(reinterpret_cast(**this)); +} + Arguments::Arguments(internal::Object** implicit_args, internal::Object** values, int length, bool is_construct_call) @@ -4293,63 +4508,40 @@ void Template::Set(const char* name, v8::Handle value) { Local Object::GetInternalField(int index) { #ifndef V8_ENABLE_CHECKS - Local quick_result = UncheckedGetInternalField(index); - if (!quick_result.IsEmpty()) return quick_result; -#endif - return CheckedGetInternalField(index); -} - - -Local Object::UncheckedGetInternalField(int index) { typedef internal::Object O; typedef internal::Internals I; O* obj = *reinterpret_cast(this); + // Fast path: If the object is a plain JSObject, which is the common case, we + // know where to find the internal fields and can return the value directly. if (I::GetInstanceType(obj) == I::kJSObjectType) { - // If the object is a plain JSObject, which is the common case, - // we know where to find the internal fields and can return the - // value directly. 
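The aligned-pointer internal field accessors wired up just below can be exercised like this (a sketch under the usual assumptions: entered context, an internal field reserved on the template, and a pointer that is at least 2-byte aligned):

    void AttachNativeState() {
      v8::HandleScope handle_scope;
      v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
      templ->SetInternalFieldCount(1);
      v8::Local<v8::Object> obj = templ->NewInstance();

      static int native_state = 7;  // outlives the object and is aligned
      obj->SetAlignedPointerInInternalField(0, &native_state);
      int* back = static_cast<int*>(obj->GetAlignedPointerFromInternalField(0));
      (void)back;
    }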
int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index); O* value = I::ReadField(obj, offset); O** result = HandleScope::CreateHandle(value); return Local(reinterpret_cast(result)); - } else { - return Local(); } -} - - -void* External::Unwrap(Handle obj) { -#ifdef V8_ENABLE_CHECKS - return FullUnwrap(obj); -#else - return QuickUnwrap(obj); #endif + return SlowGetInternalField(index); } -void* External::QuickUnwrap(Handle wrapper) { - typedef internal::Object O; - O* obj = *reinterpret_cast(const_cast(*wrapper)); - return internal::Internals::GetExternalPointer(obj); +void Object::SetPointerInInternalField(int index, void* value) { + SetInternalField(index, External::New(value)); } -void* Object::GetPointerFromInternalField(int index) { +void* Object::GetAlignedPointerFromInternalField(int index) { +#ifndef V8_ENABLE_CHECKS typedef internal::Object O; typedef internal::Internals I; - O* obj = *reinterpret_cast(this); - + // Fast path: If the object is a plain JSObject, which is the common case, we + // know where to find the internal fields and can return the value directly. if (I::GetInstanceType(obj) == I::kJSObjectType) { - // If the object is a plain JSObject, which is the common case, - // we know where to find the internal fields and can return the - // value directly. int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index); - O* value = I::ReadField(obj, offset); - return I::GetExternalPointer(value); + return I::ReadField(obj, offset); } - - return SlowGetPointerFromInternalField(index); +#endif + return SlowGetAlignedPointerFromInternalField(index); } @@ -4388,6 +4580,26 @@ String::ExternalStringResource* String::GetExternalStringResource() const { } +String::ExternalStringResourceBase* String::GetExternalStringResourceBase( + String::Encoding* encoding_out) const { + typedef internal::Object O; + typedef internal::Internals I; + O* obj = *reinterpret_cast(const_cast(this)); + int type = I::GetInstanceType(obj) & I::kFullStringRepresentationMask; + *encoding_out = static_cast(type & I::kStringEncodingMask); + ExternalStringResourceBase* resource = NULL; + if (type == I::kExternalAsciiRepresentationTag || + type == I::kExternalTwoByteRepresentationTag) { + void* value = I::ReadField(obj, I::kStringResourceOffset); + resource = static_cast(value); + } +#ifdef V8_ENABLE_CHECKS + VerifyExternalStringResourceBase(resource, *encoding_out); +#endif + return resource; +} + + bool Value::IsUndefined() const { #ifdef V8_ENABLE_CHECKS return FullIsUndefined(); @@ -4521,6 +4733,16 @@ Function* Function::Cast(v8::Value* value) { } +Local External::Wrap(void* value) { + return External::New(value); +} + + +void* External::Unwrap(Handle obj) { + return External::Cast(*obj)->Value(); +} + + External* External::Cast(v8::Value* value) { #ifdef V8_ENABLE_CHECKS CheckCast(value); @@ -4597,6 +4819,37 @@ void* Isolate::GetData() { } +Local Context::GetData() { + return GetEmbedderData(0); +} + +void Context::SetData(Handle data) { + SetEmbedderData(0, data); +} + + +Local Context::GetEmbedderData(int index) { +#ifndef V8_ENABLE_CHECKS + typedef internal::Object O; + typedef internal::Internals I; + O** result = HandleScope::CreateHandle(I::ReadEmbedderData(this, index)); + return Local(reinterpret_cast(result)); +#else + return SlowGetEmbedderData(index); +#endif +} + + +void* Context::GetAlignedPointerFromEmbedderData(int index) { +#ifndef V8_ENABLE_CHECKS + typedef internal::Internals I; + return I::ReadEmbedderData(this, index); +#else + return 
SlowGetAlignedPointerFromEmbedderData(index); +#endif +} + + /** * \example shell.cc * A simple shell that takes a list of expressions on the diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc index 821ef75a76..62f404554b 100644 --- a/deps/v8/samples/shell.cc +++ b/deps/v8/samples/shell.cc @@ -72,7 +72,7 @@ int main(int argc, char* argv[]) { v8::HandleScope handle_scope; v8::Persistent context = CreateShellContext(); if (context.IsEmpty()) { - printf("Error creating context\n"); + fprintf(stderr, "Error creating context\n"); return 1; } context->Enter(); @@ -226,7 +226,8 @@ int RunMain(int argc, char* argv[]) { // alone JavaScript engines. continue; } else if (strncmp(str, "--", 2) == 0) { - printf("Warning: unknown flag %s.\nTry --help for options\n", str); + fprintf(stderr, + "Warning: unknown flag %s.\nTry --help for options\n", str); } else if (strcmp(str, "-e") == 0 && i + 1 < argc) { // Execute argument given to -e option directly. v8::Handle file_name = v8::String::New("unnamed"); @@ -237,7 +238,7 @@ int RunMain(int argc, char* argv[]) { v8::Handle file_name = v8::String::New(str); v8::Handle source = ReadFile(str); if (source.IsEmpty()) { - printf("Error reading '%s'\n", str); + fprintf(stderr, "Error reading '%s'\n", str); continue; } if (!ExecuteString(source, file_name, false, true)) return 1; @@ -249,20 +250,20 @@ int RunMain(int argc, char* argv[]) { // The read-eval-execute loop of the shell. void RunShell(v8::Handle context) { - printf("V8 version %s [sample shell]\n", v8::V8::GetVersion()); + fprintf(stderr, "V8 version %s [sample shell]\n", v8::V8::GetVersion()); static const int kBufferSize = 256; // Enter the execution environment before evaluating any code. v8::Context::Scope context_scope(context); v8::Local name(v8::String::New("(shell)")); while (true) { char buffer[kBufferSize]; - printf("> "); + fprintf(stderr, "> "); char* str = fgets(buffer, kBufferSize, stdin); if (str == NULL) break; v8::HandleScope handle_scope; ExecuteString(v8::String::New(str), name, true, true); } - printf("\n"); + fprintf(stderr, "\n"); } @@ -310,31 +311,31 @@ void ReportException(v8::TryCatch* try_catch) { if (message.IsEmpty()) { // V8 didn't provide any extra information about this error; just // print the exception. - printf("%s\n", exception_string); + fprintf(stderr, "%s\n", exception_string); } else { // Print (filename):(line number): (message). v8::String::Utf8Value filename(message->GetScriptResourceName()); const char* filename_string = ToCString(filename); int linenum = message->GetLineNumber(); - printf("%s:%i: %s\n", filename_string, linenum, exception_string); + fprintf(stderr, "%s:%i: %s\n", filename_string, linenum, exception_string); // Print line of source code. v8::String::Utf8Value sourceline(message->GetSourceLine()); const char* sourceline_string = ToCString(sourceline); - printf("%s\n", sourceline_string); + fprintf(stderr, "%s\n", sourceline_string); // Print wavy underline (GetUnderline is deprecated). 
int start = message->GetStartColumn(); for (int i = 0; i < start; i++) { - printf(" "); + fprintf(stderr, " "); } int end = message->GetEndColumn(); for (int i = start; i < end; i++) { - printf("^"); + fprintf(stderr, "^"); } - printf("\n"); + fprintf(stderr, "\n"); v8::String::Utf8Value stack_trace(try_catch->StackTrace()); if (stack_trace.length() > 0) { const char* stack_trace_string = ToCString(stack_trace); - printf("%s\n", stack_trace_string); + fprintf(stderr, "%s\n", stack_trace_string); } } } diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc index 9da6141c5b..efcaf8f294 100644 --- a/deps/v8/src/accessors.cc +++ b/deps/v8/src/accessors.cc @@ -42,15 +42,11 @@ namespace internal { template -static C* FindInPrototypeChain(Object* obj, bool* found_it) { - ASSERT(!*found_it); - Heap* heap = HEAP; - while (!Is(obj)) { - if (obj == heap->null_value()) return NULL; - obj = obj->GetPrototype(); +static C* FindInstanceOf(Object* obj) { + for (Object* cur = obj; !cur->IsNull(); cur = cur->GetPrototype()) { + if (Is(cur)) return C::cast(cur); } - *found_it = true; - return C::cast(obj); + return NULL; } @@ -81,10 +77,8 @@ MaybeObject* Accessors::ReadOnlySetAccessor(JSObject*, Object* value, void*) { MaybeObject* Accessors::ArrayGetLength(Object* object, void*) { // Traverse the prototype chain until we reach an array. - bool found_it = false; - JSArray* holder = FindInPrototypeChain(object, &found_it); - if (!found_it) return Smi::FromInt(0); - return holder->length(); + JSArray* holder = FindInstanceOf(object); + return holder == NULL ? Smi::FromInt(0) : holder->length(); } @@ -118,7 +112,7 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) { HandleScope scope(isolate); // Protect raw pointers. - Handle object_handle(object, isolate); + Handle array_handle(JSArray::cast(object), isolate); Handle value_handle(value, isolate); bool has_exception; @@ -128,7 +122,7 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) { if (has_exception) return Failure::Exception(); if (uint32_v->Number() == number_v->Number()) { - return Handle::cast(object_handle)->SetElementsLength(*uint32_v); + return array_handle->SetElementsLength(*uint32_v); } return isolate->Throw( *isolate->factory()->NewRangeError("invalid_array_length", @@ -448,15 +442,12 @@ const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = { MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) { Heap* heap = Isolate::Current()->heap(); - bool found_it = false; - JSFunction* function = FindInPrototypeChain(object, &found_it); - if (!found_it) return heap->undefined_value(); + JSFunction* function = FindInstanceOf(object); + if (function == NULL) return heap->undefined_value(); while (!function->should_have_prototype()) { - found_it = false; - function = FindInPrototypeChain(object->GetPrototype(), - &found_it); + function = FindInstanceOf(function->GetPrototype()); // There has to be one because we hit the getter. 
- ASSERT(found_it); + ASSERT(function != NULL); } if (!function->has_prototype()) { @@ -474,25 +465,46 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) { MaybeObject* Accessors::FunctionSetPrototype(JSObject* object, - Object* value, + Object* value_raw, void*) { - Heap* heap = object->GetHeap(); - bool found_it = false; - JSFunction* function = FindInPrototypeChain(object, &found_it); - if (!found_it) return heap->undefined_value(); - if (!function->should_have_prototype()) { + Isolate* isolate = object->GetIsolate(); + Heap* heap = isolate->heap(); + JSFunction* function_raw = FindInstanceOf(object); + if (function_raw == NULL) return heap->undefined_value(); + if (!function_raw->should_have_prototype()) { // Since we hit this accessor, object will have no prototype property. return object->SetLocalPropertyIgnoreAttributes(heap->prototype_symbol(), - value, + value_raw, NONE); } - Object* prototype; - { MaybeObject* maybe_prototype = function->SetPrototype(value); - if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype; + HandleScope scope(isolate); + Handle function(function_raw, isolate); + Handle value(value_raw, isolate); + + Handle old_value; + bool is_observed = + FLAG_harmony_observation && + *function == object && + function->map()->is_observed(); + if (is_observed) { + if (function->has_prototype()) + old_value = handle(function->prototype(), isolate); + else + old_value = isolate->factory()->NewFunctionPrototype(function); + } + + Handle result; + MaybeObject* maybe_result = function->SetPrototype(*value); + if (!maybe_result->ToHandle(&result, isolate)) return maybe_result; + ASSERT(function->prototype() == *value); + + if (is_observed && !old_value->SameValue(*value)) { + JSObject::EnqueueChangeRecord( + function, "updated", isolate->factory()->prototype_symbol(), old_value); } - ASSERT(function->prototype() == value); - return function; + + return *function; } @@ -509,22 +521,20 @@ const AccessorDescriptor Accessors::FunctionPrototype = { MaybeObject* Accessors::FunctionGetLength(Object* object, void*) { - bool found_it = false; - JSFunction* function = FindInPrototypeChain(object, &found_it); - if (!found_it) return Smi::FromInt(0); + JSFunction* function = FindInstanceOf(object); + if (function == NULL) return Smi::FromInt(0); // Check if already compiled. - if (!function->shared()->is_compiled()) { - // If the function isn't compiled yet, the length is not computed - // correctly yet. Compile it now and return the right length. - HandleScope scope; - Handle handle(function); - if (!JSFunction::CompileLazy(handle, KEEP_EXCEPTION)) { - return Failure::Exception(); - } - return Smi::FromInt(handle->shared()->length()); - } else { + if (function->shared()->is_compiled()) { return Smi::FromInt(function->shared()->length()); } + // If the function isn't compiled yet, the length is not computed correctly + // yet. Compile it now and return the right length. + HandleScope scope; + Handle handle(function); + if (JSFunction::CompileLazy(handle, KEEP_EXCEPTION)) { + return Smi::FromInt(handle->shared()->length()); + } + return Failure::Exception(); } @@ -541,10 +551,8 @@ const AccessorDescriptor Accessors::FunctionLength = { MaybeObject* Accessors::FunctionGetName(Object* object, void*) { - bool found_it = false; - JSFunction* holder = FindInPrototypeChain(object, &found_it); - if (!found_it) return HEAP->undefined_value(); - return holder->shared()->name(); + JSFunction* holder = FindInstanceOf(object); + return holder == NULL ? 
HEAP->undefined_value() : holder->shared()->name(); } @@ -589,9 +597,8 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction( MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) { Isolate* isolate = Isolate::Current(); HandleScope scope(isolate); - bool found_it = false; - JSFunction* holder = FindInPrototypeChain(object, &found_it); - if (!found_it) return isolate->heap()->undefined_value(); + JSFunction* holder = FindInstanceOf(object); + if (holder == NULL) return isolate->heap()->undefined_value(); Handle function(holder, isolate); if (function->shared()->native()) return isolate->heap()->null_value(); @@ -664,19 +671,6 @@ const AccessorDescriptor Accessors::FunctionArguments = { // -static MaybeObject* CheckNonStrictCallerOrThrow( - Isolate* isolate, - JSFunction* caller) { - DisableAssertNoAllocation enable_allocation; - if (!caller->shared()->is_classic_mode()) { - return isolate->Throw( - *isolate->factory()->NewTypeError("strict_caller", - HandleVector(NULL, 0))); - } - return caller; -} - - class FrameFunctionIterator { public: FrameFunctionIterator(Isolate* isolate, const AssertNoAllocation& promise) @@ -727,9 +721,8 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) { Isolate* isolate = Isolate::Current(); HandleScope scope(isolate); AssertNoAllocation no_alloc; - bool found_it = false; - JSFunction* holder = FindInPrototypeChain(object, &found_it); - if (!found_it) return isolate->heap()->undefined_value(); + JSFunction* holder = FindInstanceOf(object); + if (holder == NULL) return isolate->heap()->undefined_value(); if (holder->shared()->native()) return isolate->heap()->null_value(); Handle function(holder, isolate); @@ -764,7 +757,14 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) { if (caller->shared()->bound()) { return isolate->heap()->null_value(); } - return CheckNonStrictCallerOrThrow(isolate, caller); + // Censor if the caller is not a classic mode function. + // Change from ES5, which used to throw, see: + // https://bugs.ecmascript.org/show_bug.cgi?id=310 + if (!caller->shared()->is_classic_mode()) { + return isolate->heap()->null_value(); + } + + return caller; } @@ -780,7 +780,7 @@ const AccessorDescriptor Accessors::FunctionCaller = { // -MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) { +static inline Object* GetPrototypeSkipHiddenPrototypes(Object* receiver) { Object* current = receiver->GetPrototype(); while (current->IsJSObject() && JSObject::cast(current)->map()->is_hidden_prototype()) { @@ -790,12 +790,36 @@ MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) { } -MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver, - Object* value, +MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) { + return GetPrototypeSkipHiddenPrototypes(receiver); +} + + +MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver_raw, + Object* value_raw, void*) { - const bool skip_hidden_prototypes = true; + const bool kSkipHiddenPrototypes = true; // To be consistent with other Set functions, return the value. 
- return receiver->SetPrototype(value, skip_hidden_prototypes); + if (!(FLAG_harmony_observation && receiver_raw->map()->is_observed())) + return receiver_raw->SetPrototype(value_raw, kSkipHiddenPrototypes); + + Isolate* isolate = receiver_raw->GetIsolate(); + HandleScope scope(isolate); + Handle receiver(receiver_raw); + Handle value(value_raw); + Handle old_value(GetPrototypeSkipHiddenPrototypes(*receiver)); + + MaybeObject* result = receiver->SetPrototype(*value, kSkipHiddenPrototypes); + Handle hresult; + if (!result->ToHandle(&hresult, isolate)) return result; + + Handle new_value(GetPrototypeSkipHiddenPrototypes(*receiver)); + if (!new_value->SameValue(*old_value)) { + JSObject::EnqueueChangeRecord(receiver, "prototype", + isolate->factory()->Proto_symbol(), + old_value); + } + return *hresult; } diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index dcbc894574..95e5340a5a 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -630,7 +630,16 @@ void V8::MakeWeak(i::Object** object, void* parameters, i::Isolate* isolate = i::Isolate::Current(); LOG_API(isolate, "MakeWeak"); isolate->global_handles()->MakeWeak(object, parameters, - callback); + callback); +} + + +void V8::MakeWeak(i::Isolate* isolate, i::Object** object, + void* parameters, WeakReferenceCallback callback) { + ASSERT(isolate == i::Isolate::Current()); + LOG_API(isolate, "MakeWeak"); + isolate->global_handles()->MakeWeak(object, parameters, + callback); } @@ -643,11 +652,48 @@ void V8::ClearWeak(i::Object** obj) { void V8::MarkIndependent(i::Object** object) { i::Isolate* isolate = i::Isolate::Current(); - LOG_API(isolate, "MakeIndependent"); + LOG_API(isolate, "MarkIndependent"); isolate->global_handles()->MarkIndependent(object); } +void V8::MarkIndependent(i::Isolate* isolate, i::Object** object) { + ASSERT(isolate == i::Isolate::Current()); + LOG_API(isolate, "MarkIndependent"); + isolate->global_handles()->MarkIndependent(object); +} + + +void V8::MarkPartiallyDependent(i::Object** object) { + i::Isolate* isolate = i::Isolate::Current(); + LOG_API(isolate, "MarkPartiallyDependent"); + isolate->global_handles()->MarkPartiallyDependent(object); +} + + +void V8::MarkPartiallyDependent(i::Isolate* isolate, i::Object** object) { + ASSERT(isolate == i::Isolate::Current()); + LOG_API(isolate, "MarkPartiallyDependent"); + isolate->global_handles()->MarkPartiallyDependent(object); +} + + +bool V8::IsGlobalIndependent(i::Object** obj) { + i::Isolate* isolate = i::Isolate::Current(); + LOG_API(isolate, "IsGlobalIndependent"); + if (!isolate->IsInitialized()) return false; + return i::GlobalHandles::IsIndependent(obj); +} + + +bool V8::IsGlobalIndependent(i::Isolate* isolate, i::Object** obj) { + ASSERT(isolate == i::Isolate::Current()); + LOG_API(isolate, "IsGlobalIndependent"); + if (!isolate->IsInitialized()) return false; + return i::GlobalHandles::IsIndependent(obj); +} + + bool V8::IsGlobalNearDeath(i::Object** obj) { i::Isolate* isolate = i::Isolate::Current(); LOG_API(isolate, "IsGlobalNearDeath"); @@ -664,6 +710,14 @@ bool V8::IsGlobalWeak(i::Object** obj) { } +bool V8::IsGlobalWeak(i::Isolate* isolate, i::Object** obj) { + ASSERT(isolate == i::Isolate::Current()); + LOG_API(isolate, "IsGlobalWeak"); + if (!isolate->IsInitialized()) return false; + return i::GlobalHandles::IsWeak(obj); +} + + void V8::DisposeGlobal(i::Object** obj) { i::Isolate* isolate = i::Isolate::Current(); LOG_API(isolate, "DisposeGlobal"); @@ -671,6 +725,14 @@ void V8::DisposeGlobal(i::Object** obj) { 
isolate->global_handles()->Destroy(obj); } + +void V8::DisposeGlobal(i::Isolate* isolate, i::Object** obj) { + ASSERT(isolate == i::Isolate::Current()); + LOG_API(isolate, "DisposeGlobal"); + if (!isolate->IsInitialized()) return; + isolate->global_handles()->Destroy(obj); +} + // --- H a n d l e s --- @@ -724,6 +786,12 @@ i::Object** HandleScope::CreateHandle(i::Object* value) { } +i::Object** HandleScope::CreateHandle(i::Isolate* isolate, i::Object* value) { + ASSERT(isolate == i::Isolate::Current()); + return i::HandleScope::CreateHandle(value, isolate); +} + + i::Object** HandleScope::CreateHandle(i::HeapObject* value) { ASSERT(value->IsHeapObject()); return reinterpret_cast( @@ -765,36 +833,77 @@ void Context::Exit() { } -void Context::SetData(v8::Handle data) { - i::Handle env = Utils::OpenHandle(this); - i::Isolate* isolate = env->GetIsolate(); - if (IsDeadCheck(isolate, "v8::Context::SetData()")) return; - i::Handle raw_data = Utils::OpenHandle(*data); - ASSERT(env->IsNativeContext()); - if (env->IsNativeContext()) { - env->set_data(*raw_data); - } +static void* DecodeSmiToAligned(i::Object* value, const char* location) { + ApiCheck(value->IsSmi(), location, "Not a Smi"); + return reinterpret_cast(value); } -v8::Local Context::GetData() { - i::Handle env = Utils::OpenHandle(this); - i::Isolate* isolate = env->GetIsolate(); - if (IsDeadCheck(isolate, "v8::Context::GetData()")) { - return v8::Local(); - } - i::Object* raw_result = NULL; - ASSERT(env->IsNativeContext()); - if (env->IsNativeContext()) { - raw_result = env->data(); - } else { - return Local(); +static i::Smi* EncodeAlignedAsSmi(void* value, const char* location) { + i::Smi* smi = reinterpret_cast(value); + ApiCheck(smi->IsSmi(), location, "Pointer is not aligned"); + return smi; +} + + +static i::Handle EmbedderDataFor(Context* context, + int index, + bool can_grow, + const char* location) { + i::Handle env = Utils::OpenHandle(context); + bool ok = !IsDeadCheck(env->GetIsolate(), location) && + ApiCheck(env->IsNativeContext(), location, "Not a native context") && + ApiCheck(index >= 0, location, "Negative index"); + if (!ok) return i::Handle(); + i::Handle data(env->embedder_data()); + if (index < data->length()) return data; + if (!can_grow) { + Utils::ReportApiFailure(location, "Index too large"); + return i::Handle(); } - i::Handle result(raw_result, isolate); + int new_size = i::Max(index, data->length() << 1) + 1; + data = env->GetIsolate()->factory()->CopySizeFixedArray(data, new_size); + env->set_embedder_data(*data); + return data; +} + + +v8::Local Context::SlowGetEmbedderData(int index) { + const char* location = "v8::Context::GetEmbedderData()"; + i::Handle data = EmbedderDataFor(this, index, false, location); + if (data.is_null()) return Local(); + i::Handle result(data->get(index), data->GetIsolate()); return Utils::ToLocal(result); } +void Context::SetEmbedderData(int index, v8::Handle value) { + const char* location = "v8::Context::SetEmbedderData()"; + i::Handle data = EmbedderDataFor(this, index, true, location); + if (data.is_null()) return; + i::Handle val = Utils::OpenHandle(*value); + data->set(index, *val); + ASSERT_EQ(*Utils::OpenHandle(*value), + *Utils::OpenHandle(*GetEmbedderData(index))); +} + + +void* Context::SlowGetAlignedPointerFromEmbedderData(int index) { + const char* location = "v8::Context::GetAlignedPointerFromEmbedderData()"; + i::Handle data = EmbedderDataFor(this, index, false, location); + if (data.is_null()) return NULL; + return DecodeSmiToAligned(data->get(index), location); 
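A side note on EncodeAlignedAsSmi/DecodeSmiToAligned above (a reading of the code, not text from the patch): the pointer is stored verbatim in a slot that normally holds a Smi, so only its low tag bit has to be clear. Assuming <stdint.h>:

    // The condition ApiCheck enforces in EncodeAlignedAsSmi, spelled out:
    bool CanStoreAsAlignedPointer(void* value) {
      uintptr_t bits = reinterpret_cast<uintptr_t>(value);
      return (bits & 1) == 0;  // Smi tag bit clear, i.e. at least 2-byte aligned
    }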
+} + + +void Context::SetAlignedPointerInEmbedderData(int index, void* value) { + const char* location = "v8::Context::SetAlignedPointerInEmbedderData()"; + i::Handle data = EmbedderDataFor(this, index, true, location); + data->set(index, EncodeAlignedAsSmi(value, location)); + ASSERT_EQ(value, GetAlignedPointerFromEmbedderData(index)); +} + + i::Object** v8::HandleScope::RawClose(i::Object** value) { if (!ApiCheck(!is_closed_, "v8::HandleScope::Close()", @@ -816,7 +925,7 @@ i::Object** v8::HandleScope::RawClose(i::Object** value) { } // Allocate a new handle on the previous handle block. - i::Handle handle(result); + i::Handle handle(result, isolate_); return handle.location(); } @@ -1151,7 +1260,7 @@ void FunctionTemplate::SetHiddenPrototype(bool value) { void FunctionTemplate::ReadOnlyPrototype() { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetPrototypeAttributes()")) { + if (IsDeadCheck(isolate, "v8::FunctionTemplate::ReadOnlyPrototype()")) { return; } ENTER_V8(isolate); @@ -1595,6 +1704,8 @@ Local Script::Run() { ON_BAILOUT(isolate, "v8::Script::Run()", return Local()); LOG_API(isolate, "Script::Run"); ENTER_V8(isolate); + i::Logger::TimerEventScope timer_scope( + isolate, i::Logger::TimerEventScope::v8_execute); i::Object* raw_result = NULL; { i::HandleScope scope(isolate); @@ -2193,7 +2304,7 @@ bool Value::IsExternal() const { if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsExternal()")) { return false; } - return Utils::OpenHandle(this)->IsForeign(); + return Utils::OpenHandle(this)->IsExternal(); } @@ -2267,7 +2378,11 @@ static i::Object* LookupBuiltin(i::Isolate* isolate, static bool CheckConstructor(i::Isolate* isolate, i::Handle obj, const char* class_name) { - return obj->map()->constructor() == LookupBuiltin(isolate, class_name); + i::Object* constr = obj->map()->constructor(); + if (!constr->IsJSFunction()) return false; + i::JSFunction* func = i::JSFunction::cast(constr); + return func->shared()->native() && + constr == LookupBuiltin(isolate, class_name); } @@ -2422,8 +2537,7 @@ Local Value::ToInteger() const { void External::CheckCast(v8::Value* that) { if (IsDeadCheck(i::Isolate::Current(), "v8::External::Cast()")) return; - i::Handle obj = Utils::OpenHandle(that); - ApiCheck(obj->IsForeign(), + ApiCheck(Utils::OpenHandle(that)->IsExternal(), "v8::External::Cast()", "Could not convert to external"); } @@ -2768,6 +2882,7 @@ bool v8::Object::Set(v8::Handle key, v8::Handle value, i::Handle value_obj = Utils::OpenHandle(*value); EXCEPTION_PREAMBLE(isolate); i::Handle obj = i::SetProperty( + isolate, self, key_obj, value_obj, @@ -3322,7 +3437,7 @@ v8::Local v8::Object::GetHiddenValue(v8::Handle key) { i::Handle self = Utils::OpenHandle(this); i::Handle key_obj = Utils::OpenHandle(*key); i::Handle key_symbol = FACTORY->LookupSymbol(key_obj); - i::Handle result(self->GetHiddenProperty(*key_symbol)); + i::Handle result(self->GetHiddenProperty(*key_symbol), isolate); if (result->IsUndefined()) return v8::Local(); return Utils::ToLocal(result); } @@ -3559,6 +3674,8 @@ Local Object::CallAsFunction(v8::Handle recv, return Local()); LOG_API(isolate, "Object::CallAsFunction"); ENTER_V8(isolate); + i::Logger::TimerEventScope timer_scope( + isolate, i::Logger::TimerEventScope::v8_execute); i::HandleScope scope(isolate); i::Handle obj = Utils::OpenHandle(this); i::Handle recv_obj = Utils::OpenHandle(*recv); @@ -3590,6 +3707,8 @@ Local Object::CallAsConstructor(int argc, return Local()); LOG_API(isolate, 
"Object::CallAsConstructor"); ENTER_V8(isolate); + i::Logger::TimerEventScope timer_scope( + isolate, i::Logger::TimerEventScope::v8_execute); i::HandleScope scope(isolate); i::Handle obj = Utils::OpenHandle(this); STATIC_ASSERT(sizeof(v8::Handle) == sizeof(i::Object**)); @@ -3632,6 +3751,8 @@ Local Function::NewInstance(int argc, return Local()); LOG_API(isolate, "Function::NewInstance"); ENTER_V8(isolate); + i::Logger::TimerEventScope timer_scope( + isolate, i::Logger::TimerEventScope::v8_execute); HandleScope scope; i::Handle function = Utils::OpenHandle(this); STATIC_ASSERT(sizeof(v8::Handle) == sizeof(i::Object**)); @@ -3650,6 +3771,8 @@ Local Function::Call(v8::Handle recv, int argc, ON_BAILOUT(isolate, "v8::Function::Call()", return Local()); LOG_API(isolate, "Function::Call"); ENTER_V8(isolate); + i::Logger::TimerEventScope timer_scope( + isolate, i::Logger::TimerEventScope::v8_execute); i::Object* raw_result = NULL; { i::HandleScope scope(isolate); @@ -3693,8 +3816,9 @@ ScriptOrigin Function::GetScriptOrigin() const { i::Handle func = Utils::OpenHandle(this); if (func->shared()->script()->IsScript()) { i::Handle script(i::Script::cast(func->shared()->script())); + i::Handle scriptName = GetScriptNameOrSourceURL(script); v8::ScriptOrigin origin( - Utils::ToLocal(i::Handle(script->name())), + Utils::ToLocal(scriptName), v8::Integer::New(script->line_offset()->value()), v8::Integer::New(script->column_offset()->value())); return origin; @@ -3757,7 +3881,7 @@ static int RecursivelySerializeToUtf8(i::String* string, int32_t* last_character) { int utf8_bytes = 0; while (true) { - if (string->IsAsciiRepresentation()) { + if (string->IsOneByteRepresentation()) { i::String::WriteToFlat(string, buffer, start, end); *last_character = unibrow::Utf16::kNoPreviousCharacter; return utf8_bytes + end - start; @@ -3857,7 +3981,7 @@ int String::WriteUtf8(char* buffer, FlattenString(str); // Flatten the string for efficiency. } int string_length = str->length(); - if (str->IsAsciiRepresentation()) { + if (str->IsOneByteRepresentation()) { int len; if (capacity == -1) { capacity = str->length() + 1; @@ -3991,7 +4115,7 @@ int String::WriteAscii(char* buffer, FlattenString(str); // Flatten the string for efficiency. } - if (str->IsAsciiRepresentation()) { + if (str->IsOneByteRepresentation()) { // WriteToFlat is faster than using the StringInputBuffer. if (length == -1) length = str->length() + 1; int len = i::Min(length, str->length() - start); @@ -4089,6 +4213,29 @@ void v8::String::VerifyExternalStringResource( CHECK_EQ(expected, value); } +void v8::String::VerifyExternalStringResourceBase( + v8::String::ExternalStringResourceBase* value, Encoding encoding) const { + i::Handle str = Utils::OpenHandle(this); + const v8::String::ExternalStringResourceBase* expected; + Encoding expectedEncoding; + if (i::StringShape(*str).IsExternalAscii()) { + const void* resource = + i::Handle::cast(str)->resource(); + expected = reinterpret_cast(resource); + expectedEncoding = ASCII_ENCODING; + } else if (i::StringShape(*str).IsExternalTwoByte()) { + const void* resource = + i::Handle::cast(str)->resource(); + expected = reinterpret_cast(resource); + expectedEncoding = TWO_BYTE_ENCODING; + } else { + expected = NULL; + expectedEncoding = str->IsOneByteRepresentation() ? 
ASCII_ENCODING + : TWO_BYTE_ENCODING; + } + CHECK_EQ(expected, value); + CHECK_EQ(expectedEncoding, encoding); +} const v8::String::ExternalAsciiStringResource* v8::String::GetExternalAsciiStringResource() const { @@ -4163,75 +4310,65 @@ int v8::Object::InternalFieldCount() { } -Local v8::Object::CheckedGetInternalField(int index) { +static bool InternalFieldOK(i::Handle obj, + int index, + const char* location) { + return !IsDeadCheck(obj->GetIsolate(), location) && + ApiCheck(index < obj->GetInternalFieldCount(), + location, + "Internal field out of bounds"); +} + + +Local v8::Object::SlowGetInternalField(int index) { i::Handle obj = Utils::OpenHandle(this); - if (IsDeadCheck(obj->GetIsolate(), "v8::Object::GetInternalField()")) { - return Local(); - } - if (!ApiCheck(index < obj->GetInternalFieldCount(), - "v8::Object::GetInternalField()", - "Reading internal field out of bounds")) { - return Local(); - } - i::Handle value(obj->GetInternalField(index)); - Local result = Utils::ToLocal(value); -#ifdef DEBUG - Local unchecked = UncheckedGetInternalField(index); - ASSERT(unchecked.IsEmpty() || (unchecked == result)); -#endif - return result; + const char* location = "v8::Object::GetInternalField()"; + if (!InternalFieldOK(obj, index, location)) return Local(); + i::Handle value(obj->GetInternalField(index), obj->GetIsolate()); + return Utils::ToLocal(value); } void v8::Object::SetInternalField(int index, v8::Handle value) { i::Handle obj = Utils::OpenHandle(this); - i::Isolate* isolate = obj->GetIsolate(); - if (IsDeadCheck(isolate, "v8::Object::SetInternalField()")) { - return; - } - if (!ApiCheck(index < obj->GetInternalFieldCount(), - "v8::Object::SetInternalField()", - "Writing internal field out of bounds")) { - return; - } - ENTER_V8(isolate); + const char* location = "v8::Object::SetInternalField()"; + if (!InternalFieldOK(obj, index, location)) return; i::Handle val = Utils::OpenHandle(*value); obj->SetInternalField(index, *val); + ASSERT_EQ(value, GetInternalField(index)); } -static bool CanBeEncodedAsSmi(void* ptr) { - const uintptr_t address = reinterpret_cast(ptr); - return ((address & i::kEncodablePointerMask) == 0); +void* v8::Object::SlowGetAlignedPointerFromInternalField(int index) { + i::Handle obj = Utils::OpenHandle(this); + const char* location = "v8::Object::GetAlignedPointerFromInternalField()"; + if (!InternalFieldOK(obj, index, location)) return NULL; + return DecodeSmiToAligned(obj->GetInternalField(index), location); } -static i::Smi* EncodeAsSmi(void* ptr) { - ASSERT(CanBeEncodedAsSmi(ptr)); - const uintptr_t address = reinterpret_cast(ptr); - i::Smi* result = reinterpret_cast(address << i::kPointerToSmiShift); - ASSERT(i::Internals::HasSmiTag(result)); - ASSERT_EQ(result, i::Smi::FromInt(result->value())); - ASSERT_EQ(ptr, i::Internals::GetExternalPointerFromSmi(result)); - return result; +void v8::Object::SetAlignedPointerInInternalField(int index, void* value) { + i::Handle obj = Utils::OpenHandle(this); + const char* location = "v8::Object::SetAlignedPointerInInternalField()"; + if (!InternalFieldOK(obj, index, location)) return; + obj->SetInternalField(index, EncodeAlignedAsSmi(value, location)); + ASSERT_EQ(value, GetAlignedPointerFromInternalField(index)); } -void v8::Object::SetPointerInInternalField(int index, void* value) { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - ENTER_V8(isolate); - if (CanBeEncodedAsSmi(value)) { - Utils::OpenHandle(this)->SetInternalField(index, EncodeAsSmi(value)); - } else { - HandleScope scope; - i::Handle 
foreign = - isolate->factory()->NewForeign( - reinterpret_cast(value), i::TENURED); - if (!foreign.is_null()) { - Utils::OpenHandle(this)->SetInternalField(index, *foreign); - } - } - ASSERT_EQ(value, GetPointerFromInternalField(index)); +static void* ExternalValue(i::Object* obj) { + // Obscure semantics for undefined, but somehow checked in our unit tests... + if (obj->IsUndefined()) return NULL; + i::Object* foreign = i::JSObject::cast(obj)->GetInternalField(0); + return i::Foreign::cast(foreign)->foreign_address(); +} + + +void* Object::GetPointerFromInternalField(int index) { + i::Handle obj = Utils::OpenHandle(this); + const char* location = "v8::Object::GetPointerFromInternalField()"; + if (!InternalFieldOK(obj, index, location)) return NULL; + return ExternalValue(obj->GetInternalField(index)); } @@ -4286,6 +4423,7 @@ bool v8::V8::Dispose() { HeapStatistics::HeapStatistics(): total_heap_size_(0), total_heap_size_executable_(0), + total_physical_size_(0), used_heap_size_(0), heap_size_limit_(0) { } @@ -4295,6 +4433,7 @@ void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) { // Isolate is unitialized thus heap is not configured yet. heap_statistics->set_total_heap_size(0); heap_statistics->set_total_heap_size_executable(0); + heap_statistics->set_total_physical_size(0); heap_statistics->set_used_heap_size(0); heap_statistics->set_heap_size_limit(0); return; @@ -4304,6 +4443,7 @@ void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) { heap_statistics->set_total_heap_size(heap->CommittedMemory()); heap_statistics->set_total_heap_size_executable( heap->CommittedMemoryExecutable()); + heap_statistics->set_total_physical_size(heap->CommittedPhysicalMemory()); heap_statistics->set_used_heap_size(heap->SizeOfObjects()); heap_statistics->set_heap_size_limit(heap->MaxReserved()); } @@ -4316,6 +4456,30 @@ void v8::V8::VisitExternalResources(ExternalResourceVisitor* visitor) { } +void v8::V8::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) { + i::Isolate* isolate = i::Isolate::Current(); + IsDeadCheck(isolate, "v8::V8::VisitHandlesWithClassId"); + + i::AssertNoAllocation no_allocation; + + class VisitorAdapter : public i::ObjectVisitor { + public: + explicit VisitorAdapter(PersistentHandleVisitor* visitor) + : visitor_(visitor) {} + virtual void VisitPointers(i::Object** start, i::Object** end) { + UNREACHABLE(); + } + virtual void VisitEmbedderReference(i::Object** p, uint16_t class_id) { + visitor_->VisitPersistentHandle(ToApi(i::Handle(p)), + class_id); + } + private: + PersistentHandleVisitor* visitor_; + } visitor_adapter(visitor); + isolate->global_handles()->IterateAllRootsWithClassIds(&visitor_adapter); +} + + bool v8::V8::IdleNotification(int hint) { // Returning true tells the caller that it need not // continue to call IdleNotification. 
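For orientation, the embedder side of the new visitor API implemented above might look as follows (the class id constant and the counting are invented for the example; only handles that were given a class id via SetWrapperClassId are reported):

    class WrapperCounter : public v8::PersistentHandleVisitor {
     public:
      WrapperCounter() : count_(0) {}
      virtual void VisitPersistentHandle(v8::Persistent<v8::Value> value,
                                         uint16_t class_id) {
        if (class_id == 7) ++count_;  // 7: hypothetical embedder class id
      }
      int count_;
    };

    void CountWrappers() {
      WrapperCounter counter;
      v8::V8::VisitHandlesWithClassIds(&counter);
      fprintf(stderr, "wrappers with class id 7: %d\n", counter.count_);
    }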
@@ -4516,13 +4680,14 @@ v8::Local Context::GetCalling() { v8::Local Context::Global() { - if (IsDeadCheck(i::Isolate::Current(), "v8::Context::Global()")) { + i::Isolate* isolate = i::Isolate::Current(); + if (IsDeadCheck(isolate, "v8::Context::Global()")) { return Local(); } i::Object** ctx = reinterpret_cast(this); i::Handle context = i::Handle::cast(i::Handle(ctx)); - i::Handle global(context->global_proxy()); + i::Handle global(context->global_proxy(), isolate); return Utils::ToLocal(i::Handle::cast(global)); } @@ -4579,11 +4744,32 @@ bool Context::IsCodeGenerationFromStringsAllowed() { } +void Context::SetErrorMessageForCodeGenerationFromStrings( + Handle error) { + i::Isolate* isolate = i::Isolate::Current(); + if (IsDeadCheck(isolate, + "v8::Context::SetErrorMessageForCodeGenerationFromStrings()")) { + return; + } + ENTER_V8(isolate); + i::Object** ctx = reinterpret_cast(this); + i::Handle context = + i::Handle::cast(i::Handle(ctx)); + i::Handle error_handle = Utils::OpenHandle(*error); + context->set_error_message_for_code_gen_from_strings(*error_handle); +} + + void V8::SetWrapperClassId(i::Object** global_handle, uint16_t class_id) { i::GlobalHandles::SetWrapperClassId(global_handle, class_id); } +uint16_t V8::GetWrapperClassId(internal::Object** global_handle) { + return i::GlobalHandles::GetWrapperClassId(global_handle); +} + + Local ObjectTemplate::NewInstance() { i::Isolate* isolate = i::Isolate::Current(); ON_BAILOUT(isolate, "v8::ObjectTemplate::NewInstance()", @@ -4622,74 +4808,20 @@ bool FunctionTemplate::HasInstance(v8::Handle value) { } -static Local ExternalNewImpl(void* data) { - return Utils::ToLocal(FACTORY->NewForeign(static_cast(data))); -} - -static void* ExternalValueImpl(i::Handle obj) { - return reinterpret_cast(i::Foreign::cast(*obj)->foreign_address()); -} - - -Local v8::External::Wrap(void* data) { - i::Isolate* isolate = i::Isolate::Current(); - STATIC_ASSERT(sizeof(data) == sizeof(i::Address)); - EnsureInitializedForIsolate(isolate, "v8::External::Wrap()"); - LOG_API(isolate, "External::Wrap"); - ENTER_V8(isolate); - - v8::Local result = CanBeEncodedAsSmi(data) - ? 
Utils::ToLocal(i::Handle(EncodeAsSmi(data))) - : v8::Local(ExternalNewImpl(data)); - - ASSERT_EQ(data, Unwrap(result)); - return result; -} - - -void* v8::Object::SlowGetPointerFromInternalField(int index) { - i::Handle obj = Utils::OpenHandle(this); - i::Object* value = obj->GetInternalField(index); - if (value->IsSmi()) { - return i::Internals::GetExternalPointerFromSmi(value); - } else if (value->IsForeign()) { - return reinterpret_cast(i::Foreign::cast(value)->foreign_address()); - } else { - return NULL; - } -} - - -void* v8::External::FullUnwrap(v8::Handle wrapper) { - if (IsDeadCheck(i::Isolate::Current(), "v8::External::Unwrap()")) return 0; - i::Handle obj = Utils::OpenHandle(*wrapper); - void* result; - if (obj->IsSmi()) { - result = i::Internals::GetExternalPointerFromSmi(*obj); - } else if (obj->IsForeign()) { - result = ExternalValueImpl(obj); - } else { - result = NULL; - } - ASSERT_EQ(result, QuickUnwrap(wrapper)); - return result; -} - - -Local v8::External::New(void* data) { - STATIC_ASSERT(sizeof(data) == sizeof(i::Address)); +Local v8::External::New(void* value) { + STATIC_ASSERT(sizeof(value) == sizeof(i::Address)); i::Isolate* isolate = i::Isolate::Current(); EnsureInitializedForIsolate(isolate, "v8::External::New()"); LOG_API(isolate, "External::New"); ENTER_V8(isolate); - return ExternalNewImpl(data); + i::Handle external = isolate->factory()->NewExternal(value); + return Utils::ExternalToLocal(external); } void* External::Value() const { - if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return 0; - i::Handle obj = Utils::OpenHandle(this); - return ExternalValueImpl(obj); + if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return NULL; + return ExternalValue(*Utils::OpenHandle(this)); } @@ -5155,24 +5287,39 @@ Local v8::Number::New(double value) { Local v8::Integer::New(int32_t value) { i::Isolate* isolate = i::Isolate::UncheckedCurrent(); EnsureInitializedForIsolate(isolate, "v8::Integer::New()"); + return v8::Integer::New(value, reinterpret_cast(isolate)); +} + + +Local Integer::NewFromUnsigned(uint32_t value) { + i::Isolate* isolate = i::Isolate::Current(); + EnsureInitializedForIsolate(isolate, "v8::Integer::NewFromUnsigned()"); + return Integer::NewFromUnsigned(value, reinterpret_cast(isolate)); +} + + +Local v8::Integer::New(int32_t value, Isolate* isolate) { + i::Isolate* internal_isolate = reinterpret_cast(isolate); + ASSERT(internal_isolate->IsInitialized()); if (i::Smi::IsValid(value)) { return Utils::IntegerToLocal(i::Handle(i::Smi::FromInt(value), - isolate)); + internal_isolate)); } - ENTER_V8(isolate); - i::Handle result = isolate->factory()->NewNumber(value); + ENTER_V8(internal_isolate); + i::Handle result = internal_isolate->factory()->NewNumber(value); return Utils::IntegerToLocal(result); } -Local Integer::NewFromUnsigned(uint32_t value) { +Local v8::Integer::NewFromUnsigned(uint32_t value, Isolate* isolate) { + i::Isolate* internal_isolate = reinterpret_cast(isolate); + ASSERT(internal_isolate->IsInitialized()); bool fits_into_int32_t = (value & (1 << 31)) == 0; if (fits_into_int32_t) { - return Integer::New(static_cast(value)); + return Integer::New(static_cast(value), isolate); } - i::Isolate* isolate = i::Isolate::Current(); - ENTER_V8(isolate); - i::Handle result = isolate->factory()->NewNumber(value); + ENTER_V8(internal_isolate); + i::Handle result = internal_isolate->factory()->NewNumber(value); return Utils::IntegerToLocal(result); } @@ -5182,19 +5329,14 @@ void V8::IgnoreOutOfMemoryException() { } -bool 
V8::AddMessageListener(MessageCallback that, Handle data) { +bool V8::AddMessageListener(MessageCallback that) { i::Isolate* isolate = i::Isolate::Current(); EnsureInitializedForIsolate(isolate, "v8::V8::AddMessageListener()"); ON_BAILOUT(isolate, "v8::V8::AddMessageListener()", return false); ENTER_V8(isolate); i::HandleScope scope(isolate); NeanderArray listeners(isolate->factory()->message_listeners()); - NeanderObject obj(2); - obj.set(0, *isolate->factory()->NewForeign(FUNCTION_ADDR(that))); - obj.set(1, data.IsEmpty() ? - isolate->heap()->undefined_value() : - *Utils::OpenHandle(*data)); - listeners.add(obj.value()); + listeners.add(isolate->factory()->NewForeign(FUNCTION_ADDR(that))); return true; } @@ -5209,8 +5351,7 @@ void V8::RemoveMessageListeners(MessageCallback that) { for (int i = 0; i < listeners.length(); i++) { if (listeners.get(i)->IsUndefined()) continue; // skip deleted ones - NeanderObject listener(i::JSObject::cast(listeners.get(i))); - i::Handle callback_obj(i::Foreign::cast(listener.get(0))); + i::Handle callback_obj(i::Foreign::cast(listeners.get(i))); if (callback_obj->foreign_address() == FUNCTION_ADDR(that)) { listeners.set(i, isolate->heap()->undefined_value()); } @@ -5250,13 +5391,6 @@ void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) { SetAddHistogramSampleFunction(callback); } -void V8::EnableSlidingStateWindow() { - i::Isolate* isolate = i::Isolate::Current(); - if (IsDeadCheck(isolate, "v8::V8::EnableSlidingStateWindow()")) return; - isolate->logger()->EnableSlidingStateWindow(); -} - - void V8::SetFailedAccessCheckCallbackFunction( FailedAccessCheckCallback callback) { i::Isolate* isolate = i::Isolate::Current(); @@ -5266,6 +5400,7 @@ void V8::SetFailedAccessCheckCallbackFunction( isolate->SetFailedAccessCheckCallback(callback); } + void V8::AddObjectGroup(Persistent* objects, size_t length, RetainedObjectInfo* info) { @@ -5277,6 +5412,19 @@ void V8::AddObjectGroup(Persistent* objects, } +void V8::AddObjectGroup(Isolate* exportedIsolate, + Persistent* objects, + size_t length, + RetainedObjectInfo* info) { + i::Isolate* isolate = reinterpret_cast(exportedIsolate); + ASSERT(isolate == i::Isolate::Current()); + if (IsDeadCheck(isolate, "v8::V8::AddObjectGroup()")) return; + STATIC_ASSERT(sizeof(Persistent) == sizeof(i::Object**)); + isolate->global_handles()->AddObjectGroup( + reinterpret_cast(objects), length, info); +} + + void V8::AddImplicitReferences(Persistent parent, Persistent* children, size_t length) { @@ -6287,7 +6435,8 @@ SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle value) { const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle title, HeapSnapshot::Type type, - ActivityControl* control) { + ActivityControl* control, + ObjectNameResolver* resolver) { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapProfiler::TakeSnapshot"); i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull; @@ -6300,7 +6449,7 @@ const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle title, } return reinterpret_cast( i::HeapProfiler::TakeSnapshot( - *Utils::OpenHandle(*title), internal_type, control)); + *Utils::OpenHandle(*title), internal_type, control, resolver)); } @@ -6411,6 +6560,7 @@ void Testing::PrepareStressRun(int run) { void Testing::DeoptimizeAll() { + i::HandleScope scope; internal::Deoptimizer::DeoptimizeAll(); } diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h index 7197b6cb54..ca2240b640 100644 --- a/deps/v8/src/api.h +++ b/deps/v8/src/api.h @@ -201,8 +201,6 @@ class Utils { 
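With the per-listener data argument gone from AddMessageListener, registration reduces to the callback alone; the second parameter of the callback no longer receives data supplied at registration time. A sketch (the error formatting is illustrative):

    static void OnMessage(v8::Handle<v8::Message> message,
                          v8::Handle<v8::Value> /* data: no per-listener payload */) {
      v8::String::Utf8Value text(message->Get());
      fprintf(stderr, "%s\n", *text ? *text : "<no message text>");
    }

    void InstallListener() {
      v8::V8::AddMessageListener(OnMessage);
    }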
v8::internal::Handle obj); static inline Local ToLocal( v8::internal::Handle obj); - static inline Local ToLocal( - v8::internal::Handle obj); static inline Local MessageToLocal( v8::internal::Handle obj); static inline Local StackTraceToLocal( @@ -225,6 +223,8 @@ class Utils { v8::internal::Handle obj); static inline Local ToLocal( v8::internal::Handle obj); + static inline Local ExternalToLocal( + v8::internal::Handle obj); #define DECLARE_OPEN_HANDLE(From, To) \ static inline v8::internal::Handle \ @@ -268,7 +268,6 @@ MAKE_TO_LOCAL(ToLocal, String, String) MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp) MAKE_TO_LOCAL(ToLocal, JSObject, Object) MAKE_TO_LOCAL(ToLocal, JSArray, Array) -MAKE_TO_LOCAL(ToLocal, Foreign, External) MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate) MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate) MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature) @@ -280,6 +279,7 @@ MAKE_TO_LOCAL(StackFrameToLocal, JSObject, StackFrame) MAKE_TO_LOCAL(NumberToLocal, Object, Number) MAKE_TO_LOCAL(IntegerToLocal, Object, Integer) MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32) +MAKE_TO_LOCAL(ExternalToLocal, JSObject, External) #undef MAKE_TO_LOCAL diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h index c47c094756..acd61feff8 100644 --- a/deps/v8/src/arm/assembler-arm-inl.h +++ b/deps/v8/src/arm/assembler-arm-inl.h @@ -75,7 +75,7 @@ Address RelocInfo::target_address_address() { ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY || rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE); - return reinterpret_cast
<Address>(Assembler::target_address_address_at(pc_)); + return reinterpret_cast<Address>
(Assembler::target_pointer_address_at(pc_)); } @@ -97,25 +97,30 @@ void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) { Object* RelocInfo::target_object() { ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); - return Memory::Object_at(Assembler::target_address_address_at(pc_)); + return reinterpret_cast(Assembler::target_pointer_at(pc_)); } Handle RelocInfo::target_object_handle(Assembler* origin) { ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); - return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_)); + return Handle(reinterpret_cast( + Assembler::target_pointer_at(pc_))); } Object** RelocInfo::target_object_address() { + // Provide a "natural pointer" to the embedded object, + // which can be de-referenced during heap iteration. ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); - return reinterpret_cast(Assembler::target_address_address_at(pc_)); + reconstructed_obj_ptr_ = + reinterpret_cast(Assembler::target_pointer_at(pc_)); + return &reconstructed_obj_ptr_; } void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) { ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); - Assembler::set_target_address_at(pc_, reinterpret_cast
<Address>(target)); + Assembler::set_target_pointer_at(pc_, reinterpret_cast<Address>
(target)); if (mode == UPDATE_WRITE_BARRIER && host() != NULL && target->IsHeapObject()) { @@ -127,7 +132,8 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) { Address* RelocInfo::target_reference_address() { ASSERT(rmode_ == EXTERNAL_REFERENCE); - return reinterpret_cast(Assembler::target_address_address_at(pc_)); + reconstructed_adr_ptr_ = Assembler::target_address_at(pc_); + return &reconstructed_adr_ptr_; } @@ -159,6 +165,24 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell, } +static const int kNoCodeAgeSequenceLength = 3; + +Code* RelocInfo::code_age_stub() { + ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + return Code::GetCodeFromTargetAddress( + Memory::Address_at(pc_ + Assembler::kInstrSize * + (kNoCodeAgeSequenceLength - 1))); +} + + +void RelocInfo::set_code_age_stub(Code* stub) { + ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + Memory::Address_at(pc_ + Assembler::kInstrSize * + (kNoCodeAgeSequenceLength - 1)) = + stub->instruction_start(); +} + + Address RelocInfo::call_address() { // The 2 instructions offset assumes patched debug break slot or return // sequence. @@ -232,6 +256,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) { visitor->VisitGlobalPropertyCell(this); } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { visitor->VisitExternalReference(this); + } else if (RelocInfo::IsCodeAgeSequence(mode)) { + visitor->VisitCodeAgeSequence(this); #ifdef ENABLE_DEBUGGER_SUPPORT // TODO(isolates): Get a cached isolate below. } else if (((RelocInfo::IsJSReturn(mode) && @@ -258,6 +284,8 @@ void RelocInfo::Visit(Heap* heap) { StaticVisitor::VisitGlobalPropertyCell(heap, this); } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { StaticVisitor::VisitExternalReference(this); + } else if (RelocInfo::IsCodeAgeSequence(mode)) { + StaticVisitor::VisitCodeAgeSequence(heap, this); #ifdef ENABLE_DEBUGGER_SUPPORT } else if (heap->isolate()->debug()->has_break_points() && ((RelocInfo::IsJSReturn(mode) && @@ -326,7 +354,7 @@ void Assembler::emit(Instr x) { } -Address Assembler::target_address_address_at(Address pc) { +Address Assembler::target_pointer_address_at(Address pc) { Address target_pc = pc; Instr instr = Memory::int32_at(target_pc); // If we have a bx instruction, the instruction before the bx is @@ -356,8 +384,63 @@ Address Assembler::target_address_address_at(Address pc) { } -Address Assembler::target_address_at(Address pc) { - return Memory::Address_at(target_address_address_at(pc)); +Address Assembler::target_pointer_at(Address pc) { + if (IsMovW(Memory::int32_at(pc))) { + ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize))); + Instruction* instr = Instruction::At(pc); + Instruction* next_instr = Instruction::At(pc + kInstrSize); + return reinterpret_cast
( + (next_instr->ImmedMovwMovtValue() << 16) | + instr->ImmedMovwMovtValue()); + } + return Memory::Address_at(target_pointer_address_at(pc)); +} + + +Address Assembler::target_address_from_return_address(Address pc) { + // Returns the address of the call target from the return address that will + // be returned to after a call. +#ifdef USE_BLX + // Call sequence on V7 or later is : + // movw ip, #... @ call address low 16 + // movt ip, #... @ call address high 16 + // blx ip + // @ return address + // Or pre-V7 or cases that need frequent patching: + // ldr ip, [pc, #...] @ call address + // blx ip + // @ return address + Address candidate = pc - 2 * Assembler::kInstrSize; + Instr candidate_instr(Memory::int32_at(candidate)); + if (IsLdrPcImmediateOffset(candidate_instr)) { + return candidate; + } + candidate = pc - 3 * Assembler::kInstrSize; + ASSERT(IsMovW(Memory::int32_at(candidate)) && + IsMovT(Memory::int32_at(candidate + kInstrSize))); + return candidate; +#else + // Call sequence is: + // mov lr, pc + // ldr pc, [pc, #...] @ call address + // @ return address + return pc - kInstrSize; +#endif +} + + +Address Assembler::return_address_from_call_start(Address pc) { +#ifdef USE_BLX + if (IsLdrPcImmediateOffset(Memory::int32_at(pc))) { + return pc + kInstrSize * 2; + } else { + ASSERT(IsMovW(Memory::int32_at(pc))); + ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize))); + return pc + kInstrSize * 3; + } +#else + return pc + kInstrSize; +#endif } @@ -373,17 +456,53 @@ void Assembler::set_external_target_at(Address constant_pool_entry, } +static Instr EncodeMovwImmediate(uint32_t immediate) { + ASSERT(immediate < 0x10000); + return ((immediate & 0xf000) << 4) | (immediate & 0xfff); +} + + +void Assembler::set_target_pointer_at(Address pc, Address target) { + if (IsMovW(Memory::int32_at(pc))) { + ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize))); + uint32_t* instr_ptr = reinterpret_cast(pc); + uint32_t immediate = reinterpret_cast(target); + uint32_t intermediate = instr_ptr[0]; + intermediate &= ~EncodeMovwImmediate(0xFFFF); + intermediate |= EncodeMovwImmediate(immediate & 0xFFFF); + instr_ptr[0] = intermediate; + intermediate = instr_ptr[1]; + intermediate &= ~EncodeMovwImmediate(0xFFFF); + intermediate |= EncodeMovwImmediate(immediate >> 16); + instr_ptr[1] = intermediate; + ASSERT(IsMovW(Memory::int32_at(pc))); + ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize))); + CPU::FlushICache(pc, 2 * kInstrSize); + } else { + ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc))); + Memory::Address_at(target_pointer_address_at(pc)) = target; + // Intuitively, we would think it is necessary to always flush the + // instruction cache after patching a target address in the code as follows: + // CPU::FlushICache(pc, sizeof(target)); + // However, on ARM, no instruction is actually patched in the case + // of embedded constants of the form: + // ldr ip, [pc, #...] + // since the instruction accessing this address in the constant pool remains + // unchanged. 
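+    // (Contrast with the movw/movt branch above: there the 32-bit value is
+    // encoded directly into two instructions, so the patched words are code
+    // and the instruction cache does have to be flushed.)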
+ } +} + + +Address Assembler::target_address_at(Address pc) { + return target_pointer_at(pc); +} + + void Assembler::set_target_address_at(Address pc, Address target) { - Memory::Address_at(target_address_address_at(pc)) = target; - // Intuitively, we would think it is necessary to flush the instruction cache - // after patching a target address in the code as follows: - // CPU::FlushICache(pc, sizeof(target)); - // However, on ARM, no instruction was actually patched by the assignment - // above; the target address is not part of an instruction, it is patched in - // the constant pool and is read via a data access; the instruction accessing - // this address in the constant pool remains unchanged. + set_target_pointer_at(pc, target); } + } } // namespace v8::internal #endif // V8_ARM_ASSEMBLER_ARM_INL_H_ diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index 30a8830c9e..47ea0e2066 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -77,6 +77,9 @@ static unsigned CpuFeaturesImpliedByCompiler() { #endif // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__) // && !defined(__SOFTFP__) #endif // _arm__ + if (answer & (1u << ARMv7)) { + answer |= 1u << UNALIGNED_ACCESSES; + } return answer; } @@ -110,6 +113,14 @@ void CpuFeatures::Probe() { if (FLAG_enable_armv7) { supported_ |= 1u << ARMv7; } + + if (FLAG_enable_sudiv) { + supported_ |= 1u << SUDIV; + } + + if (FLAG_enable_movw_movt) { + supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS; + } #else // __arm__ // Probe for additional features not already known to be available. if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) { @@ -125,6 +136,19 @@ void CpuFeatures::Probe() { found_by_runtime_probing_ |= 1u << ARMv7; } + if (!IsSupported(SUDIV) && OS::ArmCpuHasFeature(SUDIV)) { + found_by_runtime_probing_ |= 1u << SUDIV; + } + + if (!IsSupported(UNALIGNED_ACCESSES) && OS::ArmCpuHasFeature(ARMv7)) { + found_by_runtime_probing_ |= 1u << UNALIGNED_ACCESSES; + } + + if (OS::GetCpuImplementer() == QUALCOMM_IMPLEMENTER && + OS::ArmCpuHasFeature(ARMv7)) { + found_by_runtime_probing_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS; + } + supported_ |= found_by_runtime_probing_; #endif @@ -294,46 +318,11 @@ const Instr kLdrStrInstrArgumentMask = 0x0000ffff; const Instr kLdrStrOffsetMask = 0x00000fff; -// Spare buffer. -static const int kMinimalBufferSize = 4*KB; - - -Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) - : AssemblerBase(arg_isolate), +Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) + : AssemblerBase(isolate, buffer, buffer_size), recorded_ast_id_(TypeFeedbackId::None()), - positions_recorder_(this), - emit_debug_code_(FLAG_debug_code), - predictable_code_size_(false) { - if (buffer == NULL) { - // Do our own buffer management. - if (buffer_size <= kMinimalBufferSize) { - buffer_size = kMinimalBufferSize; - - if (isolate()->assembler_spare_buffer() != NULL) { - buffer = isolate()->assembler_spare_buffer(); - isolate()->set_assembler_spare_buffer(NULL); - } - } - if (buffer == NULL) { - buffer_ = NewArray(buffer_size); - } else { - buffer_ = static_cast(buffer); - } - buffer_size_ = buffer_size; - own_buffer_ = true; - - } else { - // Use externally provided buffer instead. - ASSERT(buffer_size > 0); - buffer_ = static_cast(buffer); - buffer_size_ = buffer_size; - own_buffer_ = false; - } - - // Set up buffer pointers. 
- ASSERT(buffer_ != NULL); - pc_ = buffer_; - reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); + positions_recorder_(this) { + reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_); num_pending_reloc_info_ = 0; next_buffer_check_ = 0; const_pool_blocked_nesting_ = 0; @@ -346,14 +335,6 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) Assembler::~Assembler() { ASSERT(const_pool_blocked_nesting_ == 0); - if (own_buffer_) { - if (isolate()->assembler_spare_buffer() == NULL && - buffer_size_ == kMinimalBufferSize) { - isolate()->set_assembler_spare_buffer(buffer_); - } else { - DeleteArray(buffer_); - } - } } @@ -715,12 +696,6 @@ void Assembler::next(Label* L) { } -static Instr EncodeMovwImmediate(uint32_t immediate) { - ASSERT(immediate < 0x10000); - return ((immediate & 0xf000) << 4) | (immediate & 0xfff); -} - - // Low-level code emission routines depending on the addressing mode. // If this returns true then you have to use the rotate_imm and immed_8 // that it returns, because it may have already changed the instruction @@ -785,7 +760,7 @@ static bool fits_shifter(uint32_t imm32, // if they can be encoded in the ARM's 12 bits of immediate-offset instruction // space. There is no guarantee that the relocated location can be similarly // encoded. -bool Operand::must_use_constant_pool(const Assembler* assembler) const { +bool Operand::must_output_reloc_info(const Assembler* assembler) const { if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) { #ifdef DEBUG if (!Serializer::enabled()) { @@ -801,25 +776,28 @@ bool Operand::must_use_constant_pool(const Assembler* assembler) const { } +static bool use_movw_movt(const Operand& x, const Assembler* assembler) { + if (Assembler::use_immediate_embedded_pointer_loads(assembler)) { + return true; + } + if (x.must_output_reloc_info(assembler)) { + return false; + } + return CpuFeatures::IsSupported(ARMv7); +} + + bool Operand::is_single_instruction(const Assembler* assembler, Instr instr) const { if (rm_.is_valid()) return true; uint32_t dummy1, dummy2; - if (must_use_constant_pool(assembler) || + if (must_output_reloc_info(assembler) || !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) { // The immediate operand cannot be encoded as a shifter operand, or use of // constant pool is required. For a mov instruction not setting the // condition code additional instruction conventions can be used. if ((instr & ~kCondMask) == 13*B21) { // mov, S not set - if (must_use_constant_pool(assembler) || - !CpuFeatures::IsSupported(ARMv7)) { - // mov instruction will be an ldr from constant pool (one instruction). - return true; - } else { - // mov instruction will be a mov or movw followed by movt (two - // instructions). - return false; - } + return !use_movw_movt(*this, assembler); } else { // If this is not a mov or mvn instruction there will always an additional // instructions - either mov or ldr. The mov might actually be two @@ -835,6 +813,29 @@ bool Operand::is_single_instruction(const Assembler* assembler, } +void Assembler::move_32_bit_immediate(Condition cond, + Register rd, + SBit s, + const Operand& x) { + if (rd.code() != pc.code() && s == LeaveCC) { + if (use_movw_movt(x, this)) { + if (x.must_output_reloc_info(this)) { + RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL); + // Make sure the movw/movt doesn't get separated. 
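+          // A pool emitted in between would split the pair, and
+          // set_target_pointer_at() relies on movw/movt being adjacent when
+          // it later patches both halves of the pointer.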
+ BlockConstPoolFor(2); + } + emit(cond | 0x30*B20 | rd.code()*B12 | + EncodeMovwImmediate(x.imm32_ & 0xffff)); + movt(rd, static_cast(x.imm32_) >> 16, cond); + return; + } + } + + RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL); + ldr(rd, MemOperand(pc, 0), cond); +} + + void Assembler::addrmod1(Instr instr, Register rn, Register rd, @@ -845,7 +846,7 @@ void Assembler::addrmod1(Instr instr, // Immediate. uint32_t rotate_imm; uint32_t immed_8; - if (x.must_use_constant_pool(this) || + if (x.must_output_reloc_info(this) || !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) { // The immediate operand cannot be encoded as a shifter operand, so load // it first to register ip and change the original instruction to use ip. @@ -854,24 +855,19 @@ void Assembler::addrmod1(Instr instr, CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed Condition cond = Instruction::ConditionField(instr); if ((instr & ~kCondMask) == 13*B21) { // mov, S not set - if (x.must_use_constant_pool(this) || - !CpuFeatures::IsSupported(ARMv7)) { - RecordRelocInfo(x.rmode_, x.imm32_); - ldr(rd, MemOperand(pc, 0), cond); - } else { - // Will probably use movw, will certainly not use constant pool. - mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond); - movt(rd, static_cast(x.imm32_) >> 16, cond); - } + move_32_bit_immediate(cond, rd, LeaveCC, x); } else { - // If this is not a mov or mvn instruction we may still be able to avoid - // a constant pool entry by using mvn or movw. - if (!x.must_use_constant_pool(this) && - (instr & kMovMvnMask) != kMovMvnPattern) { - mov(ip, x, LeaveCC, cond); - } else { - RecordRelocInfo(x.rmode_, x.imm32_); + if ((instr & kMovMvnMask) == kMovMvnPattern) { + // Moves need to use a constant pool entry. + RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL); ldr(ip, MemOperand(pc, 0), cond); + } else if (x.must_output_reloc_info(this)) { + // Otherwise, use most efficient form of fetching from constant pool. + move_32_bit_immediate(cond, ip, LeaveCC, x); + } else { + // If this is not a mov or mvn instruction we may still be able to + // avoid a constant pool entry by using mvn or movw. + mov(ip, x, LeaveCC, cond); } addrmod1(instr, rn, rd, Operand(ip)); } @@ -1178,6 +1174,9 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) { void Assembler::movw(Register reg, uint32_t immediate, Condition cond) { ASSERT(immediate < 0x10000); + // May use movw if supported, but on unsupported platforms will try to use + // equivalent rotated immed_8 value and other tricks before falling back to a + // constant pool load. mov(reg, Operand(immediate), LeaveCC, cond); } @@ -1207,6 +1206,22 @@ void Assembler::mla(Register dst, Register src1, Register src2, Register srcA, } +void Assembler::mls(Register dst, Register src1, Register src2, Register srcA, + Condition cond) { + ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc)); + emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 | + src2.code()*B8 | B7 | B4 | src1.code()); +} + + +void Assembler::sdiv(Register dst, Register src1, Register src2, + Condition cond) { + ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc)); + emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 | + src2.code()*B8 | B4 | src1.code()); +} + + void Assembler::mul(Register dst, Register src1, Register src2, SBit s, Condition cond) { ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc)); @@ -1391,7 +1406,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src, // Immediate. 
uint32_t rotate_imm; uint32_t immed_8; - if (src.must_use_constant_pool(this) || + if (src.must_output_reloc_info(this) || !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) { // Immediate operand cannot be encoded, load it first to register ip. RecordRelocInfo(src.rmode_, src.imm32_); @@ -1826,7 +1841,7 @@ void Assembler::vstr(const SwVfpRegister src, const Condition cond) { ASSERT(!operand.rm().is_valid()); ASSERT(operand.am_ == Offset); - vldr(src, operand.rn(), operand.offset(), cond); + vstr(src, operand.rn(), operand.offset(), cond); } @@ -1975,6 +1990,7 @@ static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) { void Assembler::vmov(const DwVfpRegister dst, double imm, + const Register scratch, const Condition cond) { // Dd = immediate // Instruction details available in ARM DDI 0406B, A8-640. @@ -1989,22 +2005,22 @@ void Assembler::vmov(const DwVfpRegister dst, // using vldr from a constant pool. uint32_t lo, hi; DoubleAsTwoUInt32(imm, &lo, &hi); + mov(ip, Operand(lo)); - if (lo == hi) { - // If the lo and hi parts of the double are equal, the literal is easier - // to create. This is the case with 0.0. - mov(ip, Operand(lo)); - vmov(dst, ip, ip); - } else { + if (scratch.is(no_reg)) { // Move the low part of the double into the lower of the corresponsing S // registers of D register dst. - mov(ip, Operand(lo)); vmov(dst.low(), ip, cond); // Move the high part of the double into the higher of the corresponsing S // registers of D register dst. mov(ip, Operand(hi)); vmov(dst.high(), ip, cond); + } else { + // Move the low and high parts of the double to a D register in one + // instruction. + mov(scratch, Operand(hi)); + vmov(dst, ip, scratch, cond); } } } @@ -2333,6 +2349,20 @@ void Assembler::vmul(const DwVfpRegister dst, } +void Assembler::vmla(const DwVfpRegister dst, + const DwVfpRegister src1, + const DwVfpRegister src2, + const Condition cond) { + // Instruction details available in ARM DDI 0406C.b, A8-892. + // cond(31-28) | 11100(27-23) | D=?(22) | 00(21-20) | Vn(19-16) | + // Vd(15-12) | 101(11-9) | sz(8)=1 | N=?(7) | op(6)=0 | M=?(5) | 0(4) | + // Vm(3-0) + unsigned x = (cond | 0x1C*B23 | src1.code()*B16 | + dst.code()*B12 | 0x5*B9 | B8 | src2.code()); + emit(x); +} + + void Assembler::vdiv(const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2, @@ -2408,15 +2438,35 @@ void Assembler::vsqrt(const DwVfpRegister dst, // Pseudo instructions. void Assembler::nop(int type) { - // This is mov rx, rx. - ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop. + // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes + // some of the CPU's pipeline and has to issue. Older ARM chips simply used + // MOV Rx, Rx as NOP and it performs better even in newer CPUs. + // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode + // a type. + ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop. 
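  // Encoded as "mov r<type>, r<type>" with cond = al, so the marker survives
  // in the register operands and can be recovered by IsNop(instr, type).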
emit(al | 13*B21 | type*B12 | type); } +bool Assembler::IsMovT(Instr instr) { + instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions + ((kNumRegisters-1)*B12) | // mask out register + EncodeMovwImmediate(0xFFFF)); // mask out immediate value + return instr == 0x34*B20; +} + + +bool Assembler::IsMovW(Instr instr) { + instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions + ((kNumRegisters-1)*B12) | // mask out destination + EncodeMovwImmediate(0xFFFF)); // mask out immediate value + return instr == 0x30*B20; +} + + bool Assembler::IsNop(Instr instr, int type) { + ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop. // Check for mov rx, rx where x = type. - ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop. return instr == (al | 13*B21 | type*B12 | type); } @@ -2532,18 +2582,21 @@ void Assembler::dd(uint32_t data) { } -void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { +void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data, + UseConstantPoolMode mode) { // We do not try to reuse pool constants. RelocInfo rinfo(pc_, rmode, data, NULL); if (((rmode >= RelocInfo::JS_RETURN) && (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) || - (rmode == RelocInfo::CONST_POOL)) { + (rmode == RelocInfo::CONST_POOL) || + mode == DONT_USE_CONSTANT_POOL) { // Adjust code for new modes. ASSERT(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsJSReturn(rmode) || RelocInfo::IsComment(rmode) || RelocInfo::IsPosition(rmode) - || RelocInfo::IsConstPool(rmode)); + || RelocInfo::IsConstPool(rmode) + || mode == DONT_USE_CONSTANT_POOL); // These modes do not need an entry in the constant pool. } else { ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo); @@ -2648,9 +2701,9 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) { b(&after_pool); } - // Put down constant pool marker "Undefined instruction" as specified by - // A5.6 (ARMv7) Instruction set encoding. - emit(kConstantPoolMarker | num_pending_reloc_info_); + // Put down constant pool marker "Undefined instruction". + emit(kConstantPoolMarker | + EncodeConstantPoolLength(num_pending_reloc_info_)); // Emit constant pool entries. for (int i = 0; i < num_pending_reloc_info_; i++) { @@ -2662,17 +2715,19 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) { Instr instr = instr_at(rinfo.pc()); // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. - ASSERT(IsLdrPcImmediateOffset(instr) && - GetLdrRegisterImmediateOffset(instr) == 0); - - int delta = pc_ - rinfo.pc() - kPcLoadDelta; - // 0 is the smallest delta: - // ldr rd, [pc, #0] - // constant pool marker - // data - ASSERT(is_uint12(delta)); - - instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta)); + if (IsLdrPcImmediateOffset(instr) && + GetLdrRegisterImmediateOffset(instr) == 0) { + int delta = pc_ - rinfo.pc() - kPcLoadDelta; + // 0 is the smallest delta: + // ldr rd, [pc, #0] + // constant pool marker + // data + ASSERT(is_uint12(delta)); + + instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta)); + } else { + ASSERT(IsMovW(instr)); + } emit(rinfo.data()); } diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index 7f2ce30aee..3b9bb804fd 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -425,7 +425,7 @@ class Operand BASE_EMBEDDED { // actual instruction to use is required for this calculation. For other // instructions instr is ignored. 
bool is_single_instruction(const Assembler* assembler, Instr instr = 0) const; - bool must_use_constant_pool(const Assembler* assembler) const; + bool must_output_reloc_info(const Assembler* assembler) const; inline int32_t immediate() const { ASSERT(!rm_.is_valid()); @@ -511,6 +511,10 @@ class CpuFeatures : public AllStatic { ASSERT(initialized_); if (f == VFP3 && !FLAG_enable_vfp3) return false; if (f == VFP2 && !FLAG_enable_vfp2) return false; + if (f == SUDIV && !FLAG_enable_sudiv) return false; + if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) { + return false; + } return (supported_ & (1u << f)) != 0; } @@ -643,15 +647,7 @@ class Assembler : public AssemblerBase { // is too small, a fatal error occurs. No deallocation of the buffer is done // upon destruction of the assembler. Assembler(Isolate* isolate, void* buffer, int buffer_size); - ~Assembler(); - - // Overrides the default provided by FLAG_debug_code. - void set_emit_debug_code(bool value) { emit_debug_code_ = value; } - - // Avoids using instructions that vary in size in unpredictable ways between - // the snapshot and the running VM. This is needed by the full compiler so - // that it can recompile code with debug support and fix the PC. - void set_predictable_code_size(bool value) { predictable_code_size_ = value; } + virtual ~Assembler(); // GetCode emits any pending (non-emitted) code and fills the descriptor // desc. GetCode() is idempotent; it returns the same result if no other @@ -685,13 +681,25 @@ class Assembler : public AssemblerBase { void label_at_put(Label* L, int at_offset); // Return the address in the constant pool of the code target address used by - // the branch/call instruction at pc. - INLINE(static Address target_address_address_at(Address pc)); + // the branch/call instruction at pc, or the object in a mov. + INLINE(static Address target_pointer_address_at(Address pc)); + + // Read/Modify the pointer in the branch/call/move instruction at pc. + INLINE(static Address target_pointer_at(Address pc)); + INLINE(static void set_target_pointer_at(Address pc, Address target)); // Read/Modify the code target address in the branch/call instruction at pc. INLINE(static Address target_address_at(Address pc)); INLINE(static void set_target_address_at(Address pc, Address target)); + // Return the code target address at a call site from the return address + // of that call in the instruction stream. + INLINE(static Address target_address_from_return_address(Address pc)); + + // Given the address of the beginning of a call, return the address + // in the instruction stream that the call will return from. + INLINE(static Address return_address_from_call_start(Address pc)); + // This sets the branch destination (which is in the constant pool on ARM). // This is for calls and branches within generated code. inline static void deserialization_set_special_target_at( @@ -710,22 +718,6 @@ class Assembler : public AssemblerBase { // Size of an instruction. static const int kInstrSize = sizeof(Instr); - // Distance between the instruction referring to the address of the call - // target and the return address. -#ifdef USE_BLX - // Call sequence is: - // ldr ip, [pc, #...] @ call address - // blx ip - // @ return address - static const int kCallTargetAddressOffset = 2 * kInstrSize; -#else - // Call sequence is: - // mov lr, pc - // ldr pc, [pc, #...] 
@ call address - // @ return address - static const int kCallTargetAddressOffset = kInstrSize; -#endif - // Distance between start of patched return sequence and the emitted address // to jump to. #ifdef USE_BLX @@ -754,6 +746,12 @@ class Assembler : public AssemblerBase { static const int kPatchDebugBreakSlotAddressOffset = kInstrSize; #endif +#ifdef USE_BLX + static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstrSize; +#else + static const int kPatchDebugBreakSlotReturnOffset = kInstrSize; +#endif + // Difference between address of current opcode and value read from pc // register. static const int kPcLoadDelta = 8; @@ -869,6 +867,12 @@ class Assembler : public AssemblerBase { void mla(Register dst, Register src1, Register src2, Register srcA, SBit s = LeaveCC, Condition cond = al); + void mls(Register dst, Register src1, Register src2, Register srcA, + Condition cond = al); + + void sdiv(Register dst, Register src1, Register src2, + Condition cond = al); + void mul(Register dst, Register src1, Register src2, SBit s = LeaveCC, Condition cond = al); @@ -1053,6 +1057,7 @@ class Assembler : public AssemblerBase { void vmov(const DwVfpRegister dst, double imm, + const Register scratch = no_reg, const Condition cond = al); void vmov(const SwVfpRegister dst, const SwVfpRegister src, @@ -1121,6 +1126,10 @@ class Assembler : public AssemblerBase { const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond = al); + void vmla(const DwVfpRegister dst, + const DwVfpRegister src1, + const DwVfpRegister src2, + const Condition cond = al); void vdiv(const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2, @@ -1172,7 +1181,19 @@ class Assembler : public AssemblerBase { // Jump unconditionally to given label. void jmp(Label* L) { b(L, al); } - bool predictable_code_size() const { return predictable_code_size_; } + static bool use_immediate_embedded_pointer_loads( + const Assembler* assembler) { +#ifdef USE_BLX + return CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) && + (assembler == NULL || !assembler->predictable_code_size()); +#else + // If not using BLX, all loads from the constant pool cannot be immediate, + // because the ldr pc, [pc + #xxxx] used for calls must be a single + // instruction and cannot be easily distinguished out of context from + // other loads that could use movw/movt. + return false; +#endif + } // Check the code size generated from label to here. int SizeOfCodeGeneratedSince(Label* label) { @@ -1255,8 +1276,6 @@ class Assembler : public AssemblerBase { void db(uint8_t data); void dd(uint32_t data); - int pc_offset() const { return pc_ - buffer_; } - PositionsRecorder* positions_recorder() { return &positions_recorder_; } // Read/patch instructions @@ -1294,12 +1313,16 @@ class Assembler : public AssemblerBase { static Register GetCmpImmediateRegister(Instr instr); static int GetCmpImmediateRawImmediate(Instr instr); static bool IsNop(Instr instr, int type = NON_MARKING_NOP); + static bool IsMovT(Instr instr); + static bool IsMovW(Instr instr); // Constants in pools are accessed via pc relative addressing, which can // reach +/-4KB thereby defining a maximum distance between the instruction // and the accessed constant. static const int kMaxDistToPool = 4*KB; static const int kMaxNumPendingRelocInfo = kMaxDistToPool/kInstrSize; + STATIC_ASSERT((kConstantPoolLengthMaxMask & kMaxNumPendingRelocInfo) == + kMaxNumPendingRelocInfo); // Postpone the generation of the constant pool for the specified number of // instructions. 
@@ -1314,8 +1337,6 @@ class Assembler : public AssemblerBase { // the relocation info. TypeFeedbackId recorded_ast_id_; - bool emit_debug_code() const { return emit_debug_code_; } - int buffer_space() const { return reloc_info_writer.pos() - pc_; } // Decode branch instruction at pos and return branch target pos @@ -1357,13 +1378,6 @@ class Assembler : public AssemblerBase { } private: - // Code buffer: - // The buffer into which code and relocation info are generated. - byte* buffer_; - int buffer_size_; - // True if the assembler owns the buffer, false if buffer is external. - bool own_buffer_; - int next_buffer_check_; // pc offset of next buffer check // Code generation @@ -1372,7 +1386,6 @@ class Assembler : public AssemblerBase { // not have to check for overflow. The same is true for writes of large // relocation info entries. static const int kGap = 32; - byte* pc_; // the program counter; moves forward // Constant pool generation // Pools are emitted in the instruction stream, preferably after unconditional @@ -1432,6 +1445,12 @@ class Assembler : public AssemblerBase { void GrowBuffer(); inline void emit(Instr x); + // 32-bit immediate values + void move_32_bit_immediate(Condition cond, + Register rd, + SBit s, + const Operand& x); + // Instruction generation void addrmod1(Instr instr, Register rn, Register rd, const Operand& x); void addrmod2(Instr instr, Register rd, const MemOperand& x); @@ -1445,8 +1464,14 @@ class Assembler : public AssemblerBase { void link_to(Label* L, Label* appendix); void next(Label* L); + enum UseConstantPoolMode { + USE_CONSTANT_POOL, + DONT_USE_CONSTANT_POOL + }; + // Record reloc info for current pc_ - void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); + void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0, + UseConstantPoolMode mode = USE_CONSTANT_POOL); friend class RegExpMacroAssemblerARM; friend class RelocInfo; @@ -1454,10 +1479,6 @@ class Assembler : public AssemblerBase { friend class BlockConstPoolScope; PositionsRecorder positions_recorder_; - - bool emit_debug_code_; - bool predictable_code_size_; - friend class PositionsRecorder; friend class EnsureSpace; }; diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index 2d1d7b1199..24d14e8c8a 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -1226,6 +1226,39 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { } +static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { + // For now, we are relying on the fact that make_code_young doesn't do any + // garbage collection which allows us to save/restore the registers without + // worrying about which of them contain pointers. We also don't build an + // internal frame to make the code faster, since we shouldn't have to do stack + // crawls in MakeCodeYoung. This seems a bit fragile. 
+ + // The following registers must be saved and restored when calling through to + // the runtime: + // r0 - contains return address (beginning of patch sequence) + // r1 - function object + FrameScope scope(masm, StackFrame::MANUAL); + __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit()); + __ PrepareCallCFunction(1, 0, r1); + __ CallCFunction( + ExternalReference::get_make_code_young_function(masm->isolate()), 1); + __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit()); + __ mov(pc, r0); +} + +#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \ +void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \ + MacroAssembler* masm) { \ + GenerateMakeCodeYoungAgainCommon(masm); \ +} \ +void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \ + MacroAssembler* masm) { \ + GenerateMakeCodeYoungAgainCommon(masm); \ +} +CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR) +#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR + + static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, Deoptimizer::BailoutType type) { { diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index 5bb2116263..9484f85f97 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -41,8 +41,7 @@ namespace internal { static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow, - Condition cond, - bool never_nan_nan); + Condition cond); static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs, Register rhs, @@ -627,24 +626,6 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm, } -void FloatingPointHelper::LoadOperands( - MacroAssembler* masm, - FloatingPointHelper::Destination destination, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* slow) { - - // Load right operand (r0) to d6 or r2/r3. - LoadNumber(masm, destination, - r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow); - - // Load left operand (r1) to d7 or r0/r1. 
- LoadNumber(masm, destination, - r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow); -} - - void FloatingPointHelper::LoadNumber(MacroAssembler* masm, Destination destination, Register object, @@ -655,11 +636,9 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, Register scratch1, Register scratch2, Label* not_number) { - if (FLAG_debug_code) { - __ AbortIfNotRootValue(heap_number_map, - Heap::kHeapNumberMapRootIndex, - "HeapNumberMap register clobbered."); - } + __ AssertRootValue(heap_number_map, + Heap::kHeapNumberMapRootIndex, + "HeapNumberMap register clobbered."); Label is_smi, done; @@ -716,11 +695,9 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, Register scratch3, DwVfpRegister double_scratch, Label* not_number) { - if (FLAG_debug_code) { - __ AbortIfNotRootValue(heap_number_map, - Heap::kHeapNumberMapRootIndex, - "HeapNumberMap register clobbered."); - } + __ AssertRootValue(heap_number_map, + Heap::kHeapNumberMapRootIndex, + "HeapNumberMap register clobbered."); Label done; Label not_in_int32_range; @@ -752,13 +729,13 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, Register int_scratch, Destination destination, DwVfpRegister double_dst, - Register dst1, - Register dst2, + Register dst_mantissa, + Register dst_exponent, Register scratch2, SwVfpRegister single_scratch) { ASSERT(!int_scratch.is(scratch2)); - ASSERT(!int_scratch.is(dst1)); - ASSERT(!int_scratch.is(dst2)); + ASSERT(!int_scratch.is(dst_mantissa)); + ASSERT(!int_scratch.is(dst_exponent)); Label done; @@ -767,56 +744,57 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, __ vmov(single_scratch, int_scratch); __ vcvt_f64_s32(double_dst, single_scratch); if (destination == kCoreRegisters) { - __ vmov(dst1, dst2, double_dst); + __ vmov(dst_mantissa, dst_exponent, double_dst); } } else { Label fewer_than_20_useful_bits; // Expected output: - // | dst2 | dst1 | + // | dst_exponent | dst_mantissa | // | s | exp | mantissa | // Check for zero. __ cmp(int_scratch, Operand::Zero()); - __ mov(dst2, int_scratch); - __ mov(dst1, int_scratch); + __ mov(dst_exponent, int_scratch); + __ mov(dst_mantissa, int_scratch); __ b(eq, &done); // Preload the sign of the value. - __ and_(dst2, int_scratch, Operand(HeapNumber::kSignMask), SetCC); + __ and_(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask), SetCC); // Get the absolute value of the object (as an unsigned integer). __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi); // Get mantissa[51:20]. // Get the position of the first set bit. - __ CountLeadingZeros(dst1, int_scratch, scratch2); - __ rsb(dst1, dst1, Operand(31)); + __ CountLeadingZeros(dst_mantissa, int_scratch, scratch2); + __ rsb(dst_mantissa, dst_mantissa, Operand(31)); // Set the exponent. - __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias)); - __ Bfi(dst2, scratch2, scratch2, + __ add(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias)); + __ Bfi(dst_exponent, scratch2, scratch2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); // Clear the first non null bit. __ mov(scratch2, Operand(1)); - __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst1)); + __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst_mantissa)); - __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord)); + __ cmp(dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord)); // Get the number of bits to set in the lower part of the mantissa. 
- __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); + __ sub(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord), + SetCC); __ b(mi, &fewer_than_20_useful_bits); // Set the higher 20 bits of the mantissa. - __ orr(dst2, dst2, Operand(int_scratch, LSR, scratch2)); + __ orr(dst_exponent, dst_exponent, Operand(int_scratch, LSR, scratch2)); __ rsb(scratch2, scratch2, Operand(32)); - __ mov(dst1, Operand(int_scratch, LSL, scratch2)); + __ mov(dst_mantissa, Operand(int_scratch, LSL, scratch2)); __ b(&done); __ bind(&fewer_than_20_useful_bits); - __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord)); + __ rsb(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord)); __ mov(scratch2, Operand(int_scratch, LSL, scratch2)); - __ orr(dst2, dst2, scratch2); + __ orr(dst_exponent, dst_exponent, scratch2); // Set dst1 to 0. - __ mov(dst1, Operand::Zero()); + __ mov(dst_mantissa, Operand::Zero()); } __ bind(&done); } @@ -826,8 +804,9 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, Register object, Destination destination, DwVfpRegister double_dst, - Register dst1, - Register dst2, + DwVfpRegister double_scratch, + Register dst_mantissa, + Register dst_exponent, Register heap_number_map, Register scratch1, Register scratch2, @@ -843,16 +822,14 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, __ JumpIfNotSmi(object, &obj_is_not_smi); __ SmiUntag(scratch1, object); - ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2, - scratch2, single_scratch); + ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa, + dst_exponent, scratch2, single_scratch); __ b(&done); __ bind(&obj_is_not_smi); - if (FLAG_debug_code) { - __ AbortIfNotRootValue(heap_number_map, - Heap::kHeapNumberMapRootIndex, - "HeapNumberMap register clobbered."); - } + __ AssertRootValue(heap_number_map, + Heap::kHeapNumberMapRootIndex, + "HeapNumberMap register clobbered."); __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); // Load the number. @@ -863,36 +840,62 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, __ vldr(double_dst, scratch1, HeapNumber::kValueOffset); __ EmitVFPTruncate(kRoundToZero, - single_scratch, - double_dst, scratch1, + double_dst, scratch2, + double_scratch, kCheckForInexactConversion); // Jump to not_int32 if the operation did not succeed. __ b(ne, not_int32); if (destination == kCoreRegisters) { - __ vmov(dst1, dst2, double_dst); + __ vmov(dst_mantissa, dst_exponent, double_dst); } } else { ASSERT(!scratch1.is(object) && !scratch2.is(object)); - // Load the double value in the destination registers.. - __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); + // Load the double value in the destination registers. + bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent); + if (save_registers) { + // Save both output registers, because the other one probably holds + // an important value too. + __ Push(dst_exponent, dst_mantissa); + } + __ Ldrd(dst_mantissa, dst_exponent, + FieldMemOperand(object, HeapNumber::kValueOffset)); // Check for 0 and -0. 
- __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask)); - __ orr(scratch1, scratch1, Operand(dst2)); + Label zero; + __ bic(scratch1, dst_exponent, Operand(HeapNumber::kSignMask)); + __ orr(scratch1, scratch1, Operand(dst_mantissa)); __ cmp(scratch1, Operand::Zero()); - __ b(eq, &done); + __ b(eq, &zero); // Check that the value can be exactly represented by a 32-bit integer. // Jump to not_int32 if that's not the case. - DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32); + Label restore_input_and_miss; + DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2, + &restore_input_and_miss); - // dst1 and dst2 were trashed. Reload the double value. - __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); + // dst_* were trashed. Reload the double value. + if (save_registers) { + __ Pop(dst_exponent, dst_mantissa); + } + __ Ldrd(dst_mantissa, dst_exponent, + FieldMemOperand(object, HeapNumber::kValueOffset)); + __ b(&done); + + __ bind(&restore_input_and_miss); + if (save_registers) { + __ Pop(dst_exponent, dst_mantissa); + } + __ b(not_int32); + + __ bind(&zero); + if (save_registers) { + __ Drop(2); + } } __ bind(&done); @@ -906,7 +909,8 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, Register scratch1, Register scratch2, Register scratch3, - DwVfpRegister double_scratch, + DwVfpRegister double_scratch0, + DwVfpRegister double_scratch1, Label* not_int32) { ASSERT(!dst.is(object)); ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); @@ -914,38 +918,34 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, !scratch1.is(scratch3) && !scratch2.is(scratch3)); - Label done; + Label done, maybe_undefined; __ UntagAndJumpIfSmi(dst, object, &done); - if (FLAG_debug_code) { - __ AbortIfNotRootValue(heap_number_map, - Heap::kHeapNumberMapRootIndex, - "HeapNumberMap register clobbered."); - } - __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); + __ AssertRootValue(heap_number_map, + Heap::kHeapNumberMapRootIndex, + "HeapNumberMap register clobbered."); + + __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined); // Object is a heap number. // Convert the floating point value to a 32-bit integer. if (CpuFeatures::IsSupported(VFP2)) { CpuFeatures::Scope scope(VFP2); - SwVfpRegister single_scratch = double_scratch.low(); + // Load the double value. __ sub(scratch1, object, Operand(kHeapObjectTag)); - __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset); + __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset); __ EmitVFPTruncate(kRoundToZero, - single_scratch, - double_scratch, + dst, + double_scratch0, scratch1, - scratch2, + double_scratch1, kCheckForInexactConversion); // Jump to not_int32 if the operation did not succeed. __ b(ne, not_int32); - // Get the result in the destination register. - __ vmov(dst, single_scratch); - } else { // Load the double value in the destination registers. __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); @@ -973,20 +973,28 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, __ tst(scratch1, Operand(HeapNumber::kSignMask)); __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi); } + __ b(&done); + + __ bind(&maybe_undefined); + __ CompareRoot(object, Heap::kUndefinedValueRootIndex); + __ b(ne, not_int32); + // |undefined| is truncated to 0. + __ mov(dst, Operand(Smi::FromInt(0))); + // Fall through. 
__ bind(&done); } void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, - Register src1, - Register src2, + Register src_exponent, + Register src_mantissa, Register dst, Register scratch, Label* not_int32) { // Get exponent alone in scratch. __ Ubfx(scratch, - src1, + src_exponent, HeapNumber::kExponentShift, HeapNumber::kExponentBits); @@ -1006,11 +1014,11 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, // Another way to put it is that if (exponent - signbit) > 30 then the // number cannot be represented as an int32. Register tmp = dst; - __ sub(tmp, scratch, Operand(src1, LSR, 31)); + __ sub(tmp, scratch, Operand(src_exponent, LSR, 31)); __ cmp(tmp, Operand(30)); __ b(gt, not_int32); // - Bits [21:0] in the mantissa are not null. - __ tst(src2, Operand(0x3fffff)); + __ tst(src_mantissa, Operand(0x3fffff)); __ b(ne, not_int32); // Otherwise the exponent needs to be big enough to shift left all the @@ -1021,19 +1029,19 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, // Get the 32 higher bits of the mantissa in dst. __ Ubfx(dst, - src2, + src_mantissa, HeapNumber::kMantissaBitsInTopWord, 32 - HeapNumber::kMantissaBitsInTopWord); __ orr(dst, dst, - Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord)); + Operand(src_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord)); // Create the mask and test the lower bits (of the higher bits). __ rsb(scratch, scratch, Operand(32)); - __ mov(src2, Operand(1)); - __ mov(src1, Operand(src2, LSL, scratch)); - __ sub(src1, src1, Operand(1)); - __ tst(dst, src1); + __ mov(src_mantissa, Operand(1)); + __ mov(src_exponent, Operand(src_mantissa, LSL, scratch)); + __ sub(src_exponent, src_exponent, Operand(1)); + __ tst(dst, src_exponent); __ b(ne, not_int32); } @@ -1157,48 +1165,43 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { // for "identity and not NaN". static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow, - Condition cond, - bool never_nan_nan) { + Condition cond) { Label not_identical; Label heap_number, return_equal; __ cmp(r0, r1); __ b(ne, ¬_identical); - // The two objects are identical. If we know that one of them isn't NaN then - // we now know they test equal. - if (cond != eq || !never_nan_nan) { - // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(), - // so we do the second best thing - test it ourselves. - // They are both equal and they are not both Smis so both of them are not - // Smis. If it's not a heap number, then return equal. - if (cond == lt || cond == gt) { - __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE); + // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(), + // so we do the second best thing - test it ourselves. + // They are both equal and they are not both Smis so both of them are not + // Smis. If it's not a heap number, then return equal. + if (cond == lt || cond == gt) { + __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE); + __ b(ge, slow); + } else { + __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); + __ b(eq, &heap_number); + // Comparing JS objects with <=, >= is complicated. + if (cond != eq) { + __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE)); __ b(ge, slow); - } else { - __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); - __ b(eq, &heap_number); - // Comparing JS objects with <=, >= is complicated. 
- if (cond != eq) { - __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE)); - __ b(ge, slow); - // Normally here we fall through to return_equal, but undefined is - // special: (undefined == undefined) == true, but - // (undefined <= undefined) == false! See ECMAScript 11.8.5. - if (cond == le || cond == ge) { - __ cmp(r4, Operand(ODDBALL_TYPE)); - __ b(ne, &return_equal); - __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); - __ cmp(r0, r2); - __ b(ne, &return_equal); - if (cond == le) { - // undefined <= undefined should fail. - __ mov(r0, Operand(GREATER)); - } else { - // undefined >= undefined should fail. - __ mov(r0, Operand(LESS)); - } - __ Ret(); + // Normally here we fall through to return_equal, but undefined is + // special: (undefined == undefined) == true, but + // (undefined <= undefined) == false! See ECMAScript 11.8.5. + if (cond == le || cond == ge) { + __ cmp(r4, Operand(ODDBALL_TYPE)); + __ b(ne, &return_equal); + __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); + __ cmp(r0, r2); + __ b(ne, &return_equal); + if (cond == le) { + // undefined <= undefined should fail. + __ mov(r0, Operand(GREATER)); + } else { + // undefined >= undefined should fail. + __ mov(r0, Operand(LESS)); } + __ Ret(); } } } @@ -1213,47 +1216,45 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, } __ Ret(); - if (cond != eq || !never_nan_nan) { - // For less and greater we don't have to check for NaN since the result of - // x < x is false regardless. For the others here is some code to check - // for NaN. - if (cond != lt && cond != gt) { - __ bind(&heap_number); - // It is a heap number, so return non-equal if it's NaN and equal if it's - // not NaN. - - // The representation of NaN values has all exponent bits (52..62) set, - // and not all mantissa bits (0..51) clear. - // Read top bits of double representation (second word of value). - __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); - // Test that exponent bits are all set. - __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); - // NaNs have all-one exponents so they sign extend to -1. - __ cmp(r3, Operand(-1)); - __ b(ne, &return_equal); - - // Shift out flag and all exponent bits, retaining only mantissa. - __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); - // Or with all low-bits of mantissa. - __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); - __ orr(r0, r3, Operand(r2), SetCC); - // For equal we already have the right value in r0: Return zero (equal) - // if all bits in mantissa are zero (it's an Infinity) and non-zero if - // not (it's a NaN). For <= and >= we need to load r0 with the failing - // value if it's a NaN. - if (cond != eq) { - // All-zero means Infinity means equal. - __ Ret(eq); - if (cond == le) { - __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. - } else { - __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. - } + // For less and greater we don't have to check for NaN since the result of + // x < x is false regardless. For the others here is some code to check + // for NaN. + if (cond != lt && cond != gt) { + __ bind(&heap_number); + // It is a heap number, so return non-equal if it's NaN and equal if it's + // not NaN. + + // The representation of NaN values has all exponent bits (52..62) set, + // and not all mantissa bits (0..51) clear. + // Read top bits of double representation (second word of value). + __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); + // Test that exponent bits are all set. 
+ __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); + // NaNs have all-one exponents so they sign extend to -1. + __ cmp(r3, Operand(-1)); + __ b(ne, &return_equal); + + // Shift out flag and all exponent bits, retaining only mantissa. + __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); + // Or with all low-bits of mantissa. + __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); + __ orr(r0, r3, Operand(r2), SetCC); + // For equal we already have the right value in r0: Return zero (equal) + // if all bits in mantissa are zero (it's an Infinity) and non-zero if + // not (it's a NaN). For <= and >= we need to load r0 with the failing + // value if it's a NaN. + if (cond != eq) { + // All-zero means Infinity means equal. + __ Ret(eq); + if (cond == le) { + __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. + } else { + __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. } - __ Ret(); } - // No fall through here. + __ Ret(); } + // No fall through here. __ bind(¬_identical); } @@ -1687,42 +1688,60 @@ void NumberToStringStub::Generate(MacroAssembler* masm) { } -// On entry lhs_ and rhs_ are the values to be compared. +static void ICCompareStub_CheckInputType(MacroAssembler* masm, + Register input, + Register scratch, + CompareIC::State expected, + Label* fail) { + Label ok; + if (expected == CompareIC::SMI) { + __ JumpIfNotSmi(input, fail); + } else if (expected == CompareIC::HEAP_NUMBER) { + __ JumpIfSmi(input, &ok); + __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail, + DONT_DO_SMI_CHECK); + } + // We could be strict about symbol/string here, but as long as + // hydrogen doesn't care, the stub doesn't have to care either. + __ bind(&ok); +} + + +// On entry r1 and r2 are the values to be compared. // On exit r0 is 0, positive or negative to indicate the result of // the comparison. -void CompareStub::Generate(MacroAssembler* masm) { - ASSERT((lhs_.is(r0) && rhs_.is(r1)) || - (lhs_.is(r1) && rhs_.is(r0))); +void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { + Register lhs = r1; + Register rhs = r0; + Condition cc = GetCondition(); + + Label miss; + ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss); + ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss); Label slow; // Call builtin. Label not_smis, both_loaded_as_doubles, lhs_not_nan; - if (include_smi_compare_) { - Label not_two_smis, smi_done; - __ orr(r2, r1, r0); - __ JumpIfNotSmi(r2, ¬_two_smis); - __ mov(r1, Operand(r1, ASR, 1)); - __ sub(r0, r1, Operand(r0, ASR, 1)); - __ Ret(); - __ bind(¬_two_smis); - } else if (FLAG_debug_code) { - __ orr(r2, r1, r0); - __ tst(r2, Operand(kSmiTagMask)); - __ Assert(ne, "CompareStub: unexpected smi operands."); - } + Label not_two_smis, smi_done; + __ orr(r2, r1, r0); + __ JumpIfNotSmi(r2, ¬_two_smis); + __ mov(r1, Operand(r1, ASR, 1)); + __ sub(r0, r1, Operand(r0, ASR, 1)); + __ Ret(); + __ bind(¬_two_smis); // NOTICE! This code is only reached after a smi-fast-case check, so // it is certain that at least one operand isn't a smi. // Handle the case where the objects are identical. Either returns the answer // or goes to slow. Only falls through if the objects were not identical. - EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); + EmitIdenticalObjectComparison(masm, &slow, cc); // If either is a Smi (we know that not both are), then they can only // be strictly equal if the other is a HeapNumber. 
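  // (Since kSmiTag == 0, the low bit of a smi is clear: lhs & rhs therefore
  // looks like a smi exactly when at least one operand is a smi, and the
  // JumpIfNotSmi below fires only when both operands are heap objects.)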
STATIC_ASSERT(kSmiTag == 0); ASSERT_EQ(0, Smi::FromInt(0)); - __ and_(r2, lhs_, Operand(rhs_)); + __ and_(r2, lhs, Operand(rhs)); __ JumpIfNotSmi(r2, ¬_smis); // One operand is a smi. EmitSmiNonsmiComparison generates code that can: // 1) Return the answer. @@ -1733,7 +1752,7 @@ void CompareStub::Generate(MacroAssembler* masm) { // comparison. If VFP3 is supported the double values of the numbers have // been loaded into d7 and d6. Otherwise, the double values have been loaded // into r0, r1, r2, and r3. - EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_); + EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict()); __ bind(&both_loaded_as_doubles); // The arguments have been converted to doubles and stored in d6 and d7, if @@ -1756,7 +1775,7 @@ void CompareStub::Generate(MacroAssembler* masm) { // If one of the sides was a NaN then the v flag is set. Load r0 with // whatever it takes to make the comparison fail, since comparisons with NaN // always fail. - if (cc_ == lt || cc_ == le) { + if (cc == lt || cc == le) { __ mov(r0, Operand(GREATER)); } else { __ mov(r0, Operand(LESS)); @@ -1765,19 +1784,19 @@ void CompareStub::Generate(MacroAssembler* masm) { } else { // Checks for NaN in the doubles we have loaded. Can return the answer or // fall through if neither is a NaN. Also binds lhs_not_nan. - EmitNanCheck(masm, &lhs_not_nan, cc_); + EmitNanCheck(masm, &lhs_not_nan, cc); // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the // answer. Never falls through. - EmitTwoNonNanDoubleComparison(masm, cc_); + EmitTwoNonNanDoubleComparison(masm, cc); } __ bind(¬_smis); // At this point we know we are dealing with two different objects, // and neither of them is a Smi. The objects are in rhs_ and lhs_. - if (strict_) { + if (strict()) { // This returns non-equal for some object types, or falls through if it // was not lucky. - EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_); + EmitStrictTwoHeapObjectCompare(masm, lhs, rhs); } Label check_for_symbols; @@ -1787,8 +1806,8 @@ void CompareStub::Generate(MacroAssembler* masm) { // that case. If the inputs are not doubles then jumps to check_for_symbols. // In this case r2 will contain the type of rhs_. Never falls through. EmitCheckForTwoHeapNumbers(masm, - lhs_, - rhs_, + lhs, + rhs, &both_loaded_as_doubles, &check_for_symbols, &flat_string_check); @@ -1796,31 +1815,31 @@ void CompareStub::Generate(MacroAssembler* masm) { __ bind(&check_for_symbols); // In the strict case the EmitStrictTwoHeapObjectCompare already took care of // symbols. - if (cc_ == eq && !strict_) { + if (cc == eq && !strict()) { // Returns an answer for two symbols or two detectable objects. // Otherwise jumps to string case or not both strings case. // Assumes that r2 is the type of rhs_ on entry. - EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow); + EmitCheckForSymbolsOrObjects(masm, lhs, rhs, &flat_string_check, &slow); } // Check for both being sequential ASCII strings, and inline if that is the // case. 
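Because kSmiTag is 0, both pair-wise tag checks in this stub are single instructions: ORing the two operands leaves a clear low bit only if both are smis (the fast path earlier), while ANDing them leaves a clear low bit if at least one is a smi (the check just above that feeds EmitSmiNonsmiComparison). A C++ restatement of the two tricks, assuming the usual one-bit smi tag in bit 0:

    #include <cstdint>

    const uint32_t kSmiTagMask = 1;  // smis carry a 0 in bit 0

    // "orr r2, r1, r0; JumpIfNotSmi(r2, ...)" - true when both values are smis.
    bool BothAreSmis(uint32_t a, uint32_t b) {
      return ((a | b) & kSmiTagMask) == 0;
    }

    // "and_ r2, lhs, rhs; JumpIfNotSmi(r2, ...)" - true when at least one is a smi.
    bool AtLeastOneIsSmi(uint32_t a, uint32_t b) {
      return ((a & b) & kSmiTagMask) == 0;
    }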
__ bind(&flat_string_check); - __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow); + __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow); __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3); - if (cc_ == eq) { + if (cc == eq) { StringCompareStub::GenerateFlatAsciiStringEquals(masm, - lhs_, - rhs_, + lhs, + rhs, r2, r3, r4); } else { StringCompareStub::GenerateCompareFlatAsciiStrings(masm, - lhs_, - rhs_, + lhs, + rhs, r2, r3, r4, @@ -1830,18 +1849,18 @@ void CompareStub::Generate(MacroAssembler* masm) { __ bind(&slow); - __ Push(lhs_, rhs_); + __ Push(lhs, rhs); // Figure out which native to call and setup the arguments. Builtins::JavaScript native; - if (cc_ == eq) { - native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; + if (cc == eq) { + native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; } else { native = Builtins::COMPARE; int ncr; // NaN compare result - if (cc_ == lt || cc_ == le) { + if (cc == lt || cc == le) { ncr = GREATER; } else { - ASSERT(cc_ == gt || cc_ == ge); // remaining cases + ASSERT(cc == gt || cc == ge); // remaining cases ncr = LESS; } __ mov(r0, Operand(Smi::FromInt(ncr))); @@ -1851,6 +1870,9 @@ void CompareStub::Generate(MacroAssembler* masm) { // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) // tagged as a small integer. __ InvokeBuiltin(native, JUMP_FUNCTION); + + __ bind(&miss); + GenerateMiss(masm); } @@ -2334,20 +2356,23 @@ void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) { } +void BinaryOpStub::Initialize() { + platform_specific_bit_ = CpuFeatures::IsSupported(VFP2); +} + + void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { Label get_result; __ Push(r1, r0); __ mov(r2, Operand(Smi::FromInt(MinorKey()))); - __ mov(r1, Operand(Smi::FromInt(op_))); - __ mov(r0, Operand(Smi::FromInt(operands_type_))); - __ Push(r2, r1, r0); + __ push(r2); __ TailCallExternalReference( ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()), - 5, + 3, 1); } @@ -2358,59 +2383,8 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( } -void BinaryOpStub::Generate(MacroAssembler* masm) { - // Explicitly allow generation of nested stubs. It is safe here because - // generation code does not use any raw pointers. 
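The NaN-compare-result ("ncr") selection above uses the same idea as the undefined handling earlier: when the outcome of an ordered comparison must be false, preload a result with the wrong sign for the condition being tested. A minimal sketch of that mapping (the -1/0/+1 values mirror the LESS/EQUAL/GREATER constants the builtins return):

    // Result that makes an ordered comparison fail: "<" and "<=" branch on a
    // negative result, so +1 defeats them; ">" and ">=" branch on a positive
    // result, so -1 defeats them.
    enum CompareResult { kLess = -1, kEqual = 0, kGreater = 1 };

    int FailingResult(bool cond_is_lt_or_le) {
      return cond_is_lt_or_le ? kGreater : kLess;
    }

That is why comparisons involving NaN, and "undefined <= undefined" in the identical-object path, always come out false.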
- AllowStubCallsScope allow_stub_calls(masm, true); - - switch (operands_type_) { - case BinaryOpIC::UNINITIALIZED: - GenerateTypeTransition(masm); - break; - case BinaryOpIC::SMI: - GenerateSmiStub(masm); - break; - case BinaryOpIC::INT32: - GenerateInt32Stub(masm); - break; - case BinaryOpIC::HEAP_NUMBER: - GenerateHeapNumberStub(masm); - break; - case BinaryOpIC::ODDBALL: - GenerateOddballStub(masm); - break; - case BinaryOpIC::BOTH_STRING: - GenerateBothStringStub(masm); - break; - case BinaryOpIC::STRING: - GenerateStringStub(masm); - break; - case BinaryOpIC::GENERIC: - GenerateGeneric(masm); - break; - default: - UNREACHABLE(); - } -} - - -void BinaryOpStub::PrintName(StringStream* stream) { - const char* op_name = Token::Name(op_); - const char* overwrite_name; - switch (mode_) { - case NO_OVERWRITE: overwrite_name = "Alloc"; break; - case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; - case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; - default: overwrite_name = "UnknownOverwrite"; break; - } - stream->Add("BinaryOpStub_%s_%s_%s", - op_name, - overwrite_name, - BinaryOpIC::GetName(operands_type_)); -} - - -void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) { +void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, + Token::Value op) { Register left = r1; Register right = r0; Register scratch1 = r7; @@ -2420,7 +2394,7 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) { STATIC_ASSERT(kSmiTag == 0); Label not_smi_result; - switch (op_) { + switch (op) { case Token::ADD: __ add(right, left, Operand(right), SetCC); // Add optimistically. __ Ret(vc); @@ -2535,10 +2509,24 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) { } -void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, - bool smi_operands, - Label* not_numbers, - Label* gc_required) { +void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, + Register result, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* gc_required, + OverwriteMode mode); + + +void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, + BinaryOpIC::TypeInfo left_type, + BinaryOpIC::TypeInfo right_type, + bool smi_operands, + Label* not_numbers, + Label* gc_required, + Label* miss, + Token::Value op, + OverwriteMode mode) { Register left = r1; Register right = r0; Register scratch1 = r7; @@ -2546,15 +2534,21 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, Register scratch3 = r4; ASSERT(smi_operands || (not_numbers != NULL)); - if (smi_operands && FLAG_debug_code) { - __ AbortIfNotSmi(left); - __ AbortIfNotSmi(right); + if (smi_operands) { + __ AssertSmi(left); + __ AssertSmi(right); + } + if (left_type == BinaryOpIC::SMI) { + __ JumpIfNotSmi(left, miss); + } + if (right_type == BinaryOpIC::SMI) { + __ JumpIfNotSmi(right, miss); } Register heap_number_map = r6; __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - switch (op_) { + switch (op) { case Token::ADD: case Token::SUB: case Token::MUL: @@ -2564,25 +2558,44 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, // depending on whether VFP3 is available or not. FloatingPointHelper::Destination destination = CpuFeatures::IsSupported(VFP2) && - op_ != Token::MOD ? + op != Token::MOD ? FloatingPointHelper::kVFPRegisters : FloatingPointHelper::kCoreRegisters; // Allocate new heap number for result. 
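The Token::ADD case above adds the two tagged operands optimistically and returns only if the overflow (V) flag stayed clear; because the smi tag bit is 0, adding two tagged smis adds the untagged values and yields a correctly tagged sum in one instruction. A hedged C++ model of that fast path, using the GCC/Clang overflow builtin in place of the V flag:

    #include <cstdint>

    // Sketch of the smi-smi ADD fast path. Returns false where the stub
    // would fall through to the non-smi / heap-number path.
    bool SmiAddOptimistic(int32_t tagged_left, int32_t tagged_right,
                          int32_t* tagged_result) {
      int32_t sum;
      if (__builtin_add_overflow(tagged_left, tagged_right, &sum)) {
        return false;  // signed overflow: the stub's "Ret(vc)" is not taken
      }
      *tagged_result = sum;  // still a validly tagged smi
      return true;
    }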
Register result = r5; - GenerateHeapResultAllocation( - masm, result, heap_number_map, scratch1, scratch2, gc_required); + BinaryOpStub_GenerateHeapResultAllocation( + masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); // Load the operands. if (smi_operands) { FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); } else { - FloatingPointHelper::LoadOperands(masm, - destination, - heap_number_map, - scratch1, - scratch2, - not_numbers); + // Load right operand to d7 or r2/r3. + if (right_type == BinaryOpIC::INT32) { + FloatingPointHelper::LoadNumberAsInt32Double( + masm, right, destination, d7, d8, r2, r3, heap_number_map, + scratch1, scratch2, s0, miss); + } else { + Label* fail = (right_type == BinaryOpIC::HEAP_NUMBER) ? miss + : not_numbers; + FloatingPointHelper::LoadNumber( + masm, destination, right, d7, r2, r3, heap_number_map, + scratch1, scratch2, fail); + } + // Load left operand to d6 or r0/r1. This keeps r0/r1 intact if it + // jumps to |miss|. + if (left_type == BinaryOpIC::INT32) { + FloatingPointHelper::LoadNumberAsInt32Double( + masm, left, destination, d6, d8, r0, r1, heap_number_map, + scratch1, scratch2, s0, miss); + } else { + Label* fail = (left_type == BinaryOpIC::HEAP_NUMBER) ? miss + : not_numbers; + FloatingPointHelper::LoadNumber( + masm, destination, left, d6, r0, r1, heap_number_map, + scratch1, scratch2, fail); + } } // Calculate the result. @@ -2591,7 +2604,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, // d6: Left value // d7: Right value CpuFeatures::Scope scope(VFP2); - switch (op_) { + switch (op) { case Token::ADD: __ vadd(d5, d6, d7); break; @@ -2615,7 +2628,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, } else { // Call the C function to handle the double operation. FloatingPointHelper::CallCCodeForDoubleOperation(masm, - op_, + op, result, scratch1); if (FLAG_debug_code) { @@ -2656,7 +2669,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, } Label result_not_a_smi; - switch (op_) { + switch (op) { case Token::BIT_OR: __ orr(r2, r3, Operand(r2)); break; @@ -2707,8 +2720,9 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, __ AllocateHeapNumber( result, scratch1, scratch2, heap_number_map, gc_required); } else { - GenerateHeapResultAllocation( - masm, result, heap_number_map, scratch1, scratch2, gc_required); + BinaryOpStub_GenerateHeapResultAllocation( + masm, result, heap_number_map, scratch1, scratch2, gc_required, + mode); } // r2: Answer as signed int32. @@ -2723,7 +2737,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, // mentioned above SHR needs to always produce a positive result. CpuFeatures::Scope scope(VFP2); __ vmov(s0, r2); - if (op_ == Token::SHR) { + if (op == Token::SHR) { __ vcvt_f64_u32(d0, s0); } else { __ vcvt_f64_s32(d0, s0); @@ -2748,12 +2762,14 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, // Generate the smi code. If the operation on smis are successful this return is // generated. If the result is not a smi and heap number allocation is not // requested the code falls through. If number allocation is requested but a -// heap number cannot be allocated the code jumps to the lable gc_required. -void BinaryOpStub::GenerateSmiCode( +// heap number cannot be allocated the code jumps to the label gc_required. 
+void BinaryOpStub_GenerateSmiCode( MacroAssembler* masm, Label* use_runtime, Label* gc_required, - SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { + Token::Value op, + BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, + OverwriteMode mode) { Label not_smis; Register left = r1; @@ -2766,12 +2782,14 @@ void BinaryOpStub::GenerateSmiCode( __ JumpIfNotSmi(scratch1, ¬_smis); // If the smi-smi operation results in a smi return is generated. - GenerateSmiSmiOperation(masm); + BinaryOpStub_GenerateSmiSmiOperation(masm, op); // If heap number results are possible generate the result in an allocated // heap number. - if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { - GenerateFPOperation(masm, true, use_runtime, gc_required); + if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { + BinaryOpStub_GenerateFPOperation( + masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, + use_runtime, gc_required, ¬_smis, op, mode); } __ bind(¬_smis); } @@ -2783,14 +2801,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { if (result_type_ == BinaryOpIC::UNINITIALIZED || result_type_ == BinaryOpIC::SMI) { // Only allow smi results. - GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS); + BinaryOpStub_GenerateSmiCode( + masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_); } else { // Allow heap number result and don't make a transition if a heap number // cannot be allocated. - GenerateSmiCode(masm, - &call_runtime, - &call_runtime, - ALLOW_HEAPNUMBER_RESULTS); + BinaryOpStub_GenerateSmiCode( + masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, + mode_); } // Code falls through if the result is not returned as either a smi or heap @@ -2798,23 +2816,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { GenerateTypeTransition(masm); __ bind(&call_runtime); + GenerateRegisterArgsPush(masm); GenerateCallRuntime(masm); } -void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) { - ASSERT(operands_type_ == BinaryOpIC::STRING); - ASSERT(op_ == Token::ADD); - // Try to add arguments as strings, otherwise, transition to the generic - // BinaryOpIC type. - GenerateAddStrings(masm); - GenerateTypeTransition(masm); -} - - void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { Label call_runtime; - ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING); + ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); ASSERT(op_ == Token::ADD); // If both arguments are strings, call the string add stub. // Otherwise, do a transition. @@ -2843,14 +2852,13 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { - ASSERT(operands_type_ == BinaryOpIC::INT32); + ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); Register left = r1; Register right = r0; Register scratch1 = r7; Register scratch2 = r9; DwVfpRegister double_scratch = d0; - SwVfpRegister single_scratch = s3; Register heap_number_result = no_reg; Register heap_number_map = r6; @@ -2866,7 +2874,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { Label skip; __ orr(scratch1, left, right); __ JumpIfNotSmi(scratch1, &skip); - GenerateSmiSmiOperation(masm); + BinaryOpStub_GenerateSmiSmiOperation(masm, op_); // Fall through if the result is not a smi. 
__ bind(&skip); @@ -2876,6 +2884,15 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { case Token::MUL: case Token::DIV: case Token::MOD: { + // It could be that only SMIs have been seen at either the left + // or the right operand. For precise type feedback, patch the IC + // again if this changes. + if (left_type_ == BinaryOpIC::SMI) { + __ JumpIfNotSmi(left, &transition); + } + if (right_type_ == BinaryOpIC::SMI) { + __ JumpIfNotSmi(right, &transition); + } // Load both operands and check that they are 32-bit integer. // Jump to type transition if they are not. The registers r0 and r1 (right // and left) are preserved for the runtime call. @@ -2888,6 +2905,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { right, destination, d7, + d8, r2, r3, heap_number_map, @@ -2899,6 +2917,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { left, destination, d6, + d8, r4, r5, heap_number_map, @@ -2934,10 +2953,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { // transition. __ EmitVFPTruncate(kRoundToZero, - single_scratch, - d5, scratch1, - scratch2); + d5, + scratch2, + d8); if (result_type_ <= BinaryOpIC::INT32) { // If the ne condition is set, result does @@ -2946,7 +2965,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { } // Check if the result fits in a smi. - __ vmov(scratch1, single_scratch); __ add(scratch2, scratch1, Operand(0x40000000), SetCC); // If not try to return a heap number. __ b(mi, &return_heap_number); @@ -2973,12 +2991,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { : BinaryOpIC::INT32)) { // We are using vfp registers so r5 is available. heap_number_result = r5; - GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &call_runtime); + BinaryOpStub_GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime, + mode_); __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); __ vstr(d5, r0, HeapNumber::kValueOffset); __ mov(r0, heap_number_result); @@ -2997,12 +3016,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { // Allocate a heap number to store the result. heap_number_result = r5; - GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &pop_and_call_runtime); + BinaryOpStub_GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &pop_and_call_runtime, + mode_); // Load the left value from the value saved on the stack. 
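The "check if the result fits in a smi" sequence above is a branch-free range test: adding 0x40000000 to the untagged 32-bit value makes the sign bit come out set exactly when the value lies outside [-2^30, 2^30), i.e. when it cannot be tagged as a 31-bit smi. A standalone C++ version of the same trick (assuming the 32-bit smi layout used here on ARM):

    #include <cstdint>

    // Mirrors "add scratch2, scratch1, #0x40000000, SetCC; b mi, ...":
    // the "mi" branch is taken exactly when the value does not fit in a smi.
    bool FitsInSmi(int32_t value) {
      uint32_t biased = static_cast<uint32_t>(value) + 0x40000000u;
      return (biased & 0x80000000u) == 0;
    }

For example, FitsInSmi(0x3FFFFFFF) is true while FitsInSmi(0x40000000) is false, matching the 2^30 boundary.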
__ Pop(r1, r0); @@ -3041,6 +3061,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { scratch2, scratch3, d0, + d1, &transition); FloatingPointHelper::LoadNumberAsInt32(masm, right, @@ -3050,6 +3071,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { scratch2, scratch3, d0, + d1, &transition); // The ECMA-262 standard specifies that, for shift operations, only the @@ -3105,12 +3127,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { __ bind(&return_heap_number); heap_number_result = r5; - GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &call_runtime); + BinaryOpStub_GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime, + mode_); if (CpuFeatures::IsSupported(VFP2)) { CpuFeatures::Scope scope(VFP2); @@ -3154,6 +3177,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { } __ bind(&call_runtime); + GenerateRegisterArgsPush(masm); GenerateCallRuntime(masm); } @@ -3192,20 +3216,32 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { - Label call_runtime; - GenerateFPOperation(masm, false, &call_runtime, &call_runtime); + Label call_runtime, transition; + BinaryOpStub_GenerateFPOperation( + masm, left_type_, right_type_, false, + &transition, &call_runtime, &transition, op_, mode_); + + __ bind(&transition); + GenerateTypeTransition(masm); __ bind(&call_runtime); + GenerateRegisterArgsPush(masm); GenerateCallRuntime(masm); } void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { - Label call_runtime, call_string_add_or_runtime; + Label call_runtime, call_string_add_or_runtime, transition; - GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); + BinaryOpStub_GenerateSmiCode( + masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_); - GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime); + BinaryOpStub_GenerateFPOperation( + masm, left_type_, right_type_, false, + &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_); + + __ bind(&transition); + GenerateTypeTransition(masm); __ bind(&call_string_add_or_runtime); if (op_ == Token::ADD) { @@ -3213,6 +3249,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { } __ bind(&call_runtime); + GenerateRegisterArgsPush(masm); GenerateCallRuntime(masm); } @@ -3248,61 +3285,20 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { } -void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { - GenerateRegisterArgsPush(masm); - switch (op_) { - case Token::ADD: - __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); - break; - case Token::SUB: - __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); - break; - case Token::MUL: - __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); - break; - case Token::DIV: - __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); - break; - case Token::MOD: - __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); - break; - case Token::BIT_OR: - __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); - break; - case Token::BIT_AND: - __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); - break; - case Token::BIT_XOR: - __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); - break; - case Token::SAR: - __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); - break; - case Token::SHR: - __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); - break; - case Token::SHL: - __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); - break; - 
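The shift cases above implement the ECMA-262 rule referenced in the comment: only the five lowest bits of the shift count are used, and SHR (the JavaScript >>> operator) produces an unsigned 32-bit result, which is why its result may not fit in a signed int32/smi and can force a heap-number result. A small C++ sketch of the semantics being generated (illustrative, not V8 code):

    #include <cstdint>

    // JavaScript ">>>": the count is reduced modulo 32 and the result is an
    // unsigned 32-bit value; anything with bit 31 set cannot be returned as
    // a signed smi, so the stub allocates a heap number for it.
    uint32_t JsShiftRightLogical(uint32_t value, uint32_t count) {
      return value >> (count & 0x1f);
    }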
default: - UNREACHABLE(); - } -} - - -void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm, - Register result, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* gc_required) { +void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, + Register result, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* gc_required, + OverwriteMode mode) { // Code below will scratch result if allocation fails. To keep both arguments // intact for the runtime call result cannot be one of these. ASSERT(!result.is(r0) && !result.is(r1)); - if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) { + if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) { Label skip_allocation, allocated; - Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0; + Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0; // If the overwritable operand is already an object, we skip the // allocation of a heap number. __ JumpIfNotSmi(overwritable_operand, &skip_allocation); @@ -3315,7 +3311,7 @@ void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm, __ mov(result, Operand(overwritable_operand)); __ bind(&allocated); } else { - ASSERT(mode_ == NO_OVERWRITE); + ASSERT(mode == NO_OVERWRITE); __ AllocateHeapNumber( result, scratch1, scratch2, heap_number_map, gc_required); } @@ -3444,8 +3440,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { ExternalReference(RuntimeFunction(), masm->isolate()); __ TailCallExternalReference(runtime_function, 1, 1); } else { - ASSERT(CpuFeatures::IsSupported(VFP3)); - CpuFeatures::Scope scope(VFP3); + ASSERT(CpuFeatures::IsSupported(VFP2)); + CpuFeatures::Scope scope(VFP2); Label no_update; Label skip_cache; @@ -3636,13 +3632,13 @@ void MathPowStub::Generate(MacroAssembler* masm) { Label not_plus_half; // Test for 0.5. - __ vmov(double_scratch, 0.5); + __ vmov(double_scratch, 0.5, scratch); __ VFPCompareAndSetFlags(double_exponent, double_scratch); __ b(ne, ¬_plus_half); // Calculates square root of base. Check for the special case of // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13). - __ vmov(double_scratch, -V8_INFINITY); + __ vmov(double_scratch, -V8_INFINITY, scratch); __ VFPCompareAndSetFlags(double_base, double_scratch); __ vneg(double_result, double_scratch, eq); __ b(eq, &done); @@ -3653,20 +3649,20 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ jmp(&done); __ bind(¬_plus_half); - __ vmov(double_scratch, -0.5); + __ vmov(double_scratch, -0.5, scratch); __ VFPCompareAndSetFlags(double_exponent, double_scratch); __ b(ne, &call_runtime); // Calculates square root of base. Check for the special case of // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13). - __ vmov(double_scratch, -V8_INFINITY); + __ vmov(double_scratch, -V8_INFINITY, scratch); __ VFPCompareAndSetFlags(double_base, double_scratch); __ vmov(double_result, kDoubleRegZero, eq); __ b(eq, &done); // Add +0 to convert -0 to +0. __ vadd(double_scratch, double_base, kDoubleRegZero); - __ vmov(double_result, 1.0); + __ vmov(double_result, 1.0, scratch); __ vsqrt(double_scratch, double_scratch); __ vdiv(double_result, double_result, double_scratch); __ jmp(&done); @@ -3701,7 +3697,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ mov(exponent, scratch); } __ vmov(double_scratch, double_base); // Back up base. - __ vmov(double_result, 1.0); + __ vmov(double_result, 1.0, scratch2); // Get absolute value of exponent. 
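BinaryOpStub_GenerateHeapResultAllocation above encodes a simple reuse policy: when the overwrite mode says one operand may be clobbered and that operand is already a heap object, its heap number is reused as the result storage; otherwise a fresh heap number is allocated, and allocation failure jumps to gc_required. A rough C++ outline of that decision, with an invented Value struct and allocator standing in for the macro-assembler operations:

    #include <new>

    // Invented stand-ins for the sketch only.
    struct Value { bool is_smi; void* heap_number; };
    enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
    static void* AllocateHeapNumber() { return ::operator new(sizeof(double)); }

    // Pick the storage for the double result: reuse the overwritable operand's
    // heap number if it already is one, otherwise allocate a new cell.
    void* ResultStorage(const Value& left, const Value& right, OverwriteMode mode) {
      if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
        const Value& target = (mode == OVERWRITE_LEFT) ? left : right;
        if (!target.is_smi) return target.heap_number;  // skip the allocation
      }
      return AllocateHeapNumber();  // the stub would go to gc_required on failure
    }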
__ cmp(scratch, Operand(0)); @@ -3717,7 +3713,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ cmp(exponent, Operand(0)); __ b(ge, &done); - __ vmov(double_scratch, 1.0); + __ vmov(double_scratch, 1.0, scratch); __ vdiv(double_result, double_scratch, double_result); // Test whether result is zero. Bail out to check for subnormal result. // Due to subnormals, x^-y == (1/x)^y does not hold in all cases. @@ -4930,7 +4926,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // subject: Subject string // regexp_data: RegExp data (FixedArray) // r0: Instance type of subject string - STATIC_ASSERT(4 == kAsciiStringTag); + STATIC_ASSERT(4 == kOneByteStringTag); STATIC_ASSERT(kTwoByteStringTag == 0); // Find the code object based on the assumptions above. __ and_(r0, r0, Operand(kStringEncodingMask)); @@ -5154,7 +5150,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ ldr(subject, FieldMemOperand(subject, ExternalString::kResourceDataOffset)); // Move the pointer so that offset-wise, it looks like a sequential string. - STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); __ sub(subject, subject, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); @@ -5232,12 +5228,12 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { // Set FixedArray length. __ mov(r6, Operand(r5, LSL, kSmiTagSize)); __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset)); - // Fill contents of fixed-array with the-hole. - __ mov(r2, Operand(factory->the_hole_value())); + // Fill contents of fixed-array with undefined. + __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - // Fill fixed array elements with hole. + // Fill fixed array elements with undefined. // r0: JSArray, tagged. - // r2: the hole. + // r2: undefined. // r3: Start of elements in FixedArray. // r5: Number of elements to fill. Label loop; @@ -5432,48 +5428,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) { } -// Unfortunately you have to run without snapshots to see most of these -// names in the profile since most compare stubs end up in the snapshot. -void CompareStub::PrintName(StringStream* stream) { - ASSERT((lhs_.is(r0) && rhs_.is(r1)) || - (lhs_.is(r1) && rhs_.is(r0))); - const char* cc_name; - switch (cc_) { - case lt: cc_name = "LT"; break; - case gt: cc_name = "GT"; break; - case le: cc_name = "LE"; break; - case ge: cc_name = "GE"; break; - case eq: cc_name = "EQ"; break; - case ne: cc_name = "NE"; break; - default: cc_name = "UnknownCondition"; break; - } - bool is_equality = cc_ == eq || cc_ == ne; - stream->Add("CompareStub_%s", cc_name); - stream->Add(lhs_.is(r0) ? "_r0" : "_r1"); - stream->Add(rhs_.is(r0) ? "_r0" : "_r1"); - if (strict_ && is_equality) stream->Add("_STRICT"); - if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN"); - if (!include_number_compare_) stream->Add("_NO_NUMBER"); - if (!include_smi_compare_) stream->Add("_NO_SMI"); -} - - -int CompareStub::MinorKey() { - // Encode the three parameters in a unique 16 bit value. To avoid duplicate - // stubs the never NaN NaN condition is only taken into account if the - // condition is equals. 
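The integer-exponent path of MathPowStub sketched above works through the absolute exponent by repeated squaring and only divides at the end for a negative exponent; as the comment notes, 1/(x^n) is not always exactly x^-n once the intermediate result underflows to a subnormal, so the stub bails out when the reciprocal comes out zero. A hedged C++ rendering of that shape (std::pow stands in for the runtime fallback):

    #include <cmath>

    double PowIntFastPath(double base, int exponent) {
      unsigned n = exponent < 0 ? -static_cast<unsigned>(exponent)
                                : static_cast<unsigned>(exponent);
      double result = 1.0;
      double scratch = base;            // running base^(2^k)
      for (; n != 0; n >>= 1) {
        if (n & 1) result *= scratch;
        scratch *= scratch;
      }
      if (exponent < 0) {
        result = 1.0 / result;
        // Due to subnormals, x^-y == (1/x)^y does not hold in all cases:
        // fall back to the full pow when the fast result underflows to zero.
        if (result == 0.0) return std::pow(base, exponent);
      }
      return result;
    }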
- ASSERT((static_cast(cc_) >> 28) < (1 << 12)); - ASSERT((lhs_.is(r0) && rhs_.is(r1)) || - (lhs_.is(r1) && rhs_.is(r0))); - return ConditionField::encode(static_cast(cc_) >> 28) - | RegisterField::encode(lhs_.is(r0)) - | StrictField::encode(strict_) - | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false) - | IncludeNumberCompareField::encode(include_number_compare_) - | IncludeSmiCompareField::encode(include_smi_compare_); -} - - // StringCharCodeAtGenerator void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { Label flat_string; @@ -5923,7 +5877,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // Check if the two characters match. // Assumes that word load is little endian. - __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize)); + __ ldrh(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize)); __ cmp(chars, scratch); __ b(eq, &found_in_symbol_table); __ bind(&next_probe[i]); @@ -6006,23 +5960,28 @@ void SubStringStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - // I.e., arithmetic shift right by one un-smi-tags. - __ mov(r2, Operand(r2, ASR, 1), SetCC); - __ mov(r3, Operand(r3, ASR, 1), SetCC, cc); - // If either to or from had the smi tag bit set, then carry is set now. - __ b(cs, &runtime); // Either "from" or "to" is not a smi. + // Arithmetic shift right by one un-smi-tags. In this case we rotate right + // instead because we bail out on non-smi values: ROR and ASR are equivalent + // for smis but they set the flags in a way that's easier to optimize. + __ mov(r2, Operand(r2, ROR, 1), SetCC); + __ mov(r3, Operand(r3, ROR, 1), SetCC, cc); + // If either to or from had the smi tag bit set, then C is set now, and N + // has the same value: we rotated by 1, so the bottom bit is now the top bit. // We want to bailout to runtime here if From is negative. In that case, the // next instruction is not executed and we fall through to bailing out to - // runtime. pl is the opposite of mi. - // Both r2 and r3 are untagged integers. - __ sub(r2, r2, Operand(r3), SetCC, pl); - __ b(mi, &runtime); // Fail if from > to. + // runtime. + // Executed if both r2 and r3 are untagged integers. + __ sub(r2, r2, Operand(r3), SetCC, cc); + // One of the above un-smis or the above SUB could have set N==1. + __ b(mi, &runtime); // Either "from" or "to" is not an smi, or from > to. // Make sure first argument is a string. __ ldr(r0, MemOperand(sp, kStringOffset)); STATIC_ASSERT(kSmiTag == 0); - __ JumpIfSmi(r0, &runtime); - Condition is_string = masm->IsObjectStringType(r0, r1); + // Do a JumpIfSmi, but fold its jump into the subsequent string test. + __ tst(r0, Operand(kSmiTagMask)); + Condition is_string = masm->IsObjectStringType(r0, r1, ne); + ASSERT(is_string == eq); __ b(NegateCondition(is_string), &runtime); // Short-cut for the case of trivial substring. @@ -6093,7 +6052,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { // string's encoding is wrong because we always have to recheck encoding of // the newly created string's parent anyways due to externalized strings. 
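The substring bounds check above swaps the usual ASR #1 un-smi-tagging for ROR #1: for a non-negative, smi-tagged index the two shifts give the same value, but a set tag bit is rotated into bit 31 and into the carry, so one conditional path rejects non-smi inputs while the sign of the following subtraction still catches from > to. A short C++ sketch of what the rotate exposes:

    #include <cstdint>

    // ROR #1 as used on the "from"/"to" values above.
    uint32_t RotateRight1(uint32_t x) {
      return (x >> 1) | (x << 31);
    }

    // For a non-negative tagged smi (bit 0 clear) this equals the shift that
    // un-smi-tags it; for a non-smi (bit 0 set) the tag bit lands in bit 31,
    // so the rotated value reads as negative and the fast path bails out.
    bool TagBitVisibleInSignBit(uint32_t tagged) {
      return (RotateRight1(tagged) & 0x80000000u) != 0;
    }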
Label two_byte_slice, set_slice_header; - STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); + STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); __ tst(r1, Operand(kStringEncodingMask)); __ b(eq, &two_byte_slice); @@ -6131,12 +6090,12 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ bind(&sequential_string); // Locate first character of underlying subject string. - STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); - __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); + __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); __ bind(&allocate_result); // Sequential acii string. Allocate the result. - STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0); + STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); __ tst(r1, Operand(kStringEncodingMask)); __ b(eq, &two_byte_sequential); @@ -6146,13 +6105,13 @@ void SubStringStub::Generate(MacroAssembler* masm) { // Locate first character of substring to copy. __ add(r5, r5, r3); // Locate first character of result. - __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); // r0: result string // r1: first character of result string // r2: result string length // r5: first character of substring to copy - STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); + STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, COPY_ASCII | DEST_ALWAYS_ALIGNED); __ jmp(&return_r0); @@ -6277,7 +6236,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop( // doesn't need an additional compare. __ SmiUntag(length); __ add(scratch1, length, - Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); __ add(left, left, Operand(scratch1)); __ add(right, right, Operand(scratch1)); __ rsb(length, length, Operand::Zero()); @@ -6430,8 +6389,8 @@ void StringAddStub::Generate(MacroAssembler* masm) { &call_runtime); // Get the two characters forming the sub string. - __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); - __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize)); + __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize)); + __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize)); // Try to lookup two character string in symbol table. If it is not found // just allocate a new one. 
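The two-character lookup above relies on reading both characters with a single halfword load, which only works because a sequential one-byte string stores the first character in the low byte on a little-endian target, as the comment points out. A tiny C++ illustration of the packing the probe compares against:

    #include <cstdint>

    // How a little-endian halfword load sees the first two bytes of a
    // sequential one-byte string: first character in the low byte.
    uint16_t PackTwoChars(uint8_t c1, uint8_t c2) {
      return static_cast<uint16_t>(c1 | (c2 << 8));
    }

    // The symbol-table probe can then compare one loaded halfword against
    // the packed pair instead of comparing the characters one at a time.
    bool FirstTwoCharsMatch(uint16_t loaded_halfword, uint8_t c1, uint8_t c2) {
      return loaded_halfword == PackTwoChars(c1, c2);
    }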
@@ -6450,7 +6409,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // in a little endian mode) __ mov(r6, Operand(2)); __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); - __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); + __ strh(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize)); __ IncrementCounter(counters->string_add_native(), 1, r2, r3); __ add(sp, sp, Operand(2 * kPointerSize)); __ Ret(); @@ -6500,11 +6459,6 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ tst(r4, Operand(kAsciiDataHintMask)); __ tst(r5, Operand(kAsciiDataHintMask), ne); __ b(ne, &ascii_data); - __ eor(r4, r4, Operand(r5)); - STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); - __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); - __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); - __ b(eq, &ascii_data); // Allocate a two byte cons string. __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime); @@ -6537,10 +6491,10 @@ void StringAddStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(kSeqStringTag == 0); __ tst(r4, Operand(kStringRepresentationMask)); - STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); + STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); __ add(r7, r0, - Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag), + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), LeaveCC, eq); __ b(eq, &first_prepared); @@ -6553,10 +6507,10 @@ void StringAddStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(kSeqStringTag == 0); __ tst(r5, Operand(kStringRepresentationMask)); - STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); + STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); __ add(r1, r1, - Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag), + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), LeaveCC, eq); __ b(eq, &second_prepared); @@ -6579,7 +6533,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ b(eq, &non_ascii_string_add_flat_result); __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); - __ add(r6, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); // r0: result string. // r7: first character of first string. // r1: first character of second string. @@ -6670,7 +6624,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, void ICCompareStub::GenerateSmis(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::SMIS); + ASSERT(state_ == CompareIC::SMI); Label miss; __ orr(r2, r1, r0); __ JumpIfNotSmi(r2, &miss); @@ -6691,31 +6645,53 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) { void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::HEAP_NUMBERS); + ASSERT(state_ == CompareIC::HEAP_NUMBER); Label generic_stub; Label unordered, maybe_undefined1, maybe_undefined2; Label miss; - __ and_(r2, r1, Operand(r0)); - __ JumpIfSmi(r2, &generic_stub); - __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE); - __ b(ne, &maybe_undefined1); - __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE); - __ b(ne, &maybe_undefined2); + if (left_ == CompareIC::SMI) { + __ JumpIfNotSmi(r1, &miss); + } + if (right_ == CompareIC::SMI) { + __ JumpIfNotSmi(r0, &miss); + } // Inlining the double comparison and falling back to the general compare - // stub if NaN is involved or VFP3 is unsupported. + // stub if NaN is involved or VFP2 is unsupported. 
if (CpuFeatures::IsSupported(VFP2)) { CpuFeatures::Scope scope(VFP2); - // Load left and right operand - __ sub(r2, r1, Operand(kHeapObjectTag)); - __ vldr(d0, r2, HeapNumber::kValueOffset); + // Load left and right operand. + Label done, left, left_smi, right_smi; + __ JumpIfSmi(r0, &right_smi); + __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, + DONT_DO_SMI_CHECK); __ sub(r2, r0, Operand(kHeapObjectTag)); __ vldr(d1, r2, HeapNumber::kValueOffset); + __ b(&left); + __ bind(&right_smi); + __ SmiUntag(r2, r0); // Can't clobber r0 yet. + SwVfpRegister single_scratch = d2.low(); + __ vmov(single_scratch, r2); + __ vcvt_f64_s32(d1, single_scratch); + + __ bind(&left); + __ JumpIfSmi(r1, &left_smi); + __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, + DONT_DO_SMI_CHECK); + __ sub(r2, r1, Operand(kHeapObjectTag)); + __ vldr(d0, r2, HeapNumber::kValueOffset); + __ b(&done); + __ bind(&left_smi); + __ SmiUntag(r2, r1); // Can't clobber r1 yet. + single_scratch = d3.low(); + __ vmov(single_scratch, r2); + __ vcvt_f64_s32(d0, single_scratch); - // Compare operands + __ bind(&done); + // Compare operands. __ VFPCompareAndSetFlags(d0, d1); // Don't base result on status bits when a NaN is involved. @@ -6729,14 +6705,16 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { } __ bind(&unordered); - CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0); __ bind(&generic_stub); + ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, + CompareIC::GENERIC); __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); __ bind(&maybe_undefined1); if (Token::IsOrderedRelationalCompareOp(op_)) { __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); __ b(ne, &miss); + __ JumpIfSmi(r1, &unordered); __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE); __ b(ne, &maybe_undefined2); __ jmp(&unordered); @@ -6754,7 +6732,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::SYMBOLS); + ASSERT(state_ == CompareIC::SYMBOL); Label miss; // Registers containing left and right operands respectively. @@ -6792,7 +6770,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { void ICCompareStub::GenerateStrings(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::STRINGS); + ASSERT(state_ == CompareIC::STRING); Label miss; bool equality = Token::IsEqualityOp(op_); @@ -6870,7 +6848,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { void ICCompareStub::GenerateObjects(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::OBJECTS); + ASSERT(state_ == CompareIC::OBJECT); Label miss; __ and_(r2, r1, Operand(r0)); __ JumpIfSmi(r2, &miss); @@ -7064,8 +7042,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, ASSERT(!name.is(scratch1)); ASSERT(!name.is(scratch2)); - // Assert that name contains a string. - if (FLAG_debug_code) __ AbortIfNotString(name); + __ AssertString(name); // Compute the capacity mask. 
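GenerateHeapNumbers now tolerates a smi on either side: each operand is either untagged and converted with vcvt, or map-checked as a heap number and its stored double loaded, before the single VFP compare. A hedged C++ model of that per-operand load (the tagged-value struct is invented for the sketch; a failed check corresponds to the miss/maybe_undefined paths):

    #include <cstdint>

    struct TaggedValue {              // illustrative layout only
      bool is_smi;
      int32_t smi_value;              // valid when is_smi
      bool is_heap_number;            // the stub checks the heap-number map
      double heap_number_value;       // valid when is_heap_number
    };

    bool LoadAsDouble(const TaggedValue& v, double* out) {
      if (v.is_smi) {                 // SmiUntag + vcvt_f64_s32
        *out = static_cast<double>(v.smi_value);
        return true;
      }
      if (v.is_heap_number) {         // vldr from HeapNumber::kValueOffset
        *out = v.heap_number_value;
        return true;
      }
      return false;                   // would jump to miss / maybe_undefined
    }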
__ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset)); @@ -7262,6 +7239,7 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { #undef REG + bool RecordWriteStub::IsPregenerated() { for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; !entry->object.is(no_reg); @@ -7303,6 +7281,11 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() { } +bool CodeStub::CanUseFPRegisters() { + return CpuFeatures::IsSupported(VFP2); +} + + // Takes the input in 3 registers: address_ value_ and object_. A pointer to // the value has just been written into the object, now this stub makes sure // we keep the GC informed. The word in the object where the value has been @@ -7398,12 +7381,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { ASSERT(!address.is(r0)); __ Move(address, regs_.address()); __ Move(r0, regs_.object()); - if (mode == INCREMENTAL_COMPACTION) { - __ Move(r1, address); - } else { - ASSERT(mode == INCREMENTAL); - __ ldr(r1, MemOperand(address, 0)); - } + __ Move(r1, address); __ mov(r2, Operand(ExternalReference::isolate_address())); AllowExternalCallThatCantCauseGC scope(masm); @@ -7431,6 +7409,16 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker( Label need_incremental; Label need_incremental_pop_scratch; + __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask)); + __ ldr(regs_.scratch1(), + MemOperand(regs_.scratch0(), + MemoryChunk::kWriteBarrierCounterOffset)); + __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC); + __ str(regs_.scratch1(), + MemOperand(regs_.scratch0(), + MemoryChunk::kWriteBarrierCounterOffset)); + __ b(mi, &need_incremental); + // Let's look at the color of the object: If it is not black we don't have // to inform the incremental marker. __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); @@ -7551,7 +7539,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS. __ bind(&double_elements); __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); - __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r2, + __ StoreNumberToDoubleElements(r0, r3, + // Overwrites all regs after this. + r5, r6, r7, r9, r2, &slow_elements); __ Ret(); } @@ -7559,6 +7549,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { if (entry_hook_ != NULL) { + PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize); ProfileEntryHookStub stub; __ push(lr); __ CallStub(&stub); @@ -7570,7 +7561,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { void ProfileEntryHookStub::Generate(MacroAssembler* masm) { // The entry hook is a "push lr" instruction, followed by a call. const int32_t kReturnAddressDistanceFromFunctionStart = - Assembler::kCallTargetAddressOffset + Assembler::kInstrSize; + 3 * Assembler::kInstrSize; // Save live volatile registers. 
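The new fast-out in the record-write stub above masks the object's address down to its page header (pages are power-of-two aligned) and decrements a per-page write-barrier counter, only taking the incremental-marking path once the counter goes negative. A sketch of the address arithmetic, with an assumed page size standing in for Page::kPageAlignmentMask:

    #include <cstdint>

    // Assumed 1 MB pages for the sketch; the real constant comes from Page.
    const uintptr_t kPageAlignmentMask = (uintptr_t{1} << 20) - 1;

    // Clearing the low address bits of any heap object yields the start of
    // its page, where per-page bookkeeping such as the write-barrier counter
    // (MemoryChunk::kWriteBarrierCounterOffset above) is kept.
    uintptr_t PageStart(uintptr_t object_address) {
      return object_address & ~kPageAlignmentMask;
    }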
__ Push(lr, r5, r1); diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h index 3ddc405715..0443cf799c 100644 --- a/deps/v8/src/arm/code-stubs-arm.h +++ b/deps/v8/src/arm/code-stubs-arm.h @@ -142,108 +142,6 @@ class UnaryOpStub: public CodeStub { }; -class BinaryOpStub: public CodeStub { - public: - BinaryOpStub(Token::Value op, OverwriteMode mode) - : op_(op), - mode_(mode), - operands_type_(BinaryOpIC::UNINITIALIZED), - result_type_(BinaryOpIC::UNINITIALIZED) { - use_vfp2_ = CpuFeatures::IsSupported(VFP2); - ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); - } - - BinaryOpStub( - int key, - BinaryOpIC::TypeInfo operands_type, - BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED) - : op_(OpBits::decode(key)), - mode_(ModeBits::decode(key)), - use_vfp2_(VFP2Bits::decode(key)), - operands_type_(operands_type), - result_type_(result_type) { } - - private: - enum SmiCodeGenerateHeapNumberResults { - ALLOW_HEAPNUMBER_RESULTS, - NO_HEAPNUMBER_RESULTS - }; - - Token::Value op_; - OverwriteMode mode_; - bool use_vfp2_; - - // Operand type information determined at runtime. - BinaryOpIC::TypeInfo operands_type_; - BinaryOpIC::TypeInfo result_type_; - - virtual void PrintName(StringStream* stream); - - // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM. - class ModeBits: public BitField {}; - class OpBits: public BitField {}; - class VFP2Bits: public BitField {}; - class OperandTypeInfoBits: public BitField {}; - class ResultTypeInfoBits: public BitField {}; - - Major MajorKey() { return BinaryOp; } - int MinorKey() { - return OpBits::encode(op_) - | ModeBits::encode(mode_) - | VFP2Bits::encode(use_vfp2_) - | OperandTypeInfoBits::encode(operands_type_) - | ResultTypeInfoBits::encode(result_type_); - } - - void Generate(MacroAssembler* masm); - void GenerateGeneric(MacroAssembler* masm); - void GenerateSmiSmiOperation(MacroAssembler* masm); - void GenerateFPOperation(MacroAssembler* masm, - bool smi_operands, - Label* not_numbers, - Label* gc_required); - void GenerateSmiCode(MacroAssembler* masm, - Label* use_runtime, - Label* gc_required, - SmiCodeGenerateHeapNumberResults heapnumber_results); - void GenerateLoadArguments(MacroAssembler* masm); - void GenerateReturn(MacroAssembler* masm); - void GenerateUninitializedStub(MacroAssembler* masm); - void GenerateSmiStub(MacroAssembler* masm); - void GenerateInt32Stub(MacroAssembler* masm); - void GenerateHeapNumberStub(MacroAssembler* masm); - void GenerateOddballStub(MacroAssembler* masm); - void GenerateStringStub(MacroAssembler* masm); - void GenerateBothStringStub(MacroAssembler* masm); - void GenerateGenericStub(MacroAssembler* masm); - void GenerateAddStrings(MacroAssembler* masm); - void GenerateCallRuntime(MacroAssembler* masm); - - void GenerateHeapResultAllocation(MacroAssembler* masm, - Register result, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* gc_required); - void GenerateRegisterArgsPush(MacroAssembler* masm); - void GenerateTypeTransition(MacroAssembler* masm); - void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm); - - virtual int GetCodeKind() { return Code::BINARY_OP_IC; } - - virtual InlineCacheState GetICState() { - return BinaryOpIC::ToState(operands_type_); - } - - virtual void FinishCode(Handle code) { - code->set_binary_op_type(operands_type_); - code->set_binary_op_result_type(result_type_); - } - - friend class CodeGenerator; -}; - - class StringHelper : public AllStatic { public: // Generate code for copying characters using a simple loop. 
This should only @@ -724,20 +622,6 @@ class FloatingPointHelper : public AllStatic { Register scratch1, Register scratch2); - // Loads objects from r0 and r1 (right and left in binary operations) into - // floating point registers. Depending on the destination the values ends up - // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is - // floating point registers VFP3 must be supported. If core registers are - // requested when VFP3 is supported d6 and d7 will still be scratched. If - // either r0 or r1 is not a number (not smi and not heap number object) the - // not_number label is jumped to with r0 and r1 intact. - static void LoadOperands(MacroAssembler* masm, - FloatingPointHelper::Destination destination, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* not_number); - // Convert the smi or heap number in object to an int32 using the rules // for ToInt32 as described in ECMAScript 9.5.: the value is truncated // and brought into the range -2^31 .. +2^31 - 1. @@ -773,6 +657,7 @@ class FloatingPointHelper : public AllStatic { Register object, Destination destination, DwVfpRegister double_dst, + DwVfpRegister double_scratch, Register dst1, Register dst2, Register heap_number_map, @@ -794,7 +679,8 @@ class FloatingPointHelper : public AllStatic { Register scratch1, Register scratch2, Register scratch3, - DwVfpRegister double_scratch, + DwVfpRegister double_scratch0, + DwVfpRegister double_scratch1, Label* not_int32); // Generate non VFP3 code to check if a double can be exactly represented by a @@ -834,7 +720,12 @@ class FloatingPointHelper : public AllStatic { Register heap_number_result, Register scratch); - private: + // Loads the objects from |object| into floating point registers. + // Depending on |destination| the value ends up either in |dst| or + // in |dst1|/|dst2|. If |destination| is kVFPRegisters, then VFP3 + // must be supported. If kCoreRegisters are requested and VFP3 is + // supported, |dst| will be scratched. If |object| is neither smi nor + // heap number, |not_number| is jumped to with |object| still intact. static void LoadNumber(MacroAssembler* masm, FloatingPointHelper::Destination destination, Register object, diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 09166c3c01..bb771b18e2 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -31,11 +31,11 @@ #include "codegen.h" #include "macro-assembler.h" +#include "simulator-arm.h" namespace v8 { namespace internal { -#define __ ACCESS_MASM(masm) UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) { switch (type) { @@ -49,6 +49,74 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) { } +#define __ masm. 
+ + +#if defined(USE_SIMULATOR) +byte* fast_exp_arm_machine_code = NULL; +double fast_exp_simulator(double x) { + return Simulator::current(Isolate::Current())->CallFP( + fast_exp_arm_machine_code, x, 0); +} +#endif + + +UnaryMathFunction CreateExpFunction() { + if (!CpuFeatures::IsSupported(VFP2)) return &exp; + if (!FLAG_fast_math) return &exp; + size_t actual_size; + byte* buffer = static_cast(OS::Allocate(1 * KB, &actual_size, true)); + if (buffer == NULL) return &exp; + ExternalReference::InitializeMathExpData(); + + MacroAssembler masm(NULL, buffer, static_cast(actual_size)); + + { + CpuFeatures::Scope use_vfp(VFP2); + DoubleRegister input = d0; + DoubleRegister result = d1; + DoubleRegister double_scratch1 = d2; + DoubleRegister double_scratch2 = d3; + Register temp1 = r4; + Register temp2 = r5; + Register temp3 = r6; + + if (masm.use_eabi_hardfloat()) { + // Input value is in d0 anyway, nothing to do. + } else { + __ vmov(input, r0, r1); + } + __ Push(temp3, temp2, temp1); + MathExpGenerator::EmitMathExp( + &masm, input, result, double_scratch1, double_scratch2, + temp1, temp2, temp3); + __ Pop(temp3, temp2, temp1); + if (masm.use_eabi_hardfloat()) { + __ vmov(d0, result); + } else { + __ vmov(r0, r1, result); + } + __ Ret(); + } + + CodeDesc desc; + masm.GetCode(&desc); + + CPU::FlushICache(buffer, actual_size); + OS::ProtectCode(buffer, actual_size); + +#if !defined(USE_SIMULATOR) + return FUNCTION_CAST(buffer); +#else + fast_exp_arm_machine_code = buffer; + return &fast_exp_simulator; +#endif +} + + +#undef __ + + UnaryMathFunction CreateSqrtFunction() { return &sqrt; } @@ -73,6 +141,8 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { // ------------------------------------------------------------------------- // Code generators +#define __ ACCESS_MASM(masm) + void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( MacroAssembler* masm) { // ----------- S t a t e ------------- @@ -192,7 +262,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( HeapObject::kMapOffset, r3, r9, - kLRHasBeenSaved, + kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); @@ -416,7 +486,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, __ b(ne, &external_string); // Prepare sequential strings - STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); __ add(string, string, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); @@ -450,8 +520,188 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, __ bind(&done); } + +void SeqStringSetCharGenerator::Generate(MacroAssembler* masm, + String::Encoding encoding, + Register string, + Register index, + Register value) { + if (FLAG_debug_code) { + __ tst(index, Operand(kSmiTagMask)); + __ Check(eq, "Non-smi index"); + __ tst(value, Operand(kSmiTagMask)); + __ Check(eq, "Non-smi value"); + + __ ldr(ip, FieldMemOperand(string, String::kLengthOffset)); + __ cmp(index, ip); + __ Check(lt, "Index is too large"); + + __ cmp(index, Operand(Smi::FromInt(0))); + __ Check(ge, "Index is negative"); + + __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset)); + __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset)); + + __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask)); + static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; + static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; + __ cmp(ip, 
Operand(encoding == String::ONE_BYTE_ENCODING + ? one_byte_seq_type : two_byte_seq_type)); + __ Check(eq, "Unexpected string type"); + } + + __ add(ip, + string, + Operand(SeqString::kHeaderSize - kHeapObjectTag)); + __ SmiUntag(value, value); + STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); + if (encoding == String::ONE_BYTE_ENCODING) { + // Smis are tagged by left shift by 1, thus LSR by 1 to smi-untag inline. + __ strb(value, MemOperand(ip, index, LSR, 1)); + } else { + // No need to untag a smi for two-byte addressing. + __ strh(value, MemOperand(ip, index)); + } +} + + +static MemOperand ExpConstant(int index, Register base) { + return MemOperand(base, index * kDoubleSize); +} + + +void MathExpGenerator::EmitMathExp(MacroAssembler* masm, + DoubleRegister input, + DoubleRegister result, + DoubleRegister double_scratch1, + DoubleRegister double_scratch2, + Register temp1, + Register temp2, + Register temp3) { + ASSERT(!input.is(result)); + ASSERT(!input.is(double_scratch1)); + ASSERT(!input.is(double_scratch2)); + ASSERT(!result.is(double_scratch1)); + ASSERT(!result.is(double_scratch2)); + ASSERT(!double_scratch1.is(double_scratch2)); + ASSERT(!temp1.is(temp2)); + ASSERT(!temp1.is(temp3)); + ASSERT(!temp2.is(temp3)); + ASSERT(ExternalReference::math_exp_constants(0).address() != NULL); + + Label done; + + __ mov(temp3, Operand(ExternalReference::math_exp_constants(0))); + + __ vldr(double_scratch1, ExpConstant(0, temp3)); + __ vmov(result, kDoubleRegZero); + __ VFPCompareAndSetFlags(double_scratch1, input); + __ b(ge, &done); + __ vldr(double_scratch2, ExpConstant(1, temp3)); + __ VFPCompareAndSetFlags(input, double_scratch2); + __ vldr(result, ExpConstant(2, temp3)); + __ b(ge, &done); + __ vldr(double_scratch1, ExpConstant(3, temp3)); + __ vldr(result, ExpConstant(4, temp3)); + __ vmul(double_scratch1, double_scratch1, input); + __ vadd(double_scratch1, double_scratch1, result); + __ vmov(temp2, temp1, double_scratch1); + __ vsub(double_scratch1, double_scratch1, result); + __ vldr(result, ExpConstant(6, temp3)); + __ vldr(double_scratch2, ExpConstant(5, temp3)); + __ vmul(double_scratch1, double_scratch1, double_scratch2); + __ vsub(double_scratch1, double_scratch1, input); + __ vsub(result, result, double_scratch1); + __ vmul(input, double_scratch1, double_scratch1); + __ vmul(result, result, input); + __ mov(temp1, Operand(temp2, LSR, 11)); + __ vldr(double_scratch2, ExpConstant(7, temp3)); + __ vmul(result, result, double_scratch2); + __ vsub(result, result, double_scratch1); + __ vldr(double_scratch2, ExpConstant(8, temp3)); + __ vadd(result, result, double_scratch2); + __ movw(ip, 0x7ff); + __ and_(temp2, temp2, Operand(ip)); + __ add(temp1, temp1, Operand(0x3ff)); + __ mov(temp1, Operand(temp1, LSL, 20)); + + // Must not call ExpConstant() after overwriting temp3! 
+ __ mov(temp3, Operand(ExternalReference::math_exp_log_table())); + __ ldr(ip, MemOperand(temp3, temp2, LSL, 3)); + __ add(temp3, temp3, Operand(kPointerSize)); + __ ldr(temp2, MemOperand(temp3, temp2, LSL, 3)); + __ orr(temp1, temp1, temp2); + __ vmov(input, ip, temp1); + __ vmul(result, result, input); + __ bind(&done); +} + #undef __ +// add(r0, pc, Operand(-8)) +static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008; + +static byte* GetNoCodeAgeSequence(uint32_t* length) { + // The sequence of instructions that is patched out for aging code is the + // following boilerplate stack-building prologue that is found in FUNCTIONS + static bool initialized = false; + static uint32_t sequence[kNoCodeAgeSequenceLength]; + byte* byte_sequence = reinterpret_cast(sequence); + *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize; + if (!initialized) { + CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength); + PredictableCodeSizeScope scope(patcher.masm(), *length); + patcher.masm()->stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); + patcher.masm()->LoadRoot(ip, Heap::kUndefinedValueRootIndex); + patcher.masm()->add(fp, sp, Operand(2 * kPointerSize)); + initialized = true; + } + return byte_sequence; +} + + +bool Code::IsYoungSequence(byte* sequence) { + uint32_t young_length; + byte* young_sequence = GetNoCodeAgeSequence(&young_length); + bool result = !memcmp(sequence, young_sequence, young_length); + ASSERT(result || + Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction); + return result; +} + + +void Code::GetCodeAgeAndParity(byte* sequence, Age* age, + MarkingParity* parity) { + if (IsYoungSequence(sequence)) { + *age = kNoAge; + *parity = NO_MARKING_PARITY; + } else { + Address target_address = Memory::Address_at( + sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1)); + Code* stub = GetCodeFromTargetAddress(target_address); + GetCodeAgeAndParity(stub, age, parity); + } +} + + +void Code::PatchPlatformCodeAge(byte* sequence, + Code::Age age, + MarkingParity parity) { + uint32_t young_length; + byte* young_sequence = GetNoCodeAgeSequence(&young_length); + if (age == kNoAge) { + memcpy(sequence, young_sequence, young_length); + CPU::FlushICache(sequence, young_length); + } else { + Code* stub = GetCodeAgeStub(age, parity); + CodePatcher patcher(sequence, young_length / Assembler::kInstrSize); + patcher.masm()->add(r0, pc, Operand(-8)); + patcher.masm()->ldr(pc, MemOperand(pc, -4)); + patcher.masm()->dd(reinterpret_cast(stub->instruction_start())); + } +} + + } } // namespace v8::internal #endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index c340e6b108..8f0033e2ce 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -88,6 +88,22 @@ class StringCharLoadGenerator : public AllStatic { DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator); }; + +class MathExpGenerator : public AllStatic { + public: + static void EmitMathExp(MacroAssembler* masm, + DoubleRegister input, + DoubleRegister result, + DoubleRegister double_scratch1, + DoubleRegister double_scratch2, + Register temp1, + Register temp2, + Register temp3); + + private: + DISALLOW_COPY_AND_ASSIGN(MathExpGenerator); +}; + } } // namespace v8::internal #endif // V8_ARM_CODEGEN_ARM_H_ diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h index 5aadc3caeb..a569383f24 100644 --- a/deps/v8/src/arm/constants-arm.h +++ b/deps/v8/src/arm/constants-arm.h @@ -75,10 +75,6 @@ #endif -#if 
CAN_USE_UNALIGNED_ACCESSES -#define V8_TARGET_CAN_READ_UNALIGNED 1 -#endif - // Using blx may yield better code, so use it when required or when available #if defined(USE_THUMB_INTERWORK) || defined(CAN_USE_ARMV5_INSTRUCTIONS) #define USE_BLX 1 @@ -88,9 +84,18 @@ namespace v8 { namespace internal { // Constant pool marker. -const int kConstantPoolMarkerMask = 0xffe00000; -const int kConstantPoolMarker = 0x0c000000; -const int kConstantPoolLengthMask = 0x001ffff; +// Use UDF, the permanently undefined instruction. +const int kConstantPoolMarkerMask = 0xfff000f0; +const int kConstantPoolMarker = 0xe7f000f0; +const int kConstantPoolLengthMaxMask = 0xffff; +inline int EncodeConstantPoolLength(int length) { + ASSERT((length & kConstantPoolLengthMaxMask) == length); + return ((length & 0xfff0) << 4) | (length & 0xf); +} +inline int DecodeConstantPoolLength(int instr) { + ASSERT((instr & kConstantPoolMarkerMask) == kConstantPoolMarker); + return ((instr >> 4) & 0xfff0) | (instr & 0xf); +} // Number of registers in normal ARM mode. const int kNumRegisters = 16; @@ -691,6 +696,9 @@ class Instruction { && (Bit(20) == 0) && ((Bit(7) == 0)); } + // Test for a nop instruction, which falls under type 1. + inline bool IsNopType1() const { return Bits(24, 0) == 0x0120F000; } + // Test for a stop instruction. inline bool IsStop() const { return (TypeValue() == 7) && (Bit(24) == 1) && (SvcValue() >= kStopCode); diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc index 3e7a1e9d0e..c2941be06d 100644 --- a/deps/v8/src/arm/debug-arm.cc +++ b/deps/v8/src/arm/debug-arm.cc @@ -48,7 +48,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() { // add sp, sp, #4 // bx lr // to a call to the debug break return code. - // #if USE_BLX + // #ifdef USE_BLX // ldr ip, [pc, #0] // blx ip // #else @@ -99,7 +99,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() { // mov r2, r2 // mov r2, r2 // to a call to the debug break slot code. - // #if USE_BLX + // #ifdef USE_BLX // ldr ip, [pc, #0] // blx ip // #else diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index 5339be1d84..ee2a581a57 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -104,19 +104,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { // ignore all slots that might have been recorded on it. isolate->heap()->mark_compact_collector()->InvalidateCode(code); - // Iterate over all the functions which share the same code object - // and make them use unoptimized version. - Context* context = function->context()->native_context(); - Object* element = context->get(Context::OPTIMIZED_FUNCTIONS_LIST); - SharedFunctionInfo* shared = function->shared(); - while (!element->IsUndefined()) { - JSFunction* func = JSFunction::cast(element); - // Grab element before code replacement as ReplaceCode alters the list. 
- element = func->next_function_link(); - if (func->code() == code) { - func->ReplaceCode(shared->code()); - } - } + ReplaceCodeForRelatedFunctions(function, code); if (FLAG_trace_deopt) { PrintF("[forced deoptimization: "); @@ -126,7 +114,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { } -static const int32_t kBranchBeforeStackCheck = 0x2a000001; static const int32_t kBranchBeforeInterrupt = 0x5a000004; @@ -135,24 +122,21 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, Code* check_code, Code* replacement_code) { const int kInstrSize = Assembler::kInstrSize; - // The call of the stack guard check has the following form: - // e1 5d 00 0c cmp sp, - // 2a 00 00 01 bcs ok + // The back edge bookkeeping code matches the pattern: + // + // + // 2a 00 00 01 bpl ok // e5 9f c? ?? ldr ip, [pc, ] // e1 2f ff 3c blx ip ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp); ASSERT(Assembler::IsLdrPcImmediateOffset( Assembler::instr_at(pc_after - 2 * kInstrSize))); - if (FLAG_count_based_interrupts) { - ASSERT_EQ(kBranchBeforeInterrupt, - Memory::int32_at(pc_after - 3 * kInstrSize)); - } else { - ASSERT_EQ(kBranchBeforeStackCheck, - Memory::int32_at(pc_after - 3 * kInstrSize)); - } + ASSERT_EQ(kBranchBeforeInterrupt, + Memory::int32_at(pc_after - 3 * kInstrSize)); // We patch the code to the following form: - // e1 5d 00 0c cmp sp, + // + // // e1 a0 00 00 mov r0, r0 (NOP) // e5 9f c? ?? ldr ip, [pc, ] // e1 2f ff 3c blx ip @@ -189,15 +173,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code, // Replace NOP with conditional jump. CodePatcher patcher(pc_after - 3 * kInstrSize, 1); - if (FLAG_count_based_interrupts) { - patcher.masm()->b(+16, pl); - ASSERT_EQ(kBranchBeforeInterrupt, - Memory::int32_at(pc_after - 3 * kInstrSize)); - } else { - patcher.masm()->b(+4, cs); - ASSERT_EQ(kBranchBeforeStackCheck, - Memory::int32_at(pc_after - 3 * kInstrSize)); - } + patcher.masm()->b(+16, pl); + ASSERT_EQ(kBranchBeforeInterrupt, + Memory::int32_at(pc_after - 3 * kInstrSize)); // Replace the stack check address in the constant pool // with the entry address of the replacement code. diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc index 96a7d3ce6b..cb0a6cb5c7 100644 --- a/deps/v8/src/arm/disasm-arm.cc +++ b/deps/v8/src/arm/disasm-arm.cc @@ -692,11 +692,19 @@ void Decoder::DecodeType01(Instruction* instr) { // Rn field to encode it. Format(instr, "mul'cond's 'rn, 'rm, 'rs"); } else { - // The MLA instruction description (A 4.1.28) refers to the order - // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the - // Rn field to encode the Rd register and the Rd field to encode - // the Rn register. - Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd"); + if (instr->Bit(22) == 0) { + // The MLA instruction description (A 4.1.28) refers to the order + // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the + // Rn field to encode the Rd register and the Rd field to encode + // the Rn register. + Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd"); + } else { + // The MLS instruction description (A 4.1.29) refers to the order + // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the + // Rn field to encode the Rd register and the Rd field to encode + // the Rn register. 
+ Format(instr, "mls'cond's 'rn, 'rm, 'rs, 'rd"); + } } } else { // The signed/long multiply instructions use the terms RdHi and RdLo @@ -822,6 +830,8 @@ void Decoder::DecodeType01(Instruction* instr) { } else { Unknown(instr); // not used by V8 } + } else if ((type == 1) && instr->IsNopType1()) { + Format(instr, "nop'cond"); } else { switch (instr->OpcodeField()) { case AND: { @@ -974,6 +984,17 @@ void Decoder::DecodeType3(Instruction* instr) { break; } case db_x: { + if (FLAG_enable_sudiv) { + if (!instr->HasW()) { + if (instr->Bits(5, 4) == 0x1) { + if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) { + // SDIV (in V8 notation matching ARM ISA format) rn = rm/rs + Format(instr, "sdiv'cond'b 'rn, 'rm, 'rs"); + break; + } + } + } + } Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w"); break; } @@ -1077,6 +1098,7 @@ int Decoder::DecodeType7(Instruction* instr) { // Dd = vadd(Dn, Dm) // Dd = vsub(Dn, Dm) // Dd = vmul(Dn, Dm) +// Dd = vmla(Dn, Dm) // Dd = vdiv(Dn, Dm) // vcmp(Dd, Dm) // vmrs @@ -1139,6 +1161,12 @@ void Decoder::DecodeTypeVFP(Instruction* instr) { } else { Unknown(instr); // Not used by V8. } + } else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) { + if (instr->SzValue() == 0x1) { + Format(instr, "vmla.f64'cond 'Dd, 'Dn, 'Dm"); + } else { + Unknown(instr); // Not used by V8. + } } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) { if (instr->SzValue() == 0x1) { Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm"); @@ -1367,7 +1395,7 @@ bool Decoder::IsConstantPoolAt(byte* instr_ptr) { int Decoder::ConstantPoolSizeAt(byte* instr_ptr) { if (IsConstantPoolAt(instr_ptr)) { int instruction_bits = *(reinterpret_cast(instr_ptr)); - return instruction_bits & kConstantPoolLengthMask; + return DecodeConstantPoolLength(instruction_bits); } else { return -1; } @@ -1389,8 +1417,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) { if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) { out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "constant pool begin (length %d)", - instruction_bits & - kConstantPoolLengthMask); + DecodeConstantPoolLength(instruction_bits)); return Instruction::kInstrSize; } switch (instr->TypeValue()) { diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index b2f629b26c..3b560fedfa 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -130,7 +130,7 @@ void FullCodeGenerator::Generate() { handler_table_ = isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED); profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell( - Handle(Smi::FromInt(FLAG_interrupt_budget))); + Handle(Smi::FromInt(FLAG_interrupt_budget), isolate())); SetFunctionPosition(function()); Comment cmnt(masm_, "[ function compiled by full code generator"); @@ -164,14 +164,19 @@ void FullCodeGenerator::Generate() { int locals_count = info->scope()->num_stack_slots(); - __ Push(lr, fp, cp, r1); - if (locals_count > 0) { + info->set_prologue_offset(masm_->pc_offset()); + { + PredictableCodeSizeScope predictible_code_size_scope( + masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize); + // The following three instructions must remain together and unmodified + // for code aging to work properly. + __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); // Load undefined value here, so the value is ready for the loop // below. __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + // Adjust FP to point to saved FP. 
+ __ add(fp, sp, Operand(2 * kPointerSize)); } - // Adjust fp to point to caller's fp. - __ add(fp, sp, Operand(2 * kPointerSize)); { Comment cmnt(masm_, "[ Allocate locals"); for (int i = 0; i < locals_count; i++) { @@ -287,6 +292,7 @@ void FullCodeGenerator::Generate() { __ LoadRoot(ip, Heap::kStackLimitRootIndex); __ cmp(sp, Operand(ip)); __ b(hs, &ok); + PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize); StackCheckStub stub; __ CallStub(&stub); __ bind(&ok); @@ -341,41 +347,31 @@ void FullCodeGenerator::EmitProfilingCounterReset() { } -void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt, - Label* back_edge_target) { - Comment cmnt(masm_, "[ Stack check"); +void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt, + Label* back_edge_target) { + Comment cmnt(masm_, "[ Back edge bookkeeping"); // Block literal pools whilst emitting stack check code. Assembler::BlockConstPoolScope block_const_pool(masm_); Label ok; - if (FLAG_count_based_interrupts) { - int weight = 1; - if (FLAG_weighted_back_edges) { - ASSERT(back_edge_target->is_bound()); - int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); - weight = Min(kMaxBackEdgeWeight, - Max(1, distance / kBackEdgeDistanceUnit)); - } - EmitProfilingCounterDecrement(weight); - __ b(pl, &ok); - InterruptStub stub; - __ CallStub(&stub); - } else { - __ LoadRoot(ip, Heap::kStackLimitRootIndex); - __ cmp(sp, Operand(ip)); - __ b(hs, &ok); - StackCheckStub stub; - __ CallStub(&stub); + int weight = 1; + if (FLAG_weighted_back_edges) { + ASSERT(back_edge_target->is_bound()); + int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); + weight = Min(kMaxBackEdgeWeight, + Max(1, distance / kBackEdgeDistanceUnit)); } + EmitProfilingCounterDecrement(weight); + __ b(pl, &ok); + InterruptStub stub; + __ CallStub(&stub); // Record a mapping of this PC offset to the OSR id. This is used to find // the AST id from the unoptimized code in order to use it as a key into // the deoptimization input data found in the optimized code. - RecordStackCheck(stmt->OsrEntryId()); + RecordBackEdge(stmt->OsrEntryId()); - if (FLAG_count_based_interrupts) { - EmitProfilingCounterReset(); - } + EmitProfilingCounterReset(); __ bind(&ok); PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); @@ -437,6 +433,8 @@ void FullCodeGenerator::EmitReturnSequence() { // tool from instrumenting as we rely on the code size here. int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize; CodeGenerator::RecordPositions(masm_, function()->end_position() - 1); + // TODO(svenpanne) The code below is sometimes 4 words, sometimes 5! 
+ PredictableCodeSizeScope predictable(masm_, -1); __ RecordJSReturn(); masm_->mov(sp, fp); masm_->ldm(ia_w, sp, fp.bit() | lr.bit()); @@ -911,34 +909,33 @@ void FullCodeGenerator::VisitFunctionDeclaration( void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) { - VariableProxy* proxy = declaration->proxy(); - Variable* variable = proxy->var(); - Handle instance = declaration->module()->interface()->Instance(); - ASSERT(!instance.is_null()); + Variable* variable = declaration->proxy()->var(); + ASSERT(variable->location() == Variable::CONTEXT); + ASSERT(variable->interface()->IsFrozen()); - switch (variable->location()) { - case Variable::UNALLOCATED: { - Comment cmnt(masm_, "[ ModuleDeclaration"); - globals_->Add(variable->name(), zone()); - globals_->Add(instance, zone()); - Visit(declaration->module()); - break; - } + Comment cmnt(masm_, "[ ModuleDeclaration"); + EmitDebugCheckDeclarationContext(variable); - case Variable::CONTEXT: { - Comment cmnt(masm_, "[ ModuleDeclaration"); - EmitDebugCheckDeclarationContext(variable); - __ mov(r1, Operand(instance)); - __ str(r1, ContextOperand(cp, variable->index())); - Visit(declaration->module()); - break; - } + // Load instance object. + __ LoadContext(r1, scope_->ContextChainLength(scope_->GlobalScope())); + __ ldr(r1, ContextOperand(r1, variable->interface()->Index())); + __ ldr(r1, ContextOperand(r1, Context::EXTENSION_INDEX)); - case Variable::PARAMETER: - case Variable::LOCAL: - case Variable::LOOKUP: - UNREACHABLE(); - } + // Assign it. + __ str(r1, ContextOperand(cp, variable->index())); + // We know that we have written a module, which is not a smi. + __ RecordWriteContextSlot(cp, + Context::SlotOffset(variable->index()), + r1, + r3, + kLRHasBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS); + + // Traverse into body. + Visit(declaration->module()); } @@ -981,6 +978,14 @@ void FullCodeGenerator::DeclareGlobals(Handle pairs) { } +void FullCodeGenerator::DeclareModules(Handle descriptions) { + // Call the runtime to declare the modules. + __ Push(descriptions); + __ CallRuntime(Runtime::kDeclareModules, 1); + // Return value is ignored. +} + + void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { Comment cmnt(masm_, "[ SwitchStatement"); Breakable nested_statement(this, stmt); @@ -1137,7 +1142,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ cmp(r1, Operand(Smi::FromInt(0))); __ b(eq, &no_descriptors); - __ LoadInstanceDescriptors(r0, r2, r4); + __ LoadInstanceDescriptors(r0, r2); __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheOffset)); __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset)); @@ -1235,7 +1240,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ add(r0, r0, Operand(Smi::FromInt(1))); __ push(r0); - EmitStackCheck(stmt, &loop); + EmitBackEdgeBookkeeping(stmt, &loop); __ b(&loop); // Remove the pointers stored on the stack. 
@@ -1388,9 +1393,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var, } else if (var->mode() == DYNAMIC_LOCAL) { Variable* local = var->local_if_not_shadowed(); __ ldr(r0, ContextSlotOperandCheckExtensions(local, slow)); - if (local->mode() == CONST || - local->mode() == CONST_HARMONY || - local->mode() == LET) { + if (local->mode() == LET || + local->mode() == CONST || + local->mode() == CONST_HARMONY) { __ CompareRoot(r0, Heap::kTheHoleValueRootIndex); if (local->mode() == CONST) { __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); @@ -2183,43 +2188,16 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) { ASSERT(prop != NULL); ASSERT(prop->key()->AsLiteral() != NULL); - // If the assignment starts a block of assignments to the same object, - // change to slow case to avoid the quadratic behavior of repeatedly - // adding fast properties. - if (expr->starts_initialization_block()) { - __ push(result_register()); - __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is now under value. - __ push(ip); - __ CallRuntime(Runtime::kToSlowProperties, 1); - __ pop(result_register()); - } - // Record source code position before IC call. SetSourcePosition(expr->position()); __ mov(r2, Operand(prop->key()->AsLiteral()->handle())); - // Load receiver to r1. Leave a copy in the stack if needed for turning the - // receiver into fast case. - if (expr->ends_initialization_block()) { - __ ldr(r1, MemOperand(sp)); - } else { - __ pop(r1); - } + __ pop(r1); Handle ic = is_classic_mode() ? isolate()->builtins()->StoreIC_Initialize() : isolate()->builtins()->StoreIC_Initialize_Strict(); CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId()); - // If the assignment ends an initialization block, revert to fast case. - if (expr->ends_initialization_block()) { - __ push(r0); // Result of assignment, saved even if not needed. - // Receiver is under the result value. - __ ldr(ip, MemOperand(sp, kPointerSize)); - __ push(ip); - __ CallRuntime(Runtime::kToFastProperties, 1); - __ pop(r0); - __ Drop(1); - } PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); context()->Plug(r0); } @@ -2228,44 +2206,16 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) { void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) { // Assignment to a property, using a keyed store IC. - // If the assignment starts a block of assignments to the same object, - // change to slow case to avoid the quadratic behavior of repeatedly - // adding fast properties. - if (expr->starts_initialization_block()) { - __ push(result_register()); - // Receiver is now under the key and value. - __ ldr(ip, MemOperand(sp, 2 * kPointerSize)); - __ push(ip); - __ CallRuntime(Runtime::kToSlowProperties, 1); - __ pop(result_register()); - } - // Record source code position before IC call. SetSourcePosition(expr->position()); __ pop(r1); // Key. - // Load receiver to r2. Leave a copy in the stack if needed for turning the - // receiver into fast case. - if (expr->ends_initialization_block()) { - __ ldr(r2, MemOperand(sp)); - } else { - __ pop(r2); - } + __ pop(r2); Handle ic = is_classic_mode() ? isolate()->builtins()->KeyedStoreIC_Initialize() : isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId()); - // If the assignment ends an initialization block, revert to fast case. - if (expr->ends_initialization_block()) { - __ push(r0); // Result of assignment, saved even if not needed. 
- // Receiver is under the result value. - __ ldr(ip, MemOperand(sp, kPointerSize)); - __ push(ip); - __ CallRuntime(Runtime::kToFastProperties, 1); - __ pop(r0); - __ Drop(1); - } PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); context()->Plug(r0); } @@ -2294,7 +2244,9 @@ void FullCodeGenerator::CallIC(Handle code, RelocInfo::Mode rmode, TypeFeedbackId ast_id) { ic_total_count_++; - __ Call(code, rmode, ast_id); + // All calls must have a predictable size in full-codegen code to ensure that + // the debugger can patch them correctly. + __ Call(code, rmode, ast_id, al, NEVER_INLINE_TARGET_ADDRESS); } void FullCodeGenerator::EmitCallWithIC(Call* expr, @@ -2424,7 +2376,7 @@ void FullCodeGenerator::VisitCall(Call* expr) { VariableProxy* proxy = callee->AsVariableProxy(); Property* property = callee->AsProperty(); - if (proxy != NULL && proxy->var()->is_possibly_eval()) { + if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) { // In a call to eval, we first call %ResolvePossiblyDirectEval to // resolve the function we need to call and the receiver of the // call. Then we call the resolved function using the given @@ -2714,7 +2666,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); - if (generate_debug_code_) __ AbortIfSmi(r0); + __ AssertNotSmi(r0); __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset)); @@ -2729,26 +2681,31 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( __ b(eq, if_false); // Look for valueOf symbol in the descriptor array, and indicate false if - // found. The type is not checked, so if it is a transition it is a false - // negative. - __ LoadInstanceDescriptors(r1, r4, r3); - __ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset)); - // r4: descriptor array - // r3: length of descriptor array - // Calculate the end of the descriptor array. + // found. Since we omit an enumeration index check, if it is added via a + // transition that shares its descriptor array, this is a false positive. + Label entry, loop, done; + + // Skip loop if no descriptors are valid. + __ NumberOfOwnDescriptors(r3, r1); + __ cmp(r3, Operand(0)); + __ b(eq, &done); + + __ LoadInstanceDescriptors(r1, r4); + // r4: descriptor array. + // r3: valid entries in the descriptor array. STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kPointerSize == 4); - __ add(r2, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ mov(ip, Operand(DescriptorArray::kDescriptorSize)); + __ mul(r3, r3, ip); + // Calculate location of the first key name. + __ add(r4, r4, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag)); + // Calculate the end of the descriptor array. + __ mov(r2, r4); __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); - // Calculate location of the first key name. - __ add(r4, - r4, - Operand(DescriptorArray::kFirstOffset - kHeapObjectTag)); // Loop through all the keys in the descriptor array. If one of these is the // symbol valueOf the result is false. - Label entry, loop; // The use of ip to store the valueOf symbol asumes that it is not otherwise // used in the loop below. 
__ mov(ip, Operand(FACTORY->value_of_symbol())); @@ -2762,7 +2719,8 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( __ cmp(r4, Operand(r2)); __ b(ne, &loop); - // If a valueOf property is not found on the object check that it's + __ bind(&done); + // If a valueOf property is not found on the object check that its // prototype is the un-modified String prototype. If not result is false. __ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset)); __ JumpIfSmi(r2, if_false); @@ -3173,6 +3131,39 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) { } +void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) { + ZoneList* args = expr->arguments(); + ASSERT_EQ(3, args->length()); + + VisitForStackValue(args->at(1)); // index + VisitForStackValue(args->at(2)); // value + __ pop(r2); + __ pop(r1); + VisitForAccumulatorValue(args->at(0)); // string + + static const String::Encoding encoding = String::ONE_BYTE_ENCODING; + SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2); + context()->Plug(r0); +} + + +void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) { + ZoneList* args = expr->arguments(); + ASSERT_EQ(3, args->length()); + + VisitForStackValue(args->at(1)); // index + VisitForStackValue(args->at(2)); // value + __ pop(r2); + __ pop(r1); + VisitForAccumulatorValue(args->at(0)); // string + + static const String::Encoding encoding = String::TWO_BYTE_ENCODING; + SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2); + context()->Plug(r0); +} + + + void FullCodeGenerator::EmitMathPow(CallRuntime* expr) { // Load the arguments on the stack and call the runtime function. ZoneList* args = expr->arguments(); @@ -3583,8 +3574,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) { ASSERT(args->length() == 1); VisitForAccumulatorValue(args->at(0)); - __ AbortIfNotString(r0); - + __ AssertString(r0); __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset)); __ IndexFromHash(r0, r0); @@ -3666,7 +3656,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset)); __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout); - __ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset)); + __ ldr(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset)); __ add(string_length, string_length, Operand(scratch1), SetCC); __ b(vs, &bailout); __ cmp(element, elements_end); @@ -3695,7 +3685,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { // Add (separator length times array_length) - separator length to the // string_length to get the length of the result string. array_length is not // smi but the other values are, so the result is a smi - __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset)); + __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset)); __ sub(string_length, string_length, Operand(scratch1)); __ smull(scratch2, ip, array_length, scratch1); // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are @@ -3733,10 +3723,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { array_length = no_reg; __ add(result_pos, result, - Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); // Check the length of the separator. 
- __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset)); + __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset)); __ cmp(scratch1, Operand(Smi::FromInt(1))); __ b(eq, &one_char_separator); __ b(gt, &long_separator); @@ -3752,7 +3742,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset)); __ SmiUntag(string_length); - __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(string, + string, + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); __ CopyBytes(string, result_pos, string_length, scratch1); __ cmp(element, elements_end); __ b(lt, &empty_separator_loop); // End while (element < elements_end). @@ -3762,7 +3754,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { // One-character separator case __ bind(&one_char_separator); // Replace separator with its ASCII character value. - __ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize)); + __ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize)); // Jump into the loop after the code that copies the separator, so the first // element is not preceded by a separator __ jmp(&one_char_separator_loop_entry); @@ -3782,7 +3774,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset)); __ SmiUntag(string_length); - __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(string, + string, + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); __ CopyBytes(string, result_pos, string_length, scratch1); __ cmp(element, elements_end); __ b(lt, &one_char_separator_loop); // End while (element < elements_end). @@ -3803,14 +3797,16 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ SmiUntag(string_length); __ add(string, separator, - Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); __ CopyBytes(string, result_pos, string_length, scratch1); __ bind(&long_separator); __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset)); __ SmiUntag(string_length); - __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(string, + string, + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); __ CopyBytes(string, result_pos, string_length, scratch1); __ cmp(element, elements_end); __ b(lt, &long_separator_loop); // End while (element < elements_end). @@ -4115,7 +4111,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { // Call stub. Undo operation first. __ sub(r0, r0, Operand(Smi::FromInt(count_value))); } - __ mov(r1, Operand(Smi::FromInt(count_value))); + __ mov(r1, r0); + __ mov(r0, Operand(Smi::FromInt(count_value))); // Record position before stub call. 
SetSourcePosition(expr->position()); @@ -4340,29 +4337,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { default: { VisitForAccumulatorValue(expr->right()); - Condition cond = eq; - switch (op) { - case Token::EQ_STRICT: - case Token::EQ: - cond = eq; - break; - case Token::LT: - cond = lt; - break; - case Token::GT: - cond = gt; - break; - case Token::LTE: - cond = le; - break; - case Token::GTE: - cond = ge; - break; - case Token::IN: - case Token::INSTANCEOF: - default: - UNREACHABLE(); - } + Condition cond = CompareIC::ComputeCondition(op); __ pop(r1); bool inline_smi_code = ShouldInlineSmiCase(op); diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index 404f3c6145..29a3687aa9 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -1301,6 +1301,143 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, } +static void KeyedStoreGenerateGenericHelper( + MacroAssembler* masm, + Label* fast_object, + Label* fast_double, + Label* slow, + KeyedStoreCheckMap check_map, + KeyedStoreIncrementLength increment_length, + Register value, + Register key, + Register receiver, + Register receiver_map, + Register elements_map, + Register elements) { + Label transition_smi_elements; + Label finish_object_store, non_double_value, transition_double_elements; + Label fast_double_without_map_check; + + // Fast case: Do the store, could be either Object or double. + __ bind(fast_object); + Register scratch_value = r4; + Register address = r5; + if (check_map == kCheckMap) { + __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); + __ cmp(elements_map, + Operand(masm->isolate()->factory()->fixed_array_map())); + __ b(ne, fast_double); + } + // Smi stores don't require further checks. + Label non_smi_value; + __ JumpIfNotSmi(value, &non_smi_value); + + if (increment_length == kIncrementLength) { + // Add 1 to receiver->length. + __ add(scratch_value, key, Operand(Smi::FromInt(1))); + __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); + } + // It's irrelevant whether array is smi-only or not when writing a smi. + __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ str(value, MemOperand(address)); + __ Ret(); + + __ bind(&non_smi_value); + // Escape to elements kind transition case. + __ CheckFastObjectElements(receiver_map, scratch_value, + &transition_smi_elements); + + // Fast elements array, store the value to the elements backing store. + __ bind(&finish_object_store); + if (increment_length == kIncrementLength) { + // Add 1 to receiver->length. + __ add(scratch_value, key, Operand(Smi::FromInt(1))); + __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); + } + __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ str(value, MemOperand(address)); + // Update write barrier for the elements array address. + __ mov(scratch_value, value); // Preserve the value which is returned. + __ RecordWrite(elements, + address, + scratch_value, + kLRHasNotBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + __ Ret(); + + __ bind(fast_double); + if (check_map == kCheckMap) { + // Check for fast double array case. If this fails, call through to the + // runtime. 
+ __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex); + __ b(ne, slow); + } + __ bind(&fast_double_without_map_check); + __ StoreNumberToDoubleElements(value, + key, + elements, // Overwritten. + r3, // Scratch regs... + r4, + r5, + r6, + &transition_double_elements); + if (increment_length == kIncrementLength) { + // Add 1 to receiver->length. + __ add(scratch_value, key, Operand(Smi::FromInt(1))); + __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); + } + __ Ret(); + + __ bind(&transition_smi_elements); + // Transition the array appropriately depending on the value type. + __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset)); + __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex); + __ b(ne, &non_double_value); + + // Value is a double. Transition FAST_SMI_ELEMENTS -> + // FAST_DOUBLE_ELEMENTS and complete the store. + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, + FAST_DOUBLE_ELEMENTS, + receiver_map, + r4, + slow); + ASSERT(receiver_map.is(r3)); // Transition code expects map in r3 + ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow); + __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ jmp(&fast_double_without_map_check); + + __ bind(&non_double_value); + // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, + FAST_ELEMENTS, + receiver_map, + r4, + slow); + ASSERT(receiver_map.is(r3)); // Transition code expects map in r3 + ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm); + __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ jmp(&finish_object_store); + + __ bind(&transition_double_elements); + // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a + // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and + // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS + __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, + FAST_ELEMENTS, + receiver_map, + r4, + slow); + ASSERT(receiver_map.is(r3)); // Transition code expects map in r3 + ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow); + __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ jmp(&finish_object_store); +} + + void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode) { // ---------- S t a t e -------------- @@ -1309,11 +1446,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // -- r2 : receiver // -- lr : return address // ----------------------------------- - Label slow, array, extra, check_if_double_array; - Label fast_object_with_map_check, fast_object_without_map_check; - Label fast_double_with_map_check, fast_double_without_map_check; - Label transition_smi_elements, finish_object_store, non_double_value; - Label transition_double_elements; + Label slow, fast_object, fast_object_grow; + Label fast_double, fast_double_grow; + Label array, extra, check_if_double_array; // Register usage. Register value = r0; @@ -1348,7 +1483,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // Check array bounds. Both the key and the length of FixedArray are smis. __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); __ cmp(key, Operand(ip)); - __ b(lo, &fast_object_with_map_check); + __ b(lo, &fast_object); // Slow case, handle jump to runtime. 
__ bind(&slow); @@ -1373,21 +1508,13 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map())); __ b(ne, &check_if_double_array); - // Calculate key + 1 as smi. - STATIC_ASSERT(kSmiTag == 0); - __ add(r4, key, Operand(Smi::FromInt(1))); - __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ b(&fast_object_without_map_check); + __ jmp(&fast_object_grow); __ bind(&check_if_double_array); __ cmp(elements_map, Operand(masm->isolate()->factory()->fixed_double_array_map())); __ b(ne, &slow); - // Add 1 to key, and go to common element store code for doubles. - STATIC_ASSERT(kSmiTag == 0); - __ add(r4, key, Operand(Smi::FromInt(1))); - __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ jmp(&fast_double_without_map_check); + __ jmp(&fast_double_grow); // Array case: Get the length and the elements array from the JS // array. Check that the array is in fast mode (and writable); if it @@ -1399,106 +1526,15 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset)); __ cmp(key, Operand(ip)); __ b(hs, &extra); - // Fall through to fast case. - - __ bind(&fast_object_with_map_check); - Register scratch_value = r4; - Register address = r5; - __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); - __ cmp(elements_map, - Operand(masm->isolate()->factory()->fixed_array_map())); - __ b(ne, &fast_double_with_map_check); - __ bind(&fast_object_without_map_check); - // Smi stores don't require further checks. - Label non_smi_value; - __ JumpIfNotSmi(value, &non_smi_value); - // It's irrelevant whether array is smi-only or not when writing a smi. - __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ str(value, MemOperand(address)); - __ Ret(); - __ bind(&non_smi_value); - // Escape to elements kind transition case. - __ CheckFastObjectElements(receiver_map, scratch_value, - &transition_smi_elements); - // Fast elements array, store the value to the elements backing store. - __ bind(&finish_object_store); - __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ str(value, MemOperand(address)); - // Update write barrier for the elements array address. - __ mov(scratch_value, value); // Preserve the value which is returned. - __ RecordWrite(elements, - address, - scratch_value, - kLRHasNotBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); - __ Ret(); - - __ bind(&fast_double_with_map_check); - // Check for fast double array case. If this fails, call through to the - // runtime. - __ cmp(elements_map, - Operand(masm->isolate()->factory()->fixed_double_array_map())); - __ b(ne, &slow); - __ bind(&fast_double_without_map_check); - __ StoreNumberToDoubleElements(value, - key, - receiver, - elements, - r3, - r4, - r5, - r6, - &transition_double_elements); - __ Ret(); - - __ bind(&transition_smi_elements); - // Transition the array appropriately depending on the value type. - __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset)); - __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex); - __ b(ne, &non_double_value); - - // Value is a double. Transition FAST_SMI_ELEMENTS -> - // FAST_DOUBLE_ELEMENTS and complete the store. 
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, - FAST_DOUBLE_ELEMENTS, - receiver_map, - r4, - &slow); - ASSERT(receiver_map.is(r3)); // Transition code expects map in r3 - ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow); - __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); - __ jmp(&fast_double_without_map_check); - - __ bind(&non_double_value); - // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS - __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, - FAST_ELEMENTS, - receiver_map, - r4, - &slow); - ASSERT(receiver_map.is(r3)); // Transition code expects map in r3 - ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm); - __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); - __ jmp(&finish_object_store); - - __ bind(&transition_double_elements); - // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a - // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and - // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS - __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, - FAST_ELEMENTS, - receiver_map, - r4, - &slow); - ASSERT(receiver_map.is(r3)); // Transition code expects map in r3 - ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow); - __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); - __ jmp(&finish_object_store); + KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, + &slow, kCheckMap, kDontIncrementLength, + value, key, receiver, receiver_map, + elements_map, elements); + KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow, + &slow, kDontCheckMap, kIncrementLength, + value, key, receiver, receiver_map, + elements_map, elements); } @@ -1662,42 +1698,21 @@ Condition CompareIC::ComputeCondition(Token::Value op) { } -void CompareIC::UpdateCaches(Handle x, Handle y) { - HandleScope scope; - Handle rewritten; - State previous_state = GetState(); - State state = TargetState(previous_state, false, x, y); - if (state == GENERIC) { - CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0); - rewritten = stub.GetCode(); - } else { - ICCompareStub stub(op_, state); - if (state == KNOWN_OBJECTS) { - stub.set_known_map(Handle(Handle::cast(x)->map())); - } - rewritten = stub.GetCode(); - } - set_target(*rewritten); - -#ifdef DEBUG - if (FLAG_trace_ic) { - PrintF("[CompareIC (%s->%s)#%s]\n", - GetStateName(previous_state), - GetStateName(state), - Token::Name(op_)); - } -#endif +bool CompareIC::HasInlinedSmiCode(Address address) { + // The address of the instruction following the call. + Address cmp_instruction_address = + Assembler::return_address_from_call_start(address); - // Activate inlined smi code. - if (previous_state == UNINITIALIZED) { - PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK); - } + // If the instruction following the call is not a cmp rx, #yyy, nothing + // was inlined. + Instr instr = Assembler::instr_at(cmp_instruction_address); + return Assembler::IsCmpImmediate(instr); } void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { Address cmp_instruction_address = - address + Assembler::kCallTargetAddressOffset; + Assembler::return_address_from_call_start(address); // If the instruction following the call is not a cmp rx, #yyy, nothing // was inlined. 
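A note on the constant-pool marker change that the disassembler hunks above now rely on: the patch replaces the old marker/length mask in constants-arm.h with a permanently undefined (UDF) encoding whose split 16-bit immediate (instruction bits 19:8 and 3:0) carries the pool length, via EncodeConstantPoolLength/DecodeConstantPoolLength. The following is a minimal standalone sketch of that bit layout only; the names and the main() round-trip driver are local to this illustration and are not part of the patch.

// Standalone illustration of the constant-pool length packing used above.
#include <cassert>
#include <cstdint>
#include <cstdio>

static const uint32_t kMarkerMask = 0xfff000f0;  // fixed bits of the UDF marker
static const uint32_t kMarker     = 0xe7f000f0;  // permanently undefined instr
static const int      kLengthMax  = 0xffff;      // length fits in 16 bits

static uint32_t EncodeLength(int length) {
  assert((length & kLengthMax) == length);
  // Upper 12 bits of the length go to instruction bits [19:8],
  // the low 4 bits go to instruction bits [3:0].
  return ((length & 0xfff0) << 4) | (length & 0xf);
}

static int DecodeLength(uint32_t instr) {
  assert((instr & kMarkerMask) == kMarker);
  // Reassemble the split immediate back into a 16-bit length.
  return ((instr >> 4) & 0xfff0) | (instr & 0xf);
}

int main() {
  // Round-trip every representable length through the marker encoding.
  for (int len = 0; len <= kLengthMax; ++len) {
    uint32_t instr = kMarker | EncodeLength(len);
    assert(DecodeLength(instr) == len);
  }
  std::printf("constant pool marker for length 42: 0x%08x\n",
              static_cast<unsigned>(kMarker | EncodeLength(42)));
  return 0;
}

Because the length bits live entirely outside kMarkerMask, a decoder can both recognize the marker and recover the pool size from the same word, which is what Decoder::ConstantPoolSizeAt does in the disasm-arm.cc hunk above.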
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index fc1d64079a..4203673733 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -177,6 +177,7 @@ const char* LArithmeticT::Mnemonic() const { case Token::BIT_AND: return "bit-and-t"; case Token::BIT_OR: return "bit-or-t"; case Token::BIT_XOR: return "bit-xor-t"; + case Token::ROR: return "ror-t"; case Token::SHL: return "shl-t"; case Token::SAR: return "sar-t"; case Token::SHR: return "shr-t"; @@ -194,22 +195,22 @@ void LGoto::PrintDataTo(StringStream* stream) { void LBranch::PrintDataTo(StringStream* stream) { stream->Add("B%d | B%d on ", true_block_id(), false_block_id()); - InputAt(0)->PrintTo(stream); + value()->PrintTo(stream); } void LCmpIDAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if "); - InputAt(0)->PrintTo(stream); + left()->PrintTo(stream); stream->Add(" %s ", Token::String(op())); - InputAt(1)->PrintTo(stream); + right()->PrintTo(stream); stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); } void LIsNilAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if "); - InputAt(0)->PrintTo(stream); + value()->PrintTo(stream); stream->Add(kind() == kStrictEquality ? " === " : " == "); stream->Add(nil() == kNullValue ? "null" : "undefined"); stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); @@ -218,57 +219,57 @@ void LIsNilAndBranch::PrintDataTo(StringStream* stream) { void LIsObjectAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if is_object("); - InputAt(0)->PrintTo(stream); + value()->PrintTo(stream); stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); } void LIsStringAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if is_string("); - InputAt(0)->PrintTo(stream); + value()->PrintTo(stream); stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); } void LIsSmiAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if is_smi("); - InputAt(0)->PrintTo(stream); + value()->PrintTo(stream); stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); } void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if is_undetectable("); - InputAt(0)->PrintTo(stream); + value()->PrintTo(stream); stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); } void LStringCompareAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if string_compare("); - InputAt(0)->PrintTo(stream); - InputAt(1)->PrintTo(stream); + left()->PrintTo(stream); + right()->PrintTo(stream); stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); } void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if has_instance_type("); - InputAt(0)->PrintTo(stream); + value()->PrintTo(stream); stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); } void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if has_cached_array_index("); - InputAt(0)->PrintTo(stream); + value()->PrintTo(stream); stream->Add(") then B%d else B%d", true_block_id(), false_block_id()); } void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if class_of_test("); - InputAt(0)->PrintTo(stream); + value()->PrintTo(stream); stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(), true_block_id(), @@ -278,7 +279,7 @@ void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) { void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if typeof "); 
- InputAt(0)->PrintTo(stream); + value()->PrintTo(stream); stream->Add(" == \"%s\" then B%d else B%d", *hydrogen()->type_literal()->ToCString(), true_block_id(), false_block_id()); @@ -292,26 +293,31 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) { void LUnaryMathOperation::PrintDataTo(StringStream* stream) { stream->Add("/%s ", hydrogen()->OpName()); - InputAt(0)->PrintTo(stream); + value()->PrintTo(stream); +} + + +void LMathExp::PrintDataTo(StringStream* stream) { + value()->PrintTo(stream); } void LLoadContextSlot::PrintDataTo(StringStream* stream) { - InputAt(0)->PrintTo(stream); + context()->PrintTo(stream); stream->Add("[%d]", slot_index()); } void LStoreContextSlot::PrintDataTo(StringStream* stream) { - InputAt(0)->PrintTo(stream); + context()->PrintTo(stream); stream->Add("[%d] <- ", slot_index()); - InputAt(1)->PrintTo(stream); + value()->PrintTo(stream); } void LInvokeFunction::PrintDataTo(StringStream* stream) { stream->Add("= "); - InputAt(0)->PrintTo(stream); + function()->PrintTo(stream); stream->Add(" #%d / ", arity()); } @@ -340,17 +346,15 @@ void LCallKnownGlobal::PrintDataTo(StringStream* stream) { void LCallNew::PrintDataTo(StringStream* stream) { stream->Add("= "); - InputAt(0)->PrintTo(stream); + constructor()->PrintTo(stream); stream->Add(" #%d / ", arity()); } void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { arguments()->PrintTo(stream); - stream->Add(" length "); length()->PrintTo(stream); - stream->Add(" index "); index()->PrintTo(stream); } @@ -374,20 +378,27 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) { } -void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) { - object()->PrintTo(stream); +void LLoadKeyed::PrintDataTo(StringStream* stream) { + elements()->PrintTo(stream); stream->Add("["); key()->PrintTo(stream); - stream->Add("] <- "); - value()->PrintTo(stream); + if (hydrogen()->IsDehoisted()) { + stream->Add(" + %d]", additional_index()); + } else { + stream->Add("]"); + } } -void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) { +void LStoreKeyed::PrintDataTo(StringStream* stream) { elements()->PrintTo(stream); stream->Add("["); key()->PrintTo(stream); - stream->Add("] <- "); + if (hydrogen()->IsDehoisted()) { + stream->Add(" + %d] <-", additional_index()); + } else { + stream->Add("] <- "); + } value()->PrintTo(stream); } @@ -860,6 +871,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment( argument_count_, value_count, outer, + hydrogen_env->entry(), zone()); int argument_index = *argument_index_accumulator; for (int i = 0; i < value_count; ++i) { @@ -1034,6 +1046,15 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { LOperand* input = UseFixedDouble(instr->value(), d2); LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL); return MarkAsCall(DefineFixedDouble(result, d2), instr); + } else if (op == kMathExp) { + ASSERT(instr->representation().IsDouble()); + ASSERT(instr->value()->representation().IsDouble()); + LOperand* input = UseTempRegister(instr->value()); + LOperand* temp1 = TempRegister(); + LOperand* temp2 = TempRegister(); + LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll. 
+ LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2); + return DefineAsRegister(result); } else if (op == kMathPowHalf) { LOperand* input = UseFixedDouble(instr->value(), d2); LOperand* temp = FixedTemp(d3); @@ -1041,7 +1062,8 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { return DefineFixedDouble(result, d2); } else { LOperand* input = UseRegisterAtStart(instr->value()); - LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL; + + LOperand* temp = (op == kMathRound) ? FixedTemp(d3) : NULL; LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp); switch (op) { case kMathAbs: @@ -1108,6 +1130,11 @@ LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) { } +LInstruction* LChunkBuilder::DoRor(HRor* instr) { + return DoShift(Token::ROR, instr); +} + + LInstruction* LChunkBuilder::DoShr(HShr* instr) { return DoShift(Token::SHR, instr); } @@ -1306,8 +1333,21 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) { return DefineAsRegister(mul); } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::MUL, instr); + if (instr->UseCount() == 1 && instr->uses().value()->IsAdd()) { + HAdd* add = HAdd::cast(instr->uses().value()); + if (instr == add->left()) { + // This mul is the lhs of an add. The add and mul will be folded + // into a multiply-add. + return NULL; + } + if (instr == add->right() && !add->left()->IsMul()) { + // This mul is the rhs of an add, where the lhs is not another mul. + // The add and mul will be folded into a multiply-add. + return NULL; + } + } + return DoArithmeticD(Token::MUL, instr); } else { return DoArithmeticT(Token::MUL, instr); } @@ -1318,6 +1358,12 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) { if (instr->representation().IsInteger32()) { ASSERT(instr->left()->representation().IsInteger32()); ASSERT(instr->right()->representation().IsInteger32()); + + if (instr->left()->IsConstant()) { + // If lhs is constant, do reverse subtraction instead. + return DoRSub(instr); + } + LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseOrConstantAtStart(instr->right()); LSubI* sub = new(zone()) LSubI(left, right); @@ -1334,6 +1380,32 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) { } +LInstruction* LChunkBuilder::DoRSub(HSub* instr) { + ASSERT(instr->representation().IsInteger32()); + ASSERT(instr->left()->representation().IsInteger32()); + ASSERT(instr->right()->representation().IsInteger32()); + + // Note: The lhs of the subtraction becomes the rhs of the + // reverse-subtraction. 
+ LOperand* left = UseRegisterAtStart(instr->right()); + LOperand* right = UseOrConstantAtStart(instr->left()); + LRSubI* rsb = new(zone()) LRSubI(left, right); + LInstruction* result = DefineAsRegister(rsb); + if (instr->CheckFlag(HValue::kCanOverflow)) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) { + LOperand* multiplier_op = UseRegisterAtStart(mul->left()); + LOperand* multiplicand_op = UseRegisterAtStart(mul->right()); + LOperand* addend_op = UseRegisterAtStart(addend); + return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op, + multiplicand_op)); +} + LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { if (instr->representation().IsInteger32()) { ASSERT(instr->left()->representation().IsInteger32()); @@ -1347,6 +1419,14 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { } return result; } else if (instr->representation().IsDouble()) { + if (instr->left()->IsMul()) + return DoMultiplyAdd(HMul::cast(instr->left()), instr->right()); + + if (instr->right()->IsMul()) { + ASSERT(!instr->left()->IsMul()); + return DoMultiplyAdd(HMul::cast(instr->right()), instr->left()); + } + return DoArithmeticD(Token::ADD, instr); } else { ASSERT(instr->representation().IsTagged()); @@ -1412,7 +1492,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { LInstruction* LChunkBuilder::DoCompareIDAndBranch( HCompareIDAndBranch* instr) { - Representation r = instr->GetInputRepresentation(); + Representation r = instr->representation(); if (r.IsInteger32()) { ASSERT(instr->left()->representation().IsInteger32()); ASSERT(instr->right()->representation().IsInteger32()); @@ -1566,6 +1646,16 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) { } +LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) { + LOperand* string = UseRegister(instr->string()); + LOperand* index = UseRegister(instr->index()); + LOperand* value = UseRegister(instr->value()); + LSeqStringSetChar* result = + new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value); + return DefineAsRegister(result); +} + + LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { LOperand* value = UseRegisterOrConstantAtStart(instr->index()); LOperand* length = UseRegister(instr->length()); @@ -1617,8 +1707,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { LOperand* temp1 = TempRegister(); LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister() : NULL; - LOperand* temp3 = instr->CanTruncateToInt32() ? 
FixedTemp(d11) - : NULL; + LOperand* temp3 = FixedTemp(d11); res = DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2, @@ -1690,10 +1779,10 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) { - LOperand* temp1 = TempRegister(); + LUnallocated* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); - LInstruction* result = new(zone()) LCheckPrototypeMaps(temp1, temp2); - return AssignEnvironment(result); + LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2); + return AssignEnvironment(Define(result, temp1)); } @@ -1861,53 +1950,40 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer( } -LInstruction* LChunkBuilder::DoLoadKeyedFastElement( - HLoadKeyedFastElement* instr) { - ASSERT(instr->representation().IsTagged()); +LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { ASSERT(instr->key()->representation().IsInteger32() || instr->key()->representation().IsTagged()); - LOperand* obj = UseRegisterAtStart(instr->object()); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key); - if (instr->RequiresHoleCheck()) AssignEnvironment(result); - return DefineAsRegister(result); -} - - -LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement( - HLoadKeyedFastDoubleElement* instr) { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - LOperand* elements = UseTempRegister(instr->elements()); + ElementsKind elements_kind = instr->elements_kind(); LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LLoadKeyedFastDoubleElement* result = - new(zone()) LLoadKeyedFastDoubleElement(elements, key); - return AssignEnvironment(DefineAsRegister(result)); -} + LLoadKeyed* result = NULL; + if (!instr->is_external()) { + LOperand* obj = NULL; + if (instr->representation().IsDouble()) { + obj = UseTempRegister(instr->elements()); + } else { + ASSERT(instr->representation().IsTagged()); + obj = UseRegisterAtStart(instr->elements()); + } + result = new(zone()) LLoadKeyed(obj, key); + } else { + ASSERT( + (instr->representation().IsInteger32() && + (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && + (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || + (instr->representation().IsDouble() && + ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || + (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); + LOperand* external_pointer = UseRegister(instr->elements()); + result = new(zone()) LLoadKeyed(external_pointer, key); + } -LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement( - HLoadKeyedSpecializedArrayElement* instr) { - ElementsKind elements_kind = instr->elements_kind(); - ASSERT( - (instr->representation().IsInteger32() && - (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && - (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || - (instr->representation().IsDouble() && - ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || - (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - LOperand* external_pointer = UseRegister(instr->external_pointer()); - LOperand* key = UseRegisterOrConstant(instr->key()); - LLoadKeyedSpecializedArrayElement* result = - new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key); - LInstruction* load_instr = DefineAsRegister(result); + 
DefineAsRegister(result); // An unsigned int array load might overflow and cause a deopt, make sure it // has an environment. - return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ? - AssignEnvironment(load_instr) : load_instr; + bool can_deoptimize = instr->RequiresHoleCheck() || + (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS); + return can_deoptimize ? AssignEnvironment(result) : result; } @@ -1921,66 +1997,48 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { } -LInstruction* LChunkBuilder::DoStoreKeyedFastElement( - HStoreKeyedFastElement* instr) { - bool needs_write_barrier = instr->NeedsWriteBarrier(); - ASSERT(instr->value()->representation().IsTagged()); - ASSERT(instr->object()->representation().IsTagged()); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - - LOperand* obj = UseTempRegister(instr->object()); - LOperand* val = needs_write_barrier - ? UseTempRegister(instr->value()) - : UseRegisterAtStart(instr->value()); - LOperand* key = needs_write_barrier - ? UseTempRegister(instr->key()) - : UseRegisterOrConstantAtStart(instr->key()); - return new(zone()) LStoreKeyedFastElement(obj, key, val); -} - - -LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement( - HStoreKeyedFastDoubleElement* instr) { - ASSERT(instr->value()->representation().IsDouble()); - ASSERT(instr->elements()->representation().IsTagged()); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - - LOperand* elements = UseRegisterAtStart(instr->elements()); - LOperand* val = UseTempRegister(instr->value()); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); +LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { + ElementsKind elements_kind = instr->elements_kind(); - return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val); -} + if (!instr->is_external()) { + ASSERT(instr->elements()->representation().IsTagged()); + bool needs_write_barrier = instr->NeedsWriteBarrier(); + LOperand* object = NULL; + LOperand* key = NULL; + LOperand* val = NULL; + + if (instr->value()->representation().IsDouble()) { + object = UseRegisterAtStart(instr->elements()); + val = UseTempRegister(instr->value()); + key = UseRegisterOrConstantAtStart(instr->key()); + } else { + ASSERT(instr->value()->representation().IsTagged()); + object = UseTempRegister(instr->elements()); + val = needs_write_barrier ? UseTempRegister(instr->value()) + : UseRegisterAtStart(instr->value()); + key = needs_write_barrier ? 
UseTempRegister(instr->key()) + : UseRegisterOrConstantAtStart(instr->key()); + } + return new(zone()) LStoreKeyed(object, key, val); + } -LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement( - HStoreKeyedSpecializedArrayElement* instr) { - ElementsKind elements_kind = instr->elements_kind(); ASSERT( (instr->value()->representation().IsInteger32() && (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || (instr->value()->representation().IsDouble() && ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || - (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); - ASSERT(instr->external_pointer()->representation().IsExternal()); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - - LOperand* external_pointer = UseRegister(instr->external_pointer()); + (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); + ASSERT(instr->elements()->representation().IsExternal()); bool val_is_temp_register = elements_kind == EXTERNAL_PIXEL_ELEMENTS || elements_kind == EXTERNAL_FLOAT_ELEMENTS; - LOperand* val = val_is_temp_register - ? UseTempRegister(instr->value()) + LOperand* val = val_is_temp_register ? UseTempRegister(instr->value()) : UseRegister(instr->value()); - LOperand* key = UseRegisterOrConstant(instr->key()); - - return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer, - key, - val); + LOperand* key = UseRegisterOrConstantAtStart(instr->key()); + LOperand* external_pointer = UseRegister(instr->elements()); + return new(zone()) LStoreKeyed(external_pointer, key, val); } @@ -2126,6 +2184,7 @@ LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) { LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { + ASSERT(argument_count_ == 0); allocator_->MarkAsOsrEntry(); current_block_->last_environment()->set_ast_id(instr->ast_id()); return AssignEnvironment(new(zone()) LOsrEntry); @@ -2164,12 +2223,10 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { - LOperand* arguments = UseRegister(instr->arguments()); + LOperand* args = UseRegister(instr->arguments()); LOperand* length = UseTempRegister(instr->length()); LOperand* index = UseRegister(instr->index()); - LAccessArgumentsAt* result = - new(zone()) LAccessArgumentsAt(arguments, length, index); - return AssignEnvironment(DefineAsRegister(result)); + return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index)); } @@ -2204,7 +2261,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { env->set_ast_id(instr->ast_id()); env->Drop(instr->pop_count()); - for (int i = 0; i < instr->values()->length(); ++i) { + for (int i = instr->values()->length() - 1; i >= 0; --i) { HValue* value = instr->values()->at(i); if (instr->HasAssignedIndexAt(i)) { env->Bind(instr->GetAssignedIndexAt(i), value); @@ -2253,6 +2310,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { if (instr->arguments_var() != NULL) { inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject()); } + inner->set_entry(instr); current_block_->UpdateEnvironment(inner); chunk_->AddInlinedClosure(instr->closure()); return NULL; @@ -2264,7 +2322,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { HEnvironment* env = current_block_->last_environment(); - if (instr->arguments_pushed()) { + if (env->entry()->arguments_pushed()) { int argument_count = env->arguments_environment()->parameter_count(); pop = new(zone()) 
LDrop(argument_count); argument_count_ -= argument_count; @@ -2295,9 +2353,7 @@ LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) { LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) { LOperand* map = UseRegister(instr->map()); - LOperand* scratch = TempRegister(); - return AssignEnvironment(DefineAsRegister( - new(zone()) LForInCacheArray(map, scratch))); + return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map))); } diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index e6e102f762..7397b4bc8d 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -125,18 +125,18 @@ class LCodeGen; V(LoadFunctionPrototype) \ V(LoadGlobalCell) \ V(LoadGlobalGeneric) \ - V(LoadKeyedFastDoubleElement) \ - V(LoadKeyedFastElement) \ + V(LoadKeyed) \ V(LoadKeyedGeneric) \ - V(LoadKeyedSpecializedArrayElement) \ V(LoadNamedField) \ V(LoadNamedFieldPolymorphic) \ V(LoadNamedGeneric) \ V(MapEnumLength) \ + V(MathExp) \ V(MathFloorOfDiv) \ V(MathMinMax) \ V(ModI) \ V(MulI) \ + V(MultiplyAddD) \ V(NumberTagD) \ V(NumberTagI) \ V(NumberTagU) \ @@ -150,6 +150,7 @@ class LCodeGen; V(Random) \ V(RegExpLiteral) \ V(Return) \ + V(SeqStringSetChar) \ V(ShiftI) \ V(SmiTag) \ V(SmiUntag) \ @@ -157,10 +158,8 @@ class LCodeGen; V(StoreContextSlot) \ V(StoreGlobalCell) \ V(StoreGlobalGeneric) \ - V(StoreKeyedFastDoubleElement) \ - V(StoreKeyedFastElement) \ + V(StoreKeyed) \ V(StoreKeyedGeneric) \ - V(StoreKeyedSpecializedArrayElement) \ V(StoreNamedField) \ V(StoreNamedGeneric) \ V(StringAdd) \ @@ -169,6 +168,7 @@ class LCodeGen; V(StringCompareAndBranch) \ V(StringLength) \ V(SubI) \ + V(RSubI) \ V(TaggedToI) \ V(ThisFunction) \ V(Throw) \ @@ -261,9 +261,6 @@ class LInstruction: public ZoneObject { virtual bool HasResult() const = 0; virtual LOperand* result() = 0; - virtual int TempCount() = 0; - virtual LOperand* TempAt(int i) = 0; - LOperand* FirstInput() { return InputAt(0); } LOperand* Output() { return HasResult() ? result() : NULL; } @@ -277,6 +274,10 @@ class LInstruction: public ZoneObject { virtual int InputCount() = 0; virtual LOperand* InputAt(int i) = 0; + friend class TempIterator; + virtual int TempCount() = 0; + virtual LOperand* TempAt(int i) = 0; + LEnvironment* environment_; SetOncePointer pointer_map_; HValue* hydrogen_value_; @@ -296,11 +297,6 @@ class LTemplateInstruction: public LInstruction { void set_result(LOperand* operand) { results_[0] = operand; } LOperand* result() { return results_[0]; } - LOperand* InputAt(int i) { return inputs_[i]; } - - int TempCount() { return T; } - LOperand* TempAt(int i) { return temps_[i]; } - protected: EmbeddedContainer results_; EmbeddedContainer inputs_; @@ -308,6 +304,10 @@ class LTemplateInstruction: public LInstruction { private: virtual int InputCount() { return I; } + virtual LOperand* InputAt(int i) { return inputs_[i]; } + + virtual int TempCount() { return T; } + virtual LOperand* TempAt(int i) { return temps_[i]; } }; @@ -525,6 +525,8 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> { inputs_[0] = elements; } + LOperand* elements() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length") }; @@ -551,16 +553,22 @@ class LModI: public LTemplateInstruction<1, 2, 3> { // Used for the standard case. 
LModI(LOperand* left, LOperand* right, - LOperand* temp1, + LOperand* temp, LOperand* temp2, LOperand* temp3) { inputs_[0] = left; inputs_[1] = right; - temps_[0] = temp1; + temps_[0] = temp; temps_[1] = temp2; temps_[2] = temp3; } + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + LOperand* temp3() { return temps_[2]; } + DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i") DECLARE_HYDROGEN_ACCESSOR(Mod) }; @@ -573,6 +581,9 @@ class LDivI: public LTemplateInstruction<1, 2, 0> { inputs_[1] = right; } + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") DECLARE_HYDROGEN_ACCESSOR(Div) }; @@ -588,6 +599,10 @@ class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> { temps_[0] = temp; } + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } + DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div") DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) }; @@ -601,11 +616,33 @@ class LMulI: public LTemplateInstruction<1, 2, 1> { temps_[0] = temp; } + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } + DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i") DECLARE_HYDROGEN_ACCESSOR(Mul) }; +// Instruction for computing multiplier * multiplicand + addend. +class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> { + public: + LMultiplyAddD(LOperand* addend, LOperand* multiplier, + LOperand* multiplicand) { + inputs_[0] = addend; + inputs_[1] = multiplier; + inputs_[2] = multiplicand; + } + + LOperand* addend() { return inputs_[0]; } + LOperand* multiplier() { return inputs_[1]; } + LOperand* multiplicand() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d") +}; + + class LCmpIDAndBranch: public LControlInstruction<2, 0> { public: LCmpIDAndBranch(LOperand* left, LOperand* right) { @@ -613,12 +650,15 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> { inputs_[1] = right; } + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch") DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch) Token::Value op() const { return hydrogen()->token(); } bool is_double() const { - return hydrogen()->GetInputRepresentation().IsDouble(); + return hydrogen()->representation().IsDouble(); } virtual void PrintDataTo(StringStream* stream); @@ -632,6 +672,9 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> { temps_[0] = temp; } + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation") DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) @@ -640,6 +683,30 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> { }; +class LMathExp: public LTemplateInstruction<1, 1, 3> { + public: + LMathExp(LOperand* value, + LOperand* double_temp, + LOperand* temp1, + LOperand* temp2) { + inputs_[0] = value; + temps_[0] = temp1; + temps_[1] = temp2; + temps_[2] = double_temp; + ExternalReference::InitializeMathExpData(); + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp1() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + LOperand* double_temp() { return temps_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp") + + virtual void 
PrintDataTo(StringStream* stream); +}; + + class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> { public: LCmpObjectEqAndBranch(LOperand* left, LOperand* right) { @@ -647,6 +714,9 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> { inputs_[1] = right; } + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch") DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch) @@ -659,6 +729,8 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> { inputs_[0] = left; } + LOperand* left() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch, "cmp-constant-eq-and-branch") DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch) @@ -671,6 +743,8 @@ class LIsNilAndBranch: public LControlInstruction<1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch") DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch) @@ -688,6 +762,9 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> { temps_[0] = temp; } + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch") DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch) @@ -702,6 +779,9 @@ class LIsStringAndBranch: public LControlInstruction<1, 1> { temps_[0] = temp; } + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch") DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch) @@ -715,6 +795,8 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch") DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch) @@ -729,6 +811,9 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> { temps_[0] = temp; } + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch, "is-undetectable-and-branch") DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch) @@ -744,6 +829,9 @@ class LStringCompareAndBranch: public LControlInstruction<2, 0> { inputs_[1] = right; } + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch, "string-compare-and-branch") DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch) @@ -760,6 +848,8 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch, "has-instance-type-and-branch") DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch) @@ -774,6 +864,8 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index") DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex) }; @@ -785,6 +877,8 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch, "has-cached-array-index-and-branch") DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch) @@ -800,6 +894,9 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 1> { temps_[0] = temp; } + LOperand* 
value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch") DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch) @@ -815,6 +912,9 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> { inputs_[1] = right; } + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t") DECLARE_HYDROGEN_ACCESSOR(CompareGeneric) @@ -829,6 +929,9 @@ class LInstanceOf: public LTemplateInstruction<1, 2, 0> { inputs_[1] = right; } + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of") }; @@ -840,6 +943,9 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> { temps_[0] = temp; } + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal, "instance-of-known-global") DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal) @@ -879,6 +985,9 @@ class LBitI: public LTemplateInstruction<1, 2, 0> { inputs_[1] = right; } + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + Token::Value op() const { return hydrogen()->op(); } DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i") @@ -895,7 +1004,8 @@ class LShiftI: public LTemplateInstruction<1, 2, 0> { } Token::Value op() const { return op_; } - + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } bool can_deopt() const { return can_deopt_; } DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i") @@ -913,11 +1023,29 @@ class LSubI: public LTemplateInstruction<1, 2, 0> { inputs_[1] = right; } + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i") DECLARE_HYDROGEN_ACCESSOR(Sub) }; +class LRSubI: public LTemplateInstruction<1, 2, 0> { + public: + LRSubI(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i") + DECLARE_HYDROGEN_ACCESSOR(Sub) +}; + + class LConstantI: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i") @@ -951,6 +1079,8 @@ class LBranch: public LControlInstruction<1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(Branch, "branch") DECLARE_HYDROGEN_ACCESSOR(Branch) @@ -965,6 +1095,9 @@ class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> { temps_[0] = temp; } + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch") DECLARE_HYDROGEN_ACCESSOR(CompareMap) @@ -986,6 +1119,8 @@ class LJSArrayLength: public LTemplateInstruction<1, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length") DECLARE_HYDROGEN_ACCESSOR(JSArrayLength) }; @@ -997,6 +1132,8 @@ class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength, "fixed-array-base-length") DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength) @@ -1009,6 +1146,8 @@ class LMapEnumLength: public LTemplateInstruction<1, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + 
DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length") }; @@ -1019,6 +1158,8 @@ class LElementsKind: public LTemplateInstruction<1, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind") DECLARE_HYDROGEN_ACCESSOR(ElementsKind) }; @@ -1031,6 +1172,9 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> { temps_[0] = temp; } + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of") DECLARE_HYDROGEN_ACCESSOR(ValueOf) }; @@ -1043,31 +1187,39 @@ class LDateField: public LTemplateInstruction<1, 1, 1> { temps_[0] = temp; } + LOperand* date() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + Smi* index() const { return index_; } + DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field") DECLARE_HYDROGEN_ACCESSOR(ValueOf) - Smi* index() const { return index_; } private: Smi* index_; }; -class LSetDateField: public LTemplateInstruction<1, 2, 1> { +class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> { public: - LSetDateField(LOperand* date, LOperand* value, LOperand* temp, int index) - : index_(index) { - inputs_[0] = date; - inputs_[1] = value; - temps_[0] = temp; + LSeqStringSetChar(String::Encoding encoding, + LOperand* string, + LOperand* index, + LOperand* value) : encoding_(encoding) { + inputs_[0] = string; + inputs_[1] = index; + inputs_[2] = value; } - DECLARE_CONCRETE_INSTRUCTION(DateField, "date-set-field") - DECLARE_HYDROGEN_ACCESSOR(DateField) + String::Encoding encoding() { return encoding_; } + LOperand* string() { return inputs_[0]; } + LOperand* index() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } - int index() const { return index_; } + DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char") + DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar) private: - int index_; + String::Encoding encoding_; }; @@ -1077,6 +1229,8 @@ class LThrow: public LTemplateInstruction<0, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(Throw, "throw") }; @@ -1087,6 +1241,8 @@ class LBitNotI: public LTemplateInstruction<1, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i") }; @@ -1098,6 +1254,9 @@ class LAddI: public LTemplateInstruction<1, 2, 0> { inputs_[1] = right; } + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") DECLARE_HYDROGEN_ACCESSOR(Add) }; @@ -1110,6 +1269,9 @@ class LMathMinMax: public LTemplateInstruction<1, 2, 0> { inputs_[1] = right; } + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max") DECLARE_HYDROGEN_ACCESSOR(MathMinMax) }; @@ -1122,6 +1284,9 @@ class LPower: public LTemplateInstruction<1, 2, 0> { inputs_[1] = right; } + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + DECLARE_CONCRETE_INSTRUCTION(Power, "power") DECLARE_HYDROGEN_ACCESSOR(Power) }; @@ -1133,6 +1298,8 @@ class LRandom: public LTemplateInstruction<1, 1, 0> { inputs_[0] = global_object; } + LOperand* global_object() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(Random, "random") DECLARE_HYDROGEN_ACCESSOR(Random) }; @@ -1147,6 +1314,8 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> { } Token::Value op() const { return op_; } + LOperand* left() { return inputs_[0]; 
} + LOperand* right() { return inputs_[1]; } virtual Opcode opcode() const { return LInstruction::kArithmeticD; } virtual void CompileToNative(LCodeGen* generator); @@ -1165,12 +1334,14 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> { inputs_[1] = right; } + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + Token::Value op() const { return op_; } + virtual Opcode opcode() const { return LInstruction::kArithmeticT; } virtual void CompileToNative(LCodeGen* generator); virtual const char* Mnemonic() const; - Token::Value op() const { return op_; } - private: Token::Value op_; }; @@ -1182,6 +1353,8 @@ class LReturn: public LTemplateInstruction<0, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(Return, "return") }; @@ -1192,6 +1365,8 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> { inputs_[0] = object; } + LOperand* object() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field") DECLARE_HYDROGEN_ACCESSOR(LoadNamedField) }; @@ -1203,10 +1378,10 @@ class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> { inputs_[0] = object; } + LOperand* object() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic") DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic) - - LOperand* object() { return inputs_[0]; } }; @@ -1216,10 +1391,11 @@ class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> { inputs_[0] = object; } + LOperand* object() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic") DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric) - LOperand* object() { return inputs_[0]; } Handle name() const { return hydrogen()->name(); } }; @@ -1230,10 +1406,10 @@ class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> { inputs_[0] = function; } + LOperand* function() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype") DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype) - - LOperand* function() { return inputs_[0]; } }; @@ -1243,6 +1419,8 @@ class LLoadElements: public LTemplateInstruction<1, 1, 0> { inputs_[0] = object; } + LOperand* object() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements") }; @@ -1253,75 +1431,48 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> { inputs_[0] = object; } + LOperand* object() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer, "load-external-array-pointer") }; -class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> { +class LLoadKeyed: public LTemplateInstruction<1, 2, 0> { public: - LLoadKeyedFastElement(LOperand* elements, LOperand* key) { + LLoadKeyed(LOperand* elements, LOperand* key) { inputs_[0] = elements; inputs_[1] = key; } - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement) - LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } - uint32_t additional_index() const { return hydrogen()->index_offset(); } -}; - - -class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> { - public: - LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) { - inputs_[0] = elements; - inputs_[1] = key; + ElementsKind elements_kind() const { + return hydrogen()->elements_kind(); } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement, - 
"load-keyed-fast-double-element") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement) - - LOperand* elements() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - uint32_t additional_index() const { return hydrogen()->index_offset(); } -}; - - -class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> { - public: - LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) { - inputs_[0] = external_pointer; - inputs_[1] = key; + bool is_external() const { + return hydrogen()->is_external(); } - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement, - "load-keyed-specialized-array-element") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement) + DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed") + DECLARE_HYDROGEN_ACCESSOR(LoadKeyed) - LOperand* external_pointer() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - ElementsKind elements_kind() const { - return hydrogen()->elements_kind(); - } + virtual void PrintDataTo(StringStream* stream); uint32_t additional_index() const { return hydrogen()->index_offset(); } }; class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> { public: - LLoadKeyedGeneric(LOperand* obj, LOperand* key) { - inputs_[0] = obj; + LLoadKeyedGeneric(LOperand* object, LOperand* key) { + inputs_[0] = object; inputs_[1] = key; } - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic") - LOperand* object() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic") }; @@ -1338,10 +1489,11 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> { inputs_[0] = global_object; } + LOperand* global_object() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic") DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric) - LOperand* global_object() { return inputs_[0]; } Handle name() const { return hydrogen()->name(); } bool for_typeof() const { return hydrogen()->for_typeof(); } }; @@ -1354,10 +1506,11 @@ class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> { temps_[0] = temp; } + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell") DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell) - - LOperand* value() { return inputs_[0]; } }; @@ -1369,12 +1522,13 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> { inputs_[1] = value; } + LOperand* global_object() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } + DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic") DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric) - LOperand* global_object() { return InputAt(0); } Handle name() const { return hydrogen()->name(); } - LOperand* value() { return InputAt(1); } StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); } }; @@ -1385,10 +1539,11 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> { inputs_[0] = context; } + LOperand* context() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot") DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot) - LOperand* context() { return InputAt(0); } int slot_index() { return hydrogen()->slot_index(); } virtual void PrintDataTo(StringStream* stream); @@ -1402,11 +1557,12 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> { inputs_[1] = value; } + LOperand* context() { return inputs_[0]; } + LOperand* value() { return 
inputs_[1]; } + DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot") DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot) - LOperand* context() { return InputAt(0); } - LOperand* value() { return InputAt(1); } int slot_index() { return hydrogen()->slot_index(); } virtual void PrintDataTo(StringStream* stream); @@ -1419,6 +1575,8 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument") }; @@ -1455,9 +1613,9 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> { inputs_[0] = context; } - DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context") + LOperand* context() { return inputs_[0]; } - LOperand* context() { return InputAt(0); } + DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context") }; @@ -1476,7 +1634,7 @@ class LGlobalObject: public LTemplateInstruction<1, 1, 0> { DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object") - LOperand* context() { return InputAt(0); } + LOperand* context() { return inputs_[0]; } }; @@ -1486,9 +1644,9 @@ class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> { inputs_[0] = global_object; } - DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver") + LOperand* global_object() { return inputs_[0]; } - LOperand* global() { return InputAt(0); } + DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver") }; @@ -1510,11 +1668,11 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> { inputs_[0] = function; } + LOperand* function() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function") DECLARE_HYDROGEN_ACCESSOR(InvokeFunction) - LOperand* function() { return inputs_[0]; } - virtual void PrintDataTo(StringStream* stream); int arity() const { return hydrogen()->argument_count() - 1; } @@ -1528,6 +1686,8 @@ class LCallKeyed: public LTemplateInstruction<1, 1, 0> { inputs_[0] = key; } + LOperand* key() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed") DECLARE_HYDROGEN_ACCESSOR(CallKeyed) @@ -1556,10 +1716,11 @@ class LCallFunction: public LTemplateInstruction<1, 1, 0> { inputs_[0] = function; } + LOperand* function() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function") DECLARE_HYDROGEN_ACCESSOR(CallFunction) - LOperand* function() { return inputs_[0]; } int arity() const { return hydrogen()->argument_count() - 1; } }; @@ -1594,6 +1755,8 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> { inputs_[0] = constructor; } + LOperand* constructor() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new") DECLARE_HYDROGEN_ACCESSOR(CallNew) @@ -1619,6 +1782,8 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double") }; @@ -1629,6 +1794,8 @@ class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double") }; @@ -1639,6 +1806,8 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i") }; @@ -1649,18 +1818,24 @@ class LNumberTagU: public LTemplateInstruction<1, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(NumberTagU, 
"number-tag-u") }; class LNumberTagD: public LTemplateInstruction<1, 1, 2> { public: - LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) { + LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) { inputs_[0] = value; - temps_[0] = temp1; + temps_[0] = temp; temps_[1] = temp2; } + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d") }; @@ -1668,12 +1843,16 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> { // Sometimes truncating conversion from a tagged value to an int32. class LDoubleToI: public LTemplateInstruction<1, 1, 2> { public: - LDoubleToI(LOperand* value, LOperand* temp1, LOperand* temp2) { + LDoubleToI(LOperand* value, LOperand* temp, LOperand* temp2) { inputs_[0] = value; - temps_[0] = temp1; + temps_[0] = temp; temps_[1] = temp2; } + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i") DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) @@ -1685,15 +1864,20 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 2> { class LTaggedToI: public LTemplateInstruction<1, 1, 3> { public: LTaggedToI(LOperand* value, - LOperand* temp1, + LOperand* temp, LOperand* temp2, LOperand* temp3) { inputs_[0] = value; - temps_[0] = temp1; + temps_[0] = temp; temps_[1] = temp2; temps_[2] = temp3; } + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + LOperand* temp3() { return temps_[2]; } + DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i") DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) @@ -1707,6 +1891,8 @@ class LSmiTag: public LTemplateInstruction<1, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag") }; @@ -1717,6 +1903,8 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag") DECLARE_HYDROGEN_ACCESSOR(Change) }; @@ -1729,10 +1917,11 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> { inputs_[0] = value; } - DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag") - + LOperand* value() { return inputs_[0]; } bool needs_check() const { return needs_check_; } + DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag") + private: bool needs_check_; }; @@ -1740,20 +1929,21 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> { class LStoreNamedField: public LTemplateInstruction<0, 2, 1> { public: - LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp) { - inputs_[0] = obj; - inputs_[1] = val; + LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) { + inputs_[0] = object; + inputs_[1] = value; temps_[0] = temp; } + LOperand* object() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } + DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) virtual void PrintDataTo(StringStream* stream); - LOperand* object() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } - Handle name() const { return hydrogen()->name(); } bool is_in_object() { return hydrogen()->is_in_object(); } int offset() { return hydrogen()->offset(); } @@ -1763,109 +1953,67 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> { class 
LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> { public: - LStoreNamedGeneric(LOperand* obj, LOperand* val) { - inputs_[0] = obj; - inputs_[1] = val; + LStoreNamedGeneric(LOperand* object, LOperand* value) { + inputs_[0] = object; + inputs_[1] = value; } + LOperand* object() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } + DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic") DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric) virtual void PrintDataTo(StringStream* stream); - LOperand* object() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } Handle name() const { return hydrogen()->name(); } StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); } }; -class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> { +class LStoreKeyed: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) { - inputs_[0] = obj; + LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) { + inputs_[0] = object; inputs_[1] = key; - inputs_[2] = val; + inputs_[2] = value; } - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, - "store-keyed-fast-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) - - virtual void PrintDataTo(StringStream* stream); - - LOperand* object() { return inputs_[0]; } + bool is_external() const { return hydrogen()->is_external(); } + LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } - uint32_t additional_index() const { return hydrogen()->index_offset(); } -}; - - -class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> { - public: - LStoreKeyedFastDoubleElement(LOperand* elements, - LOperand* key, - LOperand* val) { - inputs_[0] = elements; - inputs_[1] = key; - inputs_[2] = val; + ElementsKind elements_kind() const { + return hydrogen()->elements_kind(); } - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement, - "store-keyed-fast-double-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement) + DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed") + DECLARE_HYDROGEN_ACCESSOR(StoreKeyed) virtual void PrintDataTo(StringStream* stream); - - LOperand* elements() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* value() { return inputs_[2]; } - uint32_t additional_index() const { return hydrogen()->index_offset(); } - bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) { + LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* value) { inputs_[0] = obj; inputs_[1] = key; - inputs_[2] = val; + inputs_[2] = value; } - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric) - - virtual void PrintDataTo(StringStream* stream); - LOperand* object() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } - StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); } -}; -class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> { - public: - LStoreKeyedSpecializedArrayElement(LOperand* external_pointer, - LOperand* key, - LOperand* val) { - inputs_[0] = external_pointer; - inputs_[1] = key; - inputs_[2] = val; - } + 
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic") + DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric) - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement, - "store-keyed-specialized-array-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement) + virtual void PrintDataTo(StringStream* stream); - LOperand* external_pointer() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* value() { return inputs_[2]; } - ElementsKind elements_kind() const { - return hydrogen()->elements_kind(); - } - uint32_t additional_index() const { return hydrogen()->index_offset(); } + StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); } }; @@ -1873,21 +2021,22 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> { public: LTransitionElementsKind(LOperand* object, LOperand* new_map_temp, - LOperand* temp_reg) { + LOperand* temp) { inputs_[0] = object; temps_[0] = new_map_temp; - temps_[1] = temp_reg; + temps_[1] = temp; } + LOperand* object() { return inputs_[0]; } + LOperand* new_map_temp() { return temps_[0]; } + LOperand* temp() { return temps_[1]; } + DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind, "transition-elements-kind") DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind) virtual void PrintDataTo(StringStream* stream); - LOperand* object() { return inputs_[0]; } - LOperand* new_map_reg() { return temps_[0]; } - LOperand* temp_reg() { return temps_[1]; } Handle original_map() { return hydrogen()->original_map(); } Handle transitioned_map() { return hydrogen()->transitioned_map(); } }; @@ -1900,11 +2049,11 @@ class LStringAdd: public LTemplateInstruction<1, 2, 0> { inputs_[1] = right; } - DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add") - DECLARE_HYDROGEN_ACCESSOR(StringAdd) - LOperand* left() { return inputs_[0]; } LOperand* right() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add") + DECLARE_HYDROGEN_ACCESSOR(StringAdd) }; @@ -1916,11 +2065,11 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> { inputs_[1] = index; } - DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at") - DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt) - LOperand* string() { return inputs_[0]; } LOperand* index() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at") + DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt) }; @@ -1930,10 +2079,10 @@ class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> { inputs_[0] = char_code; } + LOperand* char_code() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code") DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode) - - LOperand* char_code() { return inputs_[0]; } }; @@ -1943,10 +2092,10 @@ class LStringLength: public LTemplateInstruction<1, 1, 0> { inputs_[0] = string; } + LOperand* string() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length") DECLARE_HYDROGEN_ACCESSOR(StringLength) - - LOperand* string() { return inputs_[0]; } }; @@ -1956,7 +2105,7 @@ class LCheckFunction: public LTemplateInstruction<0, 1, 0> { inputs_[0] = value; } - LOperand* value() { return InputAt(0); } + LOperand* value() { return inputs_[0]; } DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function") DECLARE_HYDROGEN_ACCESSOR(CheckFunction) @@ -1969,6 +2118,8 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + 
DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type") DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType) }; @@ -1980,18 +2131,23 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps") DECLARE_HYDROGEN_ACCESSOR(CheckMaps) }; -class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> { +class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 2> { public: - LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2) { - temps_[0] = temp1; + LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) { + temps_[0] = temp; temps_[1] = temp2; } + LOperand* temp() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps") DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps) @@ -2006,6 +2162,8 @@ class LCheckSmi: public LTemplateInstruction<0, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi") }; @@ -2016,18 +2174,21 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi") }; class LClampDToUint8: public LTemplateInstruction<1, 1, 1> { public: - LClampDToUint8(LOperand* value, LOperand* temp) { - inputs_[0] = value; + LClampDToUint8(LOperand* unclamped, LOperand* temp) { + inputs_[0] = unclamped; temps_[0] = temp; } LOperand* unclamped() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8") }; @@ -2035,8 +2196,8 @@ class LClampDToUint8: public LTemplateInstruction<1, 1, 1> { class LClampIToUint8: public LTemplateInstruction<1, 1, 0> { public: - explicit LClampIToUint8(LOperand* value) { - inputs_[0] = value; + explicit LClampIToUint8(LOperand* unclamped) { + inputs_[0] = unclamped; } LOperand* unclamped() { return inputs_[0]; } @@ -2047,12 +2208,13 @@ class LClampIToUint8: public LTemplateInstruction<1, 1, 0> { class LClampTToUint8: public LTemplateInstruction<1, 1, 1> { public: - LClampTToUint8(LOperand* value, LOperand* temp) { - inputs_[0] = value; + LClampTToUint8(LOperand* unclamped, LOperand* temp) { + inputs_[0] = unclamped; temps_[0] = temp; } LOperand* unclamped() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8") }; @@ -2060,11 +2222,14 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> { class LAllocateObject: public LTemplateInstruction<1, 0, 2> { public: - LAllocateObject(LOperand* temp1, LOperand* temp2) { - temps_[0] = temp1; + LAllocateObject(LOperand* temp, LOperand* temp2) { + temps_[0] = temp; temps_[1] = temp2; } + LOperand* temp() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object") DECLARE_HYDROGEN_ACCESSOR(AllocateObject) }; @@ -2113,6 +2278,8 @@ class LToFastProperties: public LTemplateInstruction<1, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties") DECLARE_HYDROGEN_ACCESSOR(ToFastProperties) }; @@ -2124,6 +2291,8 @@ class LTypeof: public LTemplateInstruction<1, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof") }; @@ -2134,6 +2303,8 @@ class LTypeofIsAndBranch: public 
LControlInstruction<1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch") DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch) @@ -2149,6 +2320,8 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> { temps_[0] = temp; } + LOperand* temp() { return temps_[0]; } + DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch, "is-construct-call-and-branch") }; @@ -2156,15 +2329,15 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> { class LDeleteProperty: public LTemplateInstruction<1, 2, 0> { public: - LDeleteProperty(LOperand* obj, LOperand* key) { - inputs_[0] = obj; + LDeleteProperty(LOperand* object, LOperand* key) { + inputs_[0] = object; inputs_[1] = key; } - DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property") - LOperand* object() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property") }; @@ -2229,15 +2402,13 @@ class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> { }; -class LForInCacheArray: public LTemplateInstruction<1, 1, 1> { +class LForInCacheArray: public LTemplateInstruction<1, 1, 0> { public: - explicit LForInCacheArray(LOperand* map, LOperand* scratch) { + explicit LForInCacheArray(LOperand* map) { inputs_[0] = map; - temps_[0] = scratch; } LOperand* map() { return inputs_[0]; } - LOperand* scratch() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array") @@ -2311,6 +2482,9 @@ class LChunkBuilder BASE_EMBEDDED { HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) #undef DECLARE_DO + LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend); + LInstruction* DoRSub(HSub* instr); + static bool HasMagicNumberForDivisor(int32_t divisor); static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val); static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val); diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index e9ba5eec72..06b021669b 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -146,8 +146,20 @@ bool LCodeGen::GeneratePrologue() { __ bind(&ok); } - __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); - __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP. + + info()->set_prologue_offset(masm_->pc_offset()); + { + PredictableCodeSizeScope predictible_code_size_scope( + masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize); + // The following three instructions must remain together and unmodified + // for code aging to work properly. + __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); + // Load undefined value here, so the value is ready for the loop + // below. + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + // Adjust FP to point to saved FP. + __ add(fp, sp, Operand(2 * kPointerSize)); + } // Reserve space for the stack slots needed by the code. 
   int slots = GetStackSlotCount();
@@ -222,7 +234,30 @@ bool LCodeGen::GenerateBody() {
     }
 
     if (emit_instructions) {
-      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+      if (FLAG_code_comments) {
+        HValue* hydrogen = instr->hydrogen_value();
+        if (hydrogen != NULL) {
+          if (hydrogen->IsChange()) {
+            HValue* changed_value = HChange::cast(hydrogen)->value();
+            int use_id = 0;
+            const char* use_mnemo = "dead";
+            if (hydrogen->UseCount() >= 1) {
+              HValue* use_value = hydrogen->uses().value();
+              use_id = use_value->id();
+              use_mnemo = use_value->Mnemonic();
+            }
+            Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
+                    current_instruction_, instr->Mnemonic(),
+                    changed_value->id(), changed_value->Mnemonic(),
+                    use_id, use_mnemo);
+          } else {
+            Comment(";;; @%d: %s. <#%d>", current_instruction_,
+                    instr->Mnemonic(), hydrogen->id());
+          }
+        } else {
+          Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+        }
+      }
       instr->CompileToNative(this);
     }
   }
@@ -464,7 +499,9 @@ MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
 
 void LCodeGen::WriteTranslation(LEnvironment* environment,
-                                Translation* translation) {
+                                Translation* translation,
+                                int* arguments_index,
+                                int* arguments_count) {
   if (environment == NULL) return;
 
   // The translation includes one command per value in the environment.
@@ -472,7 +509,17 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
   // The output frame height does not include the parameters.
   int height = translation_size - environment->parameter_count();
 
-  WriteTranslation(environment->outer(), translation);
+  // Function parameters are arguments to the outermost environment. The
+  // arguments index points to the first element of a sequence of tagged
+  // values on the stack that represent the arguments. This needs to be
+  // kept in sync with the LArgumentsElements implementation.
+  *arguments_index = -environment->parameter_count();
+  *arguments_count = environment->parameter_count();
+
+  WriteTranslation(environment->outer(),
+                   translation,
+                   arguments_index,
+                   arguments_count);
   int closure_id = *info()->closure() != *environment->closure()
       ? DefineDeoptimizationLiteral(environment->closure())
       : Translation::kSelfLiteralId;
@@ -498,6 +545,17 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
       translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
       break;
   }
+
+  // Inlined frames which push their arguments cause the index to be
+  // bumped and a new stack area to be used for materialization.
+  if (environment->entry() != NULL &&
+      environment->entry()->arguments_pushed()) {
+    *arguments_index = *arguments_index < 0
+        ?
GetStackSlotCount() + : *arguments_index + *arguments_count; + *arguments_count = environment->entry()->arguments_count() + 1; + } + for (int i = 0; i < translation_size; ++i) { LOperand* value = environment->values()->at(i); // spilled_registers_ and spilled_double_registers_ are either @@ -509,7 +567,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, AddToTranslation(translation, environment->spilled_registers()[value->index()], environment->HasTaggedValueAt(i), - environment->HasUint32ValueAt(i)); + environment->HasUint32ValueAt(i), + *arguments_index, + *arguments_count); } else if ( value->IsDoubleRegister() && environment->spilled_double_registers()[value->index()] != NULL) { @@ -518,14 +578,18 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, translation, environment->spilled_double_registers()[value->index()], false, - false); + false, + *arguments_index, + *arguments_count); } } AddToTranslation(translation, value, environment->HasTaggedValueAt(i), - environment->HasUint32ValueAt(i)); + environment->HasUint32ValueAt(i), + *arguments_index, + *arguments_count); } } @@ -533,12 +597,14 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, void LCodeGen::AddToTranslation(Translation* translation, LOperand* op, bool is_tagged, - bool is_uint32) { + bool is_uint32, + int arguments_index, + int arguments_count) { if (op == NULL) { // TODO(twuerthinger): Introduce marker operands to indicate that this value // is not present and must be reconstructed from the deoptimizer. Currently // this is only used for the arguments object. - translation->StoreArgumentsObject(); + translation->StoreArgumentsObject(arguments_index, arguments_count); } else if (op->IsStackSlot()) { if (is_tagged) { translation->StoreStackSlot(op->index()); @@ -577,22 +643,24 @@ void LCodeGen::AddToTranslation(Translation* translation, void LCodeGen::CallCode(Handle code, RelocInfo::Mode mode, - LInstruction* instr) { - CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); + LInstruction* instr, + TargetAddressStorageMode storage_mode) { + CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode); } void LCodeGen::CallCodeGeneric(Handle code, RelocInfo::Mode mode, LInstruction* instr, - SafepointMode safepoint_mode) { + SafepointMode safepoint_mode, + TargetAddressStorageMode storage_mode) { ASSERT(instr != NULL); // Block literal pool emission to ensure nop indicating no inlined smi code // is in the correct position. 
Assembler::BlockConstPoolScope block_const_pool(masm()); LPointerMap* pointers = instr->pointer_map(); RecordPosition(pointers->position()); - __ Call(code, mode); + __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode); RecordSafepointWithLazyDeopt(instr, safepoint_mode); // Signal that we don't inline smi code before these stubs in the @@ -644,15 +712,16 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, int frame_count = 0; int jsframe_count = 0; + int args_index = 0; + int args_count = 0; for (LEnvironment* e = environment; e != NULL; e = e->outer()) { ++frame_count; if (e->frame_type() == JS_FUNCTION) { ++jsframe_count; } } - Translation translation(&translations_, frame_count, jsframe_count, - zone()); - WriteTranslation(environment, &translation); + Translation translation(&translations_, frame_count, jsframe_count, zone()); + WriteTranslation(environment, &translation, &args_index, &args_count); int deoptimization_index = deoptimizations_.length(); int pc_offset = masm()->pc_offset(); environment->Register(deoptimization_index, @@ -919,7 +988,7 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { void LCodeGen::DoModI(LModI* instr) { if (instr->hydrogen()->HasPowerOf2Divisor()) { - Register dividend = ToRegister(instr->InputAt(0)); + Register dividend = ToRegister(instr->left()); Register result = ToRegister(instr->result()); int32_t divisor = @@ -944,112 +1013,135 @@ void LCodeGen::DoModI(LModI* instr) { } // These registers hold untagged 32 bit values. - Register left = ToRegister(instr->InputAt(0)); - Register right = ToRegister(instr->InputAt(1)); + Register left = ToRegister(instr->left()); + Register right = ToRegister(instr->right()); Register result = ToRegister(instr->result()); + Label done; - Register scratch = scratch0(); - Register scratch2 = ToRegister(instr->TempAt(0)); - DwVfpRegister dividend = ToDoubleRegister(instr->TempAt(1)); - DwVfpRegister divisor = ToDoubleRegister(instr->TempAt(2)); - DwVfpRegister quotient = double_scratch0(); - - ASSERT(!dividend.is(divisor)); - ASSERT(!dividend.is(quotient)); - ASSERT(!divisor.is(quotient)); - ASSERT(!scratch.is(left)); - ASSERT(!scratch.is(right)); - ASSERT(!scratch.is(result)); + if (CpuFeatures::IsSupported(SUDIV)) { + CpuFeatures::Scope scope(SUDIV); + // Check for x % 0. + if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { + __ cmp(right, Operand(0)); + DeoptimizeIf(eq, instr->environment()); + } - Label done, vfp_modulo, both_positive, right_negative; + // For r3 = r1 % r2; we can have the following ARM code + // sdiv r3, r1, r2 + // mls r3, r3, r2, r1 - // Check for x % 0. 
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { - __ cmp(right, Operand(0)); - DeoptimizeIf(eq, instr->environment()); - } + __ sdiv(result, left, right); + __ mls(result, result, right, left); + __ cmp(result, Operand(0)); + __ b(ne, &done); - __ Move(result, left); + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + __ cmp(left, Operand(0)); + DeoptimizeIf(lt, instr->environment()); + } + } else { + Register scratch = scratch0(); + Register scratch2 = ToRegister(instr->temp()); + DwVfpRegister dividend = ToDoubleRegister(instr->temp2()); + DwVfpRegister divisor = ToDoubleRegister(instr->temp3()); + DwVfpRegister quotient = double_scratch0(); + + ASSERT(!dividend.is(divisor)); + ASSERT(!dividend.is(quotient)); + ASSERT(!divisor.is(quotient)); + ASSERT(!scratch.is(left)); + ASSERT(!scratch.is(right)); + ASSERT(!scratch.is(result)); + + Label vfp_modulo, both_positive, right_negative; + + // Check for x % 0. + if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { + __ cmp(right, Operand(0)); + DeoptimizeIf(eq, instr->environment()); + } - // (0 % x) must yield 0 (if x is finite, which is the case here). - __ cmp(left, Operand(0)); - __ b(eq, &done); - // Preload right in a vfp register. - __ vmov(divisor.low(), right); - __ b(lt, &vfp_modulo); + __ Move(result, left); - __ cmp(left, Operand(right)); - __ b(lt, &done); - - // Check for (positive) power of two on the right hand side. - __ JumpIfNotPowerOfTwoOrZeroAndNeg(right, - scratch, - &right_negative, - &both_positive); - // Perform modulo operation (scratch contains right - 1). - __ and_(result, scratch, Operand(left)); - __ b(&done); + // (0 % x) must yield 0 (if x is finite, which is the case here). + __ cmp(left, Operand(0)); + __ b(eq, &done); + // Preload right in a vfp register. + __ vmov(divisor.low(), right); + __ b(lt, &vfp_modulo); - __ bind(&right_negative); - // Negate right. The sign of the divisor does not matter. - __ rsb(right, right, Operand(0)); - - __ bind(&both_positive); - const int kUnfolds = 3; - // If the right hand side is smaller than the (nonnegative) - // left hand side, the left hand side is the result. - // Else try a few subtractions of the left hand side. - __ mov(scratch, left); - for (int i = 0; i < kUnfolds; i++) { - // Check if the left hand side is less or equal than the - // the right hand side. - __ cmp(scratch, Operand(right)); - __ mov(result, scratch, LeaveCC, lt); + __ cmp(left, Operand(right)); __ b(lt, &done); - // If not, reduce the left hand side by the right hand - // side and check again. - if (i < kUnfolds - 1) __ sub(scratch, scratch, right); - } - - __ bind(&vfp_modulo); - // Load the arguments in VFP registers. - // The divisor value is preloaded before. Be careful that 'right' is only live - // on entry. - __ vmov(dividend.low(), left); - // From here on don't use right as it may have been reallocated (for example - // to scratch2). - right = no_reg; - - __ vcvt_f64_s32(dividend, dividend.low()); - __ vcvt_f64_s32(divisor, divisor.low()); - - // We do not care about the sign of the divisor. - __ vabs(divisor, divisor); - // Compute the quotient and round it to a 32bit integer. - __ vdiv(quotient, dividend, divisor); - __ vcvt_s32_f64(quotient.low(), quotient); - __ vcvt_f64_s32(quotient, quotient.low()); - - // Compute the remainder in result. 
- DwVfpRegister double_scratch = dividend; - __ vmul(double_scratch, divisor, quotient); - __ vcvt_s32_f64(double_scratch.low(), double_scratch); - __ vmov(scratch, double_scratch.low()); - - if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ sub(result, left, scratch); - } else { - Label ok; - // Check for -0. - __ sub(scratch2, left, scratch, SetCC); - __ b(ne, &ok); - __ cmp(left, Operand(0)); - DeoptimizeIf(mi, instr->environment()); - __ bind(&ok); - // Load the result and we are done. - __ mov(result, scratch2); - } + // Check for (positive) power of two on the right hand side. + __ JumpIfNotPowerOfTwoOrZeroAndNeg(right, + scratch, + &right_negative, + &both_positive); + // Perform modulo operation (scratch contains right - 1). + __ and_(result, scratch, Operand(left)); + __ b(&done); + + __ bind(&right_negative); + // Negate right. The sign of the divisor does not matter. + __ rsb(right, right, Operand(0)); + + __ bind(&both_positive); + const int kUnfolds = 3; + // If the right hand side is smaller than the (nonnegative) + // left hand side, the left hand side is the result. + // Else try a few subtractions of the left hand side. + __ mov(scratch, left); + for (int i = 0; i < kUnfolds; i++) { + // Check if the left hand side is less or equal than the + // the right hand side. + __ cmp(scratch, Operand(right)); + __ mov(result, scratch, LeaveCC, lt); + __ b(lt, &done); + // If not, reduce the left hand side by the right hand + // side and check again. + if (i < kUnfolds - 1) __ sub(scratch, scratch, right); + } + + __ bind(&vfp_modulo); + // Load the arguments in VFP registers. + // The divisor value is preloaded before. Be careful that 'right' + // is only live on entry. + __ vmov(dividend.low(), left); + // From here on don't use right as it may have been reallocated + // (for example to scratch2). + right = no_reg; + + __ vcvt_f64_s32(dividend, dividend.low()); + __ vcvt_f64_s32(divisor, divisor.low()); + + // We do not care about the sign of the divisor. + __ vabs(divisor, divisor); + // Compute the quotient and round it to a 32bit integer. + __ vdiv(quotient, dividend, divisor); + __ vcvt_s32_f64(quotient.low(), quotient); + __ vcvt_f64_s32(quotient, quotient.low()); + + // Compute the remainder in result. + DwVfpRegister double_scratch = dividend; + __ vmul(double_scratch, divisor, quotient); + __ vcvt_s32_f64(double_scratch.low(), double_scratch); + __ vmov(scratch, double_scratch.low()); + + if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + __ sub(result, left, scratch); + } else { + Label ok; + // Check for -0. + __ sub(scratch2, left, scratch, SetCC); + __ b(ne, &ok); + __ cmp(left, Operand(0)); + DeoptimizeIf(mi, instr->environment()); + __ bind(&ok); + // Load the result and we are done. 
+ __ mov(result, scratch2); + } + } __ bind(&done); } @@ -1154,15 +1246,18 @@ void LCodeGen::DoDivI(LDivI* instr) { DeferredDivI(LCodeGen* codegen, LDivI* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { - codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV); + codegen()->DoDeferredBinaryOpStub(instr_->pointer_map(), + instr_->left(), + instr_->right(), + Token::DIV); } virtual LInstruction* instr() { return instr_; } private: LDivI* instr_; }; - const Register left = ToRegister(instr->InputAt(0)); - const Register right = ToRegister(instr->InputAt(1)); + const Register left = ToRegister(instr->left()); + const Register right = ToRegister(instr->right()); const Register scratch = scratch0(); const Register result = ToRegister(instr->result()); @@ -1229,17 +1324,29 @@ void LCodeGen::DoDivI(LDivI* instr) { } +void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { + DwVfpRegister addend = ToDoubleRegister(instr->addend()); + DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier()); + DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); + + // This is computed in-place. + ASSERT(addend.is(ToDoubleRegister(instr->result()))); + + __ vmla(addend, multiplier, multiplicand); +} + + void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) { const Register result = ToRegister(instr->result()); - const Register left = ToRegister(instr->InputAt(0)); - const Register remainder = ToRegister(instr->TempAt(0)); + const Register left = ToRegister(instr->left()); + const Register remainder = ToRegister(instr->temp()); const Register scratch = scratch0(); // We only optimize this for division by constants, because the standard // integer division routine is usually slower than transitionning to VFP. // This could be optimized on processors with SDIV available. - ASSERT(instr->InputAt(1)->IsConstantOperand()); - int32_t divisor = ToInteger32(LConstantOperand::cast(instr->InputAt(1))); + ASSERT(instr->right()->IsConstantOperand()); + int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right())); if (divisor < 0) { __ cmp(left, Operand(0)); DeoptimizeIf(eq, instr->environment()); @@ -1257,11 +1364,12 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) { } -template -void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr, +void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map, + LOperand* left_argument, + LOperand* right_argument, Token::Value op) { - Register left = ToRegister(instr->InputAt(0)); - Register right = ToRegister(instr->InputAt(1)); + Register left = ToRegister(left_argument); + Register right = ToRegister(right_argument); PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles); // Move left to r1 and right to r0 for the stub call. @@ -1280,7 +1388,7 @@ void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr, } BinaryOpStub stub(op, OVERWRITE_LEFT); __ CallStub(&stub); - RecordSafepointWithRegistersAndDoubles(instr->pointer_map(), + RecordSafepointWithRegistersAndDoubles(pointer_map, 0, Safepoint::kNoLazyDeopt); // Overwrite the stored value of r0 with the result of the stub. @@ -1292,8 +1400,8 @@ void LCodeGen::DoMulI(LMulI* instr) { Register scratch = scratch0(); Register result = ToRegister(instr->result()); // Note that result may alias left. 
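// Illustrative sketch, not V8 source: DoMultiplyAddD above lowers an
// addend + multiplier * multiplicand pattern to a single vmla, which
// accumulates into its destination register -- hence the ASSERT that the
// addend operand aliases the result.  Scalar equivalent of the value the
// instruction produces (assuming the compiler does not contract this into a
// fused multiply-add, since VFP vmla rounds the product separately):
double MultiplyAddD(double addend, double multiplier, double multiplicand) {
  return addend + multiplier * multiplicand;  // computed in place on ARM
}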
- Register left = ToRegister(instr->InputAt(0)); - LOperand* right_op = instr->InputAt(1); + Register left = ToRegister(instr->left()); + LOperand* right_op = instr->right(); bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); bool bailout_on_minus_zero = @@ -1360,7 +1468,7 @@ void LCodeGen::DoMulI(LMulI* instr) { } else { Register right = EmitLoadRegister(right_op, scratch); if (bailout_on_minus_zero) { - __ orr(ToRegister(instr->TempAt(0)), left, right); + __ orr(ToRegister(instr->temp()), left, right); } if (can_overflow) { @@ -1377,7 +1485,7 @@ void LCodeGen::DoMulI(LMulI* instr) { Label done; __ cmp(result, Operand(0)); __ b(ne, &done); - __ cmp(ToRegister(instr->TempAt(0)), Operand(0)); + __ cmp(ToRegister(instr->temp()), Operand(0)); DeoptimizeIf(mi, instr->environment()); __ bind(&done); } @@ -1386,8 +1494,8 @@ void LCodeGen::DoMulI(LMulI* instr) { void LCodeGen::DoBitI(LBitI* instr) { - LOperand* left_op = instr->InputAt(0); - LOperand* right_op = instr->InputAt(1); + LOperand* left_op = instr->left(); + LOperand* right_op = instr->right(); ASSERT(left_op->IsRegister()); Register left = ToRegister(left_op); Register result = ToRegister(instr->result()); @@ -1420,14 +1528,17 @@ void LCodeGen::DoBitI(LBitI* instr) { void LCodeGen::DoShiftI(LShiftI* instr) { // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so // result may alias either of them. - LOperand* right_op = instr->InputAt(1); - Register left = ToRegister(instr->InputAt(0)); + LOperand* right_op = instr->right(); + Register left = ToRegister(instr->left()); Register result = ToRegister(instr->result()); Register scratch = scratch0(); if (right_op->IsRegister()) { // Mask the right_op operand. __ and_(scratch, ToRegister(right_op), Operand(0x1F)); switch (instr->op()) { + case Token::ROR: + __ mov(result, Operand(left, ROR, scratch)); + break; case Token::SAR: __ mov(result, Operand(left, ASR, scratch)); break; @@ -1451,6 +1562,13 @@ void LCodeGen::DoShiftI(LShiftI* instr) { int value = ToInteger32(LConstantOperand::cast(right_op)); uint8_t shift_count = static_cast(value & 0x1F); switch (instr->op()) { + case Token::ROR: + if (shift_count != 0) { + __ mov(result, Operand(left, ROR, shift_count)); + } else { + __ Move(result, left); + } + break; case Token::SAR: if (shift_count != 0) { __ mov(result, Operand(left, ASR, shift_count)); @@ -1485,8 +1603,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) { void LCodeGen::DoSubI(LSubI* instr) { - LOperand* left = instr->InputAt(0); - LOperand* right = instr->InputAt(1); + LOperand* left = instr->left(); + LOperand* right = instr->right(); LOperand* result = instr->result(); bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); SBit set_cond = can_overflow ? SetCC : LeaveCC; @@ -1505,6 +1623,27 @@ void LCodeGen::DoSubI(LSubI* instr) { } +void LCodeGen::DoRSubI(LRSubI* instr) { + LOperand* left = instr->left(); + LOperand* right = instr->right(); + LOperand* result = instr->result(); + bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); + SBit set_cond = can_overflow ? 
SetCC : LeaveCC; + + if (right->IsStackSlot() || right->IsArgument()) { + Register right_reg = EmitLoadRegister(right, ip); + __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); + } else { + ASSERT(right->IsRegister() || right->IsConstantOperand()); + __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); + } + + if (can_overflow) { + DeoptimizeIf(vs, instr->environment()); + } +} + + void LCodeGen::DoConstantI(LConstantI* instr) { ASSERT(instr->result()->IsRegister()); __ mov(ToRegister(instr->result()), Operand(instr->value())); @@ -1515,7 +1654,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) { ASSERT(instr->result()->IsDoubleRegister()); DwVfpRegister result = ToDoubleRegister(instr->result()); double v = instr->value(); - __ Vmov(result, v); + __ Vmov(result, v, scratch0()); } @@ -1532,28 +1671,28 @@ void LCodeGen::DoConstantT(LConstantT* instr) { void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) { Register result = ToRegister(instr->result()); - Register array = ToRegister(instr->InputAt(0)); + Register array = ToRegister(instr->value()); __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset)); } void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) { Register result = ToRegister(instr->result()); - Register array = ToRegister(instr->InputAt(0)); + Register array = ToRegister(instr->value()); __ ldr(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset)); } void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) { Register result = ToRegister(instr->result()); - Register map = ToRegister(instr->InputAt(0)); + Register map = ToRegister(instr->value()); __ EnumLength(result, map); } void LCodeGen::DoElementsKind(LElementsKind* instr) { Register result = ToRegister(instr->result()); - Register input = ToRegister(instr->InputAt(0)); + Register input = ToRegister(instr->value()); // Load map into |result|. __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset)); @@ -1566,9 +1705,9 @@ void LCodeGen::DoElementsKind(LElementsKind* instr) { void LCodeGen::DoValueOf(LValueOf* instr) { - Register input = ToRegister(instr->InputAt(0)); + Register input = ToRegister(instr->value()); Register result = ToRegister(instr->result()); - Register map = ToRegister(instr->TempAt(0)); + Register map = ToRegister(instr->temp()); Label done; // If the object is a smi return the object. 
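// Illustrative sketch, not V8 source: the DoShiftI hunk above adds
// Token::ROR, lowered to the ARM ROR shifter operand, and DoRSubI lowers to
// rsb ("reverse subtract": operand minus register).  Plain C++ equivalents
// of the two 32-bit operations, assuming no overflow in the subtraction
// (the emitted code deopts on overflow when kCanOverflow is set):
#include <cstdint>

uint32_t RotateRight32(uint32_t value, uint32_t amount) {
  amount &= 0x1F;                 // the generated code masks the count with 0x1F
  if (amount == 0) return value;  // a zero count is just a register move
  return (value >> amount) | (value << (32 - amount));
}

int32_t ReverseSubtract(int32_t left, int32_t right) {
  return right - left;            // rsb swaps the operands relative to sub
}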
@@ -1587,9 +1726,9 @@ void LCodeGen::DoValueOf(LValueOf* instr) { void LCodeGen::DoDateField(LDateField* instr) { - Register object = ToRegister(instr->InputAt(0)); + Register object = ToRegister(instr->date()); Register result = ToRegister(instr->result()); - Register scratch = ToRegister(instr->TempAt(0)); + Register scratch = ToRegister(instr->temp()); Smi* index = instr->index(); Label runtime, done; ASSERT(object.is(result)); @@ -1625,15 +1764,24 @@ void LCodeGen::DoDateField(LDateField* instr) { } +void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { + SeqStringSetCharGenerator::Generate(masm(), + instr->encoding(), + ToRegister(instr->string()), + ToRegister(instr->index()), + ToRegister(instr->value())); +} + + void LCodeGen::DoBitNotI(LBitNotI* instr) { - Register input = ToRegister(instr->InputAt(0)); + Register input = ToRegister(instr->value()); Register result = ToRegister(instr->result()); __ mvn(result, Operand(input)); } void LCodeGen::DoThrow(LThrow* instr) { - Register input_reg = EmitLoadRegister(instr->InputAt(0), ip); + Register input_reg = EmitLoadRegister(instr->value(), ip); __ push(input_reg); CallRuntime(Runtime::kThrow, 1, instr); @@ -1644,8 +1792,8 @@ void LCodeGen::DoThrow(LThrow* instr) { void LCodeGen::DoAddI(LAddI* instr) { - LOperand* left = instr->InputAt(0); - LOperand* right = instr->InputAt(1); + LOperand* left = instr->left(); + LOperand* right = instr->right(); LOperand* result = instr->result(); bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); SBit set_cond = can_overflow ? SetCC : LeaveCC; @@ -1665,8 +1813,8 @@ void LCodeGen::DoAddI(LAddI* instr) { void LCodeGen::DoMathMinMax(LMathMinMax* instr) { - LOperand* left = instr->InputAt(0); - LOperand* right = instr->InputAt(1); + LOperand* left = instr->left(); + LOperand* right = instr->right(); HMathMinMax::Operation operation = instr->hydrogen()->operation(); Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; if (instr->hydrogen()->representation().IsInteger32()) { @@ -1727,8 +1875,8 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { void LCodeGen::DoArithmeticD(LArithmeticD* instr) { - DoubleRegister left = ToDoubleRegister(instr->InputAt(0)); - DoubleRegister right = ToDoubleRegister(instr->InputAt(1)); + DoubleRegister left = ToDoubleRegister(instr->left()); + DoubleRegister right = ToDoubleRegister(instr->right()); DoubleRegister result = ToDoubleRegister(instr->result()); switch (instr->op()) { case Token::ADD: @@ -1767,8 +1915,8 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) { void LCodeGen::DoArithmeticT(LArithmeticT* instr) { - ASSERT(ToRegister(instr->InputAt(0)).is(r1)); - ASSERT(ToRegister(instr->InputAt(1)).is(r0)); + ASSERT(ToRegister(instr->left()).is(r1)); + ASSERT(ToRegister(instr->right()).is(r0)); ASSERT(ToRegister(instr->result()).is(r0)); BinaryOpStub stub(instr->op(), NO_OVERWRITE); @@ -1813,11 +1961,11 @@ void LCodeGen::DoBranch(LBranch* instr) { Representation r = instr->hydrogen()->value()->representation(); if (r.IsInteger32()) { - Register reg = ToRegister(instr->InputAt(0)); + Register reg = ToRegister(instr->value()); __ cmp(reg, Operand(0)); EmitBranch(true_block, false_block, ne); } else if (r.IsDouble()) { - DoubleRegister reg = ToDoubleRegister(instr->InputAt(0)); + DoubleRegister reg = ToDoubleRegister(instr->value()); Register scratch = scratch0(); // Test the double value. Zero and NaN are false. 
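// Illustrative sketch, not V8 source: most hunks in this file replace
// positional operand accessors such as instr->InputAt(0) and instr->TempAt(0)
// with named ones (value(), left(), right(), temp(), temp2(), ...).  The
// lithium instruction declarations are not part of this diff; the sketch
// below only shows the kind of thin wrapper such accessors are assumed to
// be, not the real LInstruction hierarchy:
struct LOperand;

struct LBinaryOpSketch {
  LOperand* inputs_[2];
  LOperand* temps_[1];

  LOperand* left()  { return inputs_[0]; }  // previously InputAt(0)
  LOperand* right() { return inputs_[1]; }  // previously InputAt(1)
  LOperand* temp()  { return temps_[0]; }   // previously TempAt(0)
};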
@@ -1826,7 +1974,7 @@ void LCodeGen::DoBranch(LBranch* instr) { EmitBranch(true_block, false_block, eq); } else { ASSERT(r.IsTagged()); - Register reg = ToRegister(instr->InputAt(0)); + Register reg = ToRegister(instr->value()); HType type = instr->hydrogen()->value()->type(); if (type.IsBoolean()) { __ CompareRoot(reg, Heap::kTrueValueRootIndex); @@ -1965,8 +2113,8 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { - LOperand* left = instr->InputAt(0); - LOperand* right = instr->InputAt(1); + LOperand* left = instr->left(); + LOperand* right = instr->right(); int false_block = chunk_->LookupDestination(instr->false_block_id()); int true_block = chunk_->LookupDestination(instr->true_block_id()); Condition cond = TokenToCondition(instr->op(), false); @@ -2006,8 +2154,8 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { - Register left = ToRegister(instr->InputAt(0)); - Register right = ToRegister(instr->InputAt(1)); + Register left = ToRegister(instr->left()); + Register right = ToRegister(instr->right()); int false_block = chunk_->LookupDestination(instr->false_block_id()); int true_block = chunk_->LookupDestination(instr->true_block_id()); @@ -2017,7 +2165,7 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) { - Register left = ToRegister(instr->InputAt(0)); + Register left = ToRegister(instr->left()); int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); @@ -2028,7 +2176,7 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) { void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) { Register scratch = scratch0(); - Register reg = ToRegister(instr->InputAt(0)); + Register reg = ToRegister(instr->value()); int false_block = chunk_->LookupDestination(instr->false_block_id()); // If the expression is known to be untagged or a smi, then it's definitely @@ -2096,8 +2244,8 @@ Condition LCodeGen::EmitIsObject(Register input, void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) { - Register reg = ToRegister(instr->InputAt(0)); - Register temp1 = ToRegister(instr->TempAt(0)); + Register reg = ToRegister(instr->value()); + Register temp1 = ToRegister(instr->temp()); int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); @@ -2122,8 +2270,8 @@ Condition LCodeGen::EmitIsString(Register input, void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { - Register reg = ToRegister(instr->InputAt(0)); - Register temp1 = ToRegister(instr->TempAt(0)); + Register reg = ToRegister(instr->value()); + Register temp1 = ToRegister(instr->temp()); int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); @@ -2140,15 +2288,15 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); - Register input_reg = EmitLoadRegister(instr->InputAt(0), ip); + Register input_reg = EmitLoadRegister(instr->value(), ip); __ tst(input_reg, Operand(kSmiTagMask)); EmitBranch(true_block, false_block, eq); } void 
LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { - Register input = ToRegister(instr->InputAt(0)); - Register temp = ToRegister(instr->TempAt(0)); + Register input = ToRegister(instr->value()); + Register temp = ToRegister(instr->temp()); int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); @@ -2218,7 +2366,7 @@ static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { Register scratch = scratch0(); - Register input = ToRegister(instr->InputAt(0)); + Register input = ToRegister(instr->value()); int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); @@ -2233,10 +2381,10 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { - Register input = ToRegister(instr->InputAt(0)); + Register input = ToRegister(instr->value()); Register result = ToRegister(instr->result()); - __ AbortIfNotString(input); + __ AssertString(input); __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset)); __ IndexFromHash(result, result); @@ -2245,7 +2393,7 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { void LCodeGen::DoHasCachedArrayIndexAndBranch( LHasCachedArrayIndexAndBranch* instr) { - Register input = ToRegister(instr->InputAt(0)); + Register input = ToRegister(instr->value()); Register scratch = scratch0(); int true_block = chunk_->LookupDestination(instr->true_block_id()); @@ -2326,9 +2474,9 @@ void LCodeGen::EmitClassOfTest(Label* is_true, void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { - Register input = ToRegister(instr->InputAt(0)); + Register input = ToRegister(instr->value()); Register temp = scratch0(); - Register temp2 = ToRegister(instr->TempAt(0)); + Register temp2 = ToRegister(instr->temp()); Handle class_name = instr->hydrogen()->class_name(); int true_block = chunk_->LookupDestination(instr->true_block_id()); @@ -2344,8 +2492,8 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { - Register reg = ToRegister(instr->InputAt(0)); - Register temp = ToRegister(instr->TempAt(0)); + Register reg = ToRegister(instr->value()); + Register temp = ToRegister(instr->temp()); int true_block = instr->true_block_id(); int false_block = instr->false_block_id(); @@ -2356,8 +2504,8 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { void LCodeGen::DoInstanceOf(LInstanceOf* instr) { - ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0. - ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1. + ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0. + ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1. 
InstanceofStub stub(InstanceofStub::kArgsInRegisters); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); @@ -2388,8 +2536,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr); Label done, false_result; - Register object = ToRegister(instr->InputAt(0)); - Register temp = ToRegister(instr->TempAt(0)); + Register object = ToRegister(instr->value()); + Register temp = ToRegister(instr->temp()); Register result = ToRegister(instr->result()); ASSERT(object.is(r0)); @@ -2412,6 +2560,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { // We use Factory::the_hole_value() on purpose instead of loading from the // root array to force relocation to be able to later patch with // the cached map. + PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize); Handle cell = factory()->NewJSGlobalPropertyCell(factory()->the_hole_value()); __ mov(ip, Operand(Handle(cell))); @@ -2469,10 +2618,13 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, // Get the temp register reserved by the instruction. This needs to be r4 as // its slot of the pushing of safepoint registers is used to communicate the // offset to the location of the map check. - Register temp = ToRegister(instr->TempAt(0)); + Register temp = ToRegister(instr->temp()); ASSERT(temp.is(r4)); __ LoadHeapObject(InstanceofStub::right(), instr->function()); static const int kAdditionalDelta = 5; + // Make sure that code size is predicable, since we use specific constants + // offsets in the code to find embedded values.. + PredictableCodeSizeScope predictable(masm_, 6 * Assembler::kInstrSize); int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta; Label before_push_delta; __ bind(&before_push_delta); @@ -2566,7 +2718,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { // it as no longer deleted. if (instr->hydrogen()->RequiresHoleCheck()) { // We use a temp to check the payload (CompareRoot might clobber ip). - Register payload = ToRegister(instr->TempAt(0)); + Register payload = ToRegister(instr->temp()); __ ldr(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset)); __ CompareRoot(payload, Heap::kTheHoleValueRootIndex); DeoptimizeIf(eq, instr->environment()); @@ -2645,7 +2797,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { - Register object = ToRegister(instr->InputAt(0)); + Register object = ToRegister(instr->object()); Register result = ToRegister(instr->result()); if (instr->hydrogen()->is_in_object()) { __ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset())); @@ -2736,7 +2888,7 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) { if (need_generic) { __ mov(r2, Operand(name)); Handle ic = isolate()->builtins()->LoadIC_Initialize(); - CallCode(ic, RelocInfo::CODE_TARGET, instr); + CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); } __ bind(&done); } @@ -2749,7 +2901,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { // Name is always in r2. 
__ mov(r2, Operand(instr->name())); Handle ic = isolate()->builtins()->LoadIC_Initialize(); - CallCode(ic, RelocInfo::CODE_TARGET, instr); + CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); } @@ -2799,7 +2951,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { void LCodeGen::DoLoadElements(LLoadElements* instr) { Register result = ToRegister(instr->result()); - Register input = ToRegister(instr->InputAt(0)); + Register input = ToRegister(instr->object()); Register scratch = scratch0(); __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset)); @@ -2834,7 +2986,7 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) { void LCodeGen::DoLoadExternalArrayPointer( LLoadExternalArrayPointer* instr) { Register to_reg = ToRegister(instr->result()); - Register from_reg = ToRegister(instr->InputAt(0)); + Register from_reg = ToRegister(instr->object()); __ ldr(to_reg, FieldMemOperand(from_reg, ExternalArray::kExternalPointerOffset)); } @@ -2845,63 +2997,95 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { Register length = ToRegister(instr->length()); Register index = ToRegister(instr->index()); Register result = ToRegister(instr->result()); - - // Bailout index is not a valid argument index. Use unsigned check to get - // negative check for free. - __ sub(length, length, index, SetCC); - DeoptimizeIf(ls, instr->environment()); - // There are two words between the frame pointer and the last argument. // Subtracting from length accounts for one of them add one more. + __ sub(length, length, index); __ add(length, length, Operand(1)); __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2)); } -void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { - Register elements = ToRegister(instr->elements()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - Register store_base = scratch; - int offset = 0; - - if (instr->key()->IsConstantOperand()) { - LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + - instr->additional_index()); - store_base = elements; - } else { - Register key = EmitLoadRegister(instr->key(), scratch0()); - // Even though the HLoadKeyedFastElement instruction forces the input - // representation for the key to be an integer, the input gets replaced - // during bound check elimination with the index argument to the bounds - // check, which can be tagged, so that case must be handled here, too. 
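// Illustrative sketch, not V8 source: both the removed fast-element loads
// and the new DoLoadKeyed* helpers scale the key by
// (element_size_shift - kSmiTagSize) when the key is still smi-tagged.  On
// 32-bit ARM a smi is the integer shifted left by one tag bit, so shifting
// by one less turns the tagged key straight into a byte offset.  Sketch with
// kSmiTagSize == 1 and kPointerSizeLog2 == 2, as on 32-bit targets:
#include <cstdint>

const int kSmiTagSizeSketch = 1;
const int kPointerSizeLog2Sketch = 2;

int32_t ByteOffsetFromTaggedKey(int32_t tagged_key) {
  // tagged_key == index << kSmiTagSize, so one shift untags and scales.
  return tagged_key << (kPointerSizeLog2Sketch - kSmiTagSizeSketch);
}

int32_t ByteOffsetFromUntaggedKey(int32_t index) {
  return index << kPointerSizeLog2Sketch;
}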
- if (instr->hydrogen()->key()->representation().IsTagged()) { - __ add(scratch, elements, - Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); - } else { - __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); +void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { + Register external_pointer = ToRegister(instr->elements()); + Register key = no_reg; + ElementsKind elements_kind = instr->elements_kind(); + bool key_is_constant = instr->key()->IsConstantOperand(); + int constant_key = 0; + if (key_is_constant) { + constant_key = ToInteger32(LConstantOperand::cast(instr->key())); + if (constant_key & 0xF0000000) { + Abort("array index constant value too big."); } - offset = FixedArray::OffsetOfElementAt(instr->additional_index()); + } else { + key = ToRegister(instr->key()); } - __ ldr(result, FieldMemOperand(store_base, offset)); + int element_size_shift = ElementsKindToShiftSize(elements_kind); + int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) + ? (element_size_shift - kSmiTagSize) : element_size_shift; + int additional_offset = instr->additional_index() << element_size_shift; - // Check for the hole value. - if (instr->hydrogen()->RequiresHoleCheck()) { - if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { - __ tst(result, Operand(kSmiTagMask)); - DeoptimizeIf(ne, instr->environment()); - } else { - __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); - __ cmp(result, scratch); - DeoptimizeIf(eq, instr->environment()); + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || + elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { + CpuFeatures::Scope scope(VFP3); + DwVfpRegister result = ToDoubleRegister(instr->result()); + Operand operand = key_is_constant + ? Operand(constant_key << element_size_shift) + : Operand(key, LSL, shift_size); + __ add(scratch0(), external_pointer, operand); + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { + __ vldr(result.low(), scratch0(), additional_offset); + __ vcvt_f64_f32(result, result.low()); + } else { // i.e. 
elements_kind == EXTERNAL_DOUBLE_ELEMENTS + __ vldr(result, scratch0(), additional_offset); + } + } else { + Register result = ToRegister(instr->result()); + MemOperand mem_operand = PrepareKeyedOperand( + key, external_pointer, key_is_constant, constant_key, + element_size_shift, shift_size, + instr->additional_index(), additional_offset); + switch (elements_kind) { + case EXTERNAL_BYTE_ELEMENTS: + __ ldrsb(result, mem_operand); + break; + case EXTERNAL_PIXEL_ELEMENTS: + case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: + __ ldrb(result, mem_operand); + break; + case EXTERNAL_SHORT_ELEMENTS: + __ ldrsh(result, mem_operand); + break; + case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: + __ ldrh(result, mem_operand); + break; + case EXTERNAL_INT_ELEMENTS: + __ ldr(result, mem_operand); + break; + case EXTERNAL_UNSIGNED_INT_ELEMENTS: + __ ldr(result, mem_operand); + if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { + __ cmp(result, Operand(0x80000000)); + DeoptimizeIf(cs, instr->environment()); + } + break; + case EXTERNAL_FLOAT_ELEMENTS: + case EXTERNAL_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_DOUBLE_ELEMENTS: + case FAST_ELEMENTS: + case FAST_SMI_ELEMENTS: + case DICTIONARY_ELEMENTS: + case NON_STRICT_ARGUMENTS_ELEMENTS: + UNREACHABLE(); + break; } } } -void LCodeGen::DoLoadKeyedFastDoubleElement( - LLoadKeyedFastDoubleElement* instr) { +void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { Register elements = ToRegister(instr->elements()); bool key_is_constant = instr->key()->IsConstantOperand(); Register key = no_reg; @@ -2933,13 +3117,65 @@ void LCodeGen::DoLoadKeyedFastDoubleElement( (instr->additional_index() << element_size_shift))); } + __ vldr(result, elements, 0); if (instr->hydrogen()->RequiresHoleCheck()) { __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); __ cmp(scratch, Operand(kHoleNanUpper32)); DeoptimizeIf(eq, instr->environment()); } +} - __ vldr(result, elements, 0); + +void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { + Register elements = ToRegister(instr->elements()); + Register result = ToRegister(instr->result()); + Register scratch = scratch0(); + Register store_base = scratch; + int offset = 0; + + if (instr->key()->IsConstantOperand()) { + LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); + offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + + instr->additional_index()); + store_base = elements; + } else { + Register key = EmitLoadRegister(instr->key(), scratch0()); + // Even though the HLoadKeyed instruction forces the input + // representation for the key to be an integer, the input gets replaced + // during bound check elimination with the index argument to the bounds + // check, which can be tagged, so that case must be handled here, too. + if (instr->hydrogen()->key()->representation().IsTagged()) { + __ add(scratch, elements, + Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); + } else { + __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); + } + offset = FixedArray::OffsetOfElementAt(instr->additional_index()); + } + __ ldr(result, FieldMemOperand(store_base, offset)); + + // Check for the hole value. 
+ if (instr->hydrogen()->RequiresHoleCheck()) { + if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { + __ tst(result, Operand(kSmiTagMask)); + DeoptimizeIf(ne, instr->environment()); + } else { + __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); + __ cmp(result, scratch); + DeoptimizeIf(eq, instr->environment()); + } + } +} + + +void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { + if (instr->is_external()) { + DoLoadKeyedExternalArray(instr); + } else if (instr->hydrogen()->representation().IsDouble()) { + DoLoadKeyedFixedDoubleArray(instr); + } else { + DoLoadKeyedFixedArray(instr); + } } @@ -2979,93 +3215,12 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key, } -void LCodeGen::DoLoadKeyedSpecializedArrayElement( - LLoadKeyedSpecializedArrayElement* instr) { - Register external_pointer = ToRegister(instr->external_pointer()); - Register key = no_reg; - ElementsKind elements_kind = instr->elements_kind(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort("array index constant value too big."); - } - } else { - key = ToRegister(instr->key()); - } - int element_size_shift = ElementsKindToShiftSize(elements_kind); - int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) - ? (element_size_shift - kSmiTagSize) : element_size_shift; - int additional_offset = instr->additional_index() << element_size_shift; - - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || - elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - CpuFeatures::Scope scope(VFP3); - DwVfpRegister result = ToDoubleRegister(instr->result()); - Operand operand = key_is_constant - ? Operand(constant_key << element_size_shift) - : Operand(key, LSL, shift_size); - __ add(scratch0(), external_pointer, operand); - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - __ vldr(result.low(), scratch0(), additional_offset); - __ vcvt_f64_f32(result, result.low()); - } else { // i.e. 
elements_kind == EXTERNAL_DOUBLE_ELEMENTS - __ vldr(result, scratch0(), additional_offset); - } - } else { - Register result = ToRegister(instr->result()); - MemOperand mem_operand = PrepareKeyedOperand( - key, external_pointer, key_is_constant, constant_key, - element_size_shift, shift_size, - instr->additional_index(), additional_offset); - switch (elements_kind) { - case EXTERNAL_BYTE_ELEMENTS: - __ ldrsb(result, mem_operand); - break; - case EXTERNAL_PIXEL_ELEMENTS: - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ ldrb(result, mem_operand); - break; - case EXTERNAL_SHORT_ELEMENTS: - __ ldrsh(result, mem_operand); - break; - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ ldrh(result, mem_operand); - break; - case EXTERNAL_INT_ELEMENTS: - __ ldr(result, mem_operand); - break; - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ ldr(result, mem_operand); - if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { - __ cmp(result, Operand(0x80000000)); - DeoptimizeIf(cs, instr->environment()); - } - break; - case EXTERNAL_FLOAT_ELEMENTS: - case EXTERNAL_DOUBLE_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case DICTIONARY_ELEMENTS: - case NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - - -void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { - ASSERT(ToRegister(instr->object()).is(r1)); - ASSERT(ToRegister(instr->key()).is(r0)); +void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { + ASSERT(ToRegister(instr->object()).is(r1)); + ASSERT(ToRegister(instr->key()).is(r0)); Handle ic = isolate()->builtins()->KeyedLoadIC_Initialize(); - CallCode(ic, RelocInfo::CODE_TARGET, instr); + CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); } @@ -3091,7 +3246,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { - Register elem = ToRegister(instr->InputAt(0)); + Register elem = ToRegister(instr->elements()); Register result = ToRegister(instr->result()); Label done; @@ -3210,7 +3365,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { void LCodeGen::DoPushArgument(LPushArgument* instr) { - LOperand* argument = instr->InputAt(0); + LOperand* argument = instr->value(); if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { Abort("DoPushArgument not implemented for double type."); } else { @@ -3262,7 +3417,7 @@ void LCodeGen::DoGlobalObject(LGlobalObject* instr) { void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) { - Register global = ToRegister(instr->global()); + Register global = ToRegister(instr->global_object()); Register result = ToRegister(instr->result()); __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset)); } @@ -3322,7 +3477,7 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) { void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { - Register input = ToRegister(instr->InputAt(0)); + Register input = ToRegister(instr->value()); Register result = ToRegister(instr->result()); Register scratch = scratch0(); @@ -3388,7 +3543,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { - Register input = ToRegister(instr->InputAt(0)); + Register input = ToRegister(instr->value()); Register result = ToRegister(instr->result()); __ cmp(input, Operand(0)); __ 
Move(result, input, pl); @@ -3418,7 +3573,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { Representation r = instr->hydrogen()->value()->representation(); if (r.IsDouble()) { - DwVfpRegister input = ToDoubleRegister(instr->InputAt(0)); + DwVfpRegister input = ToDoubleRegister(instr->value()); DwVfpRegister result = ToDoubleRegister(instr->result()); __ vabs(result, input); } else if (r.IsInteger32()) { @@ -3427,7 +3582,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { // Representation is tagged. DeferredMathAbsTaggedHeapNumber* deferred = new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); - Register input = ToRegister(instr->InputAt(0)); + Register input = ToRegister(instr->value()); // Smi check. __ JumpIfNotSmi(input, deferred->entry()); // If smi, handle it directly. @@ -3438,29 +3593,24 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { - DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); + DoubleRegister input = ToDoubleRegister(instr->value()); Register result = ToRegister(instr->result()); - SwVfpRegister single_scratch = double_scratch0().low(); - Register scratch1 = scratch0(); - Register scratch2 = ToRegister(instr->TempAt(0)); + Register scratch = scratch0(); __ EmitVFPTruncate(kRoundToMinusInf, - single_scratch, + result, input, - scratch1, - scratch2); + scratch, + double_scratch0()); DeoptimizeIf(ne, instr->environment()); - // Move the result back to general purpose register r0. - __ vmov(result, single_scratch); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { // Test for -0. Label done; __ cmp(result, Operand(0)); __ b(ne, &done); - __ vmov(scratch1, input.high()); - __ tst(scratch1, Operand(HeapNumber::kSignMask)); + __ vmov(scratch, input.high()); + __ tst(scratch, Operand(HeapNumber::kSignMask)); DeoptimizeIf(ne, instr->environment()); __ bind(&done); } @@ -3468,8 +3618,9 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { - DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); + DoubleRegister input = ToDoubleRegister(instr->value()); Register result = ToRegister(instr->result()); + DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); Register scratch = scratch0(); Label done, check_sign_on_zero; @@ -3494,12 +3645,12 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { __ cmp(scratch, Operand(HeapNumber::kExponentBias + 32)); DeoptimizeIf(ge, instr->environment()); + __ Vmov(double_scratch0(), 0.5, scratch); + __ vadd(double_scratch0(), input, double_scratch0()); + // Save the original sign for later comparison. __ and_(scratch, result, Operand(HeapNumber::kSignMask)); - __ Vmov(double_scratch0(), 0.5); - __ vadd(double_scratch0(), input, double_scratch0()); - // Check sign of the result: if the sign changed, the input // value was in ]0.5, 0[ and the result should be -0. __ vmov(result, double_scratch0().high()); @@ -3512,12 +3663,11 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { } __ EmitVFPTruncate(kRoundToMinusInf, - double_scratch0().low(), - double_scratch0(), result, - scratch); + double_scratch0(), + scratch, + double_scratch1); DeoptimizeIf(ne, instr->environment()); - __ vmov(result, double_scratch0().low()); if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { // Test for -0. 
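// Illustrative sketch, not V8 source: the DoMathRound hunk above adds 0.5 to
// the input and then truncates towards minus infinity, deoptimizing instead
// of producing -0 when kBailoutOnMinusZero is set.  For inputs that fit in
// the int32 range, the emitted code (when it does not deopt) computes the
// same value as:
#include <cmath>

double RoundLikeMathRound(double x) {
  double result = std::floor(x + 0.5);  // ties round towards +infinity
  if (result == 0.0 && std::signbit(x)) {
    return -0.0;                        // e.g. inputs in [-0.5, -0.0]
  }
  return result;
}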
@@ -3533,22 +3683,22 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { - DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); + DoubleRegister input = ToDoubleRegister(instr->value()); DoubleRegister result = ToDoubleRegister(instr->result()); __ vsqrt(result, input); } void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { - DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); + DoubleRegister input = ToDoubleRegister(instr->value()); DoubleRegister result = ToDoubleRegister(instr->result()); - DoubleRegister temp = ToDoubleRegister(instr->TempAt(0)); + DoubleRegister temp = ToDoubleRegister(instr->temp()); // Note that according to ECMA-262 15.8.2.13: // Math.pow(-Infinity, 0.5) == Infinity // Math.sqrt(-Infinity) == NaN Label done; - __ vmov(temp, -V8_INFINITY); + __ vmov(temp, -V8_INFINITY, scratch0()); __ VFPCompareAndSetFlags(input, temp); __ vneg(result, temp, eq); __ b(&done, eq); @@ -3564,11 +3714,11 @@ void LCodeGen::DoPower(LPower* instr) { Representation exponent_type = instr->hydrogen()->right()->representation(); // Having marked this as a call, we can use any registers. // Just make sure that the input/output registers are the expected ones. - ASSERT(!instr->InputAt(1)->IsDoubleRegister() || - ToDoubleRegister(instr->InputAt(1)).is(d2)); - ASSERT(!instr->InputAt(1)->IsRegister() || - ToRegister(instr->InputAt(1)).is(r2)); - ASSERT(ToDoubleRegister(instr->InputAt(0)).is(d1)); + ASSERT(!instr->right()->IsDoubleRegister() || + ToDoubleRegister(instr->right()).is(d2)); + ASSERT(!instr->right()->IsRegister() || + ToRegister(instr->right()).is(r2)); + ASSERT(ToDoubleRegister(instr->left()).is(d1)); ASSERT(ToDoubleRegister(instr->result()).is(d3)); if (exponent_type.IsTagged()) { @@ -3608,7 +3758,7 @@ void LCodeGen::DoRandom(LRandom* instr) { // Having marked this instruction as a call we can use any // registers. 
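// Illustrative sketch, not V8 source: DoMathPowHalf above checks for
// -Infinity first because, per ECMA-262 15.8.2.13, Math.pow(-Infinity, 0.5)
// is +Infinity while a plain vsqrt would produce NaN.  Scalar equivalent of
// that special-casing (the -0 handling below is an assumption based on the
// spec, not shown in this hunk):
#include <cmath>
#include <limits>

double PowHalf(double x) {
  if (x == -std::numeric_limits<double>::infinity()) {
    return std::numeric_limits<double>::infinity();
  }
  if (x == 0.0) return 0.0;  // Math.pow(-0, 0.5) is +0, unlike sqrt(-0)
  return std::sqrt(x);
}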
ASSERT(ToDoubleRegister(instr->result()).is(d7)); - ASSERT(ToRegister(instr->InputAt(0)).is(r0)); + ASSERT(ToRegister(instr->global_object()).is(r0)); static const int kSeedSize = sizeof(uint32_t); STATIC_ASSERT(kPointerSize == kSeedSize); @@ -3670,6 +3820,20 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) { } +void LCodeGen::DoMathExp(LMathExp* instr) { + DoubleRegister input = ToDoubleRegister(instr->value()); + DoubleRegister result = ToDoubleRegister(instr->result()); + DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); + DoubleRegister double_scratch2 = double_scratch0(); + Register temp1 = ToRegister(instr->temp1()); + Register temp2 = ToRegister(instr->temp2()); + + MathExpGenerator::EmitMathExp( + masm(), input, result, double_scratch1, double_scratch2, + temp1, temp2, scratch0()); +} + + void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { ASSERT(ToDoubleRegister(instr->result()).is(d2)); TranscendentalCacheStub stub(TranscendentalCache::LOG, @@ -3765,7 +3929,7 @@ void LCodeGen::DoCallKeyed(LCallKeyed* instr) { int arity = instr->arity(); Handle ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(arity); - CallCode(ic, RelocInfo::CODE_TARGET, instr); + CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } @@ -3778,7 +3942,7 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) { Handle ic = isolate()->stub_cache()->ComputeCallInitialize(arity, mode); __ mov(r2, Operand(instr->name())); - CallCode(ic, mode, instr); + CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS); // Restore context register. __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } @@ -3803,7 +3967,7 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) { Handle ic = isolate()->stub_cache()->ComputeCallInitialize(arity, mode); __ mov(r2, Operand(instr->name())); - CallCode(ic, mode, instr); + CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } @@ -3819,7 +3983,7 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) { void LCodeGen::DoCallNew(LCallNew* instr) { - ASSERT(ToRegister(instr->InputAt(0)).is(r1)); + ASSERT(ToRegister(instr->constructor()).is(r1)); ASSERT(ToRegister(instr->result()).is(r0)); CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); @@ -3845,7 +4009,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { __ mov(scratch, Operand(instr->transition())); __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); if (instr->hydrogen()->NeedsWriteBarrierForMap()) { - Register temp = ToRegister(instr->TempAt(0)); + Register temp = ToRegister(instr->temp()); // Update the write barrier for the map field. __ RecordWriteField(object, HeapObject::kMapOffset, @@ -3903,7 +4067,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { Handle ic = (instr->strict_mode_flag() == kStrictMode) ? isolate()->builtins()->StoreIC_Initialize_Strict() : isolate()->builtins()->StoreIC_Initialize(); - CallCode(ic, RelocInfo::CODE_TARGET, instr); + CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); } @@ -3945,102 +4109,8 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { } -void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { - Register value = ToRegister(instr->value()); - Register elements = ToRegister(instr->object()); - Register key = instr->key()->IsRegister() ? 
ToRegister(instr->key()) : no_reg; - Register scratch = scratch0(); - Register store_base = scratch; - int offset = 0; - - // Do the store. - if (instr->key()->IsConstantOperand()) { - ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); - LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + - instr->additional_index()); - store_base = elements; - } else { - // Even though the HLoadKeyedFastElement instruction forces the input - // representation for the key to be an integer, the input gets replaced - // during bound check elimination with the index argument to the bounds - // check, which can be tagged, so that case must be handled here, too. - if (instr->hydrogen()->key()->representation().IsTagged()) { - __ add(scratch, elements, - Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); - } else { - __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); - } - offset = FixedArray::OffsetOfElementAt(instr->additional_index()); - } - __ str(value, FieldMemOperand(store_base, offset)); - - if (instr->hydrogen()->NeedsWriteBarrier()) { - HType type = instr->hydrogen()->value()->type(); - SmiCheck check_needed = - type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - // Compute address of modified element and store it into key register. - __ add(key, store_base, Operand(offset - kHeapObjectTag)); - __ RecordWrite(elements, - key, - value, - kLRHasBeenSaved, - kSaveFPRegs, - EMIT_REMEMBERED_SET, - check_needed); - } -} - - -void LCodeGen::DoStoreKeyedFastDoubleElement( - LStoreKeyedFastDoubleElement* instr) { - DwVfpRegister value = ToDoubleRegister(instr->value()); - Register elements = ToRegister(instr->elements()); - Register key = no_reg; - Register scratch = scratch0(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - - // Calculate the effective address of the slot in the array to store the - // double value. - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort("array index constant value too big."); - } - } else { - key = ToRegister(instr->key()); - } - int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) - ? (element_size_shift - kSmiTagSize) : element_size_shift; - Operand operand = key_is_constant - ? Operand((constant_key << element_size_shift) + - FixedDoubleArray::kHeaderSize - kHeapObjectTag) - : Operand(key, LSL, shift_size); - __ add(scratch, elements, operand); - if (!key_is_constant) { - __ add(scratch, scratch, - Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); - } - - if (instr->NeedsCanonicalization()) { - // Check for NaN. All NaNs must be canonicalized. - __ VFPCompareAndSetFlags(value, value); - // Only load canonical NaN if the comparison above set the overflow. 
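// Illustrative sketch, not V8 source: double-element stores canonicalize
// NaNs because one specific NaN bit pattern is reserved in FixedDoubleArray
// to represent "the hole".  VFPCompareAndSetFlags(value, value) leaves the
// flags unordered only for NaN, so the overflow-set (vs) conditional Vmov
// substitutes the canonical NaN.  The idea in plain C++:
double CanonicalizeNaN(double value, double canonical_nan) {
  return (value != value) ? canonical_nan : value;  // only NaN != itself
}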
- __ Vmov(value, - FixedDoubleArray::canonical_not_the_hole_nan_as_double(), - vs); - } - - __ vstr(value, scratch, instr->additional_index() << element_size_shift); -} - - -void LCodeGen::DoStoreKeyedSpecializedArrayElement( - LStoreKeyedSpecializedArrayElement* instr) { - - Register external_pointer = ToRegister(instr->external_pointer()); +void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { + Register external_pointer = ToRegister(instr->elements()); Register key = no_reg; ElementsKind elements_kind = instr->elements_kind(); bool key_is_constant = instr->key()->IsConstantOperand(); @@ -4109,6 +4179,110 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( } +void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { + DwVfpRegister value = ToDoubleRegister(instr->value()); + Register elements = ToRegister(instr->elements()); + Register key = no_reg; + Register scratch = scratch0(); + bool key_is_constant = instr->key()->IsConstantOperand(); + int constant_key = 0; + + // Calculate the effective address of the slot in the array to store the + // double value. + if (key_is_constant) { + constant_key = ToInteger32(LConstantOperand::cast(instr->key())); + if (constant_key & 0xF0000000) { + Abort("array index constant value too big."); + } + } else { + key = ToRegister(instr->key()); + } + int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); + int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) + ? (element_size_shift - kSmiTagSize) : element_size_shift; + Operand operand = key_is_constant + ? Operand((constant_key << element_size_shift) + + FixedDoubleArray::kHeaderSize - kHeapObjectTag) + : Operand(key, LSL, shift_size); + __ add(scratch, elements, operand); + if (!key_is_constant) { + __ add(scratch, scratch, + Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + } + + if (instr->NeedsCanonicalization()) { + // Check for NaN. All NaNs must be canonicalized. + __ VFPCompareAndSetFlags(value, value); + // Only load canonical NaN if the comparison above set the overflow. + __ Vmov(value, + FixedDoubleArray::canonical_not_the_hole_nan_as_double(), + no_reg, vs); + } + + __ vstr(value, scratch, instr->additional_index() << element_size_shift); +} + + +void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { + Register value = ToRegister(instr->value()); + Register elements = ToRegister(instr->elements()); + Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) + : no_reg; + Register scratch = scratch0(); + Register store_base = scratch; + int offset = 0; + + // Do the store. + if (instr->key()->IsConstantOperand()) { + ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); + LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); + offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + + instr->additional_index()); + store_base = elements; + } else { + // Even though the HLoadKeyed instruction forces the input + // representation for the key to be an integer, the input gets replaced + // during bound check elimination with the index argument to the bounds + // check, which can be tagged, so that case must be handled here, too. 
+ if (instr->hydrogen()->key()->representation().IsTagged()) { + __ add(scratch, elements, + Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); + } else { + __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); + } + offset = FixedArray::OffsetOfElementAt(instr->additional_index()); + } + __ str(value, FieldMemOperand(store_base, offset)); + + if (instr->hydrogen()->NeedsWriteBarrier()) { + HType type = instr->hydrogen()->value()->type(); + SmiCheck check_needed = + type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; + // Compute address of modified element and store it into key register. + __ add(key, store_base, Operand(offset - kHeapObjectTag)); + __ RecordWrite(elements, + key, + value, + kLRHasBeenSaved, + kSaveFPRegs, + EMIT_REMEMBERED_SET, + check_needed); + } +} + + +void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { + // By cases: external, fast double + if (instr->is_external()) { + DoStoreKeyedExternalArray(instr); + } else if (instr->hydrogen()->value()->representation().IsDouble()) { + DoStoreKeyedFixedDoubleArray(instr); + } else { + DoStoreKeyedFixedArray(instr); + } +} + + void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { ASSERT(ToRegister(instr->object()).is(r2)); ASSERT(ToRegister(instr->key()).is(r1)); @@ -4117,13 +4291,13 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { Handle ic = (instr->strict_mode_flag() == kStrictMode) ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() : isolate()->builtins()->KeyedStoreIC_Initialize(); - CallCode(ic, RelocInfo::CODE_TARGET, instr); + CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); } void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { Register object_reg = ToRegister(instr->object()); - Register new_map_reg = ToRegister(instr->new_map_reg()); + Register new_map_reg = ToRegister(instr->new_map_temp()); Register scratch = scratch0(); Handle from_map = instr->original_map(); @@ -4144,7 +4318,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { scratch, kLRHasBeenSaved, kDontSaveFPRegs); } else if (IsFastSmiElementsKind(from_kind) && IsFastDoubleElementsKind(to_kind)) { - Register fixed_object_reg = ToRegister(instr->temp_reg()); + Register fixed_object_reg = ToRegister(instr->temp()); ASSERT(fixed_object_reg.is(r2)); ASSERT(new_map_reg.is(r3)); __ mov(fixed_object_reg, object_reg); @@ -4152,7 +4326,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { RelocInfo::CODE_TARGET, instr); } else if (IsFastDoubleElementsKind(from_kind) && IsFastObjectElementsKind(to_kind)) { - Register fixed_object_reg = ToRegister(instr->temp_reg()); + Register fixed_object_reg = ToRegister(instr->temp()); ASSERT(fixed_object_reg.is(r2)); ASSERT(new_map_reg.is(r3)); __ mov(fixed_object_reg, object_reg); @@ -4220,9 +4394,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { __ push(index); } CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr); - if (FLAG_debug_code) { - __ AbortIfNotSmi(r0); - } + __ AssertSmi(r0); __ SmiUntag(r0); __ StoreToSafepointRegisterSlot(r0, result); } @@ -4277,14 +4449,14 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { void LCodeGen::DoStringLength(LStringLength* instr) { - Register string = ToRegister(instr->InputAt(0)); + Register string = ToRegister(instr->string()); Register result = ToRegister(instr->result()); __ ldr(result, FieldMemOperand(string, String::kLengthOffset)); } void 
LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { - LOperand* input = instr->InputAt(0); + LOperand* input = instr->value(); ASSERT(input->IsRegister() || input->IsStackSlot()); LOperand* output = instr->result(); ASSERT(output->IsDoubleRegister()); @@ -4301,7 +4473,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { - LOperand* input = instr->InputAt(0); + LOperand* input = instr->value(); LOperand* output = instr->result(); SwVfpRegister flt_scratch = double_scratch0().low(); @@ -4317,7 +4489,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) { : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_, - instr_->InputAt(0), + instr_->value(), SIGNED_INT32); } virtual LInstruction* instr() { return instr_; } @@ -4325,7 +4497,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) { LNumberTagI* instr_; }; - Register src = ToRegister(instr->InputAt(0)); + Register src = ToRegister(instr->value()); Register dst = ToRegister(instr->result()); DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr); @@ -4342,7 +4514,7 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) { : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_, - instr_->InputAt(0), + instr_->value(), UNSIGNED_INT32); } virtual LInstruction* instr() { return instr_; } @@ -4350,7 +4522,7 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) { LNumberTagU* instr_; }; - LOperand* input = instr->InputAt(0); + LOperand* input = instr->value(); ASSERT(input->IsRegister() && input->Equals(instr->result())); Register reg = ToRegister(input); @@ -4392,7 +4564,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, if (FLAG_inline_new) { __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r5, r3, r4, r6, &slow); + __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT); __ Move(dst, r5); __ b(&done); } @@ -4407,12 +4579,13 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, __ StoreToSafepointRegisterSlot(ip, dst); CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); __ Move(dst, r0); + __ sub(dst, dst, Operand(kHeapObjectTag)); // Done. Put the value in dbl_scratch into the value of the allocated heap // number. 
__ bind(&done); - __ sub(ip, dst, Operand(kHeapObjectTag)); - __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset); + __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); + __ add(dst, dst, Operand(kHeapObjectTag)); __ StoreToSafepointRegisterSlot(dst, dst); } @@ -4428,22 +4601,25 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { LNumberTagD* instr_; }; - DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0)); + DoubleRegister input_reg = ToDoubleRegister(instr->value()); Register scratch = scratch0(); Register reg = ToRegister(instr->result()); - Register temp1 = ToRegister(instr->TempAt(0)); - Register temp2 = ToRegister(instr->TempAt(1)); + Register temp1 = ToRegister(instr->temp()); + Register temp2 = ToRegister(instr->temp2()); DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); if (FLAG_inline_new) { __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry()); + // We want the untagged address first for performance + __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(), + DONT_TAG_RESULT); } else { __ jmp(deferred->entry()); } __ bind(deferred->exit()); - __ sub(ip, reg, Operand(kHeapObjectTag)); - __ vstr(input_reg, ip, HeapNumber::kValueOffset); + __ vstr(input_reg, reg, HeapNumber::kValueOffset); + // Now that we have finished with the object's real address tag it + __ add(reg, reg, Operand(kHeapObjectTag)); } @@ -4456,18 +4632,19 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); + __ sub(r0, r0, Operand(kHeapObjectTag)); __ StoreToSafepointRegisterSlot(r0, reg); } void LCodeGen::DoSmiTag(LSmiTag* instr) { ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)); - __ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0))); + __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value())); } void LCodeGen::DoSmiUntag(LSmiUntag* instr) { - Register input = ToRegister(instr->InputAt(0)); + Register input = ToRegister(instr->value()); Register result = ToRegister(instr->result()); if (instr->needs_check()) { STATIC_ASSERT(kHeapObjectTag == 1); @@ -4539,11 +4716,11 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { - Register input_reg = ToRegister(instr->InputAt(0)); + Register input_reg = ToRegister(instr->value()); Register scratch1 = scratch0(); - Register scratch2 = ToRegister(instr->TempAt(0)); + Register scratch2 = ToRegister(instr->temp()); DwVfpRegister double_scratch = double_scratch0(); - SwVfpRegister single_scratch = double_scratch.low(); + DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp3()); ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2)); ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1)); @@ -4562,8 +4739,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { __ cmp(scratch1, Operand(ip)); if (instr->truncating()) { - Register scratch3 = ToRegister(instr->TempAt(1)); - DwVfpRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2)); + Register scratch3 = ToRegister(instr->temp2()); ASSERT(!scratch3.is(input_reg) && !scratch3.is(scratch1) && !scratch3.is(scratch2)); @@ -4585,7 +4761,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { __ EmitECMATruncate(input_reg, double_scratch2, - single_scratch, + double_scratch, scratch1, scratch2, scratch3); @@ -4598,14 +4774,12 @@ void 
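// A sketch of the tagging trick used by DoNumberTagD above: the heap number is
// allocated with DONT_TAG_RESULT, the double is stored through the untagged
// address, and kHeapObjectTag is added only afterwards, saving the
// "sub(ip, reg, kHeapObjectTag)" the old code needed before each vstr. The
// payload offset below is an assumption used only for illustration.
#include <cstdint>
#include <cstring>

static const uintptr_t kHeapObjectTagSketch = 1;  // low-bit tag on heap pointers
static const int kValueOffsetSketch = 4;          // assumed offset of the double payload

uintptr_t StoreValueThenTagSketch(uint8_t* untagged_object, double value) {
  std::memcpy(untagged_object + kValueOffsetSketch, &value, sizeof value);     // vstr at the untagged address
  return reinterpret_cast<uintptr_t>(untagged_object) + kHeapObjectTagSketch;  // tag last
}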
LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { __ sub(ip, input_reg, Operand(kHeapObjectTag)); __ vldr(double_scratch, ip, HeapNumber::kValueOffset); __ EmitVFPTruncate(kRoundToZero, - single_scratch, + input_reg, double_scratch, scratch1, - scratch2, + double_scratch2, kCheckForInexactConversion); DeoptimizeIf(ne, instr->environment()); - // Load the result. - __ vmov(input_reg, single_scratch); if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { __ cmp(input_reg, Operand(0)); @@ -4630,7 +4804,7 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) { LTaggedToI* instr_; }; - LOperand* input = instr->InputAt(0); + LOperand* input = instr->value(); ASSERT(input->IsRegister()); ASSERT(input->Equals(instr->result())); @@ -4649,7 +4823,7 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) { void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { - LOperand* input = instr->InputAt(0); + LOperand* input = instr->value(); ASSERT(input->IsRegister()); LOperand* result = instr->result(); ASSERT(result->IsDoubleRegister()); @@ -4667,54 +4841,52 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { void LCodeGen::DoDoubleToI(LDoubleToI* instr) { Register result_reg = ToRegister(instr->result()); Register scratch1 = scratch0(); - Register scratch2 = ToRegister(instr->TempAt(0)); - DwVfpRegister double_input = ToDoubleRegister(instr->InputAt(0)); - SwVfpRegister single_scratch = double_scratch0().low(); + Register scratch2 = ToRegister(instr->temp()); + DwVfpRegister double_input = ToDoubleRegister(instr->value()); + DwVfpRegister double_scratch = double_scratch0(); Label done; if (instr->truncating()) { - Register scratch3 = ToRegister(instr->TempAt(1)); + Register scratch3 = ToRegister(instr->temp2()); __ EmitECMATruncate(result_reg, double_input, - single_scratch, + double_scratch, scratch1, scratch2, scratch3); } else { - VFPRoundingMode rounding_mode = kRoundToMinusInf; - __ EmitVFPTruncate(rounding_mode, - single_scratch, + __ EmitVFPTruncate(kRoundToMinusInf, + result_reg, double_input, scratch1, - scratch2, + double_scratch, kCheckForInexactConversion); + // Deoptimize if we had a vfp invalid exception, // including inexact operation. DeoptimizeIf(ne, instr->environment()); - // Retrieve the result. 
- __ vmov(result_reg, single_scratch); } __ bind(&done); } void LCodeGen::DoCheckSmi(LCheckSmi* instr) { - LOperand* input = instr->InputAt(0); + LOperand* input = instr->value(); __ tst(ToRegister(input), Operand(kSmiTagMask)); DeoptimizeIf(ne, instr->environment()); } void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { - LOperand* input = instr->InputAt(0); + LOperand* input = instr->value(); __ tst(ToRegister(input), Operand(kSmiTagMask)); DeoptimizeIf(eq, instr->environment()); } void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { - Register input = ToRegister(instr->InputAt(0)); + Register input = ToRegister(instr->value()); Register scratch = scratch0(); __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); @@ -4787,7 +4959,7 @@ void LCodeGen::DoCheckMapCommon(Register reg, void LCodeGen::DoCheckMaps(LCheckMaps* instr) { Register scratch = scratch0(); - LOperand* input = instr->InputAt(0); + LOperand* input = instr->value(); ASSERT(input->IsRegister()); Register reg = ToRegister(input); @@ -4807,7 +4979,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); - DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0)); + DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); __ ClampDoubleToUint8(result_reg, value_reg, temp_reg); } @@ -4823,7 +4995,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { Register scratch = scratch0(); Register input_reg = ToRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); - DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0)); + DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); Label is_smi, done, heap_number; // Both smi and heap number cases are handled. 
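// A sketch of the check that DoCheckSmi/DoCheckNonSmi above compile down to:
// on V8's 32-bit tagging scheme a smi is the integer shifted left by one with
// a zero low bit, while heap pointers carry a one low bit, so a single tst
// against kSmiTagMask distinguishes the two.
#include <cstdint>

constexpr uint32_t kSmiTagMaskSketch = 1;  // the low tag bit

inline bool IsSmiWordSketch(uint32_t tagged_word) {
  return (tagged_word & kSmiTagMaskSketch) == 0;  // tst + eq
}

inline int32_t SmiToIntSketch(uint32_t tagged_word) {
  return static_cast<int32_t>(tagged_word) >> 1;  // SmiUntag: arithmetic shift right by one
}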
@@ -4857,8 +5029,9 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { - Register temp1 = ToRegister(instr->TempAt(0)); - Register temp2 = ToRegister(instr->TempAt(1)); + ASSERT(instr->temp()->Equals(instr->result())); + Register temp1 = ToRegister(instr->temp()); + Register temp2 = ToRegister(instr->temp2()); Handle holder = instr->holder(); Handle current_prototype = instr->prototype(); @@ -4881,7 +5054,6 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { DoCheckMapCommon(temp1, temp2, Handle(current_prototype->map()), ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); - DeoptimizeIf(ne, instr->environment()); } @@ -4900,8 +5072,8 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) { new(zone()) DeferredAllocateObject(this, instr); Register result = ToRegister(instr->result()); - Register scratch = ToRegister(instr->TempAt(0)); - Register scratch2 = ToRegister(instr->TempAt(1)); + Register scratch = ToRegister(instr->temp()); + Register scratch2 = ToRegister(instr->temp2()); Handle constructor = instr->hydrogen()->constructor(); Handle initial_map(constructor->initial_map()); int instance_size = initial_map->instance_size(); @@ -5193,7 +5365,7 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { void LCodeGen::DoToFastProperties(LToFastProperties* instr) { - ASSERT(ToRegister(instr->InputAt(0)).is(r0)); + ASSERT(ToRegister(instr->value()).is(r0)); __ push(r0); CallRuntime(Runtime::kToFastProperties, 1, instr); } @@ -5274,14 +5446,14 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { void LCodeGen::DoTypeof(LTypeof* instr) { - Register input = ToRegister(instr->InputAt(0)); + Register input = ToRegister(instr->value()); __ push(input); CallRuntime(Runtime::kTypeof, 1, instr); } void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { - Register input = ToRegister(instr->InputAt(0)); + Register input = ToRegister(instr->value()); int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); Label* true_label = chunk_->GetAssemblyLabel(true_block); @@ -5371,7 +5543,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) { - Register temp1 = ToRegister(instr->TempAt(0)); + Register temp1 = ToRegister(instr->temp()); int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); @@ -5492,6 +5664,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { __ cmp(sp, Operand(ip)); __ b(hs, &done); StackCheckStub stub; + PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); EnsureSpaceForLazyDeopt(); __ bind(&done); @@ -5572,7 +5745,6 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { Register map = ToRegister(instr->map()); Register result = ToRegister(instr->result()); - Register scratch = ToRegister(instr->scratch()); Label load_cache, done; __ EnumLength(result, map); __ cmp(result, Operand(Smi::FromInt(0))); @@ -5581,7 +5753,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { __ jmp(&done); __ bind(&load_cache); - __ LoadInstanceDescriptors(map, result, scratch); + __ LoadInstanceDescriptors(map, result); __ ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); __ 
ldr(result, diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index fd4a2a5ca7..921285b0d2 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -110,8 +110,9 @@ class LCodeGen BASE_EMBEDDED { void FinishCode(Handle code); // Deferred code support. - template - void DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr, + void DoDeferredBinaryOpStub(LPointerMap* pointer_map, + LOperand* left_argument, + LOperand* right_argument, Token::Value op); void DoDeferredNumberTagD(LNumberTagD* instr); @@ -147,7 +148,10 @@ class LCodeGen BASE_EMBEDDED { int additional_offset); // Emit frame translation commands for an environment. - void WriteTranslation(LEnvironment* environment, Translation* translation); + void WriteTranslation(LEnvironment* environment, + Translation* translation, + int* arguments_index, + int* arguments_count); // Declare methods that deal with the individual node types. #define DECLARE_DO(type) void Do##type(L##type* node); @@ -209,14 +213,18 @@ class LCodeGen BASE_EMBEDDED { RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS }; - void CallCode(Handle code, - RelocInfo::Mode mode, - LInstruction* instr); + void CallCode( + Handle code, + RelocInfo::Mode mode, + LInstruction* instr, + TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS); - void CallCodeGeneric(Handle code, - RelocInfo::Mode mode, - LInstruction* instr, - SafepointMode safepoint_mode); + void CallCodeGeneric( + Handle code, + RelocInfo::Mode mode, + LInstruction* instr, + SafepointMode safepoint_mode, + TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS); void CallRuntime(const Runtime::Function* function, int num_arguments, @@ -258,7 +266,9 @@ class LCodeGen BASE_EMBEDDED { void AddToTranslation(Translation* translation, LOperand* op, bool is_tagged, - bool is_uint32); + bool is_uint32, + int arguments_index, + int arguments_count); void PopulateDeoptimizationData(Handle code); int DefineDeoptimizationLiteral(Handle literal); @@ -367,6 +377,12 @@ class LCodeGen BASE_EMBEDDED { }; void EnsureSpaceForLazyDeopt(); + void DoLoadKeyedExternalArray(LLoadKeyed* instr); + void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr); + void DoLoadKeyedFixedArray(LLoadKeyed* instr); + void DoStoreKeyedExternalArray(LStoreKeyed* instr); + void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr); + void DoStoreKeyedFixedArray(LStoreKeyed* instr); Zone* zone_; LPlatformChunk* const chunk_; diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 2a677be525..dc1dc1da9b 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -108,7 +108,7 @@ void MacroAssembler::Jump(Handle code, RelocInfo::Mode rmode, int MacroAssembler::CallSize(Register target, Condition cond) { -#if USE_BLX +#ifdef USE_BLX return kInstrSize; #else return 2 * kInstrSize; @@ -121,7 +121,7 @@ void MacroAssembler::Call(Register target, Condition cond) { BlockConstPoolScope block_const_pool(this); Label start; bind(&start); -#if USE_BLX +#ifdef USE_BLX blx(target, cond); #else // set lr for return at current pc + 8 @@ -158,15 +158,29 @@ int MacroAssembler::CallSizeNotPredictableCodeSize( void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, - Condition cond) { + Condition cond, + TargetAddressStorageMode mode) { // Block constant pool for the call instruction sequence. 
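// A small sketch of why the "#if USE_BLX" to "#ifdef USE_BLX" changes above
// are not purely cosmetic: the two directives differ when the macro is defined
// but expands to 0 (or to nothing). "#ifdef" tests definedness alone, while
// "#if" evaluates the value and treats an undefined name as 0. FEATURE_SKETCH
// is an illustrative macro, not a real V8 flag.
#define FEATURE_SKETCH 0

#ifdef FEATURE_SKETCH
static const bool kCompiledWithIfdef = true;  // compiled: the macro is defined
#endif

#if FEATURE_SKETCH
static const bool kCompiledWithIf = true;     // not compiled: the value is 0
#endif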
BlockConstPoolScope block_const_pool(this); Label start; bind(&start); -#if USE_BLX - // On ARMv5 and after the recommended call sequence is: - // ldr ip, [pc, #...] - // blx ip + + bool old_predictable_code_size = predictable_code_size(); + if (mode == NEVER_INLINE_TARGET_ADDRESS) { + set_predictable_code_size(true); + } + +#ifdef USE_BLX + // Call sequence on V7 or later may be : + // movw ip, #... @ call address low 16 + // movt ip, #... @ call address high 16 + // blx ip + // @ return address + // Or for pre-V7 or values that may be back-patched + // to avoid ICache flushes: + // ldr ip, [pc, #...] @ call address + // blx ip + // @ return address // Statement positions are expected to be recorded when the target // address is loaded. The mov method will automatically record @@ -177,15 +191,16 @@ void MacroAssembler::Call(Address target, mov(ip, Operand(reinterpret_cast(target), rmode)); blx(ip, cond); - ASSERT(kCallTargetAddressOffset == 2 * kInstrSize); #else // Set lr for return at current pc + 8. mov(lr, Operand(pc), LeaveCC, cond); // Emit a ldr pc, [pc + offset of target in constant pool]. mov(pc, Operand(reinterpret_cast(target), rmode), LeaveCC, cond); - ASSERT(kCallTargetAddressOffset == kInstrSize); #endif ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start)); + if (mode == NEVER_INLINE_TARGET_ADDRESS) { + set_predictable_code_size(old_predictable_code_size); + } } @@ -200,7 +215,8 @@ int MacroAssembler::CallSize(Handle code, void MacroAssembler::Call(Handle code, RelocInfo::Mode rmode, TypeFeedbackId ast_id, - Condition cond) { + Condition cond, + TargetAddressStorageMode mode) { Label start; bind(&start); ASSERT(RelocInfo::IsCodeTarget(rmode)); @@ -209,9 +225,7 @@ void MacroAssembler::Call(Handle code, rmode = RelocInfo::CODE_TARGET_WITH_ID; } // 'code' is always generated ARM code, never THUMB code - Call(reinterpret_cast
<Address>(code.location()), rmode, cond); - ASSERT_EQ(CallSize(code, rmode, ast_id, cond), - SizeOfCodeGeneratedSince(&start)); + Call(reinterpret_cast<Address>
(code.location()), rmode, cond, mode); } @@ -288,17 +302,15 @@ void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) { void MacroAssembler::And(Register dst, Register src1, const Operand& src2, Condition cond) { if (!src2.is_reg() && - !src2.must_use_constant_pool(this) && + !src2.must_output_reloc_info(this) && src2.immediate() == 0) { mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond); - } else if (!src2.is_single_instruction(this) && - !src2.must_use_constant_pool(this) && + !src2.must_output_reloc_info(this) && CpuFeatures::IsSupported(ARMv7) && IsPowerOf2(src2.immediate() + 1)) { ubfx(dst, src1, 0, WhichPowerOf2(static_cast(src2.immediate()) + 1), cond); - } else { and_(dst, src1, src2, LeaveCC, cond); } @@ -363,12 +375,14 @@ void MacroAssembler::Bfi(Register dst, } -void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) { +void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width, + Condition cond) { ASSERT(lsb < 32); if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); - bic(dst, dst, Operand(mask)); + bic(dst, src, Operand(mask)); } else { + Move(dst, src, cond); bfc(dst, lsb, width, cond); } } @@ -408,6 +422,17 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src, void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index, Condition cond) { + if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) && + !Heap::RootCanBeWrittenAfterInitialization(index) && + !predictable_code_size()) { + Handle root(isolate()->heap()->roots_array_start()[index]); + if (!isolate()->heap()->InNewSpace(*root)) { + // The CPU supports fast immediate values, and this root will never + // change. We will load it as a relocatable immediate value. + mov(destination, Operand(root), LeaveCC, cond); + return; + } + } ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond); } @@ -789,6 +814,7 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, void MacroAssembler::Vmov(const DwVfpRegister dst, const double imm, + const Register scratch, const Condition cond) { ASSERT(CpuFeatures::IsEnabled(VFP2)); static const DoubleRepresentation minus_zero(-0.0); @@ -800,7 +826,7 @@ void MacroAssembler::Vmov(const DwVfpRegister dst, } else if (value.bits == minus_zero.bits) { vneg(dst, kDoubleRegZero, cond); } else { - vmov(dst, imm, cond); + vmov(dst, imm, scratch, cond); } } @@ -1567,7 +1593,11 @@ void MacroAssembler::AllocateInNewSpace(int object_size, Register topaddr = scratch1; Register obj_size_reg = scratch2; mov(topaddr, Operand(new_space_allocation_top)); - mov(obj_size_reg, Operand(object_size)); + Operand obj_size_operand = Operand(object_size); + if (!obj_size_operand.is_single_instruction(this)) { + // We are about to steal IP, so we need to load this value first + mov(obj_size_reg, obj_size_operand); + } // This code stores a temporary value in ip. This is OK, as the code below // does not need ip for implicit literal generation. @@ -1589,7 +1619,13 @@ void MacroAssembler::AllocateInNewSpace(int object_size, // Calculate new top and bail out if new space is exhausted. Use result // to calculate the new top. 
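// A host-side restatement of the test guarding the ubfx fast path in
// MacroAssembler::And above: the immediate must be a low-bit mask of the form
// 2^k - 1, which ARMv7 can apply with one unsigned bit-field extract. Helper
// names are illustrative stand-ins for V8's IsPowerOf2/WhichPowerOf2.
#include <cstdint>

inline bool IsPowerOf2Sketch(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; }

inline int WhichPowerOf2Sketch(uint32_t x) {  // valid only when x is a power of two
  int bits = 0;
  while (x > 1) { x >>= 1; ++bits; }
  return bits;
}

inline bool AndImmediateFitsUbfxSketch(uint32_t imm, int* width) {
  if (imm == 0 || !IsPowerOf2Sketch(imm + 1)) return false;  // need imm == 2^k - 1, k >= 1
  *width = WhichPowerOf2Sketch(imm + 1);                     // ubfx dst, src1, #0, #width
  return true;
}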
- add(scratch2, result, Operand(obj_size_reg), SetCC); + if (obj_size_operand.is_single_instruction(this)) { + // We can add the size as an immediate + add(scratch2, result, obj_size_operand, SetCC); + } else { + // Doesn't fit in an immediate, we have to use the register + add(scratch2, result, obj_size_reg, SetCC); + } b(cs, gc_required); cmp(scratch2, Operand(ip)); b(hi, gc_required); @@ -1751,10 +1787,10 @@ void MacroAssembler::AllocateAsciiString(Register result, Label* gc_required) { // Calculate the number of bytes needed for the characters in the string while // observing object alignment. - ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); + ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); ASSERT(kCharSize == 1); add(scratch1, length, - Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize)); + Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize)); and_(scratch1, scratch1, Operand(~kObjectAlignmentMask)); // Allocate ASCII string in new space. @@ -1920,13 +1956,13 @@ void MacroAssembler::CheckFastSmiElements(Register map, void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, Register key_reg, - Register receiver_reg, Register elements_reg, Register scratch1, Register scratch2, Register scratch3, Register scratch4, - Label* fail) { + Label* fail, + int elements_offset) { Label smi_value, maybe_nan, have_double_value, is_nan, done; Register mantissa_reg = scratch2; Register exponent_reg = scratch3; @@ -1953,8 +1989,10 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, bind(&have_double_value); add(scratch1, elements_reg, Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); - str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize)); - uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); + str(mantissa_reg, FieldMemOperand( + scratch1, FixedDoubleArray::kHeaderSize - elements_offset)); + uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset + + sizeof(kHoleNanLower32); str(exponent_reg, FieldMemOperand(scratch1, offset)); jmp(&done); @@ -1975,7 +2013,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, bind(&smi_value); add(scratch1, elements_reg, - Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag - + elements_offset)); add(scratch1, scratch1, Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); // scratch1 is now effective address of the double element @@ -1987,7 +2026,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, destination = FloatingPointHelper::kCoreRegisters; } - Register untagged_value = receiver_reg; + Register untagged_value = elements_reg; SmiUntag(untagged_value, value_reg); FloatingPointHelper::ConvertIntToDouble(this, untagged_value, @@ -2184,12 +2223,28 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, add(r6, r6, Operand(1)); str(r6, MemOperand(r7, kLevelOffset)); + if (FLAG_log_timer_events) { + FrameScope frame(this, StackFrame::MANUAL); + PushSafepointRegisters(); + PrepareCallCFunction(0, r0); + CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0); + PopSafepointRegisters(); + } + // Native call returns to the DirectCEntry stub which redirects to the // return address pushed on stack (could have moved after GC). // DirectCEntry stub itself is generated early and never moves. 
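// A sketch of the size computation used by AllocateAsciiString above: the
// character count plus the header is rounded up to the heap's object alignment
// with an add-then-mask. The constants below are assumptions chosen only to
// show the arithmetic, not V8's actual layout values.
#include <cstdint>

constexpr uint32_t kObjectAlignmentSketch = 8;      // assumed heap object alignment
constexpr uint32_t kObjectAlignmentMaskSketch = kObjectAlignmentSketch - 1;
constexpr uint32_t kOneByteHeaderSizeSketch = 12;   // assumed SeqOneByteString header size

inline uint32_t AsciiStringAllocationSizeSketch(uint32_t length) {
  return (length + kOneByteHeaderSizeSketch + kObjectAlignmentMaskSketch) &
         ~kObjectAlignmentMaskSketch;
}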
DirectCEntryStub stub; stub.GenerateCall(this, function); + if (FLAG_log_timer_events) { + FrameScope frame(this, StackFrame::MANUAL); + PushSafepointRegisters(); + PrepareCallCFunction(0, r0); + CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0); + PopSafepointRegisters(); + } + Label promote_scheduled_exception; Label delete_allocated_handles; Label leave_exit_frame; @@ -2435,17 +2490,38 @@ void MacroAssembler::ConvertToInt32(Register source, } +void MacroAssembler::TryFastDoubleToInt32(Register result, + DwVfpRegister double_input, + DwVfpRegister double_scratch, + Label* done) { + ASSERT(!double_input.is(double_scratch)); + + vcvt_s32_f64(double_scratch.low(), double_input); + vmov(result, double_scratch.low()); + vcvt_f64_s32(double_scratch, double_scratch.low()); + VFPCompareAndSetFlags(double_input, double_scratch); + b(eq, done); +} + + void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode, - SwVfpRegister result, + Register result, DwVfpRegister double_input, - Register scratch1, - Register scratch2, + Register scratch, + DwVfpRegister double_scratch, CheckForInexactConversion check_inexact) { + ASSERT(!result.is(scratch)); + ASSERT(!double_input.is(double_scratch)); + ASSERT(CpuFeatures::IsSupported(VFP2)); CpuFeatures::Scope scope(VFP2); - Register prev_fpscr = scratch1; - Register scratch = scratch2; + Register prev_fpscr = result; + Label done; + + // Test for values that can be exactly represented as a signed 32-bit integer. + TryFastDoubleToInt32(result, double_input, double_scratch, &done); + // Convert to integer, respecting rounding mode. int32_t check_inexact_conversion = (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0; @@ -2467,7 +2543,7 @@ void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode, vmsr(scratch); // Convert the argument to an integer. - vcvt_s32_f64(result, + vcvt_s32_f64(double_scratch.low(), double_input, (rounding_mode == kRoundToZero) ? kDefaultRoundToZero : kFPSCRRounding); @@ -2476,8 +2552,12 @@ void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode, vmrs(scratch); // Restore FPSCR. vmsr(prev_fpscr); + // Move the converted value into the result register. + vmov(result, double_scratch.low()); // Check for vfp exceptions. tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion)); + + bind(&done); } @@ -2556,7 +2636,7 @@ void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result, void MacroAssembler::EmitECMATruncate(Register result, DwVfpRegister double_input, - SwVfpRegister single_scratch, + DwVfpRegister double_scratch, Register scratch, Register input_high, Register input_low) { @@ -2567,16 +2647,18 @@ void MacroAssembler::EmitECMATruncate(Register result, ASSERT(!scratch.is(result) && !scratch.is(input_high) && !scratch.is(input_low)); - ASSERT(!single_scratch.is(double_input.low()) && - !single_scratch.is(double_input.high())); + ASSERT(!double_input.is(double_scratch)); Label done; + // Test for values that can be exactly represented as a signed 32-bit integer. + TryFastDoubleToInt32(result, double_input, double_scratch, &done); + // Clear cumulative exception flags. ClearFPSCRBits(kVFPExceptionMask, scratch); // Try a conversion to a signed integer. - vcvt_s32_f64(single_scratch, double_input); - vmov(result, single_scratch); + vcvt_s32_f64(double_scratch.low(), double_input); + vmov(result, double_scratch.low()); // Retrieve he FPSCR. vmrs(scratch); // Check for overflow and NaNs. 
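// A sketch of the "convert, convert back, compare" test that the new
// TryFastDoubleToInt32 above performs; EmitVFPTruncate only falls through to
// the slower FPSCR flag-checking path when that exactness test fails. The
// host-side version adds an explicit range guard to keep the cast well defined.
#include <cstdint>

bool TryExactDoubleToInt32Sketch(double input, int32_t* out) {
  if (!(input >= -2147483648.0 && input < 2147483648.0)) return false;  // overflow or NaN
  int32_t truncated = static_cast<int32_t>(input);                      // like vcvt_s32_f64
  if (static_cast<double>(truncated) != input) return false;            // round trip changed the value
  *out = truncated;
  return true;
}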
@@ -3017,38 +3099,46 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1, } -void MacroAssembler::AbortIfSmi(Register object) { - STATIC_ASSERT(kSmiTag == 0); - tst(object, Operand(kSmiTagMask)); - Assert(ne, "Operand is a smi"); +void MacroAssembler::AssertNotSmi(Register object) { + if (emit_debug_code()) { + STATIC_ASSERT(kSmiTag == 0); + tst(object, Operand(kSmiTagMask)); + Check(ne, "Operand is a smi"); + } } -void MacroAssembler::AbortIfNotSmi(Register object) { - STATIC_ASSERT(kSmiTag == 0); - tst(object, Operand(kSmiTagMask)); - Assert(eq, "Operand is not smi"); +void MacroAssembler::AssertSmi(Register object) { + if (emit_debug_code()) { + STATIC_ASSERT(kSmiTag == 0); + tst(object, Operand(kSmiTagMask)); + Check(eq, "Operand is not smi"); + } } -void MacroAssembler::AbortIfNotString(Register object) { - STATIC_ASSERT(kSmiTag == 0); - tst(object, Operand(kSmiTagMask)); - Assert(ne, "Operand is not a string"); - push(object); - ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); - CompareInstanceType(object, object, FIRST_NONSTRING_TYPE); - pop(object); - Assert(lo, "Operand is not a string"); +void MacroAssembler::AssertString(Register object) { + if (emit_debug_code()) { + STATIC_ASSERT(kSmiTag == 0); + tst(object, Operand(kSmiTagMask)); + Check(ne, "Operand is a smi and not a string"); + push(object); + ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); + CompareInstanceType(object, object, FIRST_NONSTRING_TYPE); + pop(object); + Check(lo, "Operand is not a string"); + } } -void MacroAssembler::AbortIfNotRootValue(Register src, - Heap::RootListIndex root_value_index, - const char* message) { - CompareRoot(src, root_value_index); - Assert(eq, message); +void MacroAssembler::AssertRootValue(Register src, + Heap::RootListIndex root_value_index, + const char* message) { + if (emit_debug_code()) { + CompareRoot(src, root_value_index); + Check(eq, message); + } } @@ -3106,7 +3196,8 @@ void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1, Register scratch2, Register heap_number_map, - Label* gc_required) { + Label* gc_required, + TaggingMode tagging_mode) { // Allocate an object in the heap for the heap number and tag it as a heap // object. AllocateInNewSpace(HeapNumber::kSize, @@ -3114,11 +3205,16 @@ void MacroAssembler::AllocateHeapNumber(Register result, scratch1, scratch2, gc_required, - TAG_OBJECT); + tagging_mode == TAG_RESULT ? TAG_OBJECT : + NO_ALLOCATION_FLAGS); // Store heap number map in the allocated object. 
AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); + if (tagging_mode == TAG_RESULT) { + str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); + } else { + str(heap_number_map, MemOperand(result, HeapObject::kMapOffset)); + } } @@ -3189,17 +3285,17 @@ void MacroAssembler::CopyBytes(Register src, cmp(length, Operand(kPointerSize)); b(lt, &byte_loop); ldr(scratch, MemOperand(src, kPointerSize, PostIndex)); -#if CAN_USE_UNALIGNED_ACCESSES - str(scratch, MemOperand(dst, kPointerSize, PostIndex)); -#else - strb(scratch, MemOperand(dst, 1, PostIndex)); - mov(scratch, Operand(scratch, LSR, 8)); - strb(scratch, MemOperand(dst, 1, PostIndex)); - mov(scratch, Operand(scratch, LSR, 8)); - strb(scratch, MemOperand(dst, 1, PostIndex)); - mov(scratch, Operand(scratch, LSR, 8)); - strb(scratch, MemOperand(dst, 1, PostIndex)); -#endif + if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) { + str(scratch, MemOperand(dst, kPointerSize, PostIndex)); + } else { + strb(scratch, MemOperand(dst, 1, PostIndex)); + mov(scratch, Operand(scratch, LSR, 8)); + strb(scratch, MemOperand(dst, 1, PostIndex)); + mov(scratch, Operand(scratch, LSR, 8)); + strb(scratch, MemOperand(dst, 1, PostIndex)); + mov(scratch, Operand(scratch, LSR, 8)); + strb(scratch, MemOperand(dst, 1, PostIndex)); + } sub(length, length, Operand(kPointerSize)); b(&word_loop); @@ -3274,8 +3370,10 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( Register scratch2, Label* failure) { int kFlatAsciiStringMask = - kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; + kIsNotStringMask | kStringEncodingMask | kAsciiDataHintMask | + kStringRepresentationMask; int kFlatAsciiStringTag = ASCII_STRING_TYPE; + ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask); and_(scratch1, first, Operand(kFlatAsciiStringMask)); and_(scratch2, second, Operand(kFlatAsciiStringMask)); cmp(scratch1, Operand(kFlatAsciiStringTag)); @@ -3289,8 +3387,10 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, Register scratch, Label* failure) { int kFlatAsciiStringMask = - kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; + kIsNotStringMask | kStringEncodingMask | kAsciiDataHintMask | + kStringRepresentationMask; int kFlatAsciiStringTag = ASCII_STRING_TYPE; + ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask); and_(scratch, type, Operand(kFlatAsciiStringMask)); cmp(scratch, Operand(kFlatAsciiStringTag)); b(ne, failure); @@ -3481,7 +3581,7 @@ void MacroAssembler::CheckPageFlag( int mask, Condition cc, Label* condition_met) { - and_(scratch, object, Operand(~Page::kPageAlignmentMask)); + Bfc(scratch, object, 0, kPageSizeBits); ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); tst(scratch, Operand(mask)); b(cc, condition_met); @@ -3630,7 +3730,7 @@ void MacroAssembler::EnsureNotWhite( // For ASCII (char-size of 1) we shift the smi tag away to get the length. // For UC16 (char-size of 2) we just leave the smi tag in place, thereby // getting the length multiplied by 2. - ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4); + ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4); ASSERT(kSmiTag == 0 && kSmiTagSize == 1); ldr(ip, FieldMemOperand(value, String::kLengthOffset)); tst(instance_type, Operand(kStringEncodingMask)); @@ -3676,7 +3776,7 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg, // Double value is >= 255, return 255. 
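// A sketch of the byte-wise fallback CopyBytes above uses when unaligned
// accesses are unavailable: a 32-bit word is written as four single bytes,
// shifting the word right by eight bits between strb instructions (assuming
// the little-endian order a whole-word store would have produced).
#include <cstdint>

void StoreWordBytewiseSketch(uint8_t* dst, uint32_t word) {
  for (int i = 0; i < 4; ++i) {
    dst[i] = static_cast<uint8_t>(word);  // strb
    word >>= 8;                           // mov(scratch, Operand(scratch, LSR, 8))
  }
}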
bind(&above_zero); - Vmov(temp_double_reg, 255.0); + Vmov(temp_double_reg, 255.0, result_reg); VFPCompareAndSetFlags(input_reg, temp_double_reg); b(le, &in_bounds); mov(result_reg, Operand(255)); @@ -3698,22 +3798,14 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg, void MacroAssembler::LoadInstanceDescriptors(Register map, - Register descriptors, - Register scratch) { - Register temp = descriptors; - ldr(temp, FieldMemOperand(map, Map::kTransitionsOrBackPointerOffset)); - - Label ok, fail; - CheckMap(temp, - scratch, - isolate()->factory()->fixed_array_map(), - &fail, - DONT_DO_SMI_CHECK); - ldr(descriptors, FieldMemOperand(temp, TransitionArray::kDescriptorsOffset)); - jmp(&ok); - bind(&fail); - mov(descriptors, Operand(FACTORY->empty_descriptor_array())); - bind(&ok); + Register descriptors) { + ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset)); +} + + +void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) { + ldr(dst, FieldMemOperand(map, Map::kBitField3Offset)); + DecodeField(dst); } diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 8eb97125ea..15cef16f05 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -68,6 +68,13 @@ enum AllocationFlags { SIZE_IN_WORDS = 1 << 2 }; +// Flags used for AllocateHeapNumber +enum TaggingMode { + // Tag the result. + TAG_RESULT, + // Don't tag + DONT_TAG_RESULT +}; // Flags used for the ObjectToDoubleVFPRegister function. enum ObjectToDoubleFlags { @@ -95,6 +102,11 @@ bool AreAliased(Register reg1, #endif +enum TargetAddressStorageMode { + CAN_INLINE_TARGET_ADDRESS, + NEVER_INLINE_TARGET_ADDRESS +}; + // MacroAssembler implements a collection of frequently used macros. class MacroAssembler: public Assembler { public: @@ -114,7 +126,9 @@ class MacroAssembler: public Assembler { static int CallSizeNotPredictableCodeSize(Address target, RelocInfo::Mode rmode, Condition cond = al); - void Call(Address target, RelocInfo::Mode rmode, Condition cond = al); + void Call(Address target, RelocInfo::Mode rmode, + Condition cond = al, + TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS); int CallSize(Handle code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, TypeFeedbackId ast_id = TypeFeedbackId::None(), @@ -122,7 +136,8 @@ class MacroAssembler: public Assembler { void Call(Handle code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, TypeFeedbackId ast_id = TypeFeedbackId::None(), - Condition cond = al); + Condition cond = al, + TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS); void Ret(Condition cond = al); // Emit code to discard a non-negative number of pointer-sized elements @@ -154,7 +169,7 @@ class MacroAssembler: public Assembler { int lsb, int width, Condition cond = al); - void Bfc(Register dst, int lsb, int width, Condition cond = al); + void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al); void Usat(Register dst, int satpos, const Operand& src, Condition cond = al); @@ -307,6 +322,7 @@ class MacroAssembler: public Assembler { // Push a handle. void Push(Handle handle); + void Push(Smi* smi) { Push(Handle(smi)); } // Push two registers. Pushes leftmost register first (to highest address). void Push(Register src1, Register src2, Condition cond = al) { @@ -483,6 +499,7 @@ class MacroAssembler: public Assembler { void Vmov(const DwVfpRegister dst, const double imm, + const Register scratch = no_reg, const Condition cond = al); // Enter exit frame. 
@@ -730,7 +747,8 @@ class MacroAssembler: public Assembler { Register scratch1, Register scratch2, Register heap_number_map, - Label* gc_required); + Label* gc_required, + TaggingMode tagging_mode = TAG_RESULT); void AllocateHeapNumberWithValue(Register result, DwVfpRegister value, Register scratch1, @@ -814,13 +832,14 @@ class MacroAssembler: public Assembler { // case scratch2, scratch3 and scratch4 are unmodified. void StoreNumberToDoubleElements(Register value_reg, Register key_reg, - Register receiver_reg, + // All regs below here overwritten. Register elements_reg, Register scratch1, Register scratch2, Register scratch3, Register scratch4, - Label* fail); + Label* fail, + int elements_offset = 0); // Compare an object's map with the specified map and its transitioned // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are @@ -875,12 +894,15 @@ class MacroAssembler: public Assembler { // Load and check the instance type of an object for being a string. // Loads the type into the second argument register. - // Returns a condition that will be enabled if the object was a string. + // Returns a condition that will be enabled if the object was a string + // and the passed-in condition passed. If the passed-in condition failed + // then flags remain unchanged. Condition IsObjectStringType(Register obj, - Register type) { - ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset)); - ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset)); - tst(type, Operand(kIsNotStringMask)); + Register type, + Condition cond = al) { + ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond); + ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond); + tst(type, Operand(kIsNotStringMask), cond); ASSERT_EQ(0, kStringTag); return eq; } @@ -937,21 +959,30 @@ class MacroAssembler: public Assembler { DwVfpRegister double_scratch, Label *not_int32); - // Truncates a double using a specific rounding mode. + // Try to convert a double to a signed 32-bit integer. If the double value + // can be exactly represented as an integer, the code jumps to 'done' and + // 'result' contains the integer value. Otherwise, the code falls through. + void TryFastDoubleToInt32(Register result, + DwVfpRegister double_input, + DwVfpRegister double_scratch, + Label* done); + + // Truncates a double using a specific rounding mode, and writes the value + // to the result register. // Clears the z flag (ne condition) if an overflow occurs. - // If exact_conversion is true, the z flag is also cleared if the conversion - // was inexact, i.e. if the double value could not be converted exactly - // to a 32bit integer. + // If kCheckForInexactConversion is passed, the z flag is also cleared if the + // conversion was inexact, i.e. if the double value could not be converted + // exactly to a 32-bit integer. void EmitVFPTruncate(VFPRoundingMode rounding_mode, - SwVfpRegister result, + Register result, DwVfpRegister double_input, - Register scratch1, - Register scratch2, + Register scratch, + DwVfpRegister double_scratch, CheckForInexactConversion check = kDontCheckForInexactConversion); // Helper for EmitECMATruncate. - // This will truncate a floating-point value outside of the singed 32bit + // This will truncate a floating-point value outside of the signed 32bit // integer range to a 32bit signed integer. // Expects the double value loaded in input_high and input_low. // Exits with the answer in 'result'. 
@@ -966,7 +997,7 @@ class MacroAssembler: public Assembler { // Exits with 'result' holding the answer and all other registers clobbered. void EmitECMATruncate(Register result, DwVfpRegister double_input, - SwVfpRegister single_scratch, + DwVfpRegister double_scratch, Register scratch, Register scratch2, Register scratch3); @@ -1183,7 +1214,7 @@ class MacroAssembler: public Assembler { // Souce and destination can be the same register. void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case); - // Jump the register contains a smi. + // Jump if the register contains a smi. inline void JumpIfSmi(Register value, Label* smi_label) { tst(value, Operand(kSmiTagMask)); b(eq, smi_label); @@ -1198,17 +1229,18 @@ class MacroAssembler: public Assembler { // Jump if either of the registers contain a smi. void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi); - // Abort execution if argument is a smi. Used in debug code. - void AbortIfSmi(Register object); - void AbortIfNotSmi(Register object); + // Abort execution if argument is a smi, enabled via --debug-code. + void AssertNotSmi(Register object); + void AssertSmi(Register object); - // Abort execution if argument is a string. Used in debug code. - void AbortIfNotString(Register object); + // Abort execution if argument is a string, enabled via --debug-code. + void AssertString(Register object); - // Abort execution if argument is not the root value with the given index. - void AbortIfNotRootValue(Register src, - Heap::RootListIndex root_value_index, - const char* message); + // Abort execution if argument is not the root value with the given index, + // enabled via --debug-code. + void AssertRootValue(Register src, + Heap::RootListIndex root_value_index, + const char* message); // --------------------------------------------------------------------------- // HeapNumber utilities @@ -1269,10 +1301,17 @@ class MacroAssembler: public Assembler { DoubleRegister temp_double_reg); - void LoadInstanceDescriptors(Register map, - Register descriptors, - Register scratch); + void LoadInstanceDescriptors(Register map, Register descriptors); void EnumLength(Register dst, Register map); + void NumberOfOwnDescriptors(Register dst, Register map); + + template + void DecodeField(Register reg) { + static const int shift = Field::kShift; + static const int mask = (Field::kMask >> shift) << kSmiTagSize; + mov(reg, Operand(reg, LSR, shift)); + and_(reg, reg, Operand(mask)); + } // Activation support. void EnterFrame(StackFrame::Type type); diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index 66cdd8435e..d852d23760 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -1150,7 +1150,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address, Handle subject(frame_entry(re_frame, kInputString)); // Current string. - bool is_ascii = subject->IsAsciiRepresentationUnderneath(); + bool is_ascii = subject->IsOneByteRepresentationUnderneath(); ASSERT(re_code->instruction_start() <= *return_address); ASSERT(*return_address <= @@ -1181,7 +1181,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address, } // String might have changed. 
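// A generic host-side form of the DecodeField template declared above: a
// shift-then-mask bit-field read (the V8 version additionally folds the smi
// tag size into the mask it applies). The example field layout is an
// assumption, not Map::kBitField3's real encoding.
#include <cstdint>

template <int kShift, int kWidth>
inline uint32_t DecodeFieldSketch(uint32_t word) {
  return (word >> kShift) & ((1u << kWidth) - 1);  // LSR + and_
}

// e.g. reading a descriptor count assumed to live in bits [3, 13) of bit_field3:
//   uint32_t own = DecodeFieldSketch<3, 10>(bit_field3);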
- if (subject_tmp->IsAsciiRepresentation() != is_ascii) { + if (subject_tmp->IsOneByteRepresentation() != is_ascii) { // If we changed between an ASCII and an UC16 string, the specialized // code cannot be used, and we need to restart regexp matching from // scratch (including, potentially, compiling a new version of the code). @@ -1358,6 +1358,11 @@ void RegExpMacroAssemblerARM::CallCFunctionUsingStub( } +bool RegExpMacroAssemblerARM::CanReadUnaligned() { + return CpuFeatures::IsSupported(UNALIGNED_ACCESSES) && !slow_safe(); +} + + void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset, int characters) { Register offset = current_input_offset(); @@ -1370,9 +1375,9 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset, // and the operating system running on the target allow it. // If unaligned load/stores are not supported then this function must only // be used to load a single character at a time. -#if !V8_TARGET_CAN_READ_UNALIGNED - ASSERT(characters == 1); -#endif + if (!CanReadUnaligned()) { + ASSERT(characters == 1); + } if (mode_ == ASCII) { if (characters == 4) { diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h index f723fa212f..c45669ae89 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.h +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h @@ -109,6 +109,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { virtual void WriteCurrentPositionToRegister(int reg, int cp_offset); virtual void ClearRegisters(int reg_from, int reg_to); virtual void WriteStackPointerToRegister(int reg); + virtual bool CanReadUnaligned(); // Called from RegExp if the stack-guard is triggered. // If the code object is relocated, the return address is fixed before diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index a057de58cc..d11e340a9b 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -1066,111 +1066,83 @@ void Simulator::TrashCallerSaveRegisters() { int Simulator::ReadW(int32_t addr, Instruction* instr) { -#if V8_TARGET_CAN_READ_UNALIGNED - intptr_t* ptr = reinterpret_cast(addr); - return *ptr; -#else - if ((addr & 3) == 0) { + if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) { intptr_t* ptr = reinterpret_cast(addr); return *ptr; + } else { + PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast(instr)); + UNIMPLEMENTED(); + return 0; } - PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n", - addr, - reinterpret_cast(instr)); - UNIMPLEMENTED(); - return 0; -#endif } void Simulator::WriteW(int32_t addr, int value, Instruction* instr) { -#if V8_TARGET_CAN_READ_UNALIGNED - intptr_t* ptr = reinterpret_cast(addr); - *ptr = value; - return; -#else - if ((addr & 3) == 0) { + if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) { intptr_t* ptr = reinterpret_cast(addr); *ptr = value; - return; + } else { + PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast(instr)); + UNIMPLEMENTED(); } - PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", - addr, - reinterpret_cast(instr)); - UNIMPLEMENTED(); -#endif } uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) { -#if V8_TARGET_CAN_READ_UNALIGNED - uint16_t* ptr = reinterpret_cast(addr); - return *ptr; -#else - if ((addr & 1) == 0) { + if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) { uint16_t* ptr = reinterpret_cast(addr); return *ptr; + } else { + PrintF("Unaligned 
unsigned halfword read at 0x%08x, pc=0x%08" + V8PRIxPTR "\n", + addr, + reinterpret_cast(instr)); + UNIMPLEMENTED(); + return 0; } - PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n", - addr, - reinterpret_cast(instr)); - UNIMPLEMENTED(); - return 0; -#endif } int16_t Simulator::ReadH(int32_t addr, Instruction* instr) { -#if V8_TARGET_CAN_READ_UNALIGNED - int16_t* ptr = reinterpret_cast(addr); - return *ptr; -#else - if ((addr & 1) == 0) { + if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) { int16_t* ptr = reinterpret_cast(addr); return *ptr; + } else { + PrintF("Unaligned signed halfword read at 0x%08x\n", addr); + UNIMPLEMENTED(); + return 0; } - PrintF("Unaligned signed halfword read at 0x%08x\n", addr); - UNIMPLEMENTED(); - return 0; -#endif } void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) { -#if V8_TARGET_CAN_READ_UNALIGNED - uint16_t* ptr = reinterpret_cast(addr); - *ptr = value; - return; -#else - if ((addr & 1) == 0) { + if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) { uint16_t* ptr = reinterpret_cast(addr); *ptr = value; - return; + } else { + PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" + V8PRIxPTR "\n", + addr, + reinterpret_cast(instr)); + UNIMPLEMENTED(); } - PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", - addr, - reinterpret_cast(instr)); - UNIMPLEMENTED(); -#endif } void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) { -#if V8_TARGET_CAN_READ_UNALIGNED - int16_t* ptr = reinterpret_cast(addr); - *ptr = value; - return; -#else - if ((addr & 1) == 0) { + if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) { int16_t* ptr = reinterpret_cast(addr); *ptr = value; - return; + } else { + PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", + addr, + reinterpret_cast(instr)); + UNIMPLEMENTED(); } - PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", - addr, - reinterpret_cast(instr)); - UNIMPLEMENTED(); -#endif } @@ -1199,37 +1171,26 @@ void Simulator::WriteB(int32_t addr, int8_t value) { int32_t* Simulator::ReadDW(int32_t addr) { -#if V8_TARGET_CAN_READ_UNALIGNED - int32_t* ptr = reinterpret_cast(addr); - return ptr; -#else - if ((addr & 3) == 0) { + if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) { int32_t* ptr = reinterpret_cast(addr); return ptr; + } else { + PrintF("Unaligned read at 0x%08x\n", addr); + UNIMPLEMENTED(); + return 0; } - PrintF("Unaligned read at 0x%08x\n", addr); - UNIMPLEMENTED(); - return 0; -#endif } void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) { -#if V8_TARGET_CAN_READ_UNALIGNED - int32_t* ptr = reinterpret_cast(addr); - *ptr++ = value1; - *ptr = value2; - return; -#else - if ((addr & 3) == 0) { + if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) { int32_t* ptr = reinterpret_cast(addr); *ptr++ = value1; *ptr = value2; - return; + } else { + PrintF("Unaligned write at 0x%08x\n", addr); + UNIMPLEMENTED(); } - PrintF("Unaligned write at 0x%08x\n", addr); - UNIMPLEMENTED(); -#endif } @@ -1426,7 +1387,14 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) { } case ROR: { - UNIMPLEMENTED(); + if (shift_amount == 0) { + *carry_out = c_flag_; + } else { + uint32_t left = static_cast(result) >> shift_amount; + uint32_t right = static_cast(result) << (32 - shift_amount); + result = right | left; + *carry_out = (static_cast(result) >> 31) != 0; + } break; } @@ -1498,7 +1466,14 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* 
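// A sketch of the ROR (rotate right) semantics that the simulator change above
// implements: bits shifted out on the right re-enter on the left, and a rotate
// amount of zero is special-cased so no undefined 32-bit shift occurs.
#include <cstdint>

inline uint32_t RotateRight32Sketch(uint32_t value, unsigned amount) {
  amount &= 31;
  if (amount == 0) return value;  // mirrors the shift_amount == 0 branch
  return (value >> amount) | (value << (32 - amount));
}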
carry_out) { } case ROR: { - UNIMPLEMENTED(); + if (shift_amount == 0) { + *carry_out = c_flag_; + } else { + uint32_t left = static_cast(result) >> shift_amount; + uint32_t right = static_cast(result) << (32 - shift_amount); + result = right | left; + *carry_out = (static_cast(result) >> 31) != 0; + } break; } @@ -1986,11 +1961,23 @@ void Simulator::DecodeType01(Instruction* instr) { SetNZFlags(alu_out); } } else { - // The MLA instruction description (A 4.1.28) refers to the order - // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the - // Rn field to encode the Rd register and the Rd field to encode - // the Rn register. - Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd"); + int rd = instr->RdValue(); + int32_t acc_value = get_register(rd); + if (instr->Bit(22) == 0) { + // The MLA instruction description (A 4.1.28) refers to the order + // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the + // Rn field to encode the Rd register and the Rd field to encode + // the Rn register. + // Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd"); + int32_t mul_out = rm_val * rs_val; + int32_t result = acc_value + mul_out; + set_register(rn, result); + } else { + // Format(instr, "mls'cond's 'rn, 'rm, 'rs, 'rd"); + int32_t mul_out = rm_val * rs_val; + int32_t result = acc_value - mul_out; + set_register(rn, result); + } } } else { // The signed/long multiply instructions use the terms RdHi and RdLo @@ -2210,6 +2197,8 @@ void Simulator::DecodeType01(Instruction* instr) { PrintF("%08x\n", instr->InstructionBits()); UNIMPLEMENTED(); } + } else if ((type == 1) && instr->IsNopType1()) { + // NOP. } else { int rd = instr->RdValue(); int rn = instr->RnValue(); @@ -2546,6 +2535,25 @@ void Simulator::DecodeType3(Instruction* instr) { break; } case db_x: { + if (FLAG_enable_sudiv) { + if (!instr->HasW()) { + if (instr->Bits(5, 4) == 0x1) { + if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) { + // sdiv (in V8 notation matching ARM ISA format) rn = rm/rs + // Format(instr, "'sdiv'cond'b 'rn, 'rm, 'rs); + int rm = instr->RmValue(); + int32_t rm_val = get_register(rm); + int rs = instr->RsValue(); + int32_t rs_val = get_register(rs); + int32_t ret_val = 0; + ASSERT(rs_val != 0); + ret_val = rm_val/rs_val; + set_register(rn, ret_val); + return; + } + } + } + } // Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w"); addr = rn_val - shifter_operand; if (instr->HasW()) { @@ -2770,6 +2778,20 @@ void Simulator::DecodeTypeVFP(Instruction* instr) { double dm_value = get_double_from_d_register(vm); double dd_value = dn_value * dm_value; set_d_register_from_double(vd, dd_value); + } else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) { + // vmla + if (instr->SzValue() != 0x1) { + UNREACHABLE(); // Not used by V8. + } + + double dd_value = get_double_from_d_register(vd); + double dn_value = get_double_from_d_register(vn); + double dm_value = get_double_from_d_register(vm); + + // Note: we do the mul and add in separate steps to avoid getting a result + // with too high precision. + set_d_register_from_double(vd, dn_value * dm_value); + set_d_register_from_double(vd, get_double_from_d_register(vd) + dd_value); } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) { // vdiv if (instr->SzValue() != 0x1) { @@ -3279,33 +3301,7 @@ void Simulator::Execute() { } -int32_t Simulator::Call(byte* entry, int argument_count, ...) { - va_list parameters; - va_start(parameters, argument_count); - // Set up arguments - - // First four arguments passed in registers. 
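// The vmla handling above notes that the multiply and add are performed as two
// separately rounded steps. A fused multiply-add keeps the intermediate
// product at full precision and can therefore produce a different final bit
// pattern, which this host-side comparison sketches.
#include <cmath>
#include <cstdio>

void CompareFusedAndSeparateSketch(double acc, double n, double m) {
  double separate = n * m;             // rounded once here...
  separate += acc;                     // ...and again here (what the simulator models)
  double fused = std::fma(n, m, acc);  // single rounding of n * m + acc
  std::printf("separate=%.17g fused=%.17g\n", separate, fused);
}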
- ASSERT(argument_count >= 4); - set_register(r0, va_arg(parameters, int32_t)); - set_register(r1, va_arg(parameters, int32_t)); - set_register(r2, va_arg(parameters, int32_t)); - set_register(r3, va_arg(parameters, int32_t)); - - // Remaining arguments passed on stack. - int original_stack = get_register(sp); - // Compute position of stack on entry to generated code. - int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)); - if (OS::ActivationFrameAlignment() != 0) { - entry_stack &= -OS::ActivationFrameAlignment(); - } - // Store remaining arguments on stack, from low to high memory. - intptr_t* stack_argument = reinterpret_cast(entry_stack); - for (int i = 4; i < argument_count; i++) { - stack_argument[i - 4] = va_arg(parameters, int32_t); - } - va_end(parameters); - set_register(sp, entry_stack); - +void Simulator::CallInternal(byte* entry) { // Prepare to execute the code at entry set_register(pc, reinterpret_cast(entry)); // Put down marker for end of simulation. The simulator will stop simulation @@ -3359,6 +3355,37 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) { set_register(r9, r9_val); set_register(r10, r10_val); set_register(r11, r11_val); +} + + +int32_t Simulator::Call(byte* entry, int argument_count, ...) { + va_list parameters; + va_start(parameters, argument_count); + // Set up arguments + + // First four arguments passed in registers. + ASSERT(argument_count >= 4); + set_register(r0, va_arg(parameters, int32_t)); + set_register(r1, va_arg(parameters, int32_t)); + set_register(r2, va_arg(parameters, int32_t)); + set_register(r3, va_arg(parameters, int32_t)); + + // Remaining arguments passed on stack. + int original_stack = get_register(sp); + // Compute position of stack on entry to generated code. + int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)); + if (OS::ActivationFrameAlignment() != 0) { + entry_stack &= -OS::ActivationFrameAlignment(); + } + // Store remaining arguments on stack, from low to high memory. + intptr_t* stack_argument = reinterpret_cast(entry_stack); + for (int i = 4; i < argument_count; i++) { + stack_argument[i - 4] = va_arg(parameters, int32_t); + } + va_end(parameters); + set_register(sp, entry_stack); + + CallInternal(entry); // Pop stack passed arguments. CHECK_EQ(entry_stack, get_register(sp)); @@ -3369,6 +3396,27 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) { } +double Simulator::CallFP(byte* entry, double d0, double d1) { + if (use_eabi_hardfloat()) { + set_d_register_from_double(0, d0); + set_d_register_from_double(1, d1); + } else { + int buffer[2]; + ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0)); + memcpy(buffer, &d0, sizeof(d0)); + set_dw_register(0, buffer); + memcpy(buffer, &d1, sizeof(d1)); + set_dw_register(2, buffer); + } + CallInternal(entry); + if (use_eabi_hardfloat()) { + return get_double_from_d_register(0); + } else { + return get_double_from_register_pair(0); + } +} + + uintptr_t Simulator::PushAddress(uintptr_t address) { int new_sp = get_register(sp) - sizeof(uintptr_t); uintptr_t* stack_slot = reinterpret_cast(new_sp); diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h index abc91bbc42..ec47fa1f1c 100644 --- a/deps/v8/src/arm/simulator-arm.h +++ b/deps/v8/src/arm/simulator-arm.h @@ -205,6 +205,8 @@ class Simulator { // generated RegExp code with 7 parameters. This is a convenience function, // which sets up the simulator state and grabs the result on return. 
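// A sketch of the soft-float argument passing that the new Simulator::CallFP
// above has to model: with hard-float the doubles go straight into d0/d1;
// otherwise each double is split into two 32-bit core-register words, which is
// what the memcpy into a two-int buffer does. Word order below assumes a
// little-endian split; names are illustrative only.
#include <cstdint>
#include <cstring>

struct DoubleWordsSketch { uint32_t lo; uint32_t hi; };

inline DoubleWordsSketch SplitDoubleSketch(double d) {
  uint32_t words[2];
  static_assert(sizeof(words) == sizeof(double), "double is expected to be 64 bits");
  std::memcpy(words, &d, sizeof d);
  return DoubleWordsSketch{words[0], words[1]};
}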
int32_t Call(byte* entry, int argument_count, ...); + // Alternative: call a 2-argument double function. + double CallFP(byte* entry, double d0, double d1); // Push an address onto the JS stack. uintptr_t PushAddress(uintptr_t address); @@ -356,6 +358,8 @@ class Simulator { template void SetVFPRegister(int reg_index, const InputType& value); + void CallInternal(byte* entry); + // Architecture state. // Saturating instructions require a Q flag to indicate saturation. // There is currently no way to read the CPSR directly, and thus read the Q diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index 66714f8e44..a194dfae5b 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -327,18 +327,23 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, Register dst, Register src, Handle holder, - int index) { - // Adjust for the number of properties stored in the holder. - index -= holder->map()->inobject_properties(); - if (index < 0) { - // Get the property straight out of the holder. - int offset = holder->map()->instance_size() + (index * kPointerSize); + PropertyIndex index) { + if (index.is_header_index()) { + int offset = index.header_index() * kPointerSize; __ ldr(dst, FieldMemOperand(src, offset)); } else { - // Calculate the offset into the properties array. - int offset = index * kPointerSize + FixedArray::kHeaderSize; - __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset)); - __ ldr(dst, FieldMemOperand(dst, offset)); + // Adjust for the number of properties stored in the holder. + int slot = index.field_index() - holder->map()->inobject_properties(); + if (slot < 0) { + // Get the property straight out of the holder. + int offset = holder->map()->instance_size() + (slot * kPointerSize); + __ ldr(dst, FieldMemOperand(src, offset)); + } else { + // Calculate the offset into the properties array. + int offset = slot * kPointerSize + FixedArray::kHeaderSize; + __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset)); + __ ldr(dst, FieldMemOperand(dst, offset)); + } } } @@ -1196,7 +1201,7 @@ void StubCompiler::GenerateLoadField(Handle object, Register scratch1, Register scratch2, Register scratch3, - int index, + PropertyIndex index, Handle name, Label* miss) { // Check that the receiver isn't a smi. @@ -1545,7 +1550,7 @@ void CallStubCompiler::GenerateMissBranch() { Handle CallStubCompiler::CompileCallField(Handle object, Handle holder, - int index, + PropertyIndex index, Handle name) { // ----------- S t a t e ------------- // -- r2 : name @@ -1618,7 +1623,7 @@ Handle CallStubCompiler::CompileArrayPushCall( Label call_builtin; if (argc == 1) { // Otherwise fall through to call the builtin. - Label attempt_to_grow_elements; + Label attempt_to_grow_elements, with_write_barrier, check_double; Register elements = r6; Register end_elements = r5; @@ -1629,10 +1634,9 @@ Handle CallStubCompiler::CompileArrayPushCall( __ CheckMap(elements, r0, Heap::kFixedArrayMapRootIndex, - &call_builtin, + &check_double, DONT_DO_SMI_CHECK); - // Get the array's length into r0 and calculate new length. __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); STATIC_ASSERT(kSmiTagSize == 1); @@ -1647,7 +1651,6 @@ Handle CallStubCompiler::CompileArrayPushCall( __ b(gt, &attempt_to_grow_elements); // Check if value is a smi. 
- Label with_write_barrier; __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); __ JumpIfNotSmi(r4, &with_write_barrier); @@ -1667,6 +1670,40 @@ Handle CallStubCompiler::CompileArrayPushCall( __ Drop(argc + 1); __ Ret(); + __ bind(&check_double); + + // Check that the elements are in fast mode and writable. + __ CheckMap(elements, + r0, + Heap::kFixedDoubleArrayMapRootIndex, + &call_builtin, + DONT_DO_SMI_CHECK); + + // Get the array's length into r0 and calculate new length. + __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); + STATIC_ASSERT(kSmiTagSize == 1); + STATIC_ASSERT(kSmiTag == 0); + __ add(r0, r0, Operand(Smi::FromInt(argc))); + + // Get the elements' length. + __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset)); + + // Check if we could survive without allocation. + __ cmp(r0, r4); + __ b(gt, &call_builtin); + + __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); + __ StoreNumberToDoubleElements( + r4, r0, elements, r3, r5, r2, r9, + &call_builtin, argc * kDoubleSize); + + // Save new length. + __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); + + // Check for a smi. + __ Drop(argc + 1); + __ Ret(); + __ bind(&with_write_barrier); __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset)); @@ -1678,6 +1715,11 @@ Handle CallStubCompiler::CompileArrayPushCall( // In case of fast smi-only, convert to fast object, otherwise bail out. __ bind(¬_fast_object); __ CheckFastSmiElements(r3, r7, &call_builtin); + + __ ldr(r7, FieldMemOperand(r4, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); + __ cmp(r7, ip); + __ b(eq, &call_builtin); // edx: receiver // r3: map Label try_holey_map; @@ -2912,7 +2954,7 @@ Handle LoadStubCompiler::CompileLoadNonexistent(Handle name, Handle LoadStubCompiler::CompileLoadField(Handle object, Handle holder, - int index, + PropertyIndex index, Handle name) { // ----------- S t a t e ------------- // -- r0 : receiver @@ -3101,7 +3143,7 @@ Handle LoadStubCompiler::CompileLoadGlobal( Handle KeyedLoadStubCompiler::CompileLoadField(Handle name, Handle receiver, Handle holder, - int index) { + PropertyIndex index) { // ----------- S t a t e ------------- // -- lr : return address // -- r0 : key @@ -3467,7 +3509,13 @@ Handle ConstructStubCompiler::CompileConstructStub( // r1: constructor function // r2: initial map // r7: undefined + ASSERT(function->has_initial_map()); __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset)); +#ifdef DEBUG + int instance_size = function->initial_map()->instance_size(); + __ cmp(r3, Operand(instance_size >> kPointerSizeLog2)); + __ Check(eq, "Instance size of initial map changed."); +#endif __ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS); // Allocated the JSObject, now initialize the fields. Map is set to initial @@ -3525,7 +3573,6 @@ Handle ConstructStubCompiler::CompileConstructStub( } // Fill the unused in-object property fields with undefined. 
- ASSERT(function->has_initial_map()); for (int i = shared->this_property_assignments_count(); i < function->initial_map()->inobject_properties(); i++) { @@ -3646,6 +3693,7 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm, Register scratch0, Register scratch1, DwVfpRegister double_scratch0, + DwVfpRegister double_scratch1, Label* fail) { if (CpuFeatures::IsSupported(VFP2)) { CpuFeatures::Scope scope(VFP2); @@ -3662,13 +3710,12 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm, __ sub(ip, key, Operand(kHeapObjectTag)); __ vldr(double_scratch0, ip, HeapNumber::kValueOffset); __ EmitVFPTruncate(kRoundToZero, - double_scratch0.low(), - double_scratch0, scratch0, + double_scratch0, scratch1, + double_scratch1, kCheckForInexactConversion); __ b(ne, fail); - __ vmov(scratch0, double_scratch0.low()); __ TrySmiTag(scratch0, fail, scratch1); __ mov(key, scratch0); __ bind(&key_ok); @@ -3696,7 +3743,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( // have been verified by the caller to not be a smi. // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, key, r4, r5, d1, &miss_force_generic); + GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic); __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); // r3: elements array @@ -3787,36 +3834,42 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( __ Ret(); __ bind(&box_int); - // Allocate a HeapNumber for the result and perform int-to-double - // conversion. Don't touch r0 or r1 as they are needed if allocation - // fails. - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r5, r3, r4, r6, &slow); - // Now we can use r0 for the result as key is not needed any more. - __ mov(r0, r5); - if (CpuFeatures::IsSupported(VFP2)) { CpuFeatures::Scope scope(VFP2); + // Allocate a HeapNumber for the result and perform int-to-double + // conversion. Don't touch r0 or r1 as they are needed if allocation + // fails. + __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); + + __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT); + // Now we can use r0 for the result as key is not needed any more. + __ add(r0, r5, Operand(kHeapObjectTag)); __ vmov(s0, value); __ vcvt_f64_s32(d0, s0); - __ sub(r3, r0, Operand(kHeapObjectTag)); - __ vstr(d0, r3, HeapNumber::kValueOffset); + __ vstr(d0, r5, HeapNumber::kValueOffset); __ Ret(); } else { - Register dst1 = r1; - Register dst2 = r3; + // Allocate a HeapNumber for the result and perform int-to-double + // conversion. Don't touch r0 or r1 as they are needed if allocation + // fails. + __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT); + // Now we can use r0 for the result as key is not needed any more. + __ mov(r0, r5); + Register dst_mantissa = r1; + Register dst_exponent = r3; FloatingPointHelper::Destination dest = FloatingPointHelper::kCoreRegisters; FloatingPointHelper::ConvertIntToDouble(masm, value, dest, d0, - dst1, - dst2, + dst_mantissa, + dst_exponent, r9, s0); - __ str(dst1, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); - __ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); + __ str(dst_mantissa, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); + __ str(dst_exponent, FieldMemOperand(r0, HeapNumber::kExponentOffset)); __ Ret(); } } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) { @@ -3838,13 +3891,12 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( // conversion. 
Don't use r0 and r1 as AllocateHeapNumber clobbers all // registers - also when jumping due to exhausted young space. __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r2, r3, r4, r6, &slow); + __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT); __ vcvt_f64_u32(d0, s0); - __ sub(r1, r2, Operand(kHeapObjectTag)); - __ vstr(d0, r1, HeapNumber::kValueOffset); + __ vstr(d0, r2, HeapNumber::kValueOffset); - __ mov(r0, r2); + __ add(r0, r2, Operand(kHeapObjectTag)); __ Ret(); } else { // Check whether unsigned integer fits into smi. @@ -3876,7 +3928,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( // clobbers all registers - also when jumping due to exhausted young // space. __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r4, r5, r7, r6, &slow); + __ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT); __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset)); __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); @@ -3893,19 +3945,18 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( // AllocateHeapNumber clobbers all registers - also when jumping due to // exhausted young space. __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r2, r3, r4, r6, &slow); + __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT); __ vcvt_f64_f32(d0, s0); - __ sub(r1, r2, Operand(kHeapObjectTag)); - __ vstr(d0, r1, HeapNumber::kValueOffset); + __ vstr(d0, r2, HeapNumber::kValueOffset); - __ mov(r0, r2); + __ add(r0, r2, Operand(kHeapObjectTag)); __ Ret(); } else { // Allocate a HeapNumber for the result. Don't use r0 and r1 as // AllocateHeapNumber clobbers all registers - also when jumping due to // exhausted young space. __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r3, r4, r5, r6, &slow); + __ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT); // VFP is not available, do manual single to double conversion. // r2: floating point value (binary32) @@ -3961,18 +4012,17 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( // AllocateHeapNumber clobbers all registers - also when jumping due to // exhausted young space. __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r2, r3, r4, r6, &slow); - __ sub(r1, r2, Operand(kHeapObjectTag)); - __ vstr(d0, r1, HeapNumber::kValueOffset); + __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT); + __ vstr(d0, r2, HeapNumber::kValueOffset); - __ mov(r0, r2); + __ add(r0, r2, Operand(kHeapObjectTag)); __ Ret(); } else { // Allocate a HeapNumber for the result. Don't use r0 and r1 as // AllocateHeapNumber clobbers all registers - also when jumping due to // exhausted young space. __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r4, r5, r6, r7, &slow); + __ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT); __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset)); @@ -4030,7 +4080,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // have been verified by the caller to not be a smi. // Check that the key is a smi or a heap number convertible to a smi. 
- GenerateSmiKeyCheck(masm, key, r4, r5, d1, &miss_force_generic); + GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic); __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); @@ -4088,7 +4138,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( } FloatingPointHelper::ConvertIntToDouble( masm, r5, destination, - d0, r6, r7, // These are: double_dst, dst1, dst2. + d0, r6, r7, // These are: double_dst, dst_mantissa, dst_exponent. r4, s2); // These are: scratch2, single_scratch. if (destination == FloatingPointHelper::kVFPRegisters) { CpuFeatures::Scope scope(VFP2); @@ -4147,7 +4197,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // not include -kHeapObjectTag into it. __ sub(r5, value, Operand(kHeapObjectTag)); __ vldr(d0, r5, HeapNumber::kValueOffset); - __ EmitECMATruncate(r5, d0, s2, r6, r7, r9); + __ EmitECMATruncate(r5, d0, d1, r6, r7, r9); switch (elements_kind) { case EXTERNAL_BYTE_ELEMENTS: @@ -4365,7 +4415,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) { // have been verified by the caller to not be a smi. // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, r0, r4, r5, d1, &miss_force_generic); + GenerateSmiKeyCheck(masm, r0, r4, r5, d1, d2, &miss_force_generic); // Get the elements array. __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset)); @@ -4417,7 +4467,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( // have been verified by the caller to not be a smi. // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic); + GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic); // Get the elements array. __ ldr(elements_reg, @@ -4439,7 +4489,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( // Non-NaN. Allocate a new heap number and copy the double value into it. __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3, - heap_number_map, &slow_allocate_heapnumber); + heap_number_map, &slow_allocate_heapnumber, TAG_RESULT); // Don't need to reload the upper 32 bits of the double, it's already in // scratch. @@ -4493,7 +4543,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( // have been verified by the caller to not be a smi. // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic); + GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic); if (IsFastSmiElementsKind(elements_kind)) { __ JumpIfNotSmi(value_reg, &transition_elements_kind); @@ -4640,9 +4690,12 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // -- r1 : key // -- r2 : receiver // -- lr : return address - // -- r3 : scratch + // -- r3 : scratch (elements backing store) // -- r4 : scratch // -- r5 : scratch + // -- r6 : scratch + // -- r7 : scratch + // -- r9 : scratch // ----------------------------------- Label miss_force_generic, transition_elements_kind, grow, slow; Label finish_store, check_capacity; @@ -4655,13 +4708,14 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( Register scratch2 = r5; Register scratch3 = r6; Register scratch4 = r7; + Register scratch5 = r9; Register length_reg = r7; // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. 
// Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic); + GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic); __ ldr(elements_reg, FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); @@ -4685,7 +4739,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ bind(&finish_store); __ StoreNumberToDoubleElements(value_reg, key_reg, - receiver_reg, + // All registers after this are overwritten. elements_reg, scratch1, scratch2, @@ -4733,8 +4787,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow, TAG_OBJECT); - // Initialize the new FixedDoubleArray. Leave elements unitialized for - // efficiency, they are guaranteed to be initialized before use. + // Initialize the new FixedDoubleArray. __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex); __ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset)); __ mov(scratch1, @@ -4742,6 +4795,25 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ str(scratch1, FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset)); + __ mov(scratch1, elements_reg); + __ StoreNumberToDoubleElements(value_reg, + key_reg, + // All registers after this are overwritten. + scratch1, + scratch2, + scratch3, + scratch4, + scratch5, + &transition_elements_kind); + + __ mov(scratch1, Operand(kHoleNanLower32)); + __ mov(scratch2, Operand(kHoleNanUpper32)); + for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) { + int offset = FixedDoubleArray::OffsetOfElementAt(i); + __ str(scratch1, FieldMemOperand(elements_reg, offset)); + __ str(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize)); + } + // Install the new backing store in the JSArray. __ str(elements_reg, FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); @@ -4754,7 +4826,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); __ ldr(elements_reg, FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); - __ jmp(&finish_store); + __ Ret(); __ bind(&check_capacity); // Make sure that the backing store can hold additional elements. 
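The grow path added above allocates a fresh FixedDoubleArray, stores the incoming number via StoreNumberToDoubleElements, and then explicitly writes the hole NaN into every remaining preallocated slot, where the earlier code deliberately left them uninitialized. A minimal standalone sketch of that fill-with-holes step, using a placeholder hole bit pattern rather than V8's real kHoleNanLower32/kHoleNanUpper32 constants:

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical hole sentinel: a NaN whose payload marks "no element here".
// The exact bit pattern below is illustrative, not V8's actual constant.
static double HoleNan() {
  uint64_t bits = 0x7FF7FFFFFFF7FFFFull;
  double hole;
  std::memcpy(&hole, &bits, sizeof hole);
  return hole;
}

// Grow a double-element backing store so that index `key` exists, store
// `value` there, and fill every other newly created slot with the hole,
// loosely mirroring what the generated stub does after AllocateInNewSpace.
void GrowAndStore(std::vector<double>* elements, size_t key, double value) {
  size_t old_length = elements->size();
  size_t new_capacity = std::max(key + 1, old_length * 2);
  elements->resize(new_capacity, HoleNan());  // new slots start out as holes
  (*elements)[key] = value;                   // then write the stored element
}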
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js index 1cedd8d476..47f796d2b1 100644 --- a/deps/v8/src/array.js +++ b/deps/v8/src/array.js @@ -62,7 +62,7 @@ function GetSortedArrayKeys(array, intervals) { } } } - keys.sort(function(a, b) { return a - b; }); + %_CallFunction(keys, function(a, b) { return a - b; }, ArraySort); return keys; } @@ -413,6 +413,7 @@ function ArrayJoin(separator) { ["Array.prototype.join"]); } + var length = TO_UINT32(this.length); if (IS_UNDEFINED(separator)) { separator = ','; } else if (!IS_STRING(separator)) { @@ -422,7 +423,7 @@ function ArrayJoin(separator) { var result = %_FastAsciiArrayJoin(this, separator); if (!IS_UNDEFINED(result)) return result; - return Join(this, TO_UINT32(this.length), separator, ConvertToString); + return Join(this, length, separator, ConvertToString); } @@ -441,8 +442,8 @@ function ArrayPop() { } n--; var value = this[n]; - this.length = n; delete this[n]; + this.length = n; return value; } @@ -581,7 +582,7 @@ function ArrayShift() { var first = this[0]; - if (IS_ARRAY(this)) { + if (IS_ARRAY(this) && !%IsObserved(this)) { SmartMove(this, 0, 1, len, 0); } else { SimpleMove(this, 0, 1, len, 0); @@ -602,7 +603,7 @@ function ArrayUnshift(arg1) { // length == 1 var len = TO_UINT32(this.length); var num_arguments = %_ArgumentsLength(); - if (IS_ARRAY(this)) { + if (IS_ARRAY(this) && !%IsObserved(this)) { SmartMove(this, 0, 0, len, num_arguments); } else { SimpleMove(this, 0, 0, len, num_arguments); @@ -649,6 +650,7 @@ function ArraySlice(start, end) { if (end_i < start_i) return result; if (IS_ARRAY(this) && + !%IsObserved(this) && (end_i > 1000) && (%EstimateNumberOfElements(this) < end_i)) { SmartSlice(this, start_i, end_i - start_i, len, result); @@ -705,7 +707,9 @@ function ArraySplice(start, delete_count) { var use_simple_splice = true; - if (IS_ARRAY(this) && num_additional_args !== del_count) { + if (IS_ARRAY(this) && + !%IsObserved(this) && + num_additional_args !== del_count) { // If we are only deleting/moving a few things near the end of the // array then the simple version is going to be faster, because it // doesn't touch most of the array. @@ -1549,9 +1553,11 @@ function SetUpArray() { // exposed to user code. // Adding only the functions that are actually used. 
SetUpLockedPrototype(InternalArray, $Array(), $Array( + "indexOf", getFunction("indexOf", ArrayIndexOf), "join", getFunction("join", ArrayJoin), "pop", getFunction("pop", ArrayPop), - "push", getFunction("push", ArrayPush) + "push", getFunction("push", ArrayPush), + "splice", getFunction("splice", ArraySplice) )); } diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index a58f77f74b..25157be2eb 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -103,15 +103,78 @@ static DoubleConstant double_constants; const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING"; +static bool math_exp_data_initialized = false; +static Mutex* math_exp_data_mutex = NULL; +static double* math_exp_constants_array = NULL; +static double* math_exp_log_table_array = NULL; + // ----------------------------------------------------------------------------- // Implementation of AssemblerBase -AssemblerBase::AssemblerBase(Isolate* isolate) +AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size) : isolate_(isolate), - jit_cookie_(0) { + jit_cookie_(0), + emit_debug_code_(FLAG_debug_code), + predictable_code_size_(false) { if (FLAG_mask_constants_with_cookie && isolate != NULL) { jit_cookie_ = V8::RandomPrivate(isolate); } + + if (buffer == NULL) { + // Do our own buffer management. + if (buffer_size <= kMinimalBufferSize) { + buffer_size = kMinimalBufferSize; + if (isolate->assembler_spare_buffer() != NULL) { + buffer = isolate->assembler_spare_buffer(); + isolate->set_assembler_spare_buffer(NULL); + } + } + if (buffer == NULL) buffer = NewArray(buffer_size); + own_buffer_ = true; + } else { + // Use externally provided buffer instead. + ASSERT(buffer_size > 0); + own_buffer_ = false; + } + buffer_ = static_cast(buffer); + buffer_size_ = buffer_size; + + pc_ = buffer_; +} + + +AssemblerBase::~AssemblerBase() { + if (own_buffer_) { + if (isolate() != NULL && + isolate()->assembler_spare_buffer() == NULL && + buffer_size_ == kMinimalBufferSize) { + isolate()->set_assembler_spare_buffer(buffer_); + } else { + DeleteArray(buffer_); + } + } +} + + +// ----------------------------------------------------------------------------- +// Implementation of PredictableCodeSizeScope + +PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler, + int expected_size) + : assembler_(assembler), + expected_size_(expected_size), + start_offset_(assembler->pc_offset()), + old_value_(assembler->predictable_code_size()) { + assembler_->set_predictable_code_size(true); +} + + +PredictableCodeSizeScope::~PredictableCodeSizeScope() { + // TODO(svenpanne) Remove the 'if' when everything works. 
+ if (expected_size_ >= 0) { + CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_); + } + assembler_->set_predictable_code_size(old_value_); } @@ -313,6 +376,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) { #ifdef DEBUG byte* begin_pos = pos_; #endif + ASSERT(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES); ASSERT(rinfo->pc() - last_pc_ >= 0); ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM <= kMaxStandardNonCompactModes); @@ -570,6 +634,15 @@ void RelocIterator::next() { } } } + if (code_age_sequence_ != NULL) { + byte* old_code_age_sequence = code_age_sequence_; + code_age_sequence_ = NULL; + if (SetMode(RelocInfo::CODE_AGE_SEQUENCE)) { + rinfo_.data_ = 0; + rinfo_.pc_ = old_code_age_sequence; + return; + } + } done_ = true; } @@ -585,6 +658,12 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) { mode_mask_ = mode_mask; last_id_ = 0; last_position_ = 0; + byte* sequence = code->FindCodeAgeSequence(); + if (sequence != NULL && !Code::IsYoungSequence(sequence)) { + code_age_sequence_ = sequence; + } else { + code_age_sequence_ = NULL; + } if (mode_mask_ == 0) pos_ = end_; next(); } @@ -600,6 +679,7 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) { mode_mask_ = mode_mask; last_id_ = 0; last_position_ = 0; + code_age_sequence_ = NULL; if (mode_mask_ == 0) pos_ = end_; next(); } @@ -652,6 +732,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) { UNREACHABLE(); #endif return "debug break slot"; + case RelocInfo::CODE_AGE_SEQUENCE: + return "code_age_sequence"; case RelocInfo::NUMBER_OF_MODES: UNREACHABLE(); return "number_of_modes"; @@ -697,7 +779,7 @@ void RelocInfo::Print(FILE* out) { #endif // ENABLE_DISASSEMBLER -#ifdef DEBUG +#ifdef VERIFY_HEAP void RelocInfo::Verify() { switch (rmode_) { case EMBEDDED_OBJECT: @@ -717,12 +799,12 @@ void RelocInfo::Verify() { case CODE_TARGET: { // convert inline target address to code object Address addr = target_address(); - ASSERT(addr != NULL); + CHECK(addr != NULL); // Check that we can find the right code object. Code* code = Code::GetCodeFromTargetAddress(addr); Object* found = HEAP->FindCodeObject(addr); - ASSERT(found->IsCode()); - ASSERT(code->address() == HeapObject::cast(found)->address()); + CHECK(found->IsCode()); + CHECK(code->address() == HeapObject::cast(found)->address()); break; } case RUNTIME_ENTRY: @@ -739,9 +821,12 @@ void RelocInfo::Verify() { case NUMBER_OF_MODES: UNREACHABLE(); break; + case CODE_AGE_SEQUENCE: + ASSERT(Code::IsYoungSequence(pc_) || code_age_stub()->IsCode()); + break; } } -#endif // DEBUG +#endif // VERIFY_HEAP // ----------------------------------------------------------------------------- @@ -756,6 +841,70 @@ void ExternalReference::SetUp() { double_constants.canonical_non_hole_nan = OS::nan_value(); double_constants.the_hole_nan = BitCast(kHoleNanInt64); double_constants.negative_infinity = -V8_INFINITY; + + math_exp_data_mutex = OS::CreateMutex(); +} + + +void ExternalReference::InitializeMathExpData() { + // Early return? + if (math_exp_data_initialized) return; + + math_exp_data_mutex->Lock(); + if (!math_exp_data_initialized) { + // If this is changed, generated code must be adapted too. + const int kTableSizeBits = 11; + const int kTableSize = 1 << kTableSizeBits; + const double kTableSizeDouble = static_cast(kTableSize); + + math_exp_constants_array = new double[9]; + // Input values smaller than this always return 0. 
+ math_exp_constants_array[0] = -708.39641853226408; + // Input values larger than this always return +Infinity. + math_exp_constants_array[1] = 709.78271289338397; + math_exp_constants_array[2] = V8_INFINITY; + // The rest is black magic. Do not attempt to understand it. It is + // loosely based on the "expd" function published at: + // http://herumi.blogspot.com/2011/08/fast-double-precision-exponential.html + const double constant3 = (1 << kTableSizeBits) / log(2.0); + math_exp_constants_array[3] = constant3; + math_exp_constants_array[4] = + static_cast(static_cast(3) << 51); + math_exp_constants_array[5] = 1 / constant3; + math_exp_constants_array[6] = 3.0000000027955394; + math_exp_constants_array[7] = 0.16666666685227835; + math_exp_constants_array[8] = 1; + + math_exp_log_table_array = new double[kTableSize]; + for (int i = 0; i < kTableSize; i++) { + double value = pow(2, i / kTableSizeDouble); + + uint64_t bits = BitCast(value); + bits &= (static_cast(1) << 52) - 1; + double mantissa = BitCast(bits); + + // + uint64_t doublebits; + memcpy(&doublebits, &value, sizeof doublebits); + doublebits &= (static_cast(1) << 52) - 1; + double mantissa2; + memcpy(&mantissa2, &doublebits, sizeof mantissa2); + CHECK_EQ(mantissa, mantissa2); + // + + math_exp_log_table_array[i] = mantissa; + } + + math_exp_data_initialized = true; + } + math_exp_data_mutex->Unlock(); +} + + +void ExternalReference::TearDownMathExpData() { + delete[] math_exp_constants_array; + delete[] math_exp_log_table_array; + delete math_exp_data_mutex; } @@ -874,6 +1023,13 @@ ExternalReference ExternalReference::get_date_field_function( } +ExternalReference ExternalReference::get_make_code_young_function( + Isolate* isolate) { + return ExternalReference(Redirect( + isolate, FUNCTION_ADDR(Code::MakeCodeAgeSequenceYoung))); +} + + ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) { return ExternalReference(isolate->date_cache()->stamp_address()); } @@ -900,6 +1056,20 @@ ExternalReference ExternalReference::compute_output_frames_function( } +ExternalReference ExternalReference::log_enter_external_function( + Isolate* isolate) { + return ExternalReference( + Redirect(isolate, FUNCTION_ADDR(Logger::EnterExternal))); +} + + +ExternalReference ExternalReference::log_leave_external_function( + Isolate* isolate) { + return ExternalReference( + Redirect(isolate, FUNCTION_ADDR(Logger::LeaveExternal))); +} + + ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) { return ExternalReference(isolate->keyed_lookup_cache()->keys_address()); } @@ -1186,6 +1356,19 @@ ExternalReference ExternalReference::math_log_double_function( } +ExternalReference ExternalReference::math_exp_constants(int constant_index) { + ASSERT(math_exp_data_initialized); + return ExternalReference( + reinterpret_cast(math_exp_constants_array + constant_index)); +} + + +ExternalReference ExternalReference::math_exp_log_table() { + ASSERT(math_exp_data_initialized); + return ExternalReference(reinterpret_cast(math_exp_log_table_array)); +} + + ExternalReference ExternalReference::page_flags(Page* page) { return ExternalReference(reinterpret_cast
(page) + MemoryChunk::kFlagsOffset); diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index cb5a72d755..4639374c22 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -56,18 +56,56 @@ struct StatsCounter; class AssemblerBase: public Malloced { public: - explicit AssemblerBase(Isolate* isolate); + AssemblerBase(Isolate* isolate, void* buffer, int buffer_size); + virtual ~AssemblerBase(); Isolate* isolate() const { return isolate_; } - int jit_cookie() { return jit_cookie_; } + int jit_cookie() const { return jit_cookie_; } + + bool emit_debug_code() const { return emit_debug_code_; } + void set_emit_debug_code(bool value) { emit_debug_code_ = value; } + + bool predictable_code_size() const { return predictable_code_size_; } + void set_predictable_code_size(bool value) { predictable_code_size_ = value; } // Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for // cross-snapshotting. static void QuietNaN(HeapObject* nan) { } + int pc_offset() const { return static_cast(pc_ - buffer_); } + + static const int kMinimalBufferSize = 4*KB; + + protected: + // The buffer into which code and relocation info are generated. It could + // either be owned by the assembler or be provided externally. + byte* buffer_; + int buffer_size_; + bool own_buffer_; + + // The program counter, which points into the buffer above and moves forward. + byte* pc_; + private: Isolate* isolate_; int jit_cookie_; + bool emit_debug_code_; + bool predictable_code_size_; +}; + + +// Avoids using instructions that vary in size in unpredictable ways between the +// snapshot and the running VM. +class PredictableCodeSizeScope { + public: + PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size); + ~PredictableCodeSizeScope(); + + private: + AssemblerBase* assembler_; + int expected_size_; + int start_offset_; + bool old_value_; }; @@ -211,6 +249,12 @@ class RelocInfo BASE_EMBEDDED { // Pseudo-types NUMBER_OF_MODES, // There are at most 15 modes with noncompact encoding. NONE, // never recorded + CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explictly by + // code aging. + FIRST_REAL_RELOC_MODE = CODE_TARGET, + LAST_REAL_RELOC_MODE = CONST_POOL, + FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE, + LAST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE, LAST_CODE_ENUM = DEBUG_BREAK, LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL, // Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding. 
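The FIRST_/LAST_ marker enumerators added above keep the reloc-mode predicates (defined further below in this header) down to two comparisons, and pseudo modes such as CODE_AGE_SEQUENCE sit past NUMBER_OF_MODES because they are never stored in the RelocInfo array. A condensed, self-contained illustration of the pattern, using a reduced, hypothetical subset of the real mode list:

#include <cassert>

// Reduced subset of RelocInfo::Mode, showing how marker enumerators turn the
// mode predicates into range checks and how ModeMask builds an iteration mask.
enum Mode {
  CODE_TARGET,
  EMBEDDED_OBJECT,
  CONST_POOL,
  NUMBER_OF_MODES,       // real modes end here
  NONE,                  // never recorded
  CODE_AGE_SEQUENCE,     // pseudo mode, not stored in the RelocInfo array
  FIRST_REAL_RELOC_MODE = CODE_TARGET,
  LAST_REAL_RELOC_MODE = CONST_POOL,
  FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
  LAST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE
};

inline bool IsRealRelocMode(Mode mode) {
  return mode >= FIRST_REAL_RELOC_MODE && mode <= LAST_REAL_RELOC_MODE;
}

inline int ModeMask(Mode mode) { return 1 << mode; }

int main() {
  assert(IsRealRelocMode(CONST_POOL));
  assert(!IsRealRelocMode(CODE_AGE_SEQUENCE));
  int mask = ModeMask(CODE_TARGET) | ModeMask(EMBEDDED_OBJECT);
  assert(mask & ModeMask(CODE_TARGET));
  return 0;
}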
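Earlier in this patch, the new ExternalReference::InitializeMathExpData() in assembler.cc precomputes a 2048-entry table of the mantissa bits of 2^(i/2048), plus a handful of constants that the generated Math.exp code combines into a table-driven range reduction (loosely based on the "expd" function it cites). The sketch below keeps only the structure of that idea (split x into a power-of-two part and a small remainder, approximate the remainder, reassemble), with a plain polynomial standing in for the table and none of V8's actual constants:

#include <cmath>
#include <cstdio>

// Range-reduction skeleton: x = k*ln2 + r with |r| <= ln2/2, e^r from a short
// polynomial (the real code uses the precomputed mantissa table instead), and
// an exact scale by 2^k at the end. Accuracy here is illustration-grade only.
double FastExp(double x) {
  const double kLn2 = 0.6931471805599453;
  int k = static_cast<int>(std::lround(x / kLn2));
  double r = x - k * kLn2;
  double er = 1.0 + r * (1.0 + r * (0.5 + r * (1.0 / 6.0 + r * (1.0 / 24.0))));
  return std::ldexp(er, k);  // er * 2^k
}

int main() {
  std::printf("FastExp(1.0) = %.9f, std::exp(1.0) = %.9f\n",
              FastExp(1.0), std::exp(1.0));
  return 0;
}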
@@ -225,6 +269,15 @@ class RelocInfo BASE_EMBEDDED { : pc_(pc), rmode_(rmode), data_(data), host_(host) { } + static inline bool IsRealRelocMode(Mode mode) { + return mode >= FIRST_REAL_RELOC_MODE && + mode <= LAST_REAL_RELOC_MODE; + } + static inline bool IsPseudoRelocMode(Mode mode) { + ASSERT(!IsRealRelocMode(mode)); + return mode >= FIRST_PSEUDO_RELOC_MODE && + mode <= LAST_PSEUDO_RELOC_MODE; + } static inline bool IsConstructCall(Mode mode) { return mode == CONSTRUCT_CALL; } @@ -262,6 +315,9 @@ class RelocInfo BASE_EMBEDDED { static inline bool IsDebugBreakSlot(Mode mode) { return mode == DEBUG_BREAK_SLOT; } + static inline bool IsCodeAgeSequence(Mode mode) { + return mode == CODE_AGE_SEQUENCE; + } static inline int ModeMask(Mode mode) { return 1 << mode; } // Accessors @@ -294,7 +350,8 @@ class RelocInfo BASE_EMBEDDED { INLINE(Handle target_cell_handle()); INLINE(void set_target_cell(JSGlobalPropertyCell* cell, WriteBarrierMode mode = UPDATE_WRITE_BARRIER)); - + INLINE(Code* code_age_stub()); + INLINE(void set_code_age_stub(Code* stub)); // Read the address of the word containing the target_address in an // instruction stream. What this means exactly is architecture-independent. @@ -349,8 +406,7 @@ class RelocInfo BASE_EMBEDDED { static const char* RelocModeName(Mode rmode); void Print(FILE* out); #endif // ENABLE_DISASSEMBLER -#ifdef DEBUG - // Debugging +#ifdef VERIFY_HEAP void Verify(); #endif @@ -369,19 +425,17 @@ class RelocInfo BASE_EMBEDDED { Mode rmode_; intptr_t data_; Code* host_; -#ifdef V8_TARGET_ARCH_MIPS - // Code and Embedded Object pointers in mips are stored split + // Code and Embedded Object pointers on some platforms are stored split // across two consecutive 32-bit instructions. Heap management // routines expect to access these pointers indirectly. The following - // location provides a place for these pointers to exist natually + // location provides a place for these pointers to exist naturally // when accessed via the Iterator. Object* reconstructed_obj_ptr_; // External-reference pointers are also split across instruction-pairs - // in mips, but are accessed via indirect pointers. This location + // on some platforms, but are accessed via indirect pointers. This location // provides a place for that pointer to exist naturally. Its address // is returned by RelocInfo::target_reference_address(). Address reconstructed_adr_ptr_; -#endif // V8_TARGET_ARCH_MIPS friend class RelocIterator; }; @@ -490,6 +544,7 @@ class RelocIterator: public Malloced { byte* pos_; byte* end_; + byte* code_age_sequence_; RelocInfo rinfo_; bool done_; int mode_mask_; @@ -549,6 +604,8 @@ class ExternalReference BASE_EMBEDDED { }; static void SetUp(); + static void InitializeMathExpData(); + static void TearDownMathExpData(); typedef void* ExternalReferenceRedirector(void* original, Type type); @@ -598,10 +655,16 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference get_date_field_function(Isolate* isolate); static ExternalReference date_cache_stamp(Isolate* isolate); + static ExternalReference get_make_code_young_function(Isolate* isolate); + // Deoptimization support. static ExternalReference new_deoptimizer_function(Isolate* isolate); static ExternalReference compute_output_frames_function(Isolate* isolate); + // Log support. + static ExternalReference log_enter_external_function(Isolate* isolate); + static ExternalReference log_leave_external_function(Isolate* isolate); + // Static data in the keyed lookup cache. 
static ExternalReference keyed_lookup_cache_keys(Isolate* isolate); static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate); @@ -668,6 +731,9 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference math_tan_double_function(Isolate* isolate); static ExternalReference math_log_double_function(Isolate* isolate); + static ExternalReference math_exp_constants(int constant_index); + static ExternalReference math_exp_log_table(); + static ExternalReference page_flags(Page* page); Address address() const {return reinterpret_cast
(address_);} diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index 6b68705d27..232cb739a1 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -103,6 +103,7 @@ VariableProxy::VariableProxy(Isolate* isolate, void VariableProxy::BindTo(Variable* var) { ASSERT(var_ == NULL); // must be bound only once ASSERT(var != NULL); // must bind + ASSERT(!FLAG_harmony_modules || interface_->IsUnified(var->interface())); ASSERT((is_this() && var->is_this()) || name_.is_identical_to(var->name())); // Ideally CONST-ness should match. However, this is very hard to achieve // because we don't know the exact semantics of conflicting (const and @@ -126,8 +127,6 @@ Assignment::Assignment(Isolate* isolate, pos_(pos), binary_operation_(NULL), assignment_id_(GetNextId(isolate)), - block_start_(false), - block_end_(false), is_monomorphic_(false) { } @@ -478,6 +477,7 @@ void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle, void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) { TypeInfo info = oracle->SwitchType(this); + if (info.IsUninitialized()) info = TypeInfo::Unknown(); if (info.IsSmi()) { compare_type_ = SMI_ONLY; } else if (info.IsSymbol()) { @@ -606,18 +606,6 @@ void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) { } -void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) { - TypeInfo info = oracle->CompareType(this); - if (info.IsSmi()) { - compare_type_ = SMI_ONLY; - } else if (info.IsNonPrimitive()) { - compare_type_ = OBJECT_ONLY; - } else { - ASSERT(compare_type_ == NONE); - } -} - - void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) { receiver_type_ = oracle->ObjectLiteralStoreIsMonomorphic(this) ? oracle->GetObjectLiteralStoreMap(this) @@ -1072,16 +1060,14 @@ REGULAR_NODE(CallNew) // LOOKUP variables only result from constructs that cannot be inlined anyway. REGULAR_NODE(VariableProxy) -// We currently do not optimize any modules. Note in particular, that module -// instance objects associated with ModuleLiterals are allocated during -// scope resolution, and references to them are embedded into the code. -// That code may hence neither be cached nor re-compiled. +// We currently do not optimize any modules. 
DONT_OPTIMIZE_NODE(ModuleDeclaration) DONT_OPTIMIZE_NODE(ImportDeclaration) DONT_OPTIMIZE_NODE(ExportDeclaration) DONT_OPTIMIZE_NODE(ModuleVariable) DONT_OPTIMIZE_NODE(ModulePath) DONT_OPTIMIZE_NODE(ModuleUrl) +DONT_OPTIMIZE_NODE(ModuleStatement) DONT_OPTIMIZE_NODE(WithStatement) DONT_OPTIMIZE_NODE(TryCatchStatement) DONT_OPTIMIZE_NODE(TryFinallyStatement) diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index e72296cff7..d299f19a23 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -75,6 +75,7 @@ namespace internal { #define STATEMENT_NODE_LIST(V) \ V(Block) \ + V(ModuleStatement) \ V(ExpressionStatement) \ V(EmptyStatement) \ V(IfStatement) \ @@ -522,7 +523,7 @@ class ModuleDeclaration: public Declaration { ModuleDeclaration(VariableProxy* proxy, Module* module, Scope* scope) - : Declaration(proxy, LET, scope), + : Declaration(proxy, MODULE, scope), module_(module) { } @@ -645,6 +646,25 @@ class ModuleUrl: public Module { }; +class ModuleStatement: public Statement { + public: + DECLARE_NODE_TYPE(ModuleStatement) + + VariableProxy* proxy() const { return proxy_; } + Block* body() const { return body_; } + + protected: + ModuleStatement(VariableProxy* proxy, Block* body) + : proxy_(proxy), + body_(body) { + } + + private: + VariableProxy* proxy_; + Block* body_; +}; + + class IterationStatement: public BreakableStatement { public: // Type testing & conversion. @@ -1417,7 +1437,7 @@ class VariableProxy: public Expression { void MarkAsTrivial() { is_trivial_ = true; } void MarkAsLValue() { is_lvalue_ = true; } - // Bind this proxy to the variable var. + // Bind this proxy to the variable var. Interfaces must match. void BindTo(Variable* var); protected: @@ -1777,9 +1797,6 @@ class CompareOperation: public Expression { // Type feedback information. TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); } - void RecordTypeFeedback(TypeFeedbackOracle* oracle); - bool IsSmiCompare() { return compare_type_ == SMI_ONLY; } - bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; } // Match special cases. bool IsLiteralCompareTypeof(Expression** expr, Handle* check); @@ -1796,8 +1813,7 @@ class CompareOperation: public Expression { op_(op), left_(left), right_(right), - pos_(pos), - compare_type_(NONE) { + pos_(pos) { ASSERT(Token::IsCompareOp(op)); } @@ -1806,9 +1822,6 @@ class CompareOperation: public Expression { Expression* left_; Expression* right_; int pos_; - - enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY }; - CompareTypeFeedback compare_type_; }; @@ -1870,15 +1883,6 @@ class Assignment: public Expression { // This check relies on the definition order of token in token.h. bool is_compound() const { return op() > Token::ASSIGN; } - // An initialization block is a series of statments of the form - // x.y.z.a = ...; x.y.z.b = ...; etc. The parser marks the beginning and - // ending of these blocks to allow for optimizations of initialization - // blocks. - bool starts_initialization_block() { return block_start_; } - bool ends_initialization_block() { return block_end_; } - void mark_block_start() { block_start_ = true; } - void mark_block_end() { block_end_ = true; } - BailoutId AssignmentId() const { return assignment_id_; } // Type feedback information. 
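The comment on is_compound() above notes that the check relies on the declaration order of tokens in token.h. A tiny self-contained illustration of that ordering trick, with a reduced, hypothetical subset of the token list:

#include <cassert>

// Because every compound-assignment token is declared after ASSIGN, a single
// comparison classifies an Assignment node as compound ('+=', '-=', ...) or
// a plain '='.
enum Token {
  ASSIGN,      // '='
  ASSIGN_ADD,  // '+='
  ASSIGN_SUB,  // '-='
  ASSIGN_MUL   // '*='
};

inline bool IsCompoundAssignment(Token op) { return op > ASSIGN; }

int main() {
  assert(!IsCompoundAssignment(ASSIGN));
  assert(IsCompoundAssignment(ASSIGN_ADD));
  assert(IsCompoundAssignment(ASSIGN_MUL));
  return 0;
}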
@@ -1911,9 +1915,6 @@ class Assignment: public Expression { BinaryOperation* binary_operation_; const BailoutId assignment_id_; - bool block_start_; - bool block_end_; - bool is_monomorphic_; SmallMapList receiver_types_; }; @@ -2659,6 +2660,11 @@ class AstNodeFactory BASE_EMBEDDED { STATEMENT_WITH_LABELS(SwitchStatement) #undef STATEMENT_WITH_LABELS + ModuleStatement* NewModuleStatement(VariableProxy* proxy, Block* body) { + ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body); + VISIT_AND_RETURN(ModuleStatement, stmt) + } + ExpressionStatement* NewExpressionStatement(Expression* expression) { ExpressionStatement* stmt = new(zone_) ExpressionStatement(expression); VISIT_AND_RETURN(ExpressionStatement, stmt) diff --git a/deps/v8/src/atomicops.h b/deps/v8/src/atomicops.h index e2057ed073..da33b29685 100644 --- a/deps/v8/src/atomicops.h +++ b/deps/v8/src/atomicops.h @@ -69,7 +69,11 @@ typedef intptr_t Atomic64; // Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or // Atomic64 routines below, depending on your architecture. +#if defined(__OpenBSD__) && defined(__i386__) +typedef Atomic32 AtomicWord; +#else typedef intptr_t AtomicWord; +#endif // Atomically execute: // result = *ptr; @@ -147,7 +151,9 @@ Atomic64 Release_Load(volatile const Atomic64* ptr); } } // namespace v8::internal // Include our platform specific implementation. -#if defined(_MSC_VER) && \ +#if defined(THREAD_SANITIZER) +#include "atomicops_internals_tsan.h" +#elif defined(_MSC_VER) && \ (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)) #include "atomicops_internals_x86_msvc.h" #elif defined(__APPLE__) && \ diff --git a/deps/v8/src/atomicops_internals_tsan.h b/deps/v8/src/atomicops_internals_tsan.h new file mode 100644 index 0000000000..6559336ad9 --- /dev/null +++ b/deps/v8/src/atomicops_internals_tsan.h @@ -0,0 +1,335 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +// This file is an internal atomic implementation for compiler-based +// ThreadSanitizer. 
Use base/atomicops.h instead. + +#ifndef V8_ATOMICOPS_INTERNALS_TSAN_H_ +#define V8_ATOMICOPS_INTERNALS_TSAN_H_ + +// This struct is not part of the public API of this module; clients may not +// use it. (However, it's exported via BASE_EXPORT because clients implicitly +// do use it at link time by inlining these functions.) +// Features of this x86. Values may not be correct before main() is run, +// but are set conservatively. +struct AtomicOps_x86CPUFeatureStruct { + bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence + // after acquire compare-and-swap. + bool has_sse2; // Processor has SSE2. +}; +extern struct AtomicOps_x86CPUFeatureStruct + AtomicOps_Internalx86CPUFeatures; + +#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") + +namespace v8 { +namespace internal { + +#ifndef TSAN_INTERFACE_ATOMIC_H +#define TSAN_INTERFACE_ATOMIC_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef char __tsan_atomic8; +typedef short __tsan_atomic16; // NOLINT +typedef int __tsan_atomic32; +typedef long __tsan_atomic64; // NOLINT + +typedef enum { + __tsan_memory_order_relaxed = (1 << 0) + 100500, + __tsan_memory_order_consume = (1 << 1) + 100500, + __tsan_memory_order_acquire = (1 << 2) + 100500, + __tsan_memory_order_release = (1 << 3) + 100500, + __tsan_memory_order_acq_rel = (1 << 4) + 100500, + __tsan_memory_order_seq_cst = (1 << 5) + 100500, +} __tsan_memory_order; + +__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a, + __tsan_memory_order mo); +__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a, + __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a, + __tsan_memory_order mo); +__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a, + __tsan_memory_order mo); + +void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v, + __tsan_memory_order mo); +void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v, + __tsan_memory_order mo); +void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v, + __tsan_memory_order mo); +void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v, + __tsan_memory_order mo); + +__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a, + __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a, + __tsan_atomic16 v, __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a, + __tsan_atomic32 v, __tsan_memory_order mo); +__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a, + __tsan_atomic64 v, __tsan_memory_order mo); + +__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a, + __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a, + __tsan_atomic16 v, __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a, + __tsan_atomic32 v, __tsan_memory_order mo); +__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a, + __tsan_atomic64 v, __tsan_memory_order mo); + +__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a, + __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a, + __tsan_atomic16 v, __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a, + __tsan_atomic32 v, __tsan_memory_order mo); +__tsan_atomic64 
__tsan_atomic64_fetch_and(volatile __tsan_atomic64* a, + __tsan_atomic64 v, __tsan_memory_order mo); + +__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a, + __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a, + __tsan_atomic16 v, __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a, + __tsan_atomic32 v, __tsan_memory_order mo); +__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a, + __tsan_atomic64 v, __tsan_memory_order mo); + +__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a, + __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a, + __tsan_atomic16 v, __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a, + __tsan_atomic32 v, __tsan_memory_order mo); +__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a, + __tsan_atomic64 v, __tsan_memory_order mo); + +int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a, + __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo); +int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a, + __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo); +int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a, + __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo); +int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a, + __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo); + +int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a, + __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo); +int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a, + __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo); +int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a, + __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo); +int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a, + __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo); + +void __tsan_atomic_thread_fence(__tsan_memory_order mo); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // #ifndef TSAN_INTERFACE_ATOMIC_H + +inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 cmp = old_value; + __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value, + __tsan_memory_order_relaxed); + return cmp; +} + +inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, + Atomic32 new_value) { + return __tsan_atomic32_exchange(ptr, new_value, + __tsan_memory_order_relaxed); +} + +inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, + Atomic32 new_value) { + return __tsan_atomic32_exchange(ptr, new_value, + __tsan_memory_order_acquire); +} + +inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, + Atomic32 new_value) { + return __tsan_atomic32_exchange(ptr, new_value, + __tsan_memory_order_release); +} + +inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + return increment + __tsan_atomic32_fetch_add(ptr, increment, + __tsan_memory_order_relaxed); +} + +inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + return increment + __tsan_atomic32_fetch_add(ptr, increment, + __tsan_memory_order_acq_rel); +} + +inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + 
Atomic32 new_value) { + Atomic32 cmp = old_value; + __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value, + __tsan_memory_order_acquire); + return cmp; +} + +inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 cmp = old_value; + __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value, + __tsan_memory_order_release); + return cmp; +} + +inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { + __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed); +} + +inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { + __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed); + __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); +} + +inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { + __tsan_atomic32_store(ptr, value, __tsan_memory_order_release); +} + +inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { + return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed); +} + +inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { + return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire); +} + +inline Atomic32 Release_Load(volatile const Atomic32* ptr) { + __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); + return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed); +} + +inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + Atomic64 cmp = old_value; + __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value, + __tsan_memory_order_relaxed); + return cmp; +} + +inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, + Atomic64 new_value) { + return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed); +} + +inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, + Atomic64 new_value) { + return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire); +} + +inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, + Atomic64 new_value) { + return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release); +} + +inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, + Atomic64 increment) { + return increment + __tsan_atomic64_fetch_add(ptr, increment, + __tsan_memory_order_relaxed); +} + +inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, + Atomic64 increment) { + return increment + __tsan_atomic64_fetch_add(ptr, increment, + __tsan_memory_order_acq_rel); +} + +inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { + __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed); +} + +inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { + __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed); + __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); +} + +inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { + __tsan_atomic64_store(ptr, value, __tsan_memory_order_release); +} + +inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { + return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed); +} + +inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { + return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire); +} + +inline Atomic64 Release_Load(volatile const Atomic64* ptr) { + __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); + return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed); +} + +inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 
new_value) { + Atomic64 cmp = old_value; + __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value, + __tsan_memory_order_acquire); + return cmp; +} + +inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + Atomic64 cmp = old_value; + __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value, + __tsan_memory_order_release); + return cmp; +} + +inline void MemoryBarrier() { + __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); +} + +} // namespace internal +} // namespace v8 + +#undef ATOMICOPS_COMPILER_BARRIER + +#endif // V8_ATOMICOPS_INTERNALS_TSAN_H_ diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index 992659edce..8d529506f2 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -384,7 +384,7 @@ static Handle InstallFunction(Handle target, void Genesis::SetFunctionInstanceDescriptor( Handle map, PrototypePropertyMode prototypeMode) { int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5; - Handle descriptors(factory()->NewDescriptorArray(size)); + Handle descriptors(factory()->NewDescriptorArray(0, size)); DescriptorArray::WhitenessWitness witness(*descriptors); Handle length(factory()->NewForeign(&Accessors::FunctionLength)); @@ -397,7 +397,7 @@ void Genesis::SetFunctionInstanceDescriptor( } PropertyAttributes attribs = static_cast( DONT_ENUM | DONT_DELETE | READ_ONLY); - Map::SetDescriptors(map, descriptors); + map->set_instance_descriptors(*descriptors); { // Add length. CallbacksDescriptor d(*factory()->length_symbol(), *length, attribs); @@ -525,7 +525,7 @@ Handle Genesis::CreateEmptyFunction(Isolate* isolate) { void Genesis::SetStrictFunctionInstanceDescriptor( Handle map, PrototypePropertyMode prototypeMode) { int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5; - Handle descriptors(factory()->NewDescriptorArray(size)); + Handle descriptors(factory()->NewDescriptorArray(0, size)); DescriptorArray::WhitenessWitness witness(*descriptors); Handle length(factory()->NewForeign(&Accessors::FunctionLength)); @@ -538,7 +538,7 @@ void Genesis::SetStrictFunctionInstanceDescriptor( } PropertyAttributes attribs = static_cast( DONT_ENUM | DONT_DELETE); - Map::SetDescriptors(map, descriptors); + map->set_instance_descriptors(*descriptors); { // Add length. CallbacksDescriptor d(*factory()->length_symbol(), *length, attribs); @@ -637,7 +637,7 @@ static void SetAccessors(Handle map, Handle name, Handle func) { DescriptorArray* descs = map->instance_descriptors(); - int number = descs->Search(*name); + int number = descs->SearchWithCache(*name, *map); AccessorPair* accessors = AccessorPair::cast(descs->GetValue(number)); accessors->set_getter(*func); accessors->set_setter(*func); @@ -868,13 +868,14 @@ bool Genesis::InitializeGlobal(Handle inner_global, array_function->shared()->set_length(1); Handle initial_map(array_function->initial_map()); - Handle array_descriptors(factory->NewDescriptorArray(1)); + Handle array_descriptors( + factory->NewDescriptorArray(0, 1)); DescriptorArray::WhitenessWitness witness(*array_descriptors); Handle array_length(factory->NewForeign(&Accessors::ArrayLength)); PropertyAttributes attribs = static_cast( DONT_ENUM | DONT_DELETE); - Map::SetDescriptors(initial_map, array_descriptors); + initial_map->set_instance_descriptors(*array_descriptors); { // Add length. 
CallbacksDescriptor d(*factory->length_symbol(), *array_length, attribs); @@ -915,14 +916,15 @@ bool Genesis::InitializeGlobal(Handle inner_global, Handle string_map = Handle(native_context()->string_function()->initial_map()); - Handle string_descriptors(factory->NewDescriptorArray(1)); + Handle string_descriptors( + factory->NewDescriptorArray(0, 1)); DescriptorArray::WhitenessWitness witness(*string_descriptors); Handle string_length( factory->NewForeign(&Accessors::StringLength)); PropertyAttributes attribs = static_cast( DONT_ENUM | DONT_DELETE | READ_ONLY); - Map::SetDescriptors(string_map, string_descriptors); + string_map->set_instance_descriptors(*string_descriptors); { // Add length. CallbacksDescriptor d(*factory->length_symbol(), *string_length, attribs); @@ -956,9 +958,9 @@ bool Genesis::InitializeGlobal(Handle inner_global, PropertyAttributes final = static_cast(DONT_ENUM | DONT_DELETE | READ_ONLY); - Handle descriptors = factory->NewDescriptorArray(5); + Handle descriptors = factory->NewDescriptorArray(0, 5); DescriptorArray::WhitenessWitness witness(*descriptors); - Map::SetDescriptors(initial_map, descriptors); + initial_map->set_instance_descriptors(*descriptors); { // ECMA-262, section 15.10.7.1. @@ -1082,11 +1084,11 @@ bool Genesis::InitializeGlobal(Handle inner_global, LookupResult lookup(isolate); result->LocalLookup(heap->callee_symbol(), &lookup); ASSERT(lookup.IsField()); - ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsCalleeIndex); + ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsCalleeIndex); result->LocalLookup(heap->length_symbol(), &lookup); ASSERT(lookup.IsField()); - ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex); + ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex); ASSERT(result->map()->inobject_properties() > Heap::kArgumentsCalleeIndex); ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex); @@ -1140,9 +1142,9 @@ bool Genesis::InitializeGlobal(Handle inner_global, Handle map = factory->NewMap(JS_OBJECT_TYPE, Heap::kArgumentsObjectSizeStrict); // Create the descriptor array for the arguments object. - Handle descriptors = factory->NewDescriptorArray(3); + Handle descriptors = factory->NewDescriptorArray(0, 3); DescriptorArray::WhitenessWitness witness(*descriptors); - Map::SetDescriptors(map, descriptors); + map->set_instance_descriptors(*descriptors); { // length FieldDescriptor d(*factory->length_symbol(), 0, DONT_ENUM); @@ -1184,7 +1186,7 @@ bool Genesis::InitializeGlobal(Handle inner_global, LookupResult lookup(isolate); result->LocalLookup(heap->length_symbol(), &lookup); ASSERT(lookup.IsField()); - ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex); + ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex); ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex); @@ -1238,8 +1240,9 @@ bool Genesis::InitializeGlobal(Handle inner_global, // Initialize the out of memory slot. native_context()->set_out_of_memory(heap->false_value()); - // Initialize the data slot. - native_context()->set_data(heap->undefined_value()); + // Initialize the embedder data slot. + Handle embedder_data = factory->NewFixedArray(2); + native_context()->set_embedder_data(*embedder_data); { // Initialize the random seed slot. @@ -1338,7 +1341,7 @@ bool Genesis::CompileScriptCached(Vector name, // If we can't find the function in the cache, we compile a new // function and insert it into the cache. 
if (cache == NULL || !cache->Lookup(name, &function_info)) { - ASSERT(source->IsAsciiRepresentation()); + ASSERT(source->IsOneByteRepresentation()); Handle script_name = factory->NewStringFromUtf8(name); function_info = Compiler::Compile( source, @@ -1413,6 +1416,11 @@ void Genesis::InstallExperimentalNativeFunctions() { INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap); INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate); } + if (FLAG_harmony_observation) { + INSTALL_NATIVE(JSFunction, "NotifyChange", observers_notify_change); + INSTALL_NATIVE(JSFunction, "DeliverChangeRecords", + observers_deliver_changes); + } } #undef INSTALL_NATIVE @@ -1487,7 +1495,7 @@ bool Genesis::InstallNatives() { Handle script_map = Handle(script_fun->initial_map()); Handle script_descriptors( - factory()->NewDescriptorArray(13)); + factory()->NewDescriptorArray(0, 13)); DescriptorArray::WhitenessWitness witness(*script_descriptors); Handle script_source( @@ -1532,7 +1540,7 @@ bool Genesis::InstallNatives() { factory()->NewForeign(&Accessors::ScriptEvalFromFunctionName)); PropertyAttributes attribs = static_cast(DONT_ENUM | DONT_DELETE | READ_ONLY); - Map::SetDescriptors(script_map, script_descriptors); + script_map->set_instance_descriptors(*script_descriptors); { CallbacksDescriptor d( @@ -1665,14 +1673,15 @@ bool Genesis::InstallNatives() { // Make "length" magic on instances. Handle initial_map(array_function->initial_map()); - Handle array_descriptors(factory()->NewDescriptorArray(1)); + Handle array_descriptors( + factory()->NewDescriptorArray(0, 1)); DescriptorArray::WhitenessWitness witness(*array_descriptors); Handle array_length(factory()->NewForeign( &Accessors::ArrayLength)); PropertyAttributes attribs = static_cast( DONT_ENUM | DONT_DELETE); - Map::SetDescriptors(initial_map, array_descriptors); + initial_map->set_instance_descriptors(*array_descriptors); { // Add length. CallbacksDescriptor d( @@ -1765,16 +1774,17 @@ bool Genesis::InstallNatives() { // Update map with length accessor from Array and add "index" and "input". 
Handle reresult_descriptors = - factory()->NewDescriptorArray(3); + factory()->NewDescriptorArray(0, 3); DescriptorArray::WhitenessWitness witness(*reresult_descriptors); - Map::SetDescriptors(initial_map, reresult_descriptors); + initial_map->set_instance_descriptors(*reresult_descriptors); { JSFunction* array_function = native_context()->array_function(); Handle array_descriptors( array_function->initial_map()->instance_descriptors()); String* length = heap()->length_symbol(); - int old = array_descriptors->SearchWithCache(length); + int old = array_descriptors->SearchWithCache( + length, array_function->initial_map()); ASSERT(old != DescriptorArray::kNotFound); CallbacksDescriptor desc(length, array_descriptors->GetValue(old), @@ -1802,7 +1812,7 @@ bool Genesis::InstallNatives() { native_context()->set_regexp_result_map(*initial_map); } -#ifdef DEBUG +#ifdef VERIFY_HEAP builtins->Verify(); #endif @@ -1824,6 +1834,11 @@ bool Genesis::InstallExperimentalNatives() { "native collection.js") == 0) { if (!CompileExperimentalBuiltin(isolate(), i)) return false; } + if (FLAG_harmony_observation && + strcmp(ExperimentalNatives::GetScriptName(i).start(), + "native object-observe.js") == 0) { + if (!CompileExperimentalBuiltin(isolate(), i)) return false; + } } InstallExperimentalNativeFunctions(); diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h index 179e65c354..d61c0313f2 100644 --- a/deps/v8/src/bootstrapper.h +++ b/deps/v8/src/bootstrapper.h @@ -54,7 +54,7 @@ class SourceCodeCache BASE_EMBEDDED { bool Lookup(Vector name, Handle* handle) { for (int i = 0; i < cache_->length(); i+=2) { - SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i)); + SeqOneByteString* str = SeqOneByteString::cast(cache_->get(i)); if (str->IsEqualTo(name)) { *handle = Handle( SharedFunctionInfo::cast(cache_->get(i + 1))); diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index ffaaf8b1ea..d62713db4c 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -268,7 +268,7 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args, maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements); } FixedArrayBase* elms; - if (!maybe_elms->To(&elms)) return maybe_elms; + if (!maybe_elms->To(&elms)) return maybe_elms; // Fill in the content switch (array->GetElementsKind()) { @@ -325,6 +325,18 @@ BUILTIN(ArrayCodeGeneric) { } +static void MoveDoubleElements(FixedDoubleArray* dst, + int dst_index, + FixedDoubleArray* src, + int src_index, + int len) { + if (len == 0) return; + memmove(dst->data_start() + dst_index, + src->data_start() + src_index, + len * kDoubleSize); +} + + static void MoveElements(Heap* heap, AssertNoAllocation* no_gc, FixedArray* dst, @@ -351,24 +363,39 @@ static void FillWithHoles(Heap* heap, FixedArray* dst, int from, int to) { } -static FixedArray* LeftTrimFixedArray(Heap* heap, - FixedArray* elms, - int to_trim) { +static void FillWithHoles(FixedDoubleArray* dst, int from, int to) { + for (int i = from; i < to; i++) { + dst->set_the_hole(i); + } +} + + +static FixedArrayBase* LeftTrimFixedArray(Heap* heap, + FixedArrayBase* elms, + int to_trim) { + Map* map = elms->map(); + int entry_size; + if (elms->IsFixedArray()) { + entry_size = kPointerSize; + } else { + entry_size = kDoubleSize; + } ASSERT(elms->map() != HEAP->fixed_cow_array_map()); // For now this trick is only applied to fixed arrays in new and paged space. // In large object space the object's start must coincide with chunk // and thus the trick is just not applicable. 
ASSERT(!HEAP->lo_space()->Contains(elms)); - STATIC_ASSERT(FixedArray::kMapOffset == 0); - STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize); - STATIC_ASSERT(FixedArray::kHeaderSize == 2 * kPointerSize); + STATIC_ASSERT(FixedArrayBase::kMapOffset == 0); + STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize); + STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize); Object** former_start = HeapObject::RawField(elms, 0); const int len = elms->length(); - if (to_trim > FixedArray::kHeaderSize / kPointerSize && + if (to_trim * entry_size > FixedArrayBase::kHeaderSize && + elms->IsFixedArray() && !heap->new_space()->Contains(elms)) { // If we are doing a big trim in old space then we zap the space that was // formerly part of the array so that the GC (aided by the card-based @@ -382,14 +409,15 @@ static FixedArray* LeftTrimFixedArray(Heap* heap, // Technically in new space this write might be omitted (except for // debug mode which iterates through the heap), but to play safer // we still do it. - heap->CreateFillerObjectAt(elms->address(), to_trim * kPointerSize); + heap->CreateFillerObjectAt(elms->address(), to_trim * entry_size); - former_start[to_trim] = heap->fixed_array_map(); - former_start[to_trim + 1] = Smi::FromInt(len - to_trim); + int new_start_index = to_trim * (entry_size / kPointerSize); + former_start[new_start_index] = map; + former_start[new_start_index + 1] = Smi::FromInt(len - to_trim); // Maintain marking consistency for HeapObjectIterator and // IncrementalMarking. - int size_delta = to_trim * kPointerSize; + int size_delta = to_trim * entry_size; if (heap->marking()->TransferMark(elms->address(), elms->address() + size_delta)) { MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta); @@ -397,8 +425,8 @@ static FixedArray* LeftTrimFixedArray(Heap* heap, HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(), elms->address() + size_delta)); - return FixedArray::cast(HeapObject::FromAddress( - elms->address() + to_trim * kPointerSize)); + return FixedArrayBase::cast(HeapObject::FromAddress( + elms->address() + to_trim * entry_size)); } @@ -427,19 +455,14 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements( Map* map = elms->map(); if (map == heap->fixed_array_map()) { if (args == NULL || array->HasFastObjectElements()) return elms; - if (array->HasFastDoubleElements()) { - ASSERT(elms == heap->empty_fixed_array()); - MaybeObject* maybe_transition = - array->TransitionElementsKind(FAST_ELEMENTS); - if (maybe_transition->IsFailure()) return maybe_transition; - return elms; - } } else if (map == heap->fixed_cow_array_map()) { MaybeObject* maybe_writable_result = array->EnsureWritableFastElements(); if (args == NULL || array->HasFastObjectElements() || - maybe_writable_result->IsFailure()) { + !maybe_writable_result->To(&elms)) { return maybe_writable_result; } + } else if (map == heap->fixed_double_array_map()) { + if (args == NULL) return elms; } else { return NULL; } @@ -449,13 +472,28 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements( int args_length = args->length(); if (first_added_arg >= args_length) return array->elements(); - MaybeObject* maybe_array = array->EnsureCanContainElements( - args, - first_added_arg, - args_length - first_added_arg, - DONT_ALLOW_DOUBLE_ELEMENTS); - if (maybe_array->IsFailure()) return maybe_array; - return array->elements(); + ElementsKind origin_kind = array->map()->elements_kind(); + ASSERT(!IsFastObjectElementsKind(origin_kind)); + ElementsKind target_kind = origin_kind; 
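[Editor's note] The rewritten LeftTrimFixedArray above keeps the old left-trim trick while generalizing it to FixedArrayBase: instead of copying the surviving elements, a fresh map word and length are written |to_trim| entries into the existing backing store and the array pointer is advanced past the dropped slots (the real code also leaves a filler object in the vacated prefix for the GC). A toy sketch of the header relocation, assuming the two-word {map, length} header layout the STATIC_ASSERTs check; this is plain C++, not the V8 heap:

// Sketch of the left-trim trick: relocate the header, not the elements.
#include <cassert>
#include <cstdint>

typedef intptr_t Word;
static const Word kFakeMap = 0x1234;  // stand-in for the map pointer

// Returns a pointer to the trimmed array inside the same allocation.
Word* LeftTrim(Word* array, int to_trim) {
  Word length = array[1];
  array[to_trim] = kFakeMap;              // new map word
  array[to_trim + 1] = length - to_trim;  // new (shorter) length
  return array + to_trim;                 // the array now "starts" later
}

int main() {
  Word backing[] = {kFakeMap, 4, 10, 20, 30, 40};  // length 4: 10, 20, 30, 40
  Word* trimmed = LeftTrim(backing, 2);            // drop the first two elements
  assert(trimmed[1] == 2);                         // new length
  assert(trimmed[2] == 30 && trimmed[3] == 40);    // surviving elements intact
  return 0;
}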
+ int arg_count = args->length() - first_added_arg; + Object** arguments = args->arguments() - first_added_arg - (arg_count - 1); + for (int i = 0; i < arg_count; i++) { + Object* arg = arguments[i]; + if (arg->IsHeapObject()) { + if (arg->IsHeapNumber()) { + target_kind = FAST_DOUBLE_ELEMENTS; + } else { + target_kind = FAST_ELEMENTS; + break; + } + } + } + if (target_kind != origin_kind) { + MaybeObject* maybe_failure = array->TransitionElementsKind(target_kind); + if (maybe_failure->IsFailure()) return maybe_failure; + return array->elements(); + } + return elms; } @@ -499,127 +537,200 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin( BUILTIN(ArrayPush) { Heap* heap = isolate->heap(); Object* receiver = *args.receiver(); - Object* elms_obj; - { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1); - if (maybe_elms_obj == NULL) { - return CallJsBuiltin(isolate, "ArrayPush", args); - } - if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; + FixedArrayBase* elms_obj; + MaybeObject* maybe_elms_obj = + EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1); + if (maybe_elms_obj == NULL) { + return CallJsBuiltin(isolate, "ArrayPush", args); } - FixedArray* elms = FixedArray::cast(elms_obj); - JSArray* array = JSArray::cast(receiver); + if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj; - int len = Smi::cast(array->length())->value(); - int to_add = args.length() - 1; - if (to_add == 0) { - return Smi::FromInt(len); + if (FLAG_harmony_observation && + JSObject::cast(receiver)->map()->is_observed()) { + return CallJsBuiltin(isolate, "ArrayPush", args); } - // Currently fixed arrays cannot grow too big, so - // we should never hit this case. - ASSERT(to_add <= (Smi::kMaxValue - len)); - int new_length = len + to_add; + JSArray* array = JSArray::cast(receiver); + ElementsKind kind = array->GetElementsKind(); - if (new_length > elms->length()) { - // New backing storage is needed. - int capacity = new_length + (new_length >> 1) + 16; - Object* obj; - { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; + if (IsFastSmiOrObjectElementsKind(kind)) { + FixedArray* elms = FixedArray::cast(elms_obj); + + int len = Smi::cast(array->length())->value(); + int to_add = args.length() - 1; + if (to_add == 0) { + return Smi::FromInt(len); } - FixedArray* new_elms = FixedArray::cast(obj); + // Currently fixed arrays cannot grow too big, so + // we should never hit this case. + ASSERT(to_add <= (Smi::kMaxValue - len)); - ElementsKind kind = array->GetElementsKind(); - CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, 0, len); - FillWithHoles(heap, new_elms, new_length, capacity); + int new_length = len + to_add; - elms = new_elms; - } + if (new_length > elms->length()) { + // New backing storage is needed. + int capacity = new_length + (new_length >> 1) + 16; + FixedArray* new_elms; + MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity); + if (!maybe_obj->To(&new_elms)) return maybe_obj; - // Add the provided values. 
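[Editor's note] The argument scan added to EnsureJSArrayWithWritableFastElements above picks the most general elements kind the pushed values need: Smis fit the current kind, heap numbers only require double elements, and any other heap object forces generic object elements. A simplified sketch of that decision, using toy types rather than the V8 object model:

// Sketch: choose the target elements kind for the values being pushed.
#include <cassert>

enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };

struct Value {
  bool is_smi;
  bool is_heap_number;
};

ElementsKind TargetKind(ElementsKind origin, const Value* args, int count) {
  ElementsKind target = origin;
  for (int i = 0; i < count; i++) {
    if (args[i].is_smi) continue;       // a Smi fits any fast kind
    if (args[i].is_heap_number) {
      target = FAST_DOUBLE_ELEMENTS;    // numbers fit unboxed double storage
    } else {
      return FAST_ELEMENTS;             // anything else: go generic and stop
    }
  }
  return target;
}

int main() {
  Value args[] = {{true, false}, {false, true}};   // a Smi and a heap number
  assert(TargetKind(FAST_SMI_ELEMENTS, args, 2) == FAST_DOUBLE_ELEMENTS);
  Value args2[] = {{false, false}};                // e.g. a string
  assert(TargetKind(FAST_SMI_ELEMENTS, args2, 1) == FAST_ELEMENTS);
  return 0;
}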
- AssertNoAllocation no_gc; - WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); - for (int index = 0; index < to_add; index++) { - elms->set(index + len, args[index + 1], mode); - } + ElementsAccessor* accessor = array->GetElementsAccessor(); + MaybeObject* maybe_failure = accessor->CopyElements( + NULL, 0, new_elms, kind, 0, + ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj); + ASSERT(!maybe_failure->IsFailure()); + USE(maybe_failure); - if (elms != array->elements()) { - array->set_elements(elms); - } + elms = new_elms; + } - // Set the length. - array->set_length(Smi::FromInt(new_length)); - return Smi::FromInt(new_length); + // Add the provided values. + AssertNoAllocation no_gc; + WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); + for (int index = 0; index < to_add; index++) { + elms->set(index + len, args[index + 1], mode); + } + + if (elms != array->elements()) { + array->set_elements(elms); + } + + // Set the length. + array->set_length(Smi::FromInt(new_length)); + return Smi::FromInt(new_length); + } else { + int len = Smi::cast(array->length())->value(); + int elms_len = elms_obj->length(); + + int to_add = args.length() - 1; + if (to_add == 0) { + return Smi::FromInt(len); + } + // Currently fixed arrays cannot grow too big, so + // we should never hit this case. + ASSERT(to_add <= (Smi::kMaxValue - len)); + + int new_length = len + to_add; + + FixedDoubleArray* new_elms; + + if (new_length > elms_len) { + // New backing storage is needed. + int capacity = new_length + (new_length >> 1) + 16; + MaybeObject* maybe_obj = + heap->AllocateUninitializedFixedDoubleArray(capacity); + if (!maybe_obj->To(&new_elms)) return maybe_obj; + + ElementsAccessor* accessor = array->GetElementsAccessor(); + MaybeObject* maybe_failure = accessor->CopyElements( + NULL, 0, new_elms, kind, 0, + ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj); + ASSERT(!maybe_failure->IsFailure()); + USE(maybe_failure); + } else { + // to_add is > 0 and new_length <= elms_len, so elms_obj cannot be the + // empty_fixed_array. + new_elms = FixedDoubleArray::cast(elms_obj); + } + + // Add the provided values. + AssertNoAllocation no_gc; + int index; + for (index = 0; index < to_add; index++) { + Object* arg = args[index + 1]; + new_elms->set(index + len, arg->Number()); + } + + if (new_elms != array->elements()) { + array->set_elements(new_elms); + } + + // Set the length. + array->set_length(Smi::FromInt(new_length)); + return Smi::FromInt(new_length); + } } BUILTIN(ArrayPop) { Heap* heap = isolate->heap(); Object* receiver = *args.receiver(); - Object* elms_obj; - { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); - if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args); - if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; - } - FixedArray* elms = FixedArray::cast(elms_obj); + FixedArrayBase* elms_obj; + MaybeObject* maybe_elms = + EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); + if (maybe_elms == NULL) return CallJsBuiltin(isolate, "ArrayPop", args); + if (!maybe_elms->To(&elms_obj)) return maybe_elms; + JSArray* array = JSArray::cast(receiver); + if (FLAG_harmony_observation && array->map()->is_observed()) { + return CallJsBuiltin(isolate, "ArrayPop", args); + } + int len = Smi::cast(array->length())->value(); if (len == 0) return heap->undefined_value(); - // Get top element - MaybeObject* top = elms->get(len - 1); - - // Set the length. 
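[Editor's note] Both ArrayPush paths above grow an undersized backing store to roughly 1.5x the required length plus 16 slots, which keeps repeated pushes amortized. The policy in isolation:

// Sketch of the backing-store growth policy used by the push builtins.
#include <cassert>

int NewCapacity(int new_length) {
  return new_length + (new_length >> 1) + 16;  // ~1.5x plus slack
}

int main() {
  assert(NewCapacity(0) == 16);
  assert(NewCapacity(100) == 166);
  return 0;
}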
- array->set_length(Smi::FromInt(len - 1)); - - if (!top->IsTheHole()) { - // Delete the top element. - elms->set_the_hole(len - 1); - return top; + ElementsAccessor* accessor = array->GetElementsAccessor(); + int new_length = len - 1; + MaybeObject* maybe_result; + if (accessor->HasElement(array, array, new_length, elms_obj)) { + maybe_result = accessor->Get(array, array, new_length, elms_obj); + } else { + maybe_result = array->GetPrototype()->GetElement(len - 1); } - - top = array->GetPrototype()->GetElement(len - 1); - - return top; + if (maybe_result->IsFailure()) return maybe_result; + MaybeObject* maybe_failure = + accessor->SetLength(array, Smi::FromInt(new_length)); + if (maybe_failure->IsFailure()) return maybe_failure; + return maybe_result; } BUILTIN(ArrayShift) { Heap* heap = isolate->heap(); Object* receiver = *args.receiver(); - Object* elms_obj; - { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); - if (maybe_elms_obj == NULL) - return CallJsBuiltin(isolate, "ArrayShift", args); - if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; - } + FixedArrayBase* elms_obj; + MaybeObject* maybe_elms_obj = + EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); + if (maybe_elms_obj == NULL) + return CallJsBuiltin(isolate, "ArrayShift", args); + if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj; + if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) { return CallJsBuiltin(isolate, "ArrayShift", args); } - FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ASSERT(array->HasFastSmiOrObjectElements()); + + if (FLAG_harmony_observation && array->map()->is_observed()) { + return CallJsBuiltin(isolate, "ArrayShift", args); + } int len = Smi::cast(array->length())->value(); if (len == 0) return heap->undefined_value(); // Get first element - Object* first = elms->get(0); + ElementsAccessor* accessor = array->GetElementsAccessor(); + Object* first; + MaybeObject* maybe_first = accessor->Get(receiver, array, 0, elms_obj); + if (!maybe_first->To(&first)) return maybe_first; if (first->IsTheHole()) { first = heap->undefined_value(); } - if (!heap->lo_space()->Contains(elms)) { - array->set_elements(LeftTrimFixedArray(heap, elms, 1)); + if (!heap->lo_space()->Contains(elms_obj)) { + array->set_elements(LeftTrimFixedArray(heap, elms_obj, 1)); } else { // Shift the elements. - AssertNoAllocation no_gc; - MoveElements(heap, &no_gc, elms, 0, elms, 1, len - 1); - elms->set(len - 1, heap->the_hole_value()); + if (elms_obj->IsFixedArray()) { + FixedArray* elms = FixedArray::cast(elms_obj); + AssertNoAllocation no_gc; + MoveElements(heap, &no_gc, elms, 0, elms, 1, len - 1); + elms->set(len - 1, heap->the_hole_value()); + } else { + FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj); + MoveDoubleElements(elms, 0, elms, 1, len - 1); + elms->set_the_hole(len - 1); + } } // Set the length. 
@@ -632,19 +743,25 @@ BUILTIN(ArrayShift) { BUILTIN(ArrayUnshift) { Heap* heap = isolate->heap(); Object* receiver = *args.receiver(); - Object* elms_obj; - { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); - if (maybe_elms_obj == NULL) - return CallJsBuiltin(isolate, "ArrayUnshift", args); - if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; - } + FixedArrayBase* elms_obj; + MaybeObject* maybe_elms_obj = + EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); + if (maybe_elms_obj == NULL) + return CallJsBuiltin(isolate, "ArrayUnshift", args); + if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj; + if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) { return CallJsBuiltin(isolate, "ArrayUnshift", args); } - FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ASSERT(array->HasFastSmiOrObjectElements()); + if (!array->HasFastSmiOrObjectElements()) { + return CallJsBuiltin(isolate, "ArrayUnshift", args); + } + FixedArray* elms = FixedArray::cast(elms_obj); + + if (FLAG_harmony_observation && array->map()->is_observed()) { + return CallJsBuiltin(isolate, "ArrayUnshift", args); + } int len = Smi::cast(array->length())->value(); int to_add = args.length() - 1; @@ -661,14 +778,18 @@ BUILTIN(ArrayUnshift) { if (new_length > elms->length()) { // New backing storage is needed. int capacity = new_length + (new_length >> 1) + 16; - Object* obj; - { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - FixedArray* new_elms = FixedArray::cast(obj); + FixedArray* new_elms; + MaybeObject* maybe_elms = heap->AllocateUninitializedFixedArray(capacity); + if (!maybe_elms->To(&new_elms)) return maybe_elms; + ElementsKind kind = array->GetElementsKind(); - CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, to_add, len); - FillWithHoles(heap, new_elms, new_length, capacity); + ElementsAccessor* accessor = array->GetElementsAccessor(); + MaybeObject* maybe_failure = accessor->CopyElements( + NULL, 0, new_elms, kind, to_add, + ElementsAccessor::kCopyToEndAndInitializeToHole, elms); + ASSERT(!maybe_failure->IsFailure()); + USE(maybe_failure); + elms = new_elms; array->set_elements(elms); } else { @@ -692,16 +813,20 @@ BUILTIN(ArrayUnshift) { BUILTIN(ArraySlice) { Heap* heap = isolate->heap(); Object* receiver = *args.receiver(); - FixedArray* elms; + FixedArrayBase* elms; int len = -1; if (receiver->IsJSArray()) { JSArray* array = JSArray::cast(receiver); - if (!array->HasFastSmiOrObjectElements() || - !IsJSArrayFastElementMovingAllowed(heap, array)) { + if (!IsJSArrayFastElementMovingAllowed(heap, array)) { + return CallJsBuiltin(isolate, "ArraySlice", args); + } + + if (array->HasFastElements()) { + elms = array->elements(); + } else { return CallJsBuiltin(isolate, "ArraySlice", args); } - elms = FixedArray::cast(array->elements()); len = Smi::cast(array->length())->value(); } else { // Array.slice(arguments, ...) 
is quite a common idiom (notably more @@ -710,15 +835,19 @@ BUILTIN(ArraySlice) { isolate->context()->native_context()->arguments_boilerplate()->map(); bool is_arguments_object_with_fast_elements = - receiver->IsJSObject() - && JSObject::cast(receiver)->map() == arguments_map - && JSObject::cast(receiver)->HasFastSmiOrObjectElements(); + receiver->IsJSObject() && + JSObject::cast(receiver)->map() == arguments_map; if (!is_arguments_object_with_fast_elements) { return CallJsBuiltin(isolate, "ArraySlice", args); } - elms = FixedArray::cast(JSObject::cast(receiver)->elements()); - Object* len_obj = JSObject::cast(receiver) - ->InObjectPropertyAt(Heap::kArgumentsLengthIndex); + JSObject* object = JSObject::cast(receiver); + + if (object->HasFastElements()) { + elms = object->elements(); + } else { + return CallJsBuiltin(isolate, "ArraySlice", args); + } + Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex); if (!len_obj->IsSmi()) { return CallJsBuiltin(isolate, "ArraySlice", args); } @@ -726,12 +855,27 @@ BUILTIN(ArraySlice) { if (len > elms->length()) { return CallJsBuiltin(isolate, "ArraySlice", args); } + } + + JSObject* object = JSObject::cast(receiver); + ElementsKind kind = object->GetElementsKind(); + + if (IsHoleyElementsKind(kind)) { + bool packed = true; + ElementsAccessor* accessor = ElementsAccessor::ForKind(kind); for (int i = 0; i < len; i++) { - if (elms->get(i) == heap->the_hole_value()) { - return CallJsBuiltin(isolate, "ArraySlice", args); + if (!accessor->HasElement(object, object, i, elms)) { + packed = false; + break; } } + if (packed) { + kind = GetPackedElementsKind(kind); + } else if (!receiver->IsJSArray()) { + return CallJsBuiltin(isolate, "ArraySlice", args); + } } + ASSERT(len >= 0); int n_arguments = args.length() - 1; @@ -744,6 +888,12 @@ BUILTIN(ArraySlice) { Object* arg1 = args[1]; if (arg1->IsSmi()) { relative_start = Smi::cast(arg1)->value(); + } else if (arg1->IsHeapNumber()) { + double start = HeapNumber::cast(arg1)->value(); + if (start < kMinInt || start > kMaxInt) { + return CallJsBuiltin(isolate, "ArraySlice", args); + } + relative_start = static_cast(start); } else if (!arg1->IsUndefined()) { return CallJsBuiltin(isolate, "ArraySlice", args); } @@ -751,6 +901,12 @@ BUILTIN(ArraySlice) { Object* arg2 = args[2]; if (arg2->IsSmi()) { relative_end = Smi::cast(arg2)->value(); + } else if (arg2->IsHeapNumber()) { + double end = HeapNumber::cast(arg2)->value(); + if (end < kMinInt || end > kMaxInt) { + return CallJsBuiltin(isolate, "ArraySlice", args); + } + relative_end = static_cast(end); } else if (!arg2->IsUndefined()) { return CallJsBuiltin(isolate, "ArraySlice", args); } @@ -765,21 +921,24 @@ BUILTIN(ArraySlice) { int final = (relative_end < 0) ? Max(len + relative_end, 0) : Min(relative_end, len); - ElementsKind elements_kind = JSObject::cast(receiver)->GetElementsKind(); - // Calculate the length of result array. 
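[Editor's note] The ArraySlice builtin above now also accepts heap-number indices (bailing out to the JS builtin when they fall outside int range) and then normalizes them the way ES5 15.4.4.10 does: negative indices count from the end and everything is clamped into [0, len]. The clamping step on its own:

// Sketch of slice index normalization.
#include <algorithm>
#include <cassert>

int ClampIndex(int relative, int len) {
  return (relative < 0) ? std::max(len + relative, 0)
                        : std::min(relative, len);
}

int main() {
  int len = 10;
  int k = ClampIndex(-3, len);              // start: counts from the end -> 7
  int final_index = ClampIndex(100, len);   // end: clamped to len -> 10
  int result_len = std::max(final_index - k, 0);
  assert(k == 7 && final_index == 10 && result_len == 3);
  return 0;
}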
int result_len = Max(final - k, 0); - MaybeObject* maybe_array = - heap->AllocateJSArrayAndStorage(elements_kind, - result_len, - result_len); JSArray* result_array; + MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(kind, + result_len, + result_len); + + AssertNoAllocation no_gc; + if (result_len == 0) return maybe_array; if (!maybe_array->To(&result_array)) return maybe_array; - CopyObjectToObjectElements(elms, elements_kind, k, - FixedArray::cast(result_array->elements()), - elements_kind, 0, result_len); + ElementsAccessor* accessor = object->GetElementsAccessor(); + MaybeObject* maybe_failure = + accessor->CopyElements(NULL, k, result_array->elements(), + kind, 0, result_len, elms); + ASSERT(!maybe_failure->IsFailure()); + USE(maybe_failure); return result_array; } @@ -788,19 +947,22 @@ BUILTIN(ArraySlice) { BUILTIN(ArraySplice) { Heap* heap = isolate->heap(); Object* receiver = *args.receiver(); - Object* elms_obj; - { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3); - if (maybe_elms_obj == NULL) - return CallJsBuiltin(isolate, "ArraySplice", args); - if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; + FixedArrayBase* elms_obj; + MaybeObject* maybe_elms = + EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3); + if (maybe_elms == NULL) { + return CallJsBuiltin(isolate, "ArraySplice", args); } + if (!maybe_elms->To(&elms_obj)) return maybe_elms; + if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) { return CallJsBuiltin(isolate, "ArraySplice", args); } - FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ASSERT(array->HasFastSmiOrObjectElements()); + + if (FLAG_harmony_observation && array->map()->is_observed()) { + return CallJsBuiltin(isolate, "ArraySplice", args); + } int len = Smi::cast(array->length())->value(); @@ -811,6 +973,12 @@ BUILTIN(ArraySplice) { Object* arg1 = args[1]; if (arg1->IsSmi()) { relative_start = Smi::cast(arg1)->value(); + } else if (arg1->IsHeapNumber()) { + double start = HeapNumber::cast(arg1)->value(); + if (start < kMinInt || start > kMaxInt) { + return CallJsBuiltin(isolate, "ArraySplice", args); + } + relative_start = static_cast(start); } else if (!arg1->IsUndefined()) { return CallJsBuiltin(isolate, "ArraySplice", args); } @@ -840,51 +1008,84 @@ BUILTIN(ArraySplice) { actual_delete_count = Min(Max(value, 0), len - actual_start); } + ElementsKind elements_kind = array->GetElementsKind(); + + int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0; + int new_length = len - actual_delete_count + item_count; + + // For double mode we do not support changing the length. + if (new_length > len && IsFastDoubleElementsKind(elements_kind)) { + return CallJsBuiltin(isolate, "ArraySplice", args); + } + + if (new_length == 0) { + MaybeObject* maybe_array = heap->AllocateJSArrayWithElements( + elms_obj, elements_kind, actual_delete_count); + if (maybe_array->IsFailure()) return maybe_array; + array->set_elements(heap->empty_fixed_array()); + array->set_length(Smi::FromInt(0)); + return maybe_array; + } + JSArray* result_array = NULL; - ElementsKind elements_kind = - JSObject::cast(receiver)->GetElementsKind(); MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(elements_kind, actual_delete_count, actual_delete_count); if (!maybe_array->To(&result_array)) return maybe_array; - { - // Fill newly created array. 
- CopyObjectToObjectElements(elms, elements_kind, actual_start, - FixedArray::cast(result_array->elements()), - elements_kind, 0, actual_delete_count); + if (actual_delete_count > 0) { + AssertNoAllocation no_gc; + ElementsAccessor* accessor = array->GetElementsAccessor(); + MaybeObject* maybe_failure = + accessor->CopyElements(NULL, actual_start, result_array->elements(), + elements_kind, 0, actual_delete_count, elms_obj); + // Cannot fail since the origin and target array are of the same elements + // kind. + ASSERT(!maybe_failure->IsFailure()); + USE(maybe_failure); } - int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0; - int new_length = len - actual_delete_count + item_count; - bool elms_changed = false; if (item_count < actual_delete_count) { // Shrink the array. - const bool trim_array = !heap->lo_space()->Contains(elms) && + const bool trim_array = !heap->lo_space()->Contains(elms_obj) && ((actual_start + item_count) < (len - actual_delete_count - actual_start)); if (trim_array) { const int delta = actual_delete_count - item_count; - { + if (elms_obj->IsFixedDoubleArray()) { + FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj); + MoveDoubleElements(elms, delta, elms, 0, actual_start); + } else { + FixedArray* elms = FixedArray::cast(elms_obj); AssertNoAllocation no_gc; MoveElements(heap, &no_gc, elms, delta, elms, 0, actual_start); } - elms = LeftTrimFixedArray(heap, elms, delta); + elms_obj = LeftTrimFixedArray(heap, elms_obj, delta); elms_changed = true; } else { - AssertNoAllocation no_gc; - MoveElements(heap, &no_gc, - elms, actual_start + item_count, - elms, actual_start + actual_delete_count, - (len - actual_delete_count - actual_start)); - FillWithHoles(heap, elms, new_length, len); + if (elms_obj->IsFixedDoubleArray()) { + FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj); + MoveDoubleElements(elms, actual_start + item_count, + elms, actual_start + actual_delete_count, + (len - actual_delete_count - actual_start)); + FillWithHoles(elms, new_length, len); + } else { + FixedArray* elms = FixedArray::cast(elms_obj); + AssertNoAllocation no_gc; + MoveElements(heap, &no_gc, + elms, actual_start + item_count, + elms, actual_start + actual_delete_count, + (len - actual_delete_count - actual_start)); + FillWithHoles(heap, elms, new_length, len); + } } } else if (item_count > actual_delete_count) { + FixedArray* elms = FixedArray::cast(elms_obj); // Currently fixed arrays cannot grow too big, so // we should never hit this case. ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len)); @@ -893,28 +1094,29 @@ BUILTIN(ArraySplice) { if (new_length > elms->length()) { // New backing storage is needed. int capacity = new_length + (new_length >> 1) + 16; - Object* obj; - { MaybeObject* maybe_obj = - heap->AllocateUninitializedFixedArray(capacity); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - FixedArray* new_elms = FixedArray::cast(obj); + FixedArray* new_elms; + MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity); + if (!maybe_obj->To(&new_elms)) return maybe_obj; - { + AssertNoAllocation no_gc; + + ElementsKind kind = array->GetElementsKind(); + ElementsAccessor* accessor = array->GetElementsAccessor(); + if (actual_start > 0) { // Copy the part before actual_start as is. 
- ElementsKind kind = array->GetElementsKind(); - CopyObjectToObjectElements(elms, kind, 0, - new_elms, kind, 0, actual_start); - const int to_copy = len - actual_delete_count - actual_start; - CopyObjectToObjectElements(elms, kind, - actual_start + actual_delete_count, - new_elms, kind, - actual_start + item_count, to_copy); + MaybeObject* maybe_failure = accessor->CopyElements( + NULL, 0, new_elms, kind, 0, actual_start, elms); + ASSERT(!maybe_failure->IsFailure()); + USE(maybe_failure); } - - FillWithHoles(heap, new_elms, new_length, capacity); - - elms = new_elms; + MaybeObject* maybe_failure = accessor->CopyElements( + NULL, actual_start + actual_delete_count, new_elms, kind, + actual_start + item_count, + ElementsAccessor::kCopyToEndAndInitializeToHole, elms); + ASSERT(!maybe_failure->IsFailure()); + USE(maybe_failure); + + elms_obj = new_elms; elms_changed = true; } else { AssertNoAllocation no_gc; @@ -925,16 +1127,28 @@ BUILTIN(ArraySplice) { } } - AssertNoAllocation no_gc; - WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); - for (int k = actual_start; k < actual_start + item_count; k++) { - elms->set(k, args[3 + k - actual_start], mode); + if (IsFastDoubleElementsKind(elements_kind)) { + FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj); + for (int k = actual_start; k < actual_start + item_count; k++) { + Object* arg = args[3 + k - actual_start]; + if (arg->IsSmi()) { + elms->set(k, Smi::cast(arg)->value()); + } else { + elms->set(k, HeapNumber::cast(arg)->value()); + } + } + } else { + FixedArray* elms = FixedArray::cast(elms_obj); + AssertNoAllocation no_gc; + WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); + for (int k = actual_start; k < actual_start + item_count; k++) { + elms->set(k, args[3 + k - actual_start], mode); + } } if (elms_changed) { - array->set_elements(elms); + array->set_elements(elms_obj); } - // Set the length. array->set_length(Smi::FromInt(new_length)); @@ -956,14 +1170,15 @@ BUILTIN(ArrayConcat) { int n_arguments = args.length(); int result_len = 0; ElementsKind elements_kind = GetInitialFastElementsKind(); + bool has_double = false; + bool is_holey = false; for (int i = 0; i < n_arguments; i++) { Object* arg = args[i]; if (!arg->IsJSArray() || - !JSArray::cast(arg)->HasFastSmiOrObjectElements() || + !JSArray::cast(arg)->HasFastElements() || JSArray::cast(arg)->GetPrototype() != array_proto) { return CallJsBuiltin(isolate, "ArrayConcat", args); } - int len = Smi::cast(JSArray::cast(arg)->length())->value(); // We shouldn't overflow when adding another len. @@ -973,47 +1188,51 @@ BUILTIN(ArrayConcat) { result_len += len; ASSERT(result_len >= 0); - if (result_len > FixedArray::kMaxLength) { + if (result_len > FixedDoubleArray::kMaxLength) { return CallJsBuiltin(isolate, "ArrayConcat", args); } - if (!JSArray::cast(arg)->HasFastSmiElements()) { - if (IsFastSmiElementsKind(elements_kind)) { - if (IsFastHoleyElementsKind(elements_kind)) { - elements_kind = FAST_HOLEY_ELEMENTS; - } else { - elements_kind = FAST_ELEMENTS; - } - } - } - - if (JSArray::cast(arg)->HasFastHoleyElements()) { - elements_kind = GetHoleyElementsKind(elements_kind); + ElementsKind arg_kind = JSArray::cast(arg)->map()->elements_kind(); + has_double = has_double || IsFastDoubleElementsKind(arg_kind); + is_holey = is_holey || IsFastHoleyElementsKind(arg_kind); + if (IsMoreGeneralElementsKindTransition(elements_kind, arg_kind)) { + elements_kind = arg_kind; } } - // Allocate result. 
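[Editor's note] ArrayConcat above now computes the result's elements kind as the most general kind among the inputs (via IsMoreGeneralElementsKindTransition), marks it holey if any input is holey, and pre-initializes the storage with holes when doubles flow into an object-elements result. A simplified sketch of the kind join, with an illustrative lattice rather than V8's ElementsKind enum:

// Sketch: join the elements kinds of the arrays being concatenated.
#include <cassert>

enum Kind { SMI, DOUBLE, OBJECT };  // ordered from least to most general

struct ElementsKind {
  Kind kind;
  bool holey;
};

// Take the more general representation and remember whether any input had holes.
ElementsKind Generalize(ElementsKind a, ElementsKind b) {
  ElementsKind result;
  result.kind = (b.kind > a.kind) ? b.kind : a.kind;
  result.holey = a.holey || b.holey;
  return result;
}

int main() {
  ElementsKind smi = {SMI, false};
  ElementsKind holey_double = {DOUBLE, true};
  ElementsKind joined = Generalize(smi, holey_double);
  assert(joined.kind == DOUBLE && joined.holey);
  return 0;
}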
+ if (is_holey) elements_kind = GetHoleyElementsKind(elements_kind); + + // If a double array is concatted into a fast elements array, the fast + // elements array needs to be initialized to contain proper holes, since + // boxing doubles may cause incremental marking. + ArrayStorageAllocationMode mode = + has_double && IsFastObjectElementsKind(elements_kind) + ? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE : DONT_INITIALIZE_ARRAY_ELEMENTS; JSArray* result_array; + // Allocate result. MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(elements_kind, result_len, - result_len); + result_len, + mode); if (!maybe_array->To(&result_array)) return maybe_array; if (result_len == 0) return result_array; - // Copy data. - int start_pos = 0; - FixedArray* result_elms(FixedArray::cast(result_array->elements())); + int j = 0; + FixedArrayBase* storage = result_array->elements(); for (int i = 0; i < n_arguments; i++) { JSArray* array = JSArray::cast(args[i]); int len = Smi::cast(array->length())->value(); - FixedArray* elms = FixedArray::cast(array->elements()); - CopyObjectToObjectElements(elms, elements_kind, 0, - result_elms, elements_kind, - start_pos, len); - start_pos += len; + if (len > 0) { + ElementsAccessor* accessor = array->GetElementsAccessor(); + MaybeObject* maybe_failure = + accessor->CopyElements(array, 0, storage, elements_kind, j, len); + if (maybe_failure->IsFailure()) return maybe_failure; + j += len; + } } - ASSERT(start_pos == result_len); + + ASSERT(j == result_len); return result_array; } @@ -1033,12 +1252,28 @@ BUILTIN(StrictModePoisonPill) { // +// Searches the hidden prototype chain of the given object for the first +// object that is an instance of the given type. If no such object can +// be found then Heap::null_value() is returned. +static inline Object* FindHidden(Heap* heap, + Object* object, + FunctionTemplateInfo* type) { + if (object->IsInstanceOf(type)) return object; + Object* proto = object->GetPrototype(); + if (proto->IsJSObject() && + JSObject::cast(proto)->map()->is_hidden_prototype()) { + return FindHidden(heap, proto, type); + } + return heap->null_value(); +} + + // Returns the holder JSObject if the function can legally be called // with this receiver. Returns Heap::null_value() if the call is // illegal. Any arguments that don't fit the expected type is -// overwritten with undefined. Arguments that do fit the expected -// type is overwritten with the object in the prototype chain that -// actually has that type. +// overwritten with undefined. Note that holder and the arguments are +// implicitly rewritten with the first object in the hidden prototype +// chain that actually has the expected type. 
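[Editor's note] FindHidden above keeps walking the prototype chain only while the prototype is a hidden prototype, and otherwise reports null_value(). A toy model of that walk; plain C++ structs stand in for the V8 object model and nullptr plays the role of null_value():

// Sketch of the hidden-prototype search.
#include <cassert>

struct Obj {
  int type;           // stands in for "is an instance of template |type|"
  bool hidden_proto;  // whether this object acts as a hidden prototype
  Obj* proto;
};

Obj* FindHidden(Obj* object, int type) {
  if (object == nullptr) return nullptr;
  if (object->type == type) return object;
  Obj* proto = object->proto;
  // Only keep searching while the prototype is hidden, mirroring the
  // is_hidden_prototype() check in the patch.
  if (proto != nullptr && proto->hidden_proto) return FindHidden(proto, type);
  return nullptr;
}

int main() {
  Obj grandparent = {7, false, nullptr};
  Obj parent = {3, true, &grandparent};   // hidden prototype of |child|
  Obj child = {1, false, &parent};
  assert(FindHidden(&child, 3) == &parent);  // reached through the hidden link
  assert(FindHidden(&child, 7) == nullptr);  // grandparent's link is not hidden
  return 0;
}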
static inline Object* TypeCheck(Heap* heap, int argc, Object** argv, @@ -1051,15 +1286,10 @@ static inline Object* TypeCheck(Heap* heap, SignatureInfo* sig = SignatureInfo::cast(sig_obj); // If necessary, check the receiver Object* recv_type = sig->receiver(); - Object* holder = recv; if (!recv_type->IsUndefined()) { - for (; holder != heap->null_value(); holder = holder->GetPrototype()) { - if (holder->IsInstanceOf(FunctionTemplateInfo::cast(recv_type))) { - break; - } - } - if (holder == heap->null_value()) return holder; + holder = FindHidden(heap, holder, FunctionTemplateInfo::cast(recv_type)); + if (holder == heap->null_value()) return heap->null_value(); } Object* args_obj = sig->args(); // If there is no argument signature we're done @@ -1072,13 +1302,9 @@ static inline Object* TypeCheck(Heap* heap, if (argtype->IsUndefined()) continue; Object** arg = &argv[-1 - i]; Object* current = *arg; - for (; current != heap->null_value(); current = current->GetPrototype()) { - if (current->IsInstanceOf(FunctionTemplateInfo::cast(argtype))) { - *arg = current; - break; - } - } - if (current == heap->null_value()) *arg = heap->undefined_value(); + current = FindHidden(heap, current, FunctionTemplateInfo::cast(argtype)); + if (current == heap->null_value()) current = heap->undefined_value(); + *arg = current; } return holder; } @@ -1620,7 +1846,7 @@ void Builtins::SetUp(bool create_heap_objects) { // For now we generate builtin adaptor code into a stack-allocated // buffer, before copying it into individual code objects. Be careful // with alignment, some platforms don't like unaligned code. - union { int force_alignment; byte buffer[4*KB]; } u; + union { int force_alignment; byte buffer[8*KB]; } u; // Traverse the list of builtins and generate an adaptor in a // separate code object for each one. diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h index ca70ae5403..a2f752e052 100644 --- a/deps/v8/src/builtins.h +++ b/deps/v8/src/builtins.h @@ -38,6 +38,25 @@ enum BuiltinExtraArguments { }; +#define CODE_AGE_LIST_WITH_ARG(V, A) \ + V(Quadragenarian, A) \ + V(Quinquagenarian, A) \ + V(Sexagenarian, A) \ + V(Septuagenarian, A) \ + V(Octogenarian, A) + +#define CODE_AGE_LIST_IGNORE_ARG(X, V) V(X) + +#define CODE_AGE_LIST(V) \ + CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V) + +#define DECLARE_CODE_AGE_BUILTIN(C, V) \ + V(Make##C##CodeYoungAgainOddMarking, BUILTIN, \ + UNINITIALIZED, Code::kNoExtraICState) \ + V(Make##C##CodeYoungAgainEvenMarking, BUILTIN, \ + UNINITIALIZED, Code::kNoExtraICState) + + // Define list of builtins implemented in C++. #define BUILTIN_LIST_C(V) \ V(Illegal, NO_EXTRA_ARGUMENTS) \ @@ -195,8 +214,8 @@ enum BuiltinExtraArguments { Code::kNoExtraICState) \ \ V(OnStackReplacement, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) - + Code::kNoExtraICState) \ + CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V) #ifdef ENABLE_DEBUGGER_SUPPORT // Define list of builtins used by the debugger implemented in assembly. 
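[Editor's note] The CODE_AGE_LIST macros above are a classic X-macro: one list drives every per-age declaration, so the code ages and their Make*CodeYoungAgain builtins cannot drift apart. A minimal sketch of the pattern, with abbreviated names (the patch generates an odd-marking and an even-marking builtin per age):

// Sketch of the X-macro pattern behind CODE_AGE_LIST.
#include <cstdio>

#define CODE_AGE_LIST(V) \
  V(Quadragenarian)      \
  V(Quinquagenarian)     \
  V(Sexagenarian)

#define DECLARE_ENUM(C) k##C,
enum CodeAge { CODE_AGE_LIST(DECLARE_ENUM) kCodeAgeCount };
#undef DECLARE_ENUM

#define DECLARE_NAME(C) "Make" #C "CodeYoungAgain",
static const char* kBuiltinNames[] = { CODE_AGE_LIST(DECLARE_NAME) };
#undef DECLARE_NAME

int main() {
  // The enum and the name table stay in sync because both expand the same list.
  for (int i = 0; i < kCodeAgeCount; i++) {
    std::printf("%s\n", kBuiltinNames[i]);
  }
  return 0;
}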
@@ -379,6 +398,14 @@ class Builtins { static void Generate_StringConstructCode(MacroAssembler* masm); static void Generate_OnStackReplacement(MacroAssembler* masm); +#define DECLARE_CODE_AGE_BUILTIN_GENERATOR(C) \ + static void Generate_Make##C##CodeYoungAgainEvenMarking( \ + MacroAssembler* masm); \ + static void Generate_Make##C##CodeYoungAgainOddMarking( \ + MacroAssembler* masm); + CODE_AGE_LIST(DECLARE_CODE_AGE_BUILTIN_GENERATOR) +#undef DECLARE_CODE_AGE_BUILTIN_GENERATOR + static void InitBuiltinFunctionTable(); bool initialized_; diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index 59a4cdf823..276c87ebd0 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -37,11 +37,11 @@ namespace v8 { namespace internal { -bool CodeStub::FindCodeInCache(Code** code_out) { - Heap* heap = Isolate::Current()->heap(); - int index = heap->code_stubs()->FindEntry(GetKey()); +bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) { + UnseededNumberDictionary* stubs = isolate->heap()->code_stubs(); + int index = stubs->FindEntry(GetKey()); if (index != UnseededNumberDictionary::kNotFound) { - *code_out = Code::cast(heap->code_stubs()->ValueAt(index)); + *code_out = Code::cast(stubs->ValueAt(index)); return true; } return false; @@ -93,8 +93,8 @@ Handle CodeStub::GetCode() { Heap* heap = isolate->heap(); Code* code; if (UseSpecialCache() - ? FindCodeInSpecialCache(&code) - : FindCodeInCache(&code)) { + ? FindCodeInSpecialCache(&code, isolate) + : FindCodeInCache(&code, isolate)) { ASSERT(IsPregenerated() == code->is_pregenerated()); return Handle(code); } @@ -142,7 +142,9 @@ Handle CodeStub::GetCode() { } Activate(code); - ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code)); + ASSERT(!NeedsImmovableCode() || + heap->lo_space()->Contains(code) || + heap->code_space()->FirstPage()->Contains(code->address())); return Handle(code, isolate); } @@ -167,6 +169,122 @@ void CodeStub::PrintName(StringStream* stream) { } +void BinaryOpStub::Generate(MacroAssembler* masm) { + // Explicitly allow generation of nested stubs. It is safe here because + // generation code does not use any raw pointers. + AllowStubCallsScope allow_stub_calls(masm, true); + + BinaryOpIC::TypeInfo operands_type = Max(left_type_, right_type_); + if (left_type_ == BinaryOpIC::ODDBALL && right_type_ == BinaryOpIC::ODDBALL) { + // The OddballStub handles a number and an oddball, not two oddballs. 
+ operands_type = BinaryOpIC::GENERIC; + } + switch (operands_type) { + case BinaryOpIC::UNINITIALIZED: + GenerateTypeTransition(masm); + break; + case BinaryOpIC::SMI: + GenerateSmiStub(masm); + break; + case BinaryOpIC::INT32: + GenerateInt32Stub(masm); + break; + case BinaryOpIC::HEAP_NUMBER: + GenerateHeapNumberStub(masm); + break; + case BinaryOpIC::ODDBALL: + GenerateOddballStub(masm); + break; + case BinaryOpIC::STRING: + GenerateStringStub(masm); + break; + case BinaryOpIC::GENERIC: + GenerateGeneric(masm); + break; + default: + UNREACHABLE(); + } +} + + +#define __ ACCESS_MASM(masm) + + +void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { + switch (op_) { + case Token::ADD: + __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); + break; + case Token::SUB: + __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); + break; + case Token::MUL: + __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); + break; + case Token::DIV: + __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); + break; + case Token::MOD: + __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); + break; + case Token::BIT_OR: + __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); + break; + case Token::BIT_AND: + __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); + break; + case Token::BIT_XOR: + __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); + break; + case Token::SAR: + __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); + break; + case Token::SHR: + __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); + break; + case Token::SHL: + __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); + break; + default: + UNREACHABLE(); + } +} + + +#undef __ + + +void BinaryOpStub::PrintName(StringStream* stream) { + const char* op_name = Token::Name(op_); + const char* overwrite_name; + switch (mode_) { + case NO_OVERWRITE: overwrite_name = "Alloc"; break; + case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; + case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; + default: overwrite_name = "UnknownOverwrite"; break; + } + stream->Add("BinaryOpStub_%s_%s_%s+%s", + op_name, + overwrite_name, + BinaryOpIC::GetName(left_type_), + BinaryOpIC::GetName(right_type_)); +} + + +void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) { + ASSERT(left_type_ == BinaryOpIC::STRING || right_type_ == BinaryOpIC::STRING); + ASSERT(op_ == Token::ADD); + if (left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING) { + GenerateBothStringStub(masm); + return; + } + // Try to add arguments as strings, otherwise, transition to the generic + // BinaryOpIC type. 
+ GenerateAddStrings(masm); + GenerateTypeTransition(masm); +} + + void ICCompareStub::AddToSpecialCache(Handle new_object) { ASSERT(*known_map_ != NULL); Isolate* isolate = new_object->GetIsolate(); @@ -179,8 +297,7 @@ void ICCompareStub::AddToSpecialCache(Handle new_object) { } -bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) { - Isolate* isolate = known_map_->GetIsolate(); +bool ICCompareStub::FindCodeInSpecialCache(Code** code_out, Isolate* isolate) { Factory* factory = isolate->factory(); Code::Flags flags = Code::ComputeFlags( static_cast(GetCodeKind()), @@ -194,7 +311,12 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) { flags)); if (probe->IsCode()) { *code_out = Code::cast(*probe); - ASSERT(op_ == (*code_out)->compare_operation() + Token::EQ); +#ifdef DEBUG + Token::Value cached_op; + ICCompareStub::DecodeMinorKey((*code_out)->stub_info(), NULL, NULL, NULL, + &cached_op); + ASSERT(op_ == cached_op); +#endif return true; } return false; @@ -202,7 +324,33 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) { int ICCompareStub::MinorKey() { - return OpField::encode(op_ - Token::EQ) | StateField::encode(state_); + return OpField::encode(op_ - Token::EQ) | + LeftStateField::encode(left_) | + RightStateField::encode(right_) | + HandlerStateField::encode(state_); +} + + +void ICCompareStub::DecodeMinorKey(int minor_key, + CompareIC::State* left_state, + CompareIC::State* right_state, + CompareIC::State* handler_state, + Token::Value* op) { + if (left_state) { + *left_state = + static_cast(LeftStateField::decode(minor_key)); + } + if (right_state) { + *right_state = + static_cast(RightStateField::decode(minor_key)); + } + if (handler_state) { + *handler_state = + static_cast(HandlerStateField::decode(minor_key)); + } + if (op) { + *op = static_cast(OpField::decode(minor_key) + Token::EQ); + } } @@ -211,27 +359,28 @@ void ICCompareStub::Generate(MacroAssembler* masm) { case CompareIC::UNINITIALIZED: GenerateMiss(masm); break; - case CompareIC::SMIS: + case CompareIC::SMI: GenerateSmis(masm); break; - case CompareIC::HEAP_NUMBERS: + case CompareIC::HEAP_NUMBER: GenerateHeapNumbers(masm); break; - case CompareIC::STRINGS: + case CompareIC::STRING: GenerateStrings(masm); break; - case CompareIC::SYMBOLS: + case CompareIC::SYMBOL: GenerateSymbols(masm); break; - case CompareIC::OBJECTS: + case CompareIC::OBJECT: GenerateObjects(masm); break; case CompareIC::KNOWN_OBJECTS: ASSERT(*known_map_ != NULL); GenerateKnownObjects(masm); break; - default: - UNREACHABLE(); + case CompareIC::GENERIC: + GenerateGeneric(masm); + break; } } diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h index f19063230a..ae113f5729 100644 --- a/deps/v8/src/code-stubs.h +++ b/deps/v8/src/code-stubs.h @@ -141,7 +141,7 @@ class CodeStub BASE_EMBEDDED { bool CompilingCallsToThisStubIsGCSafe() { bool is_pregenerated = IsPregenerated(); Code* code = NULL; - CHECK(!is_pregenerated || FindCodeInCache(&code)); + CHECK(!is_pregenerated || FindCodeInCache(&code, Isolate::Current())); return is_pregenerated; } @@ -160,7 +160,10 @@ class CodeStub BASE_EMBEDDED { virtual bool SometimesSetsUpAFrame() { return true; } // Lookup the code in the (possibly custom) cache. - bool FindCodeInCache(Code** code_out); + bool FindCodeInCache(Code** code_out, Isolate* isolate); + + protected: + static bool CanUseFPRegisters(); private: // Nonvirtual wrapper around the stub-specific Generate function. 
Call @@ -199,7 +202,9 @@ class CodeStub BASE_EMBEDDED { virtual void AddToSpecialCache(Handle new_object) { } // Find code in a specialized cache, work is delegated to the specific stub. - virtual bool FindCodeInSpecialCache(Code** code_out) { return false; } + virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate) { + return false; + } // If a stub uses a special cache override this. virtual bool UseSpecialCache() { return false; } @@ -479,10 +484,132 @@ class MathPowStub: public CodeStub { }; +class BinaryOpStub: public CodeStub { + public: + BinaryOpStub(Token::Value op, OverwriteMode mode) + : op_(op), + mode_(mode), + platform_specific_bit_(false), + left_type_(BinaryOpIC::UNINITIALIZED), + right_type_(BinaryOpIC::UNINITIALIZED), + result_type_(BinaryOpIC::UNINITIALIZED) { + Initialize(); + ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); + } + + BinaryOpStub( + int key, + BinaryOpIC::TypeInfo left_type, + BinaryOpIC::TypeInfo right_type, + BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED) + : op_(OpBits::decode(key)), + mode_(ModeBits::decode(key)), + platform_specific_bit_(PlatformSpecificBits::decode(key)), + left_type_(left_type), + right_type_(right_type), + result_type_(result_type) { } + + static void decode_types_from_minor_key(int minor_key, + BinaryOpIC::TypeInfo* left_type, + BinaryOpIC::TypeInfo* right_type, + BinaryOpIC::TypeInfo* result_type) { + *left_type = + static_cast(LeftTypeBits::decode(minor_key)); + *right_type = + static_cast(RightTypeBits::decode(minor_key)); + *result_type = + static_cast(ResultTypeBits::decode(minor_key)); + } + + static Token::Value decode_op_from_minor_key(int minor_key) { + return static_cast(OpBits::decode(minor_key)); + } + + enum SmiCodeGenerateHeapNumberResults { + ALLOW_HEAPNUMBER_RESULTS, + NO_HEAPNUMBER_RESULTS + }; + + private: + Token::Value op_; + OverwriteMode mode_; + bool platform_specific_bit_; // Indicates SSE3 on IA32, VFP2 on ARM. + + // Operand type information determined at runtime. + BinaryOpIC::TypeInfo left_type_; + BinaryOpIC::TypeInfo right_type_; + BinaryOpIC::TypeInfo result_type_; + + virtual void PrintName(StringStream* stream); + + // Minor key encoding in 19 bits TTTRRRLLLSOOOOOOOMM. + class ModeBits: public BitField {}; + class OpBits: public BitField {}; + class PlatformSpecificBits: public BitField {}; + class LeftTypeBits: public BitField {}; + class RightTypeBits: public BitField {}; + class ResultTypeBits: public BitField {}; + + Major MajorKey() { return BinaryOp; } + int MinorKey() { + return OpBits::encode(op_) + | ModeBits::encode(mode_) + | PlatformSpecificBits::encode(platform_specific_bit_) + | LeftTypeBits::encode(left_type_) + | RightTypeBits::encode(right_type_) + | ResultTypeBits::encode(result_type_); + } + + + // Platform-independent implementation. + void Generate(MacroAssembler* masm); + void GenerateCallRuntime(MacroAssembler* masm); + + // Platform-independent signature, platform-specific implementation. 
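[Editor's note] The BinaryOpStub minor key above packs the mode, op, platform bit and the three operand/result type states into 19 bits, per the TTTRRRLLLSOOOOOOOMM comment. The BitField template parameters were stripped from this extract, so the widths below are reconstructed from that layout comment, and the BitField class itself is a simplified stand-in for v8::internal::BitField:

// Sketch of the minor-key bit packing.
#include <cassert>
#include <cstdint>

template <class T, int kShift, int kSize>
struct BitField {
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key >> kShift) & ((1u << kSize) - 1));
  }
};

// mode(2), op(7), platform bit(1), then left/right/result type (3 bits each).
typedef BitField<int, 0, 2>   ModeBits;
typedef BitField<int, 2, 7>   OpBits;
typedef BitField<bool, 9, 1>  PlatformSpecificBits;
typedef BitField<int, 10, 3>  LeftTypeBits;
typedef BitField<int, 13, 3>  RightTypeBits;
typedef BitField<int, 16, 3>  ResultTypeBits;

int main() {
  uint32_t key = ModeBits::encode(1) | OpBits::encode(42) |
                 PlatformSpecificBits::encode(true) |
                 LeftTypeBits::encode(3) | RightTypeBits::encode(5) |
                 ResultTypeBits::encode(2);
  // Each field decodes back independently of the others.
  assert(OpBits::decode(key) == 42);
  assert(RightTypeBits::decode(key) == 5);
  assert(PlatformSpecificBits::decode(key) == true);
  return 0;
}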
+ void Initialize(); + void GenerateAddStrings(MacroAssembler* masm); + void GenerateBothStringStub(MacroAssembler* masm); + void GenerateGeneric(MacroAssembler* masm); + void GenerateGenericStub(MacroAssembler* masm); + void GenerateHeapNumberStub(MacroAssembler* masm); + void GenerateInt32Stub(MacroAssembler* masm); + void GenerateLoadArguments(MacroAssembler* masm); + void GenerateOddballStub(MacroAssembler* masm); + void GenerateRegisterArgsPush(MacroAssembler* masm); + void GenerateReturn(MacroAssembler* masm); + void GenerateSmiStub(MacroAssembler* masm); + void GenerateStringStub(MacroAssembler* masm); + void GenerateTypeTransition(MacroAssembler* masm); + void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm); + void GenerateUninitializedStub(MacroAssembler* masm); + + // Entirely platform-specific methods are defined as static helper + // functions in the /code-stubs-.cc files. + + virtual int GetCodeKind() { return Code::BINARY_OP_IC; } + + virtual InlineCacheState GetICState() { + return BinaryOpIC::ToState(Max(left_type_, right_type_)); + } + + virtual void FinishCode(Handle code) { + code->set_stub_info(MinorKey()); + } + + friend class CodeGenerator; +}; + + class ICCompareStub: public CodeStub { public: - ICCompareStub(Token::Value op, CompareIC::State state) - : op_(op), state_(state) { + ICCompareStub(Token::Value op, + CompareIC::State left, + CompareIC::State right, + CompareIC::State handler) + : op_(op), + left_(left), + right_(right), + state_(handler) { ASSERT(Token::IsCompareOp(op)); } @@ -490,13 +617,24 @@ class ICCompareStub: public CodeStub { void set_known_map(Handle map) { known_map_ = map; } + static void DecodeMinorKey(int minor_key, + CompareIC::State* left_state, + CompareIC::State* right_state, + CompareIC::State* handler_state, + Token::Value* op); + + static CompareIC::State CompareState(int minor_key) { + return static_cast(HandlerStateField::decode(minor_key)); + } + private: class OpField: public BitField { }; - class StateField: public BitField { }; + class LeftStateField: public BitField { }; + class RightStateField: public BitField { }; + class HandlerStateField: public BitField { }; virtual void FinishCode(Handle code) { - code->set_compare_state(state_); - code->set_compare_operation(op_ - Token::EQ); + code->set_stub_info(MinorKey()); } virtual CodeStub::Major MajorKey() { return CompareIC; } @@ -511,117 +649,23 @@ class ICCompareStub: public CodeStub { void GenerateObjects(MacroAssembler* masm); void GenerateMiss(MacroAssembler* masm); void GenerateKnownObjects(MacroAssembler* masm); + void GenerateGeneric(MacroAssembler* masm); bool strict() const { return op_ == Token::EQ_STRICT; } Condition GetCondition() const { return CompareIC::ComputeCondition(op_); } virtual void AddToSpecialCache(Handle new_object); - virtual bool FindCodeInSpecialCache(Code** code_out); + virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate); virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECTS; } Token::Value op_; + CompareIC::State left_; + CompareIC::State right_; CompareIC::State state_; Handle known_map_; }; -// Flags that control the compare stub code generation. 
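[Editor's note] The ToBooleanStub::Types::ContainsAnyOf accessor added above is a bit-set intersection test: each observed type is one bit in an integral mask, so "contains any of" is a single AND. A simplified sketch modeled on the small enum-set helper the Types wrapper delegates to (illustrative, not the V8 class):

// Sketch of a bit-set over an enum with a ContainsAnyOf query.
#include <cassert>
#include <cstdint>

template <class E>
class EnumSet {
 public:
  EnumSet() : bits_(0) {}
  void Add(E element) { bits_ |= 1u << static_cast<int>(element); }
  bool Contains(E element) const {
    return ((bits_ >> static_cast<int>(element)) & 1u) != 0;
  }
  bool ContainsAnyOf(const EnumSet& other) const {
    return (bits_ & other.bits_) != 0;   // any shared bit
  }
  bool IsEmpty() const { return bits_ == 0; }
  uint32_t ToIntegral() const { return bits_; }

 private:
  uint32_t bits_;
};

enum ToBooleanType { UNDEFINED, BOOLEAN, NULL_TYPE, SMI, SPEC_OBJECT, STRING, HEAP_NUMBER };

int main() {
  EnumSet<ToBooleanType> seen, numbers;
  seen.Add(SMI);
  seen.Add(STRING);
  numbers.Add(SMI);
  numbers.Add(HEAP_NUMBER);
  assert(seen.ContainsAnyOf(numbers));  // SMI is in both sets
  assert(!seen.IsEmpty());
  return 0;
}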
-enum CompareFlags {
-  NO_COMPARE_FLAGS = 0,
-  NO_SMI_COMPARE_IN_STUB = 1 << 0,
-  NO_NUMBER_COMPARE_IN_STUB = 1 << 1,
-  CANT_BOTH_BE_NAN = 1 << 2
-};
-
-
-enum NaNInformation {
-  kBothCouldBeNaN,
-  kCantBothBeNaN
-};
-
-
-class CompareStub: public CodeStub {
- public:
-  CompareStub(Condition cc,
-              bool strict,
-              CompareFlags flags,
-              Register lhs,
-              Register rhs) :
-     cc_(cc),
-     strict_(strict),
-     never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
-     include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
-     include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
-     lhs_(lhs),
-     rhs_(rhs) { }
-
-  CompareStub(Condition cc,
-              bool strict,
-              CompareFlags flags) :
-     cc_(cc),
-     strict_(strict),
-     never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
-     include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
-     include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
-     lhs_(no_reg),
-     rhs_(no_reg) { }
-
-  void Generate(MacroAssembler* masm);
-
- private:
-  Condition cc_;
-  bool strict_;
-  // Only used for 'equal' comparisons. Tells the stub that we already know
-  // that at least one side of the comparison is not NaN. This allows the
-  // stub to use object identity in the positive case. We ignore it when
-  // generating the minor key for other comparisons to avoid creating more
-  // stubs.
-  bool never_nan_nan_;
-  // Do generate the number comparison code in the stub. Stubs without number
-  // comparison code is used when the number comparison has been inlined, and
-  // the stub will be called if one of the operands is not a number.
-  bool include_number_compare_;
-
-  // Generate the comparison code for two smi operands in the stub.
-  bool include_smi_compare_;
-
-  // Register holding the left hand side of the comparison if the stub gives
-  // a choice, no_reg otherwise.
-
-  Register lhs_;
-  // Register holding the right hand side of the comparison if the stub gives
-  // a choice, no_reg otherwise.
-  Register rhs_;
-
-  // Encoding of the minor key in 16 bits.
-  class StrictField: public BitField {};
-  class NeverNanNanField: public BitField {};
-  class IncludeNumberCompareField: public BitField {};
-  class IncludeSmiCompareField: public BitField {};
-  class RegisterField: public BitField {};
-  class ConditionField: public BitField {};
-
-  Major MajorKey() { return Compare; }
-
-  int MinorKey();
-
-  virtual int GetCodeKind() { return Code::COMPARE_IC; }
-  virtual void FinishCode(Handle<Code> code) {
-    code->set_compare_state(CompareIC::GENERIC);
-  }
-
-  // Branch to the label if the given object isn't a symbol.
-  void BranchIfNonSymbol(MacroAssembler* masm,
-                         Label* label,
-                         Register object,
-                         Register scratch);
-
-  // Unfortunately you have to run without snapshots to see most of these
-  // names in the profile since most compare stubs end up in the snapshot.
-  virtual void PrintName(StringStream* stream);
-};
-
-
 class CEntryStub : public CodeStub {
  public:
   explicit CEntryStub(int result_size,
@@ -998,13 +1042,15 @@ class KeyedStoreElementStub : public CodeStub {
                         KeyedAccessGrowMode grow_mode)
       : is_js_array_(is_js_array),
         elements_kind_(elements_kind),
-        grow_mode_(grow_mode) { }
+        grow_mode_(grow_mode),
+        fp_registers_(CanUseFPRegisters()) { }
 
   Major MajorKey() { return KeyedStoreElement; }
   int MinorKey() {
     return ElementsKindBits::encode(elements_kind_) |
         IsJSArrayBits::encode(is_js_array_) |
-        GrowModeBits::encode(grow_mode_);
+        GrowModeBits::encode(grow_mode_) |
+        FPRegisters::encode(fp_registers_);
   }
 
   void Generate(MacroAssembler* masm);
@@ -1013,10 +1059,12 @@ class KeyedStoreElementStub : public CodeStub {
   class ElementsKindBits: public BitField {};
   class GrowModeBits: public BitField {};
   class IsJSArrayBits: public BitField {};
+  class FPRegisters: public BitField {};
 
   bool is_js_array_;
   ElementsKind elements_kind_;
   KeyedAccessGrowMode grow_mode_;
+  bool fp_registers_;
 
   DISALLOW_COPY_AND_ASSIGN(KeyedStoreElementStub);
 };
@@ -1046,6 +1094,9 @@ class ToBooleanStub: public CodeStub {
 
     bool IsEmpty() const { return set_.IsEmpty(); }
     bool Contains(Type type) const { return set_.Contains(type); }
+    bool ContainsAnyOf(Types types) const {
+      return set_.ContainsAnyOf(types.set_);
+    }
     void Add(Type type) { set_.Add(type); }
     byte ToByte() const { return set_.ToIntegral(); }
     void Print(StringStream* stream) const;
@@ -1132,14 +1183,19 @@ class ElementsTransitionAndStoreStub : public CodeStub {
 
 class StoreArrayLiteralElementStub : public CodeStub {
  public:
-  explicit StoreArrayLiteralElementStub() {}
+  StoreArrayLiteralElementStub()
+      : fp_registers_(CanUseFPRegisters()) { }
 
  private:
+  class FPRegisters: public BitField {};
+
   Major MajorKey() { return StoreArrayLiteralElement; }
-  int MinorKey() { return 0; }
+  int MinorKey() { return FPRegisters::encode(fp_registers_); }
 
   void Generate(MacroAssembler* masm);
 
+  bool fp_registers_;
+
   DISALLOW_COPY_AND_ASSIGN(StoreArrayLiteralElementStub);
 };
 
@@ -1159,6 +1215,8 @@ class ProfileEntryHookStub : public CodeStub {
   // non-NULL hook.
   static bool SetFunctionEntryHook(FunctionEntryHook entry_hook);
 
+  static bool HasEntryHook() { return entry_hook_ != NULL; }
+
  private:
   static void EntryHookTrampoline(intptr_t function,
                                   intptr_t stack_pointer);
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 0163580e90..83ac854a07 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -107,6 +107,7 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
   if (!code.is_null()) {
     isolate->counters()->total_compiled_code_size()->Increment(
         code->instruction_size());
+    code->set_prologue_offset(info->prologue_offset());
   }
   return code;
 }
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 08a777f2ad..0ac68c2eac 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -90,6 +90,7 @@ namespace internal {
 typedef double (*UnaryMathFunction)(double x);
 
 UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type);
+UnaryMathFunction CreateExpFunction();
 UnaryMathFunction CreateSqrtFunction();
 
 
@@ -103,6 +104,19 @@ class ElementsTransitionGenerator : public AllStatic {
   DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
 };
 
+
+class SeqStringSetCharGenerator : public AllStatic {
+ public:
+  static void Generate(MacroAssembler* masm,
+                       String::Encoding encoding,
+                       Register string,
+                       Register index,
+                       Register value);
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SeqStringSetCharGenerator);
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_CODEGEN_H_
diff --git a/deps/v8/src/collection.js b/deps/v8/src/collection.js
index d36fe18fa0..b3c2db72d7 100644
--- a/deps/v8/src/collection.js
+++ b/deps/v8/src/collection.js
@@ -88,6 +88,25 @@ function SetDelete(key) {
 }
 
 
+function SetGetSize() {
+  if (!IS_SET(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['Set.prototype.size', this]);
+  }
+  return %SetGetSize(this);
+}
+
+
+function SetClear() {
+  if (!IS_SET(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['Set.prototype.clear', this]);
+  }
+  // Replace the internal table with a new empty table.
+  %SetInitialize(this);
+}
+
+
 function MapConstructor() {
   if (%_IsConstructCall()) {
     %MapInitialize(this);
@@ -145,6 +164,25 @@ function MapDelete(key) {
 }
 
 
+function MapGetSize() {
+  if (!IS_MAP(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['Map.prototype.size', this]);
+  }
+  return %MapGetSize(this);
+}
+
+
+function MapClear() {
+  if (!IS_MAP(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['Map.prototype.clear', this]);
+  }
+  // Replace the internal table with a new empty table.
+  %MapInitialize(this);
+}
+
+
 function WeakMapConstructor() {
   if (%_IsConstructCall()) {
     %WeakMapInitialize(this);
@@ -215,18 +253,22 @@ function WeakMapDelete(key) {
   %SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM);
 
   // Set up the non-enumerable functions on the Set prototype object.
+  InstallGetter($Set.prototype, "size", SetGetSize);
   InstallFunctions($Set.prototype, DONT_ENUM, $Array(
     "add", SetAdd,
     "has", SetHas,
-    "delete", SetDelete
+    "delete", SetDelete,
+    "clear", SetClear
   ));
 
   // Set up the non-enumerable functions on the Map prototype object.
+  InstallGetter($Map.prototype, "size", MapGetSize);
   InstallFunctions($Map.prototype, DONT_ENUM, $Array(
     "get", MapGet,
     "set", MapSet,
    "has", MapHas,
-    "delete", MapDelete
+    "delete", MapDelete,
+    "clear", MapClear
   ));
 
   // Set up the WeakMap constructor function.
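The collection.js hunk above installs SetGetSize/SetClear and MapGetSize/MapClear on the Set and Map prototypes, giving scripts a size getter and a clear() method on both collections. A minimal usage sketch of the resulting behaviour, assuming a d8 shell built from this tree (print() is the d8 shell's output helper; the snippet is illustrative only and not part of the patch):

  var s = new Set();
  s.add("a");
  s.add("b");
  print(s.size);        // 2, backed by %SetGetSize through the new getter
  s.clear();            // re-runs %SetInitialize, replacing the internal table
  print(s.size);        // 0

  var m = new Map();
  m.set("key", 1);
  print(m.size);        // 1
  m.clear();            // re-runs %MapInitialize
  print(m.has("key"));  // false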
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index c0645760b3..904e84fd6c 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -98,7 +98,7 @@ void CompilationSubCache::Age() {
 
 
 void CompilationSubCache::IterateFunctions(ObjectVisitor* v) {
-  Object* undefined = isolate()->heap()->raw_unchecked_undefined_value();
+  Object* undefined = isolate()->heap()->undefined_value();
   for (int i = 0; i < generations_; i++) {
     if (tables_[i] != undefined) {
       reinterpret_cast<CompilationCacheTable*>(tables_[i])->IterateElements(v);
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 86374371e9..5779aae81b 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -52,57 +52,53 @@ namespace internal {
 
 
 CompilationInfo::CompilationInfo(Handle