From 56e6952e639ba1557a5b22333788583e9e39fa29 Mon Sep 17 00:00:00 2001 From: Ryan Dahl Date: Mon, 10 Oct 2011 11:52:42 -0700 Subject: [PATCH] Upgrade V8 to 3.6.6 --- deps/v8/ChangeLog | 55 + deps/v8/Makefile | 45 +- deps/v8/benchmarks/spinning-balls/index.html | 11 + .../benchmarks/spinning-balls/splay-tree.js | 326 ++ deps/v8/benchmarks/spinning-balls/v.js | 387 ++ deps/v8/build/common.gypi | 12 +- deps/v8/build/standalone.gypi | 31 +- deps/v8/include/v8-debug.h | 5 + deps/v8/include/v8-profiler.h | 6 + deps/v8/include/v8.h | 42 +- deps/v8/src/SConscript | 2 + deps/v8/src/api.cc | 130 +- deps/v8/src/arm/assembler-arm-inl.h | 24 +- deps/v8/src/arm/assembler-arm.cc | 12 +- deps/v8/src/arm/assembler-arm.h | 10 +- deps/v8/src/arm/builtins-arm.cc | 1118 ++--- deps/v8/src/arm/code-stubs-arm.cc | 585 ++- deps/v8/src/arm/code-stubs-arm.h | 245 +- deps/v8/src/arm/codegen-arm.cc | 8 +- deps/v8/src/arm/codegen-arm.h | 10 - deps/v8/src/arm/debug-arm.cc | 82 +- deps/v8/src/arm/deoptimizer-arm.cc | 34 +- deps/v8/src/arm/frames-arm.h | 10 + deps/v8/src/arm/full-codegen-arm.cc | 195 +- deps/v8/src/arm/ic-arm.cc | 149 +- deps/v8/src/arm/lithium-arm.cc | 38 +- deps/v8/src/arm/lithium-arm.h | 16 +- deps/v8/src/arm/lithium-codegen-arm.cc | 182 +- deps/v8/src/arm/lithium-codegen-arm.h | 7 +- deps/v8/src/arm/macro-assembler-arm.cc | 566 ++- deps/v8/src/arm/macro-assembler-arm.h | 226 +- deps/v8/src/arm/regexp-macro-assembler-arm.cc | 15 +- deps/v8/src/arm/simulator-arm.cc | 2 + deps/v8/src/arm/stub-cache-arm.cc | 324 +- deps/v8/src/array.js | 151 +- deps/v8/src/assembler.cc | 56 +- deps/v8/src/assembler.h | 27 +- deps/v8/src/ast.cc | 154 +- deps/v8/src/ast.h | 29 +- deps/v8/src/bootstrapper.cc | 57 +- deps/v8/src/builtins.cc | 146 +- deps/v8/src/cached-powers.cc | 12 +- deps/v8/src/code-stubs.cc | 45 +- deps/v8/src/code-stubs.h | 117 +- deps/v8/src/codegen.cc | 2 +- deps/v8/src/compiler-intrinsics.h | 77 + deps/v8/src/compiler.cc | 3 +- deps/v8/src/contexts.cc | 100 +- deps/v8/src/contexts.h | 41 +- deps/v8/src/conversions-inl.h | 2 +- deps/v8/src/conversions.h | 2 - deps/v8/src/cpu-profiler.cc | 2 +- deps/v8/src/d8-debug.cc | 5 +- deps/v8/src/d8.cc | 34 +- deps/v8/src/debug.cc | 226 +- deps/v8/src/debug.h | 90 +- deps/v8/src/deoptimizer.cc | 70 +- deps/v8/src/deoptimizer.h | 18 +- deps/v8/src/disassembler.cc | 2 +- deps/v8/src/elements.cc | 11 +- deps/v8/src/execution.cc | 175 +- deps/v8/src/execution.h | 13 +- deps/v8/src/extensions/gc-extension.cc | 7 +- deps/v8/src/factory.cc | 96 +- deps/v8/src/factory.h | 32 +- deps/v8/src/flag-definitions.h | 24 + deps/v8/src/frames-inl.h | 67 +- deps/v8/src/frames.cc | 117 +- deps/v8/src/frames.h | 78 +- deps/v8/src/full-codegen.cc | 41 +- deps/v8/src/full-codegen.h | 23 +- deps/v8/src/func-name-inferrer.h | 6 + deps/v8/src/globals.h | 4 + deps/v8/src/handles.cc | 13 +- deps/v8/src/handles.h | 15 +- deps/v8/src/heap-inl.h | 104 +- deps/v8/src/heap-profiler.cc | 1 - deps/v8/src/heap.cc | 1531 ++++--- deps/v8/src/heap.h | 458 +- deps/v8/src/hydrogen-instructions.cc | 66 +- deps/v8/src/hydrogen-instructions.h | 344 +- deps/v8/src/hydrogen.cc | 408 +- deps/v8/src/hydrogen.h | 27 +- deps/v8/src/ia32/assembler-ia32-inl.h | 26 +- deps/v8/src/ia32/assembler-ia32.cc | 87 +- deps/v8/src/ia32/assembler-ia32.h | 88 +- deps/v8/src/ia32/builtins-ia32.cc | 1031 ++--- deps/v8/src/ia32/code-stubs-ia32.cc | 1112 +++-- deps/v8/src/ia32/code-stubs-ia32.h | 291 +- deps/v8/src/ia32/codegen-ia32.cc | 46 +- deps/v8/src/ia32/debug-ia32.cc | 95 +- deps/v8/src/ia32/deoptimizer-ia32.cc | 100 +- 
deps/v8/src/ia32/disasm-ia32.cc | 29 +- deps/v8/src/ia32/full-codegen-ia32.cc | 323 +- deps/v8/src/ia32/ic-ia32.cc | 160 +- deps/v8/src/ia32/lithium-codegen-ia32.cc | 255 +- deps/v8/src/ia32/lithium-codegen-ia32.h | 13 +- deps/v8/src/ia32/lithium-ia32.cc | 64 +- deps/v8/src/ia32/lithium-ia32.h | 28 +- deps/v8/src/ia32/macro-assembler-ia32.cc | 737 +++- deps/v8/src/ia32/macro-assembler-ia32.h | 226 +- .../src/ia32/regexp-macro-assembler-ia32.cc | 116 +- deps/v8/src/ia32/stub-cache-ia32.cc | 358 +- deps/v8/src/ic-inl.h | 2 + deps/v8/src/ic.cc | 44 +- deps/v8/src/incremental-marking-inl.h | 155 + deps/v8/src/incremental-marking.cc | 811 ++++ deps/v8/src/incremental-marking.h | 254 ++ deps/v8/src/isolate-inl.h | 15 + deps/v8/src/isolate.cc | 30 +- deps/v8/src/isolate.h | 29 +- deps/v8/src/json-parser.h | 2 +- deps/v8/src/jsregexp.cc | 7 +- deps/v8/src/jsregexp.h | 5 +- deps/v8/src/lithium-allocator.cc | 19 +- deps/v8/src/lithium.cc | 22 + deps/v8/src/lithium.h | 18 +- deps/v8/src/liveedit.cc | 11 +- deps/v8/src/liveobjectlist.cc | 14 +- deps/v8/src/log.cc | 14 +- deps/v8/src/log.h | 1 + deps/v8/src/macro-assembler.h | 59 +- deps/v8/src/mark-compact-inl.h | 101 + deps/v8/src/mark-compact.cc | 3748 +++++++++-------- deps/v8/src/mark-compact.h | 639 ++- deps/v8/src/messages.cc | 14 +- deps/v8/src/mips/assembler-mips-inl.h | 4 +- deps/v8/src/mips/builtins-mips.cc | 1151 ++--- deps/v8/src/mips/code-stubs-mips.cc | 554 +-- deps/v8/src/mips/code-stubs-mips.h | 4 +- deps/v8/src/mips/codegen-mips.cc | 8 +- deps/v8/src/mips/debug-mips.cc | 83 +- deps/v8/src/mips/full-codegen-mips.cc | 84 +- deps/v8/src/mips/ic-mips.cc | 39 +- deps/v8/src/mips/macro-assembler-mips.cc | 509 ++- deps/v8/src/mips/macro-assembler-mips.h | 215 +- .../src/mips/regexp-macro-assembler-mips.cc | 26 +- deps/v8/src/mips/stub-cache-mips.cc | 160 +- deps/v8/src/mksnapshot.cc | 3 +- deps/v8/src/objects-debug.cc | 25 +- deps/v8/src/objects-inl.h | 654 ++- deps/v8/src/objects-printer.cc | 121 +- deps/v8/src/objects-visiting-inl.h | 143 + deps/v8/src/objects-visiting.cc | 3 + deps/v8/src/objects-visiting.h | 138 +- deps/v8/src/objects.cc | 1456 +++++-- deps/v8/src/objects.h | 614 +-- deps/v8/src/parser.cc | 148 +- deps/v8/src/parser.h | 5 - deps/v8/src/platform-linux.cc | 113 +- deps/v8/src/platform-macos.cc | 108 +- deps/v8/src/platform-openbsd.cc | 5 +- deps/v8/src/platform-win32.cc | 91 +- deps/v8/src/platform.h | 59 +- deps/v8/src/preparser.cc | 72 +- deps/v8/src/prettyprinter.cc | 26 - deps/v8/src/profile-generator.cc | 97 +- deps/v8/src/profile-generator.h | 14 +- deps/v8/src/property.h | 21 +- deps/v8/src/proxy.js | 23 +- deps/v8/src/regexp-macro-assembler-tracer.cc | 4 +- deps/v8/src/regexp.js | 11 +- deps/v8/src/runtime-profiler.cc | 4 +- deps/v8/src/runtime.cc | 914 ++-- deps/v8/src/runtime.h | 3 + deps/v8/src/runtime.js | 18 +- deps/v8/src/serialize.cc | 133 +- deps/v8/src/serialize.h | 3 +- deps/v8/src/spaces-inl.h | 528 +-- deps/v8/src/spaces.cc | 2969 ++++++------- deps/v8/src/spaces.h | 2577 +++++++----- deps/v8/src/splay-tree-inl.h | 6 +- deps/v8/src/store-buffer-inl.h | 79 + deps/v8/src/store-buffer.cc | 694 +++ deps/v8/src/store-buffer.h | 248 ++ deps/v8/src/string.js | 6 +- deps/v8/src/strtod.cc | 1 - deps/v8/src/stub-cache.cc | 17 +- deps/v8/src/stub-cache.h | 3 +- deps/v8/src/token.h | 4 + deps/v8/src/type-info.cc | 58 +- deps/v8/src/type-info.h | 9 +- deps/v8/src/uri.js | 37 +- deps/v8/src/utils.h | 18 +- deps/v8/src/v8-counters.h | 9 +- deps/v8/src/v8.cc | 1 + deps/v8/src/v8.h | 12 +- deps/v8/src/v8globals.h | 36 
+- deps/v8/src/v8natives.js | 18 +- deps/v8/src/v8utils.h | 10 +- deps/v8/src/version.cc | 2 +- deps/v8/src/win32-headers.h | 1 + deps/v8/src/x64/assembler-x64-inl.h | 26 +- deps/v8/src/x64/assembler-x64.cc | 4 +- deps/v8/src/x64/assembler-x64.h | 15 +- deps/v8/src/x64/builtins-x64.cc | 1077 ++--- deps/v8/src/x64/code-stubs-x64.cc | 486 ++- deps/v8/src/x64/code-stubs-x64.h | 277 +- deps/v8/src/x64/codegen-x64.cc | 8 +- deps/v8/src/x64/debug-x64.cc | 105 +- deps/v8/src/x64/deoptimizer-x64.cc | 32 +- deps/v8/src/x64/full-codegen-x64.cc | 183 +- deps/v8/src/x64/ic-x64.cc | 130 +- deps/v8/src/x64/lithium-codegen-x64.cc | 196 +- deps/v8/src/x64/lithium-codegen-x64.h | 10 +- deps/v8/src/x64/lithium-x64.cc | 34 +- deps/v8/src/x64/lithium-x64.h | 28 +- deps/v8/src/x64/macro-assembler-x64.cc | 620 ++- deps/v8/src/x64/macro-assembler-x64.h | 252 +- deps/v8/src/x64/regexp-macro-assembler-x64.cc | 22 +- deps/v8/src/x64/stub-cache-x64.cc | 254 +- deps/v8/test/cctest/cctest.gyp | 9 +- deps/v8/test/cctest/cctest.status | 14 + deps/v8/test/cctest/test-accessors.cc | 2 +- deps/v8/test/cctest/test-alloc.cc | 40 +- deps/v8/test/cctest/test-api.cc | 472 ++- deps/v8/test/cctest/test-assembler-ia32.cc | 26 +- deps/v8/test/cctest/test-compiler.cc | 11 +- deps/v8/test/cctest/test-debug.cc | 41 +- deps/v8/test/cctest/test-decls.cc | 40 +- deps/v8/test/cctest/test-disasm-ia32.cc | 48 +- .../test/cctest/test-func-name-inference.cc | 39 + deps/v8/test/cctest/test-heap-profiler.cc | 124 +- deps/v8/test/cctest/test-heap.cc | 202 +- deps/v8/test/cctest/test-log.cc | 2 +- deps/v8/test/cctest/test-mark-compact.cc | 29 +- deps/v8/test/cctest/test-profile-generator.cc | 2 +- deps/v8/test/cctest/test-regexp.cc | 3 +- deps/v8/test/cctest/test-reloc-info.cc | 2 +- deps/v8/test/cctest/test-serialize.cc | 37 +- deps/v8/test/cctest/test-spaces.cc | 90 +- deps/v8/test/cctest/test-strings.cc | 31 +- deps/v8/test/cctest/test-threads.cc | 22 +- deps/v8/test/cctest/test-weakmaps.cc | 35 +- deps/v8/test/es5conform/es5conform.status | 34 - deps/v8/test/mjsunit/array-tostring.js | 159 + deps/v8/test/mjsunit/assert-opt-and-deopt.js | 6 +- .../mjsunit/compiler/inline-context-slots.js | 49 + .../v8/test/mjsunit/compiler/regress-96989.js | 43 + deps/v8/test/mjsunit/const-redecl.js | 62 +- deps/v8/test/mjsunit/element-kind.js | 124 +- .../mjsunit/global-const-var-conflicts.js | 10 +- .../mjsunit/harmony/block-let-declaration.js | 55 +- .../mjsunit/harmony/block-let-semantics.js | 25 +- .../test/mjsunit/harmony/proxies-function.js | 382 ++ deps/v8/test/mjsunit/harmony/proxies-hash.js | 66 + deps/v8/test/mjsunit/harmony/proxies.js | 844 +++- deps/v8/test/mjsunit/mjsunit.status | 6 +- deps/v8/test/mjsunit/regress/regress-1170.js | 2 +- .../test/mjsunit/regress/regress-1213575.js | 11 +- deps/v8/test/mjsunit/regress/regress-1217.js | 50 + deps/v8/test/mjsunit/regress/regress-1415.js | 42 + .../v8/test/mjsunit/regress/regress-1639-2.js | 93 + deps/v8/test/mjsunit/regress/regress-1692.js | 89 + deps/v8/test/mjsunit/regress/regress-1708.js | 63 + deps/v8/test/mjsunit/regress/regress-1711.js | 38 + deps/v8/test/mjsunit/regress/regress-1713.js | 127 + deps/v8/test/mjsunit/regress/regress-1748.js | 35 + .../v8/test/mjsunit/regress/regress-877615.js | 12 +- deps/v8/test/mjsunit/regress/regress-94873.js | 78 + deps/v8/test/mjsunit/regress/regress-98773.js | 39 + deps/v8/test/mjsunit/regress/regress-99167.js | 33 + .../test/mjsunit/regress/regress-deopt-gc.js | 2 +- deps/v8/test/mjsunit/regress/short-circuit.js | 32 + 
deps/v8/test/mjsunit/string-slices-regexp.js | 2 +- deps/v8/test/mjsunit/string-slices.js | 17 +- deps/v8/test/mjsunit/undeletable-functions.js | 4 +- deps/v8/test/mozilla/mozilla.status | 9 +- deps/v8/test/preparser/strict-identifiers.pyt | 55 + deps/v8/test/sputnik/sputnik.status | 16 + deps/v8/test/test262/README | 4 +- deps/v8/test/test262/test262.status | 524 ++- deps/v8/test/test262/testcfg.py | 34 +- deps/v8/tools/gc-nvp-trace-processor.py | 59 +- deps/v8/tools/gcmole/gccause.lua | 2 + deps/v8/tools/gyp/v8.gyp | 7 +- deps/v8/tools/linux-tick-processor | 31 +- deps/v8/tools/ll_prof.py | 16 +- deps/v8/tools/logreader.js | 7 +- deps/v8/tools/presubmit.py | 7 +- deps/v8/tools/push-to-trunk.sh | 12 +- deps/v8/tools/test-wrapper-gypbuild.py | 30 +- 282 files changed, 29521 insertions(+), 15453 deletions(-) create mode 100644 deps/v8/benchmarks/spinning-balls/index.html create mode 100644 deps/v8/benchmarks/spinning-balls/splay-tree.js create mode 100644 deps/v8/benchmarks/spinning-balls/v.js mode change 100644 => 100755 deps/v8/include/v8-debug.h mode change 100644 => 100755 deps/v8/src/SConscript create mode 100644 deps/v8/src/compiler-intrinsics.h create mode 100644 deps/v8/src/incremental-marking-inl.h create mode 100644 deps/v8/src/incremental-marking.cc create mode 100644 deps/v8/src/incremental-marking.h create mode 100644 deps/v8/src/mark-compact-inl.h create mode 100644 deps/v8/src/objects-visiting-inl.h create mode 100644 deps/v8/src/store-buffer-inl.h create mode 100644 deps/v8/src/store-buffer.cc create mode 100644 deps/v8/src/store-buffer.h create mode 100644 deps/v8/test/mjsunit/array-tostring.js create mode 100644 deps/v8/test/mjsunit/compiler/inline-context-slots.js create mode 100644 deps/v8/test/mjsunit/compiler/regress-96989.js create mode 100644 deps/v8/test/mjsunit/harmony/proxies-function.js create mode 100644 deps/v8/test/mjsunit/harmony/proxies-hash.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1217.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1415.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1639-2.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1692.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1708.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1711.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1713.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1748.js create mode 100644 deps/v8/test/mjsunit/regress/regress-94873.js create mode 100644 deps/v8/test/mjsunit/regress/regress-98773.js create mode 100644 deps/v8/test/mjsunit/regress/regress-99167.js create mode 100644 deps/v8/test/mjsunit/regress/short-circuit.js diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index 99495dd46b..8bcd874afb 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,58 @@ +2011-10-10: Version 3.6.6 + + Added a GC pause visualization tool. + + Added presubmit=no and werror=no flags to Makefile. + + ES5/Test262 conformance improvements. + + Fixed compilation issues with GCC 4.5.x (issue 1743). + + Bug fixes and performance improvements on all platforms. + + +2011-10-05: Version 3.6.5 + + New incremental garbage collector. + + Removed the hard heap size limit (soft heap size limit is still + 700/1400Mbytes by default). + + Implemented ES5 generic Array.prototype.toString (Issue 1361). + + V8 now allows surrogate pair codes in decodeURIComponent (Issue 1415). + + Fixed x64 RegExp start-of-string bug (Issues 1746, 1748). 
+ + Fixed propertyIsEnumerable for numeric properties (Issue 1692). + + Fixed the MinGW and Windows 2000 builds. + + Fixed "Prototype chain is not searched if named property handler does + not set a property" (Issue 1636). + + Made the RegExp.prototype object be a RegExp object (Issue 1217). + + Disallowed future reserved words as labels in strict mode. + + Fixed string split to correctly coerce the separator to a string + (Issue 1711). + + API: Added an optional source length field to the Extension + constructor. + + API: Added Debug::DisableAgent to match existing Debug::EnableAgent + (Issue 1573). + + Added "native" target to Makefile for the benefit of Linux distros. + + Fixed: debugger stops stepping outside evaluate (Issue 1639). + + More work on ES-Harmony proxies. Still hidden behind a flag. + + Bug fixes and performance improvements on all platforms. + + 2011-09-15: Version 3.6.4 Fixed d8's broken readline history. diff --git a/deps/v8/Makefile b/deps/v8/Makefile index a7b27317a3..76f45d7f2e 100644 --- a/deps/v8/Makefile +++ b/deps/v8/Makefile @@ -32,6 +32,7 @@ LINK ?= "g++" OUTDIR ?= out TESTJOBS ?= -j16 GYPFLAGS ?= +TESTFLAGS ?= # Special build flags. Use them like this: "make library=shared" @@ -50,6 +51,10 @@ endif ifeq ($(disassembler), on) GYPFLAGS += -Dv8_enable_disassembler=1 endif +# objectprint=on +ifeq ($(objectprint), on) + GYPFLAGS += -Dv8_object_print=1 +endif # snapshot=off ifeq ($(snapshot), off) GYPFLAGS += -Dv8_use_snapshot='false' @@ -72,12 +77,21 @@ endif ifdef soname_version GYPFLAGS += -Dsoname_version=$(soname_version) endif +# werror=no +ifeq ($(werror), no) + GYPFLAGS += -Dwerror='' +endif +# presubmit=no +ifeq ($(presubmit), no) + TESTFLAGS += --no-presubmit +endif # ----------------- available targets: -------------------- # - "dependencies": pulls in external dependencies (currently: GYP) # - any arch listed in ARCHES (see below) # - any mode listed in MODES # - every combination ., e.g. "ia32.release" +# - "native": current host's architecture, release mode # - any of the above with .check appended, e.g. "ia32.release.check" # - default (no target specified): build all ARCHES and MODES # - "check": build all targets and run all tests @@ -103,7 +117,7 @@ CHECKS = $(addsuffix .check,$(BUILDS)) # File where previously used GYPFLAGS are stored. ENVFILE = $(OUTDIR)/environment -.PHONY: all check clean dependencies $(ENVFILE).new \ +.PHONY: all check clean dependencies $(ENVFILE).new native \ $(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \ $(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) @@ -124,21 +138,31 @@ $(BUILDS): $(OUTDIR)/Makefile-$$(basename $$@) python -c "print raw_input().capitalize()") \ builddir="$(shell pwd)/$(OUTDIR)/$@" +native: $(OUTDIR)/Makefile-native + @$(MAKE) -C "$(OUTDIR)" -f Makefile-native \ + CXX="$(CXX)" LINK="$(LINK)" BUILDTYPE=Release \ + builddir="$(shell pwd)/$(OUTDIR)/$@" + # Test targets. 
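For illustration only (not part of the patch), a sketch of two of the JavaScript-visible changes listed in the ChangeLog above, assuming a d8-style shell where print() is available:

// Illustrative sketch, not part of the patch.
// ES5 generic Array.prototype.toString (Issue 1361): works on array-likes.
var arrayLike = { 0: "a", 1: "b", length: 2, join: Array.prototype.join };
print(Array.prototype.toString.call(arrayLike));                 // "a,b"
// String split coerces a non-RegExp separator to a string (Issue 1711),
// so an object with a toString() can serve as the separator.
print("a-b-c".split({ toString: function() { return "-"; } }));  // a,b,c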
check: all - @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) + @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \ + $(TESTFLAGS) $(addsuffix .check,$(MODES)): $$(basename $$@) @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \ - --mode=$(basename $@) + --mode=$(basename $@) $(TESTFLAGS) $(addsuffix .check,$(ARCHES)): $$(basename $$@) @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \ - --arch=$(basename $@) + --arch=$(basename $@) $(TESTFLAGS) $(CHECKS): $$(basename $$@) @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \ - --arch-and-mode=$(basename $@) + --arch-and-mode=$(basename $@) $(TESTFLAGS) + +native.check: native + @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)/native \ + --arch-and-mode=. $(TESTFLAGS) # Clean targets. You can clean each architecture individually, or everything. $(addsuffix .clean,$(ARCHES)): @@ -147,7 +171,12 @@ $(addsuffix .clean,$(ARCHES)): rm -rf $(OUTDIR)/$(basename $@).debug find $(OUTDIR) -regex '.*\(host\|target\)-$(basename $@)\.mk' -delete -clean: $(addsuffix .clean,$(ARCHES)) +native.clean: + rm -f $(OUTDIR)/Makefile-native + rm -rf $(OUTDIR)/native + find $(OUTDIR) -regex '.*\(host\|target\)-native\.mk' -delete + +clean: $(addsuffix .clean,$(ARCHES)) native.clean # GYP file generation targets. $(OUTDIR)/Makefile-ia32: $(GYPFILES) $(ENVFILE) @@ -165,6 +194,10 @@ $(OUTDIR)/Makefile-arm: $(GYPFILES) $(ENVFILE) -Ibuild/standalone.gypi --depth=. -Ibuild/armu.gypi \ -S-arm $(GYPFLAGS) +$(OUTDIR)/Makefile-native: $(GYPFILES) $(ENVFILE) + build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \ + -Ibuild/standalone.gypi --depth=. -S-native $(GYPFLAGS) + # Replaces the old with the new environment file if they're different, which # will trigger GYP to regenerate Makefiles. $(ENVFILE): $(ENVFILE).new diff --git a/deps/v8/benchmarks/spinning-balls/index.html b/deps/v8/benchmarks/spinning-balls/index.html new file mode 100644 index 0000000000..d01f31f373 --- /dev/null +++ b/deps/v8/benchmarks/spinning-balls/index.html @@ -0,0 +1,11 @@ + + + + + + + + + diff --git a/deps/v8/benchmarks/spinning-balls/splay-tree.js b/deps/v8/benchmarks/spinning-balls/splay-tree.js new file mode 100644 index 0000000000..a88e4cbce1 --- /dev/null +++ b/deps/v8/benchmarks/spinning-balls/splay-tree.js @@ -0,0 +1,326 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/** + * Constructs a Splay tree. A splay tree is a self-balancing binary + * search tree with the additional property that recently accessed + * elements are quick to access again. It performs basic operations + * such as insertion, look-up and removal in O(log(n)) amortized time. + * + * @constructor + */ +function SplayTree() { +}; + + +/** + * Pointer to the root node of the tree. + * + * @type {SplayTree.Node} + * @private + */ +SplayTree.prototype.root_ = null; + + +/** + * @return {boolean} Whether the tree is empty. + */ +SplayTree.prototype.isEmpty = function() { + return !this.root_; +}; + + +/** + * Inserts a node into the tree with the specified key and value if + * the tree does not already contain a node with the specified key. If + * the value is inserted, it becomes the root of the tree. + * + * @param {number} key Key to insert into the tree. + * @param {*} value Value to insert into the tree. + */ +SplayTree.prototype.insert = function(key, value) { + if (this.isEmpty()) { + this.root_ = new SplayTree.Node(key, value); + return; + } + // Splay on the key to move the last node on the search path for + // the key to the root of the tree. + this.splay_(key); + if (this.root_.key == key) { + return; + } + var node = new SplayTree.Node(key, value); + if (key > this.root_.key) { + node.left = this.root_; + node.right = this.root_.right; + this.root_.right = null; + } else { + node.right = this.root_; + node.left = this.root_.left; + this.root_.left = null; + } + this.root_ = node; +}; + + +/** + * Removes a node with the specified key from the tree if the tree + * contains a node with this key. The removed node is returned. If the + * key is not found, an exception is thrown. + * + * @param {number} key Key to find and remove from the tree. + * @return {SplayTree.Node} The removed node. + */ +SplayTree.prototype.remove = function(key) { + if (this.isEmpty()) { + throw Error('Key not found: ' + key); + } + this.splay_(key); + if (this.root_.key != key) { + throw Error('Key not found: ' + key); + } + var removed = this.root_; + if (!this.root_.left) { + this.root_ = this.root_.right; + } else { + var right = this.root_.right; + this.root_ = this.root_.left; + // Splay to make sure that the new root has an empty right child. + this.splay_(key); + // Insert the original right child as the right child of the new + // root. + this.root_.right = right; + } + return removed; +}; + + +/** + * Returns the node having the specified key or null if the tree doesn't contain + * a node with the specified key. + * + * @param {number} key Key to find in the tree. + * @return {SplayTree.Node} Node having the specified key. + */ +SplayTree.prototype.find = function(key) { + if (this.isEmpty()) { + return null; + } + this.splay_(key); + return this.root_.key == key ? this.root_ : null; +}; + + +/** + * @return {SplayTree.Node} Node having the maximum key value. 
+ */ +SplayTree.prototype.findMax = function(opt_startNode) { + if (this.isEmpty()) { + return null; + } + var current = opt_startNode || this.root_; + while (current.right) { + current = current.right; + } + return current; +}; + + +/** + * @return {SplayTree.Node} Node having the maximum key value that + * is less than the specified key value. + */ +SplayTree.prototype.findGreatestLessThan = function(key) { + if (this.isEmpty()) { + return null; + } + // Splay on the key to move the node with the given key or the last + // node on the search path to the top of the tree. + this.splay_(key); + // Now the result is either the root node or the greatest node in + // the left subtree. + if (this.root_.key < key) { + return this.root_; + } else if (this.root_.left) { + return this.findMax(this.root_.left); + } else { + return null; + } +}; + + +/** + * @return {Array<*>} An array containing all the keys of tree's nodes. + */ +SplayTree.prototype.exportKeys = function() { + var result = []; + if (!this.isEmpty()) { + this.root_.traverse_(function(node) { result.push(node.key); }); + } + return result; +}; + + +/** + * Perform the splay operation for the given key. Moves the node with + * the given key to the top of the tree. If no node has the given + * key, the last node on the search path is moved to the top of the + * tree. This is the simplified top-down splaying algorithm from: + * "Self-adjusting Binary Search Trees" by Sleator and Tarjan + * + * @param {number} key Key to splay the tree on. + * @private + */ +SplayTree.prototype.splay_ = function(key) { + if (this.isEmpty()) { + return; + } + // Create a dummy node. The use of the dummy node is a bit + // counter-intuitive: The right child of the dummy node will hold + // the L tree of the algorithm. The left child of the dummy node + // will hold the R tree of the algorithm. Using a dummy node, left + // and right will always be nodes and we avoid special cases. + var dummy, left, right; + dummy = left = right = new SplayTree.Node(null, null); + var current = this.root_; + while (true) { + if (key < current.key) { + if (!current.left) { + break; + } + if (key < current.left.key) { + // Rotate right. + var tmp = current.left; + current.left = tmp.right; + tmp.right = current; + current = tmp; + if (!current.left) { + break; + } + } + // Link right. + right.left = current; + right = current; + current = current.left; + } else if (key > current.key) { + if (!current.right) { + break; + } + if (key > current.right.key) { + // Rotate left. + var tmp = current.right; + current.right = tmp.left; + tmp.left = current; + current = tmp; + if (!current.right) { + break; + } + } + // Link left. + left.right = current; + left = current; + current = current.right; + } else { + break; + } + } + // Assemble. + left.right = current.left; + right.left = current.right; + current.left = dummy.right; + current.right = dummy.left; + this.root_ = current; +}; + + +/** + * Constructs a Splay tree node. + * + * @param {number} key Key. + * @param {*} value Value. + */ +SplayTree.Node = function(key, value) { + this.key = key; + this.value = value; +}; + + +/** + * @type {SplayTree.Node} + */ +SplayTree.Node.prototype.left = null; + + +/** + * @type {SplayTree.Node} + */ +SplayTree.Node.prototype.right = null; + + +/** + * Performs an ordered traversal of the subtree starting at + * this SplayTree.Node. + * + * @param {function(SplayTree.Node)} f Visitor function. 
+ * @private + */ +SplayTree.Node.prototype.traverse_ = function(f) { + var current = this; + while (current) { + var left = current.left; + if (left) left.traverse_(f); + f(current); + current = current.right; + } +}; + +SplayTree.prototype.traverseBreadthFirst = function (f) { + if (f(this.root_.value)) return; + + var stack = [this.root_]; + var length = 1; + + while (length > 0) { + var new_stack = new Array(stack.length * 2); + var new_length = 0; + for (var i = 0; i < length; i++) { + var n = stack[i]; + var l = n.left; + var r = n.right; + if (l) { + if (f(l.value)) return; + new_stack[new_length++] = l; + } + if (r) { + if (f(r.value)) return; + new_stack[new_length++] = r; + } + } + stack = new_stack; + length = new_length; + } +}; diff --git a/deps/v8/benchmarks/spinning-balls/v.js b/deps/v8/benchmarks/spinning-balls/v.js new file mode 100644 index 0000000000..87366d9393 --- /dev/null +++ b/deps/v8/benchmarks/spinning-balls/v.js @@ -0,0 +1,387 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +/** + * This function provides requestAnimationFrame in a cross browser way. 
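For illustration only (not part of the patch), a minimal usage sketch of the SplayTree API defined in splay-tree.js above:

// Illustrative sketch, exercising the API added above.
var tree = new SplayTree();
tree.insert(3, "three");
tree.insert(5, "five");
tree.insert(8, "eight");
tree.find(5);                       // node with key 5, now splayed to the root
tree.findGreatestLessThan(7).key;   // 5
tree.exportKeys();                  // [3, 5, 8] (ordered traversal)
tree.remove(8);                     // returns the removed node
tree.exportKeys();                  // [3, 5]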
+ * http://paulirish.com/2011/requestanimationframe-for-smart-animating/ + */ +if ( !window.requestAnimationFrame ) { + window.requestAnimationFrame = ( function() { + return window.webkitRequestAnimationFrame || + window.mozRequestAnimationFrame || + window.oRequestAnimationFrame || + window.msRequestAnimationFrame || + function(callback, element) { + window.setTimeout( callback, 1000 / 60 ); + }; + } )(); +} + +var kNPoints = 8000; +var kNModifications = 20; +var kNVisiblePoints = 200; +var kDecaySpeed = 20; + +var kPointRadius = 4; +var kInitialLifeForce = 100; + +var livePoints = void 0; +var dyingPoints = void 0; +var scene = void 0; +var renderingStartTime = void 0; +var scene = void 0; +var pausePlot = void 0; +var splayTree = void 0; + + +function Point(x, y, z, payload) { + this.x = x; + this.y = y; + this.z = z; + + this.next = null; + this.prev = null; + this.payload = payload; + this.lifeForce = kInitialLifeForce; +} + + +Point.prototype.color = function () { + return "rgba(0, 0, 0, " + (this.lifeForce / kInitialLifeForce) + ")"; +}; + + +Point.prototype.decay = function () { + this.lifeForce -= kDecaySpeed; + return this.lifeForce <= 0; +}; + + +function PointsList() { + this.head = null; + this.count = 0; +} + + +PointsList.prototype.add = function (point) { + if (this.head !== null) this.head.prev = point; + point.next = this.head; + this.head = point; + this.count++; +} + + +PointsList.prototype.remove = function (point) { + if (point.next !== null) { + point.next.prev = point.prev; + } + if (point.prev !== null) { + point.prev.next = point.next; + } else { + this.head = point.next; + } + this.count--; +} + + +function GeneratePayloadTree(depth, tag) { + if (depth == 0) { + return { + array : [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ], + string : 'String for key ' + tag + ' in leaf node' + }; + } else { + return { + left: GeneratePayloadTree(depth - 1, tag), + right: GeneratePayloadTree(depth - 1, tag) + }; + } +} + + +// To make the benchmark results predictable, we replace Math.random +// with a 100% deterministic alternative. +Math.random = (function() { + var seed = 49734321; + return function() { + // Robert Jenkins' 32 bit integer hash function. + seed = ((seed + 0x7ed55d16) + (seed << 12)) & 0xffffffff; + seed = ((seed ^ 0xc761c23c) ^ (seed >>> 19)) & 0xffffffff; + seed = ((seed + 0x165667b1) + (seed << 5)) & 0xffffffff; + seed = ((seed + 0xd3a2646c) ^ (seed << 9)) & 0xffffffff; + seed = ((seed + 0xfd7046c5) + (seed << 3)) & 0xffffffff; + seed = ((seed ^ 0xb55a4f09) ^ (seed >>> 16)) & 0xffffffff; + return (seed & 0xfffffff) / 0x10000000; + }; +})(); + + +function GenerateKey() { + // The benchmark framework guarantees that Math.random is + // deterministic; see base.js. + return Math.random(); +} + +function CreateNewPoint() { + // Insert new node with a unique key. 
+ var key; + do { key = GenerateKey(); } while (splayTree.find(key) != null); + + var point = new Point(Math.random() * 40 - 20, + Math.random() * 40 - 20, + Math.random() * 40 - 20, + GeneratePayloadTree(5, "" + key)); + + livePoints.add(point); + + splayTree.insert(key, point); + return key; +} + +function ModifyPointsSet() { + if (livePoints.count < kNPoints) { + for (var i = 0; i < kNModifications; i++) { + CreateNewPoint(); + } + } else if (kNModifications === 20) { + kNModifications = 80; + kDecay = 30; + } + + for (var i = 0; i < kNModifications; i++) { + var key = CreateNewPoint(); + var greatest = splayTree.findGreatestLessThan(key); + if (greatest == null) { + var point = splayTree.remove(key).value; + } else { + var point = splayTree.remove(greatest.key).value; + } + livePoints.remove(point); + point.payload = null; + dyingPoints.add(point); + } +} + + +function PausePlot(width, height, size) { + var canvas = document.createElement("canvas"); + canvas.width = this.width = width; + canvas.height = this.height = height; + document.body.appendChild(canvas); + + this.ctx = canvas.getContext('2d'); + + this.maxPause = 0; + this.size = size; + + // Initialize cyclic buffer for pauses. + this.pauses = new Array(this.size); + this.start = this.size; + this.idx = 0; +} + + +PausePlot.prototype.addPause = function (p) { + if (this.idx === this.size) { + this.idx = 0; + } + + if (this.idx === this.start) { + this.start++; + } + + if (this.start === this.size) { + this.start = 0; + } + + this.pauses[this.idx++] = p; +}; + + +PausePlot.prototype.iteratePauses = function (f) { + if (this.start < this.idx) { + for (var i = this.start; i < this.idx; i++) { + f.call(this, i - this.start, this.pauses[i]); + } + } else { + for (var i = this.start; i < this.size; i++) { + f.call(this, i - this.start, this.pauses[i]); + } + + var offs = this.size - this.start; + for (var i = 0; i < this.idx; i++) { + f.call(this, i + offs, this.pauses[i]); + } + } +}; + + +PausePlot.prototype.draw = function () { + var first = null; + this.iteratePauses(function (i, v) { + if (first === null) { + first = v; + } + this.maxPause = Math.max(v, this.maxPause); + }); + + var dx = this.width / this.size; + var dy = this.height / this.maxPause; + + this.ctx.save(); + this.ctx.clearRect(0, 0, 480, 240); + this.ctx.beginPath(); + this.ctx.moveTo(1, dy * this.pauses[this.start]); + var p = first; + this.iteratePauses(function (i, v) { + var delta = v - p; + var x = 1 + dx * i; + var y = dy * v; + this.ctx.lineTo(x, y); + if (delta > 2 * (p / 3)) { + this.ctx.font = "bold 12px sans-serif"; + this.ctx.textBaseline = "bottom"; + this.ctx.fillText(v + "ms", x + 2, y); + } + p = v; + }); + this.ctx.strokeStyle = "black"; + this.ctx.stroke(); + this.ctx.restore(); +} + + +function Scene(width, height) { + var canvas = document.createElement("canvas"); + canvas.width = width; + canvas.height = height; + document.body.appendChild(canvas); + + this.ctx = canvas.getContext('2d'); + this.width = canvas.width; + this.height = canvas.height; + + // Projection configuration. + this.x0 = canvas.width / 2; + this.y0 = canvas.height / 2; + this.z0 = 100; + this.f = 1000; // Focal length. + + // Camera is rotating around y-axis. + this.angle = 0; +} + + +Scene.prototype.drawPoint = function (x, y, z, color) { + // Rotate the camera around y-axis. + var rx = x * Math.cos(this.angle) - z * Math.sin(this.angle); + var ry = y; + var rz = x * Math.sin(this.angle) + z * Math.cos(this.angle); + + // Perform perspective projection. 
+ var px = (this.f * rx) / (rz - this.z0) + this.x0; + var py = (this.f * ry) / (rz - this.z0) + this.y0; + + this.ctx.save(); + this.ctx.fillStyle = color + this.ctx.beginPath(); + this.ctx.arc(px, py, kPointRadius, 0, 2 * Math.PI, true); + this.ctx.fill(); + this.ctx.restore(); +}; + + +Scene.prototype.drawDyingPoints = function () { + var point_next = null; + for (var point = dyingPoints.head; point !== null; point = point_next) { + // Rotate the scene around y-axis. + scene.drawPoint(point.x, point.y, point.z, point.color()); + + point_next = point.next; + + // Decay the current point and remove it from the list + // if it's life-force ran out. + if (point.decay()) { + dyingPoints.remove(point); + } + } +}; + + +Scene.prototype.draw = function () { + this.ctx.save(); + this.ctx.clearRect(0, 0, this.width, this.height); + this.drawDyingPoints(); + this.ctx.restore(); + + this.angle += Math.PI / 90.0; +}; + + +function render() { + if (typeof renderingStartTime === 'undefined') { + renderingStartTime = Date.now(); + } + + ModifyPointsSet(); + + scene.draw(); + + var renderingEndTime = Date.now(); + var pause = renderingEndTime - renderingStartTime; + pausePlot.addPause(pause); + renderingStartTime = renderingEndTime; + + pausePlot.draw(); + + div.innerHTML = + livePoints.count + "/" + dyingPoints.count + " " + + pause + "(max = " + pausePlot.maxPause + ") ms" ; + + // Schedule next frame. + requestAnimationFrame(render); +} + + +function init() { + livePoints = new PointsList; + dyingPoints = new PointsList; + + splayTree = new SplayTree(); + + scene = new Scene(640, 480); + + div = document.createElement("div"); + document.body.appendChild(div); + + pausePlot = new PausePlot(480, 240, 160); +} + + +init(); +render(); diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi index 6b6e00272d..230b1fd7a8 100644 --- a/deps/v8/build/common.gypi +++ b/deps/v8/build/common.gypi @@ -60,6 +60,8 @@ 'v8_enable_disassembler%': 0, + 'v8_object_print%': 0, + 'v8_enable_gdbjit%': 0, # Enable profiling support. Only required on Windows. @@ -72,6 +74,7 @@ 'v8_use_snapshot%': 'true', 'host_os%': '<(OS)', 'v8_use_liveobjectlist%': 'false', + 'werror%': '-Werror', # For a shared library build, results in "libv8-<(soname_version).so". 'soname_version%': '', @@ -84,6 +87,9 @@ ['v8_enable_disassembler==1', { 'defines': ['ENABLE_DISASSEMBLER',], }], + ['v8_object_print==1', { + 'defines': ['OBJECT_PRINT',], + }], ['v8_enable_gdbjit==1', { 'defines': ['ENABLE_GDB_JIT_INTERFACE',], }], @@ -185,7 +191,7 @@ ], }], ['OS=="solaris"', { - 'defines': [ '__C99FEATURES__=1' ], # isinf() etc. + 'defines': [ '__C99FEATURES__=1' ], # isinf() etc. 
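For illustration only (not part of the patch), a worked check of the perspective projection used by Scene.prototype.drawPoint above, with the defaults set in the Scene constructor (f = 1000, z0 = 100, and x0 = 320, y0 = 240 for the 640x480 canvas):

// With angle = 0, a point at the world origin projects to the canvas centre:
//   px = (1000 * 0) / (0 - 100) + 320 = 320
//   py = (1000 * 0) / (0 - 100) + 240 = 240
// A point at (20, 0, 0) lands left of centre:
//   px = (1000 * 20) / (0 - 100) + 320 = 120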
}], ], 'configurations': { @@ -221,7 +227,7 @@ 'cflags': [ '-I/usr/local/include' ], }], ['OS=="linux" or OS=="freebsd" or OS=="openbsd"', { - 'cflags': [ '-Wall', '-Werror', '-W', '-Wno-unused-parameter', + 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter', '-Wnon-virtual-dtor' ], }], ], @@ -264,7 +270,7 @@ }], ['OS=="win"', { 'msvs_configuration_attributes': { - 'OutputDirectory': '$(SolutionDir)$(ConfigurationName)', + 'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)', 'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)', 'CharacterSet': '1', }, diff --git a/deps/v8/build/standalone.gypi b/deps/v8/build/standalone.gypi index cb5e133039..f24d9f8341 100644 --- a/deps/v8/build/standalone.gypi +++ b/deps/v8/build/standalone.gypi @@ -35,25 +35,30 @@ 'msvs_multi_core_compile%': '1', 'variables': { 'variables': { - 'conditions': [ - [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd"', { - # This handles the Linux platforms we generally deal with. Anything - # else gets passed through, which probably won't work very well; such - # hosts should pass an explicit target_arch to gyp. - 'host_arch%': - ' GetHeapValue() const; }; diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 4b7f6e735f..73b7fbe4c4 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -1171,7 +1171,8 @@ class String : public Primitive { * Get the ExternalAsciiStringResource for an external ASCII string. * Returns NULL if IsExternalAscii() doesn't return true. */ - V8EXPORT ExternalAsciiStringResource* GetExternalAsciiStringResource() const; + V8EXPORT const ExternalAsciiStringResource* GetExternalAsciiStringResource() + const; static inline String* Cast(v8::Value* obj); @@ -2451,24 +2452,42 @@ class V8EXPORT TypeSwitch : public Data { // --- Extensions --- +class V8EXPORT ExternalAsciiStringResourceImpl + : public String::ExternalAsciiStringResource { + public: + ExternalAsciiStringResourceImpl() : data_(0), length_(0) {} + ExternalAsciiStringResourceImpl(const char* data, size_t length) + : data_(data), length_(length) {} + const char* data() const { return data_; } + size_t length() const { return length_; } + + private: + const char* data_; + size_t length_; +}; /** * Ignore */ class V8EXPORT Extension { // NOLINT public: + // Note that the strings passed into this constructor must live as long + // as the Extension itself. Extension(const char* name, const char* source = 0, int dep_count = 0, - const char** deps = 0); + const char** deps = 0, + int source_length = -1); virtual ~Extension() { } virtual v8::Handle GetNativeFunction(v8::Handle name) { return v8::Handle(); } - const char* name() { return name_; } - const char* source() { return source_; } + const char* name() const { return name_; } + size_t source_length() const { return source_length_; } + const String::ExternalAsciiStringResource* source() const { + return &source_; } int dependency_count() { return dep_count_; } const char** dependencies() { return deps_; } void set_auto_enable(bool value) { auto_enable_ = value; } @@ -2476,7 +2495,8 @@ class V8EXPORT Extension { // NOLINT private: const char* name_; - const char* source_; + size_t source_length_; // expected to initialize before source_ + ExternalAsciiStringResourceImpl source_; int dep_count_; const char** deps_; bool auto_enable_; @@ -3498,9 +3518,9 @@ class V8EXPORT Context { * * v8::Locker is a scoped lock object. While it's * active (i.e. between its construction and destruction) the current thread is - * allowed to use the locked isolate. 
V8 guarantees that an isolate can be locked - * by at most one thread at any time. In other words, the scope of a v8::Locker is - * a critical section. + * allowed to use the locked isolate. V8 guarantees that an isolate can be + * locked by at most one thread at any time. In other words, the scope of a + * v8::Locker is a critical section. * * Sample usage: * \code @@ -3602,8 +3622,8 @@ class V8EXPORT Locker { static void StopPreemption(); /** - * Returns whether or not the locker for a given isolate, or default isolate if NULL is given, - * is locked by the current thread. + * Returns whether or not the locker for a given isolate, or default isolate + * if NULL is given, is locked by the current thread. */ static bool IsLocked(Isolate* isolate = NULL); @@ -3769,7 +3789,7 @@ class Internals { static const int kFullStringRepresentationMask = 0x07; static const int kExternalTwoByteRepresentationTag = 0x02; - static const int kJSObjectType = 0xa3; + static const int kJSObjectType = 0xa6; static const int kFirstNonstringType = 0x80; static const int kForeignType = 0x85; diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript old mode 100644 new mode 100755 index 52607f15c5..f3ae8078ba --- a/deps/v8/src/SConscript +++ b/deps/v8/src/SConscript @@ -84,6 +84,7 @@ SOURCES = { hydrogen.cc hydrogen-instructions.cc ic.cc + incremental-marking.cc inspector.cc interpreter-irregexp.cc isolate.cc @@ -133,6 +134,7 @@ SOURCES = { v8utils.cc variables.cc version.cc + store-buffer.cc zone.cc extensions/gc-extension.cc extensions/externalize-string-extension.cc diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 5c8a3142ae..c052267254 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -185,7 +185,10 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) { int end_marker; heap_stats.end_marker = &end_marker; i::Isolate* isolate = i::Isolate::Current(); - isolate->heap()->RecordStats(&heap_stats, take_snapshot); + // BUG(1718): + // Don't use the take_snapshot since we don't support HeapIterator here + // without doing a special GC. + isolate->heap()->RecordStats(&heap_stats, false); i::V8::SetFatalError(); FatalErrorCallback callback = GetFatalErrorHandler(); { @@ -501,9 +504,12 @@ void RegisterExtension(Extension* that) { Extension::Extension(const char* name, const char* source, int dep_count, - const char** deps) + const char** deps, + int source_length) : name_(name), - source_(source), + source_length_(source_length >= 0 ? + source_length : (source ? 
strlen(source) : 0)), + source_(source, source_length_), dep_count_(dep_count), deps_(deps), auto_enable_(false) { } @@ -1781,7 +1787,7 @@ v8::Handle Message::GetStackTrace() const { static i::Handle CallV8HeapFunction(const char* name, i::Handle recv, int argc, - i::Object** argv[], + i::Handle argv[], bool* has_pending_exception) { i::Isolate* isolate = i::Isolate::Current(); i::Handle fmt_str = isolate->factory()->LookupAsciiSymbol(name); @@ -1798,10 +1804,10 @@ static i::Handle CallV8HeapFunction(const char* name, static i::Handle CallV8HeapFunction(const char* name, i::Handle data, bool* has_pending_exception) { - i::Object** argv[1] = { data.location() }; + i::Handle argv[] = { data }; return CallV8HeapFunction(name, i::Isolate::Current()->js_builtins_object(), - 1, + ARRAY_SIZE(argv), argv, has_pending_exception); } @@ -2621,10 +2627,11 @@ bool Value::Equals(Handle that) const { if (obj->IsJSObject() && other->IsJSObject()) { return *obj == *other; } - i::Object** args[1] = { other.location() }; + i::Handle args[] = { other }; EXCEPTION_PREAMBLE(isolate); i::Handle result = - CallV8HeapFunction("EQUALS", obj, 1, args, &has_pending_exception); + CallV8HeapFunction("EQUALS", obj, ARRAY_SIZE(args), args, + &has_pending_exception); EXCEPTION_BAILOUT_CHECK(isolate, false); return *result == i::Smi::FromInt(i::EQUAL); } @@ -3204,21 +3211,10 @@ bool v8::Object::SetHiddenValue(v8::Handle key, ENTER_V8(isolate); i::HandleScope scope(isolate); i::Handle self = Utils::OpenHandle(this); - i::Handle hidden_props(i::GetHiddenProperties( - self, - i::JSObject::ALLOW_CREATION)); - i::Handle key_obj = Utils::OpenHandle(*key); + i::Handle key_obj = Utils::OpenHandle(*key); i::Handle value_obj = Utils::OpenHandle(*value); - EXCEPTION_PREAMBLE(isolate); - i::Handle obj = i::SetProperty( - hidden_props, - key_obj, - value_obj, - static_cast(None), - i::kNonStrictMode); - has_pending_exception = obj.is_null(); - EXCEPTION_BAILOUT_CHECK(isolate, false); - return true; + i::Handle result = i::SetHiddenProperty(self, key_obj, value_obj); + return *result == *self; } @@ -3228,20 +3224,9 @@ v8::Local v8::Object::GetHiddenValue(v8::Handle key) { return Local()); ENTER_V8(isolate); i::Handle self = Utils::OpenHandle(this); - i::Handle hidden_props(i::GetHiddenProperties( - self, - i::JSObject::OMIT_CREATION)); - if (hidden_props->IsUndefined()) { - return v8::Local(); - } i::Handle key_obj = Utils::OpenHandle(*key); - EXCEPTION_PREAMBLE(isolate); - i::Handle result = i::GetProperty(hidden_props, key_obj); - has_pending_exception = result.is_null(); - EXCEPTION_BAILOUT_CHECK(isolate, v8::Local()); - if (result->IsUndefined()) { - return v8::Local(); - } + i::Handle result(self->GetHiddenProperty(*key_obj)); + if (result->IsUndefined()) return v8::Local(); return Utils::ToLocal(result); } @@ -3252,15 +3237,9 @@ bool v8::Object::DeleteHiddenValue(v8::Handle key) { ENTER_V8(isolate); i::HandleScope scope(isolate); i::Handle self = Utils::OpenHandle(this); - i::Handle hidden_props(i::GetHiddenProperties( - self, - i::JSObject::OMIT_CREATION)); - if (hidden_props->IsUndefined()) { - return true; - } - i::Handle js_obj(i::JSObject::cast(*hidden_props)); i::Handle key_obj = Utils::OpenHandle(*key); - return i::DeleteProperty(js_obj, key_obj)->IsTrue(); + self->DeleteHiddenProperty(*key_obj); + return true; } @@ -3310,22 +3289,12 @@ void PrepareExternalArrayElements(i::Handle object, i::Handle array = isolate->factory()->NewExternalArray(length, array_type, data); - // If the object already has external elements, create 
a new, unique - // map if the element type is now changing, because assumptions about - // generated code based on the receiver's map will be invalid. - i::Handle elements(object->elements()); - bool cant_reuse_map = - elements->map()->IsUndefined() || - !elements->map()->has_external_array_elements() || - elements->map() != isolate->heap()->MapForExternalArrayType(array_type); - if (cant_reuse_map) { - i::Handle external_array_map = - isolate->factory()->GetElementsTransitionMap( - i::Handle(object->map()), - GetElementsKindFromExternalArrayType(array_type), - object->HasFastProperties()); - object->set_map(*external_array_map); - } + i::Handle external_array_map = + isolate->factory()->GetElementsTransitionMap( + object, + GetElementsKindFromExternalArrayType(array_type)); + + object->set_map(*external_array_map); object->set_elements(*array); } @@ -3484,7 +3453,8 @@ bool v8::Object::IsCallable() { } -Local Object::CallAsFunction(v8::Handle recv, int argc, +Local Object::CallAsFunction(v8::Handle recv, + int argc, v8::Handle argv[]) { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); ON_BAILOUT(isolate, "v8::Object::CallAsFunction()", @@ -3495,7 +3465,7 @@ Local Object::CallAsFunction(v8::Handle recv, int argc, i::Handle obj = Utils::OpenHandle(this); i::Handle recv_obj = Utils::OpenHandle(*recv); STATIC_ASSERT(sizeof(v8::Handle) == sizeof(i::Object**)); - i::Object*** args = reinterpret_cast(argv); + i::Handle* args = reinterpret_cast*>(argv); i::Handle fun = i::Handle(); if (obj->IsJSFunction()) { fun = i::Handle::cast(obj); @@ -3525,7 +3495,7 @@ Local Object::CallAsConstructor(int argc, i::HandleScope scope(isolate); i::Handle obj = Utils::OpenHandle(this); STATIC_ASSERT(sizeof(v8::Handle) == sizeof(i::Object**)); - i::Object*** args = reinterpret_cast(argv); + i::Handle* args = reinterpret_cast*>(argv); if (obj->IsJSFunction()) { i::Handle fun = i::Handle::cast(obj); EXCEPTION_PREAMBLE(isolate); @@ -3567,7 +3537,7 @@ Local Function::NewInstance(int argc, HandleScope scope; i::Handle function = Utils::OpenHandle(this); STATIC_ASSERT(sizeof(v8::Handle) == sizeof(i::Object**)); - i::Object*** args = reinterpret_cast(argv); + i::Handle* args = reinterpret_cast*>(argv); EXCEPTION_PREAMBLE(isolate); i::Handle returned = i::Execution::New(function, argc, args, &has_pending_exception); @@ -3588,7 +3558,7 @@ Local Function::Call(v8::Handle recv, int argc, i::Handle fun = Utils::OpenHandle(this); i::Handle recv_obj = Utils::OpenHandle(*recv); STATIC_ASSERT(sizeof(v8::Handle) == sizeof(i::Object**)); - i::Object*** args = reinterpret_cast(argv); + i::Handle* args = reinterpret_cast*>(argv); EXCEPTION_PREAMBLE(isolate); i::Handle returned = i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception); @@ -3799,10 +3769,11 @@ bool v8::String::IsExternalAscii() const { void v8::String::VerifyExternalStringResource( v8::String::ExternalStringResource* value) const { i::Handle str = Utils::OpenHandle(this); - v8::String::ExternalStringResource* expected; + const v8::String::ExternalStringResource* expected; if (i::StringShape(*str).IsExternalTwoByte()) { - void* resource = i::Handle::cast(str)->resource(); - expected = reinterpret_cast(resource); + const void* resource = + i::Handle::cast(str)->resource(); + expected = reinterpret_cast(resource); } else { expected = NULL; } @@ -3810,7 +3781,7 @@ void v8::String::VerifyExternalStringResource( } -v8::String::ExternalAsciiStringResource* +const v8::String::ExternalAsciiStringResource* v8::String::GetExternalAsciiStringResource() 
const { i::Handle str = Utils::OpenHandle(this); if (IsDeadCheck(str->GetIsolate(), @@ -3818,8 +3789,9 @@ v8::String::ExternalAsciiStringResource* return NULL; } if (i::StringShape(*str).IsExternalAscii()) { - void* resource = i::Handle::cast(str)->resource(); - return reinterpret_cast(resource); + const void* resource = + i::Handle::cast(str)->resource(); + return reinterpret_cast(resource); } else { return NULL; } @@ -4009,7 +3981,7 @@ bool v8::V8::IdleNotification() { void v8::V8::LowMemoryNotification() { i::Isolate* isolate = i::Isolate::Current(); if (!isolate->IsInitialized()) return; - isolate->heap()->CollectAllGarbage(true); + isolate->heap()->CollectAllAvailableGarbage(); } @@ -5480,6 +5452,12 @@ bool Debug::EnableAgent(const char* name, int port, bool wait_for_connection) { wait_for_connection); } + +void Debug::DisableAgent() { + return i::Isolate::Current()->debugger()->StopAgent(); +} + + void Debug::ProcessDebugMessages() { i::Execution::ProcessDebugMesssages(true); } @@ -5804,6 +5782,16 @@ const HeapGraphNode* HeapGraphNode::GetDominatorNode() const { } +v8::Handle HeapGraphNode::GetHeapValue() const { + i::Isolate* isolate = i::Isolate::Current(); + IsDeadCheck(isolate, "v8::HeapGraphNode::GetHeapValue"); + i::Handle object = ToInternal(this)->GetHeapObject(); + return v8::Handle(!object.is_null() ? + ToApi(object) : ToApi( + isolate->factory()->undefined_value())); +} + + static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) { return const_cast( reinterpret_cast(snapshot)); diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h index 3e19a45385..54c291d412 100644 --- a/deps/v8/src/arm/assembler-arm-inl.h +++ b/deps/v8/src/arm/assembler-arm-inl.h @@ -77,6 +77,11 @@ int RelocInfo::target_address_size() { void RelocInfo::set_target_address(Address target) { ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); Assembler::set_target_address_at(pc_, target); + if (host() != NULL && IsCodeTarget(rmode_)) { + Object* target_code = Code::GetCodeFromTargetAddress(target); + host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( + host(), this, HeapObject::cast(target_code)); + } } @@ -101,6 +106,10 @@ Object** RelocInfo::target_object_address() { void RelocInfo::set_target_object(Object* target) { ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); Assembler::set_target_address_at(pc_, reinterpret_cast
(target)); + if (host() != NULL && target->IsHeapObject()) { + host()->GetHeap()->incremental_marking()->RecordWrite( + host(), &Memory::Object_at(pc_), HeapObject::cast(target)); + } } @@ -131,6 +140,12 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) { ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL); Address address = cell->address() + JSGlobalPropertyCell::kValueOffset; Memory::Address_at(pc_) = address; + if (host() != NULL) { + // TODO(1550) We are passing NULL as a slot because cell can never be on + // evacuation candidate. + host()->GetHeap()->incremental_marking()->RecordWrite( + host(), NULL, cell); + } } @@ -147,6 +162,11 @@ void RelocInfo::set_call_address(Address target) { ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target; + if (host() != NULL) { + Object* target_code = Code::GetCodeFromTargetAddress(target); + host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( + host(), this, HeapObject::cast(target_code)); + } } @@ -195,7 +215,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() { void RelocInfo::Visit(ObjectVisitor* visitor) { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { - visitor->VisitPointer(target_object_address()); + visitor->VisitEmbeddedPointer(host(), target_object_address()); } else if (RelocInfo::IsCodeTarget(mode)) { visitor->VisitCodeTarget(this); } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) { @@ -221,7 +241,7 @@ template void RelocInfo::Visit(Heap* heap) { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { - StaticVisitor::VisitPointer(heap, target_object_address()); + StaticVisitor::VisitEmbeddedPointer(heap, host(), target_object_address()); } else if (RelocInfo::IsCodeTarget(mode)) { StaticVisitor::VisitCodeTarget(heap, this); } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) { diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index 0ec36921ab..329493a340 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -78,7 +78,9 @@ static uint64_t CpuFeaturesImpliedByCompiler() { void CpuFeatures::Probe() { - ASSERT(!initialized_); + unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() | + CpuFeaturesImpliedByCompiler()); + ASSERT(supported_ == 0 || supported_ == standard_features); #ifdef DEBUG initialized_ = true; #endif @@ -86,8 +88,7 @@ void CpuFeatures::Probe() { // Get the features implied by the OS and the compiler settings. This is the // minimal set of features which is also alowed for generated code in the // snapshot. - supported_ |= OS::CpuFeaturesImpliedByPlatform(); - supported_ |= CpuFeaturesImpliedByCompiler(); + supported_ |= standard_features; if (Serializer::enabled()) { // No probing for features if we might serialize (generate snapshot). @@ -2505,7 +2506,8 @@ void Assembler::dd(uint32_t data) { void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { - RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants + // We do not try to reuse pool constants. + RelocInfo rinfo(pc_, rmode, data, NULL); if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) { // Adjust code for new modes. 
ASSERT(RelocInfo::IsDebugBreakSlot(rmode) @@ -2537,7 +2539,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { } ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { - RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId()); + RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL); ClearRecordedAstId(); reloc_info_writer.Write(&reloc_info_with_ast_id); } else { diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index 9a586936fe..d19b64da54 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -1209,6 +1209,10 @@ class Assembler : public AssemblerBase { PositionsRecorder* positions_recorder() { return &positions_recorder_; } // Read/patch instructions + Instr instr_at(int pos) { return *reinterpret_cast(buffer_ + pos); } + void instr_at_put(int pos, Instr instr) { + *reinterpret_cast(buffer_ + pos) = instr; + } static Instr instr_at(byte* pc) { return *reinterpret_cast(pc); } static void instr_at_put(byte* pc, Instr instr) { *reinterpret_cast(pc) = instr; @@ -1263,12 +1267,6 @@ class Assembler : public AssemblerBase { int buffer_space() const { return reloc_info_writer.pos() - pc_; } - // Read/patch instructions - Instr instr_at(int pos) { return *reinterpret_cast(buffer_ + pos); } - void instr_at_put(int pos, Instr instr) { - *reinterpret_cast(buffer_ + pos) = instr; - } - // Decode branch instruction at pos and return branch target pos int target_at(int pos); diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index 60d2081c29..32b7896a52 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -582,10 +582,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { __ bind(&convert_argument); __ push(function); // Preserve the function. __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4); - __ EnterInternalFrame(); - __ push(r0); - __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(r0); + __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); + } __ pop(function); __ mov(argument, r0); __ b(&argument_is_string); @@ -601,10 +602,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // create a string wrapper. __ bind(&gc_required); __ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4); - __ EnterInternalFrame(); - __ push(argument); - __ CallRuntime(Runtime::kNewStringWrapper, 1); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(argument); + __ CallRuntime(Runtime::kNewStringWrapper, 1); + } __ Ret(); } @@ -617,12 +619,12 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) { // -- sp[...]: constructor arguments // ----------------------------------- - Label non_function_call; + Label slow, non_function_call; // Check that the function is not a smi. __ JumpIfSmi(r1, &non_function_call); // Check that the function is a JSFunction. __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); - __ b(ne, &non_function_call); + __ b(ne, &slow); // Jump to the function-specific construct stub. 
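From here on, builtins-arm.cc systematically replaces paired EnterInternalFrame/LeaveInternalFrame calls with a block-scoped FrameScope. A minimal sketch of the RAII shape, assuming a toy assembler in place of MacroAssembler:

#include <cassert>
#include <cstdio>

// Sketch of the Enter/LeaveInternalFrame -> FrameScope change: an RAII object
// ties frame teardown to C++ block scope instead of a matching Leave call,
// and lets the assembler assert that a frame exists where one is required.
struct ToyAssembler {
  bool has_frame = false;
  void EnterFrame()  { std::puts("enter frame");  has_frame = true;  }
  void LeaveFrame()  { std::puts("leave frame");  has_frame = false; }
  void CallRuntime() { assert(has_frame); std::puts("call runtime"); }
};

class FrameScope {
 public:
  explicit FrameScope(ToyAssembler* masm) : masm_(masm) { masm_->EnterFrame(); }
  ~FrameScope() { masm_->LeaveFrame(); }   // teardown happens exactly once
 private:
  ToyAssembler* masm_;
};

void GenerateBuiltin(ToyAssembler* masm) {
  {
    FrameScope scope(masm);   // replaces __ EnterInternalFrame()
    masm->CallRuntime();      // safe: a frame is guaranteed to exist here
  }                           // replaces __ LeaveInternalFrame()
}

int main() {
  ToyAssembler masm;
  GenerateBuiltin(&masm);
}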
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); @@ -631,10 +633,19 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) { // r0: number of arguments // r1: called object + // r2: object type + Label do_call; + __ bind(&slow); + __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE)); + __ b(ne, &non_function_call); + __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); + __ jmp(&do_call); + __ bind(&non_function_call); + __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); + __ bind(&do_call); // Set expected number of arguments to zero (not changing r0). __ mov(r2, Operand(0, RelocInfo::NONE)); - __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); __ SetCallKind(r5, CALL_AS_METHOD); __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), RelocInfo::CODE_TARGET); @@ -650,321 +661,329 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, Isolate* isolate = masm->isolate(); // Enter a construct frame. - __ EnterConstructFrame(); - - // Preserve the two incoming parameters on the stack. - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); - __ push(r0); // Smi-tagged arguments count. - __ push(r1); // Constructor function. - - // Try to allocate the object without transitioning into C code. If any of the - // preconditions is not met, the code bails out to the runtime call. - Label rt_call, allocated; - if (FLAG_inline_new) { - Label undo_allocation; + { + FrameScope scope(masm, StackFrame::CONSTRUCT); + + // Preserve the two incoming parameters on the stack. + __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + __ push(r0); // Smi-tagged arguments count. + __ push(r1); // Constructor function. + + // Try to allocate the object without transitioning into C code. If any of + // the preconditions is not met, the code bails out to the runtime call. + Label rt_call, allocated; + if (FLAG_inline_new) { + Label undo_allocation; #ifdef ENABLE_DEBUGGER_SUPPORT - ExternalReference debug_step_in_fp = - ExternalReference::debug_step_in_fp_address(isolate); - __ mov(r2, Operand(debug_step_in_fp)); - __ ldr(r2, MemOperand(r2)); - __ tst(r2, r2); - __ b(ne, &rt_call); + ExternalReference debug_step_in_fp = + ExternalReference::debug_step_in_fp_address(isolate); + __ mov(r2, Operand(debug_step_in_fp)); + __ ldr(r2, MemOperand(r2)); + __ tst(r2, r2); + __ b(ne, &rt_call); #endif - // Load the initial map and verify that it is in fact a map. - // r1: constructor function - __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); - __ JumpIfSmi(r2, &rt_call); - __ CompareObjectType(r2, r3, r4, MAP_TYPE); - __ b(ne, &rt_call); + // Load the initial map and verify that it is in fact a map. + // r1: constructor function + __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); + __ JumpIfSmi(r2, &rt_call); + __ CompareObjectType(r2, r3, r4, MAP_TYPE); + __ b(ne, &rt_call); + + // Check that the constructor is not constructing a JSFunction (see + // comments in Runtime_NewObject in runtime.cc). In which case the + // initial map's instance type would be JS_FUNCTION_TYPE. + // r1: constructor function + // r2: initial map + __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE); + __ b(eq, &rt_call); - // Check that the constructor is not constructing a JSFunction (see comments - // in Runtime_NewObject in runtime.cc). In which case the initial map's - // instance type would be JS_FUNCTION_TYPE. 
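The Generate_JSConstructCall change above adds a slow path that distinguishes function proxies from plain non-functions before falling through to the arguments adaptor. The same dispatch written out as ordinary control flow (illustrative names only):

#include <cstdio>

// Control-flow sketch of the new `new F(...)` dispatch: ordinary functions
// use their construct stub, function proxies and other non-functions each
// get a dedicated builtin. Enum values here are illustrative, not V8's.
enum Kind { SMI, JS_FUNCTION, JS_FUNCTION_PROXY, OTHER };

const char* ChooseConstructPath(Kind callee) {
  if (callee == SMI) return "CALL_NON_FUNCTION_AS_CONSTRUCTOR";
  if (callee == JS_FUNCTION) return "function-specific construct stub";
  if (callee == JS_FUNCTION_PROXY)
    return "CALL_FUNCTION_PROXY_AS_CONSTRUCTOR";   // new in this patch
  return "CALL_NON_FUNCTION_AS_CONSTRUCTOR";
}

int main() {
  std::printf("%s\n", ChooseConstructPath(JS_FUNCTION_PROXY));
}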
- // r1: constructor function - // r2: initial map - __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE); - __ b(eq, &rt_call); - - if (count_constructions) { - Label allocate; - // Decrease generous allocation count. - __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); - MemOperand constructor_count = - FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset); - __ ldrb(r4, constructor_count); - __ sub(r4, r4, Operand(1), SetCC); - __ strb(r4, constructor_count); - __ b(ne, &allocate); - - __ Push(r1, r2); - - __ push(r1); // constructor - // The call will replace the stub, so the countdown is only done once. - __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); - - __ pop(r2); - __ pop(r1); - - __ bind(&allocate); - } - - // Now allocate the JSObject on the heap. - // r1: constructor function - // r2: initial map - __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset)); - __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS); + if (count_constructions) { + Label allocate; + // Decrease generous allocation count. + __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); + MemOperand constructor_count = + FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset); + __ ldrb(r4, constructor_count); + __ sub(r4, r4, Operand(1), SetCC); + __ strb(r4, constructor_count); + __ b(ne, &allocate); + + __ Push(r1, r2); + + __ push(r1); // constructor + // The call will replace the stub, so the countdown is only done once. + __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); + + __ pop(r2); + __ pop(r1); + + __ bind(&allocate); + } - // Allocated the JSObject, now initialize the fields. Map is set to initial - // map and properties and elements are set to empty fixed array. - // r1: constructor function - // r2: initial map - // r3: object size - // r4: JSObject (not tagged) - __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex); - __ mov(r5, r4); - ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); - __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); - ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); - __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); - ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); - __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); - - // Fill all the in-object properties with the appropriate filler. - // r1: constructor function - // r2: initial map - // r3: object size (in words) - // r4: JSObject (not tagged) - // r5: First in-object property of JSObject (not tagged) - __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object. - ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize); - { Label loop, entry; + // Now allocate the JSObject on the heap. + // r1: constructor function + // r2: initial map + __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset)); + __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS); + + // Allocated the JSObject, now initialize the fields. Map is set to + // initial map and properties and elements are set to empty fixed array. 
+ // r1: constructor function + // r2: initial map + // r3: object size + // r4: JSObject (not tagged) + __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex); + __ mov(r5, r4); + ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); + __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); + ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); + __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); + ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); + __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); + + // Fill all the in-object properties with the appropriate filler. + // r1: constructor function + // r2: initial map + // r3: object size (in words) + // r4: JSObject (not tagged) + // r5: First in-object property of JSObject (not tagged) + __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object. + ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize); + __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); if (count_constructions) { + __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset)); + __ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte, + kBitsPerByte); + __ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2)); + // r0: offset of first field after pre-allocated fields + if (FLAG_debug_code) { + __ cmp(r0, r6); + __ Assert(le, "Unexpected number of pre-allocated property fields."); + } + __ InitializeFieldsWithFiller(r5, r0, r7); // To allow for truncation. __ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex); - } else { - __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); } - __ b(&entry); - __ bind(&loop); - __ str(r7, MemOperand(r5, kPointerSize, PostIndex)); - __ bind(&entry); - __ cmp(r5, r6); - __ b(lt, &loop); - } + __ InitializeFieldsWithFiller(r5, r6, r7); + + // Add the object tag to make the JSObject real, so that we can continue + // and jump into the continuation code at any time from now on. Any + // failures need to undo the allocation, so that the heap is in a + // consistent state and verifiable. + __ add(r4, r4, Operand(kHeapObjectTag)); + + // Check if a non-empty properties array is needed. Continue with + // allocated object if not fall through to runtime call if it is. + // r1: constructor function + // r4: JSObject + // r5: start of next object (not tagged) + __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset)); + // The field instance sizes contains both pre-allocated property fields + // and in-object properties. + __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset)); + __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte, + kBitsPerByte); + __ add(r3, r3, Operand(r6)); + __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * kBitsPerByte, + kBitsPerByte); + __ sub(r3, r3, Operand(r6), SetCC); + + // Done if no extra properties are to be allocated. + __ b(eq, &allocated); + __ Assert(pl, "Property allocation count failed."); + + // Scale the number of elements by pointer size and add the header for + // FixedArrays to the start of the next object calculation from above. + // r1: constructor + // r3: number of elements in properties array + // r4: JSObject + // r5: start of next object + __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize)); + __ AllocateInNewSpace( + r0, + r5, + r6, + r2, + &undo_allocation, + static_cast(RESULT_CONTAINS_TOP | SIZE_IN_WORDS)); + + // Initialize the FixedArray. 
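The rewritten construct stub above replaces the open-coded fill loop with the new InitializeFieldsWithFiller helper, using undefined for the pre-allocated in-object properties and the one-pointer filler map for the slack tracked while constructions are counted. A plain C++ stand-in for what the helper expresses:

#include <cstdint>
#include <cstdio>

// Sketch of MacroAssembler::InitializeFieldsWithFiller: store `filler` into
// every word in [current, end). In the stub the register is advanced in
// place; here a plain loop is enough to show the effect.
void InitializeFieldsWithFiller(uintptr_t* current, uintptr_t* end,
                                uintptr_t filler) {
  for (; current < end; ++current) *current = filler;
}

int main() {
  uintptr_t object[8] = {0};
  const uintptr_t kUndefined = 0x11;         // pretend tagged 'undefined'
  const uintptr_t kOnePointerFiller = 0x22;  // pretend filler map
  // First five in-object property slots: undefined; remaining slack: filler.
  InitializeFieldsWithFiller(object + 0, object + 5, kUndefined);
  InitializeFieldsWithFiller(object + 5, object + 8, kOnePointerFiller);
  std::printf("%lx %lx\n", (unsigned long)object[0], (unsigned long)object[7]);
}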
+ // r1: constructor + // r3: number of elements in properties array + // r4: JSObject + // r5: FixedArray (not tagged) + __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex); + __ mov(r2, r5); + ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); + __ str(r6, MemOperand(r2, kPointerSize, PostIndex)); + ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); + __ mov(r0, Operand(r3, LSL, kSmiTagSize)); + __ str(r0, MemOperand(r2, kPointerSize, PostIndex)); + + // Initialize the fields to undefined. + // r1: constructor function + // r2: First element of FixedArray (not tagged) + // r3: number of elements in properties array + // r4: JSObject + // r5: FixedArray (not tagged) + __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object. + ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize); + { Label loop, entry; + if (count_constructions) { + __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); + } else if (FLAG_debug_code) { + __ LoadRoot(r8, Heap::kUndefinedValueRootIndex); + __ cmp(r7, r8); + __ Assert(eq, "Undefined value not loaded."); + } + __ b(&entry); + __ bind(&loop); + __ str(r7, MemOperand(r2, kPointerSize, PostIndex)); + __ bind(&entry); + __ cmp(r2, r6); + __ b(lt, &loop); + } - // Add the object tag to make the JSObject real, so that we can continue and - // jump into the continuation code at any time from now on. Any failures - // need to undo the allocation, so that the heap is in a consistent state - // and verifiable. - __ add(r4, r4, Operand(kHeapObjectTag)); + // Store the initialized FixedArray into the properties field of + // the JSObject + // r1: constructor function + // r4: JSObject + // r5: FixedArray (not tagged) + __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag. + __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset)); + + // Continue with JSObject being successfully allocated + // r1: constructor function + // r4: JSObject + __ jmp(&allocated); + + // Undo the setting of the new top so that the heap is verifiable. For + // example, the map's unused properties potentially do not match the + // allocated objects unused properties. + // r4: JSObject (previous new top) + __ bind(&undo_allocation); + __ UndoAllocationInNewSpace(r4, r5); + } - // Check if a non-empty properties array is needed. Continue with allocated - // object if not fall through to runtime call if it is. + // Allocate the new receiver object using the runtime call. // r1: constructor function + __ bind(&rt_call); + __ push(r1); // argument for Runtime_NewObject + __ CallRuntime(Runtime::kNewObject, 1); + __ mov(r4, r0); + + // Receiver for constructor call allocated. // r4: JSObject - // r5: start of next object (not tagged) - __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset)); - // The field instance sizes contains both pre-allocated property fields and - // in-object properties. - __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset)); - __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * 8, 8); - __ add(r3, r3, Operand(r6)); - __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * 8, 8); - __ sub(r3, r3, Operand(r6), SetCC); - - // Done if no extra properties are to be allocated. - __ b(eq, &allocated); - __ Assert(pl, "Property allocation count failed."); - - // Scale the number of elements by pointer size and add the header for - // FixedArrays to the start of the next object calculation from above. 
- // r1: constructor - // r3: number of elements in properties array - // r4: JSObject - // r5: start of next object - __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize)); - __ AllocateInNewSpace( - r0, - r5, - r6, - r2, - &undo_allocation, - static_cast(RESULT_CONTAINS_TOP | SIZE_IN_WORDS)); - - // Initialize the FixedArray. - // r1: constructor - // r3: number of elements in properties array - // r4: JSObject - // r5: FixedArray (not tagged) - __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex); - __ mov(r2, r5); - ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); - __ str(r6, MemOperand(r2, kPointerSize, PostIndex)); - ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); - __ mov(r0, Operand(r3, LSL, kSmiTagSize)); - __ str(r0, MemOperand(r2, kPointerSize, PostIndex)); - - // Initialize the fields to undefined. + __ bind(&allocated); + __ push(r4); + + // Push the function and the allocated receiver from the stack. + // sp[0]: receiver (newly allocated object) + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ ldr(r1, MemOperand(sp, kPointerSize)); + __ push(r1); // Constructor function. + __ push(r4); // Receiver. + + // Reload the number of arguments from the stack. // r1: constructor function - // r2: First element of FixedArray (not tagged) - // r3: number of elements in properties array - // r4: JSObject - // r5: FixedArray (not tagged) - __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object. - ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize); - { Label loop, entry; - if (count_constructions) { - __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); - } else if (FLAG_debug_code) { - __ LoadRoot(r8, Heap::kUndefinedValueRootIndex); - __ cmp(r7, r8); - __ Assert(eq, "Undefined value not loaded."); - } - __ b(&entry); - __ bind(&loop); - __ str(r7, MemOperand(r2, kPointerSize, PostIndex)); - __ bind(&entry); - __ cmp(r2, r6); - __ b(lt, &loop); - } - - // Store the initialized FixedArray into the properties field of - // the JSObject + // sp[0]: receiver + // sp[1]: constructor function + // sp[2]: receiver + // sp[3]: constructor function + // sp[4]: number of arguments (smi-tagged) + __ ldr(r3, MemOperand(sp, 4 * kPointerSize)); + + // Setup pointer to last argument. + __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); + + // Setup number of arguments for function call below + __ mov(r0, Operand(r3, LSR, kSmiTagSize)); + + // Copy arguments and receiver to the expression stack. + // r0: number of arguments + // r2: address of last argument (caller sp) // r1: constructor function - // r4: JSObject - // r5: FixedArray (not tagged) - __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag. - __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset)); + // r3: number of arguments (smi-tagged) + // sp[0]: receiver + // sp[1]: constructor function + // sp[2]: receiver + // sp[3]: constructor function + // sp[4]: number of arguments (smi-tagged) + Label loop, entry; + __ b(&entry); + __ bind(&loop); + __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1)); + __ push(ip); + __ bind(&entry); + __ sub(r3, r3, Operand(2), SetCC); + __ b(ge, &loop); - // Continue with JSObject being successfully allocated + // Call the function. + // r0: number of arguments // r1: constructor function - // r4: JSObject - __ jmp(&allocated); - - // Undo the setting of the new top so that the heap is verifiable. For - // example, the map's unused properties potentially do not match the - // allocated objects unused properties. 
- // r4: JSObject (previous new top) - __ bind(&undo_allocation); - __ UndoAllocationInNewSpace(r4, r5); - } - - // Allocate the new receiver object using the runtime call. - // r1: constructor function - __ bind(&rt_call); - __ push(r1); // argument for Runtime_NewObject - __ CallRuntime(Runtime::kNewObject, 1); - __ mov(r4, r0); - - // Receiver for constructor call allocated. - // r4: JSObject - __ bind(&allocated); - __ push(r4); - - // Push the function and the allocated receiver from the stack. - // sp[0]: receiver (newly allocated object) - // sp[1]: constructor function - // sp[2]: number of arguments (smi-tagged) - __ ldr(r1, MemOperand(sp, kPointerSize)); - __ push(r1); // Constructor function. - __ push(r4); // Receiver. - - // Reload the number of arguments from the stack. - // r1: constructor function - // sp[0]: receiver - // sp[1]: constructor function - // sp[2]: receiver - // sp[3]: constructor function - // sp[4]: number of arguments (smi-tagged) - __ ldr(r3, MemOperand(sp, 4 * kPointerSize)); - - // Setup pointer to last argument. - __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); - - // Setup number of arguments for function call below - __ mov(r0, Operand(r3, LSR, kSmiTagSize)); + if (is_api_function) { + __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); + Handle code = + masm->isolate()->builtins()->HandleApiCallConstruct(); + ParameterCount expected(0); + __ InvokeCode(code, expected, expected, + RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD); + } else { + ParameterCount actual(r0); + __ InvokeFunction(r1, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + } - // Copy arguments and receiver to the expression stack. - // r0: number of arguments - // r2: address of last argument (caller sp) - // r1: constructor function - // r3: number of arguments (smi-tagged) - // sp[0]: receiver - // sp[1]: constructor function - // sp[2]: receiver - // sp[3]: constructor function - // sp[4]: number of arguments (smi-tagged) - Label loop, entry; - __ b(&entry); - __ bind(&loop); - __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1)); - __ push(ip); - __ bind(&entry); - __ sub(r3, r3, Operand(2), SetCC); - __ b(ge, &loop); + // Pop the function from the stack. + // sp[0]: constructor function + // sp[2]: receiver + // sp[3]: constructor function + // sp[4]: number of arguments (smi-tagged) + __ pop(); - // Call the function. - // r0: number of arguments - // r1: constructor function - if (is_api_function) { - __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); - Handle code = - masm->isolate()->builtins()->HandleApiCallConstruct(); - ParameterCount expected(0); - __ InvokeCode(code, expected, expected, - RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD); - } else { - ParameterCount actual(r0); - __ InvokeFunction(r1, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + // Restore context from the frame. + // r0: result + // sp[0]: receiver + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + + // If the result is an object (in the ECMA sense), we should get rid + // of the receiver and use the result; see ECMA-262 section 13.2.2-7 + // on page 74. + Label use_receiver, exit; + + // If the result is a smi, it is *not* an object in the ECMA sense. 
+ // r0: result + // sp[0]: receiver (newly allocated object) + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ JumpIfSmi(r0, &use_receiver); + + // If the type of the result (stored in its map) is less than + // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. + __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE); + __ b(ge, &exit); + + // Throw away the result of the constructor invocation and use the + // on-stack receiver as the result. + __ bind(&use_receiver); + __ ldr(r0, MemOperand(sp)); + + // Remove receiver from the stack, remove caller arguments, and + // return. + __ bind(&exit); + // r0: result + // sp[0]: receiver (newly allocated object) + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ ldr(r1, MemOperand(sp, 2 * kPointerSize)); + + // Leave construct frame. } - // Pop the function from the stack. - // sp[0]: constructor function - // sp[2]: receiver - // sp[3]: constructor function - // sp[4]: number of arguments (smi-tagged) - __ pop(); - - // Restore context from the frame. - // r0: result - // sp[0]: receiver - // sp[1]: constructor function - // sp[2]: number of arguments (smi-tagged) - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - - // If the result is an object (in the ECMA sense), we should get rid - // of the receiver and use the result; see ECMA-262 section 13.2.2-7 - // on page 74. - Label use_receiver, exit; - - // If the result is a smi, it is *not* an object in the ECMA sense. - // r0: result - // sp[0]: receiver (newly allocated object) - // sp[1]: constructor function - // sp[2]: number of arguments (smi-tagged) - __ JumpIfSmi(r0, &use_receiver); - - // If the type of the result (stored in its map) is less than - // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. - __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE); - __ b(ge, &exit); - - // Throw away the result of the constructor invocation and use the - // on-stack receiver as the result. - __ bind(&use_receiver); - __ ldr(r0, MemOperand(sp)); - - // Remove receiver from the stack, remove caller arguments, and - // return. - __ bind(&exit); - // r0: result - // sp[0]: receiver (newly allocated object) - // sp[1]: constructor function - // sp[2]: number of arguments (smi-tagged) - __ ldr(r1, MemOperand(sp, 2 * kPointerSize)); - __ LeaveConstructFrame(); __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1)); __ add(sp, sp, Operand(kPointerSize)); __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2); @@ -997,63 +1016,64 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // r4: argv // r5-r7, cp may be clobbered - // Clear the context before we push it when entering the JS frame. + // Clear the context before we push it when entering the internal frame. __ mov(cp, Operand(0, RelocInfo::NONE)); // Enter an internal frame. - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Set up the context from the function argument. - __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); + // Set up the context from the function argument. + __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); - // Set up the roots register. - ExternalReference roots_address = - ExternalReference::roots_address(masm->isolate()); - __ mov(r10, Operand(roots_address)); + // Set up the roots register. 
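The epilogue above implements the usual [[Construct]] result rule (ECMA-262 section 13.2.2, as the comment notes): an object returned from the constructor wins, anything else falls back to the freshly allocated receiver. A small sketch of that decision with a toy value representation:

#include <cstdio>

// If the constructor's return value is an object, it becomes the result of
// `new`; otherwise the newly allocated receiver is used. Invented helpers.
struct Value { bool is_object; const char* label; };

const Value& ConstructResult(const Value& call_result, const Value& receiver) {
  // Smis and other primitives fall through to the receiver.
  return call_result.is_object ? call_result : receiver;
}

int main() {
  Value receiver{true, "new receiver"};
  Value primitive{false, "42"};
  Value replacement{true, "explicit object"};
  std::printf("%s\n", ConstructResult(primitive, receiver).label);
  std::printf("%s\n", ConstructResult(replacement, receiver).label);
}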
+ ExternalReference roots_address = + ExternalReference::roots_address(masm->isolate()); + __ mov(r10, Operand(roots_address)); - // Push the function and the receiver onto the stack. - __ push(r1); - __ push(r2); + // Push the function and the receiver onto the stack. + __ push(r1); + __ push(r2); - // Copy arguments to the stack in a loop. - // r1: function - // r3: argc - // r4: argv, i.e. points to first arg - Label loop, entry; - __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2)); - // r2 points past last arg. - __ b(&entry); - __ bind(&loop); - __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter - __ ldr(r0, MemOperand(r0)); // dereference handle - __ push(r0); // push parameter - __ bind(&entry); - __ cmp(r4, r2); - __ b(ne, &loop); - - // Initialize all JavaScript callee-saved registers, since they will be seen - // by the garbage collector as part of handlers. - __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); - __ mov(r5, Operand(r4)); - __ mov(r6, Operand(r4)); - __ mov(r7, Operand(r4)); - if (kR9Available == 1) { - __ mov(r9, Operand(r4)); - } + // Copy arguments to the stack in a loop. + // r1: function + // r3: argc + // r4: argv, i.e. points to first arg + Label loop, entry; + __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2)); + // r2 points past last arg. + __ b(&entry); + __ bind(&loop); + __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter + __ ldr(r0, MemOperand(r0)); // dereference handle + __ push(r0); // push parameter + __ bind(&entry); + __ cmp(r4, r2); + __ b(ne, &loop); - // Invoke the code and pass argc as r0. - __ mov(r0, Operand(r3)); - if (is_construct) { - __ Call(masm->isolate()->builtins()->JSConstructCall()); - } else { - ParameterCount actual(r0); - __ InvokeFunction(r1, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); - } + // Initialize all JavaScript callee-saved registers, since they will be seen + // by the garbage collector as part of handlers. + __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); + __ mov(r5, Operand(r4)); + __ mov(r6, Operand(r4)); + __ mov(r7, Operand(r4)); + if (kR9Available == 1) { + __ mov(r9, Operand(r4)); + } - // Exit the JS frame and remove the parameters (except function), and return. - // Respect ABI stack constraint. - __ LeaveInternalFrame(); + // Invoke the code and pass argc as r0. + __ mov(r0, Operand(r3)); + if (is_construct) { + __ Call(masm->isolate()->builtins()->JSConstructCall()); + } else { + ParameterCount actual(r0); + __ InvokeFunction(r1, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + } + // Exit the JS frame and remove the parameters (except function), and + // return. + // Respect ABI stack constraint. + } __ Jump(lr); // r0: result @@ -1072,26 +1092,27 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { void Builtins::Generate_LazyCompile(MacroAssembler* masm) { // Enter an internal frame. - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Preserve the function. - __ push(r1); - // Push call kind information. - __ push(r5); + // Preserve the function. + __ push(r1); + // Push call kind information. + __ push(r5); - // Push the function on the stack as the argument to the runtime function. - __ push(r1); - __ CallRuntime(Runtime::kLazyCompile, 1); - // Calculate the entry point. - __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); + // Push the function on the stack as the argument to the runtime function. 
+ __ push(r1); + __ CallRuntime(Runtime::kLazyCompile, 1); + // Calculate the entry point. + __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); - // Restore call kind information. - __ pop(r5); - // Restore saved function. - __ pop(r1); + // Restore call kind information. + __ pop(r5); + // Restore saved function. + __ pop(r1); - // Tear down temporary frame. - __ LeaveInternalFrame(); + // Tear down internal frame. + } // Do a tail-call of the compiled function. __ Jump(r2); @@ -1100,26 +1121,27 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) { void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { // Enter an internal frame. - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Preserve the function. - __ push(r1); - // Push call kind information. - __ push(r5); + // Preserve the function. + __ push(r1); + // Push call kind information. + __ push(r5); - // Push the function on the stack as the argument to the runtime function. - __ push(r1); - __ CallRuntime(Runtime::kLazyRecompile, 1); - // Calculate the entry point. - __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); + // Push the function on the stack as the argument to the runtime function. + __ push(r1); + __ CallRuntime(Runtime::kLazyRecompile, 1); + // Calculate the entry point. + __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); - // Restore call kind information. - __ pop(r5); - // Restore saved function. - __ pop(r1); + // Restore call kind information. + __ pop(r5); + // Restore saved function. + __ pop(r1); - // Tear down temporary frame. - __ LeaveInternalFrame(); + // Tear down internal frame. + } // Do a tail-call of the compiled function. __ Jump(r2); @@ -1128,12 +1150,13 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, Deoptimizer::BailoutType type) { - __ EnterInternalFrame(); - // Pass the function and deoptimization type to the runtime system. - __ mov(r0, Operand(Smi::FromInt(static_cast(type)))); - __ push(r0); - __ CallRuntime(Runtime::kNotifyDeoptimized, 1); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + // Pass the function and deoptimization type to the runtime system. + __ mov(r0, Operand(Smi::FromInt(static_cast(type)))); + __ push(r0); + __ CallRuntime(Runtime::kNotifyDeoptimized, 1); + } // Get the full codegen state from the stack and untag it -> r6. __ ldr(r6, MemOperand(sp, 0 * kPointerSize)); @@ -1173,9 +1196,10 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) { // the registers without worrying about which of them contain // pointers. This seems a bit fragile. __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit()); - __ EnterInternalFrame(); - __ CallRuntime(Runtime::kNotifyOSR, 0); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ CallRuntime(Runtime::kNotifyOSR, 0); + } __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit()); __ Ret(); } @@ -1191,10 +1215,11 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { // Lookup the function in the JavaScript frame and push it as an // argument to the on-stack replacement function. 
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); - __ EnterInternalFrame(); - __ push(r0); - __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(r0); + __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); + } // If the result was -1 it means that we couldn't optimize the // function. Just return and continue in the unoptimized version. @@ -1276,17 +1301,23 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ b(ge, &shift_arguments); __ bind(&convert_to_object); - __ EnterInternalFrame(); // In order to preserve argument count. - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged. - __ push(r0); - __ push(r2); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ mov(r2, r0); + { + // Enter an internal frame in order to preserve argument count. + FrameScope scope(masm, StackFrame::INTERNAL); + __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged. + __ push(r0); + + __ push(r2); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ mov(r2, r0); + + __ pop(r0); + __ mov(r0, Operand(r0, ASR, kSmiTagSize)); + + // Exit the internal frame. + } - __ pop(r0); - __ mov(r0, Operand(r0, ASR, kSmiTagSize)); - __ LeaveInternalFrame(); // Restore the function to r1, and the flag to r4. __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2)); __ mov(r4, Operand(0, RelocInfo::NONE)); @@ -1406,156 +1437,157 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { const int kRecvOffset = 3 * kPointerSize; const int kFunctionOffset = 4 * kPointerSize; - __ EnterInternalFrame(); + { + FrameScope frame_scope(masm, StackFrame::INTERNAL); - __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function - __ push(r0); - __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array - __ push(r0); - __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); - - // Check the stack for overflow. We are not trying to catch - // interruptions (e.g. debug break and preemption) here, so the "real stack - // limit" is checked. - Label okay; - __ LoadRoot(r2, Heap::kRealStackLimitRootIndex); - // Make r2 the space we have left. The stack might already be overflowed - // here which will cause r2 to become negative. - __ sub(r2, sp, r2); - // Check if the arguments will overflow the stack. - __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ b(gt, &okay); // Signed comparison. - - // Out of stack space. - __ ldr(r1, MemOperand(fp, kFunctionOffset)); - __ push(r1); - __ push(r0); - __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); - // End of stack check. + __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function + __ push(r0); + __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array + __ push(r0); + __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); + + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. + Label okay; + __ LoadRoot(r2, Heap::kRealStackLimitRootIndex); + // Make r2 the space we have left. The stack might already be overflowed + // here which will cause r2 to become negative. + __ sub(r2, sp, r2); + // Check if the arguments will overflow the stack. + __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ b(gt, &okay); // Signed comparison. + + // Out of stack space. 
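The Function.prototype.apply path above keeps its stack check: the space left above the real stack limit is compared, signed, against what the unpacked arguments will need. The arithmetic in isolation, with hypothetical numbers:

#include <cstdint>
#include <cstdio>

// Compute how much room is left above the "real" stack limit and compare it,
// signed, against argc * kPointerSize. The signed comparison keeps the test
// correct even if the stack has already grown past the limit (negative room).
bool ApplyWouldOverflow(intptr_t sp, intptr_t real_stack_limit,
                        intptr_t argc, intptr_t pointer_size = sizeof(void*)) {
  intptr_t room = sp - real_stack_limit;  // may already be negative
  intptr_t needed = argc * pointer_size;
  return room <= needed;                  // signed, like `b(gt, &okay)`
}

int main() {
  std::printf("%d\n", ApplyWouldOverflow(0x10000, 0x0F000, 100));  // 0: fits
  std::printf("%d\n", ApplyWouldOverflow(0x10000, 0x0FF00, 100));  // 1: overflow
}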
+ __ ldr(r1, MemOperand(fp, kFunctionOffset)); + __ push(r1); + __ push(r0); + __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); + // End of stack check. - // Push current limit and index. - __ bind(&okay); - __ push(r0); // limit - __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index - __ push(r1); + // Push current limit and index. + __ bind(&okay); + __ push(r0); // limit + __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index + __ push(r1); - // Get the receiver. - __ ldr(r0, MemOperand(fp, kRecvOffset)); + // Get the receiver. + __ ldr(r0, MemOperand(fp, kRecvOffset)); - // Check that the function is a JS function (otherwise it must be a proxy). - Label push_receiver; - __ ldr(r1, MemOperand(fp, kFunctionOffset)); - __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); - __ b(ne, &push_receiver); + // Check that the function is a JS function (otherwise it must be a proxy). + Label push_receiver; + __ ldr(r1, MemOperand(fp, kFunctionOffset)); + __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); + __ b(ne, &push_receiver); - // Change context eagerly to get the right global object if necessary. - __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); - // Load the shared function info while the function is still in r1. - __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); + // Change context eagerly to get the right global object if necessary. + __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); + // Load the shared function info while the function is still in r1. + __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); - // Compute the receiver. - // Do not transform the receiver for strict mode functions. - Label call_to_object, use_global_receiver; - __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset)); - __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + - kSmiTagSize))); - __ b(ne, &push_receiver); - - // Do not transform the receiver for strict mode functions. - __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); - __ b(ne, &push_receiver); - - // Compute the receiver in non-strict mode. - __ JumpIfSmi(r0, &call_to_object); - __ LoadRoot(r1, Heap::kNullValueRootIndex); - __ cmp(r0, r1); - __ b(eq, &use_global_receiver); - __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); - __ cmp(r0, r1); - __ b(eq, &use_global_receiver); - - // Check if the receiver is already a JavaScript object. - // r0: receiver - STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); - __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE); - __ b(ge, &push_receiver); - - // Convert the receiver to a regular object. - // r0: receiver - __ bind(&call_to_object); - __ push(r0); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ b(&push_receiver); - - // Use the current global receiver object as the receiver. - __ bind(&use_global_receiver); - const int kGlobalOffset = - Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; - __ ldr(r0, FieldMemOperand(cp, kGlobalOffset)); - __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset)); - __ ldr(r0, FieldMemOperand(r0, kGlobalOffset)); - __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset)); - - // Push the receiver. - // r0: receiver - __ bind(&push_receiver); - __ push(r0); + // Compute the receiver. + // Do not transform the receiver for strict mode functions. 
+ Label call_to_object, use_global_receiver; + __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset)); + __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + + kSmiTagSize))); + __ b(ne, &push_receiver); - // Copy all arguments from the array to the stack. - Label entry, loop; - __ ldr(r0, MemOperand(fp, kIndexOffset)); - __ b(&entry); + // Do not transform the receiver for strict mode functions. + __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); + __ b(ne, &push_receiver); - // Load the current argument from the arguments array and push it to the - // stack. - // r0: current argument index - __ bind(&loop); - __ ldr(r1, MemOperand(fp, kArgsOffset)); - __ push(r1); - __ push(r0); + // Compute the receiver in non-strict mode. + __ JumpIfSmi(r0, &call_to_object); + __ LoadRoot(r1, Heap::kNullValueRootIndex); + __ cmp(r0, r1); + __ b(eq, &use_global_receiver); + __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); + __ cmp(r0, r1); + __ b(eq, &use_global_receiver); - // Call the runtime to access the property in the arguments array. - __ CallRuntime(Runtime::kGetProperty, 2); - __ push(r0); + // Check if the receiver is already a JavaScript object. + // r0: receiver + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE); + __ b(ge, &push_receiver); - // Use inline caching to access the arguments. - __ ldr(r0, MemOperand(fp, kIndexOffset)); - __ add(r0, r0, Operand(1 << kSmiTagSize)); - __ str(r0, MemOperand(fp, kIndexOffset)); + // Convert the receiver to a regular object. + // r0: receiver + __ bind(&call_to_object); + __ push(r0); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ b(&push_receiver); - // Test if the copy loop has finished copying all the elements from the - // arguments object. - __ bind(&entry); - __ ldr(r1, MemOperand(fp, kLimitOffset)); - __ cmp(r0, r1); - __ b(ne, &loop); - - // Invoke the function. - Label call_proxy; - ParameterCount actual(r0); - __ mov(r0, Operand(r0, ASR, kSmiTagSize)); - __ ldr(r1, MemOperand(fp, kFunctionOffset)); - __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); - __ b(ne, &call_proxy); - __ InvokeFunction(r1, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + // Use the current global receiver object as the receiver. + __ bind(&use_global_receiver); + const int kGlobalOffset = + Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; + __ ldr(r0, FieldMemOperand(cp, kGlobalOffset)); + __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset)); + __ ldr(r0, FieldMemOperand(r0, kGlobalOffset)); + __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset)); + + // Push the receiver. + // r0: receiver + __ bind(&push_receiver); + __ push(r0); - // Tear down the internal frame and remove function, receiver and args. - __ LeaveInternalFrame(); - __ add(sp, sp, Operand(3 * kPointerSize)); - __ Jump(lr); + // Copy all arguments from the array to the stack. + Label entry, loop; + __ ldr(r0, MemOperand(fp, kIndexOffset)); + __ b(&entry); - // Invoke the function proxy. - __ bind(&call_proxy); - __ push(r1); // add function proxy as last argument - __ add(r0, r0, Operand(1)); - __ mov(r2, Operand(0, RelocInfo::NONE)); - __ SetCallKind(r5, CALL_AS_METHOD); - __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY); - __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), - RelocInfo::CODE_TARGET); + // Load the current argument from the arguments array and push it to the + // stack. 
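The block above computes the call receiver for non-strict, non-native callees: null and undefined are swapped for the global receiver, other primitives go through ToObject, and objects pass through untouched. A toy model of that decision, not V8's representation:

#include <cstdio>
#include <string>

// Receiver fix-up done before the call in FunctionApply. Strict-mode and
// native callees skip the transformation entirely.
enum class Type { kUndefined, kNull, kPrimitive, kObject };

std::string ComputeReceiver(Type receiver, bool strict_or_native) {
  if (strict_or_native) return "unchanged";
  switch (receiver) {
    case Type::kUndefined:
    case Type::kNull:      return "global receiver";
    case Type::kPrimitive: return "ToObject(receiver)";
    case Type::kObject:    return "unchanged";
  }
  return "unchanged";
}

int main() {
  std::printf("%s\n", ComputeReceiver(Type::kNull, false).c_str());
  std::printf("%s\n", ComputeReceiver(Type::kPrimitive, false).c_str());
}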
+ // r0: current argument index + __ bind(&loop); + __ ldr(r1, MemOperand(fp, kArgsOffset)); + __ push(r1); + __ push(r0); + + // Call the runtime to access the property in the arguments array. + __ CallRuntime(Runtime::kGetProperty, 2); + __ push(r0); + + // Use inline caching to access the arguments. + __ ldr(r0, MemOperand(fp, kIndexOffset)); + __ add(r0, r0, Operand(1 << kSmiTagSize)); + __ str(r0, MemOperand(fp, kIndexOffset)); - __ LeaveInternalFrame(); + // Test if the copy loop has finished copying all the elements from the + // arguments object. + __ bind(&entry); + __ ldr(r1, MemOperand(fp, kLimitOffset)); + __ cmp(r0, r1); + __ b(ne, &loop); + + // Invoke the function. + Label call_proxy; + ParameterCount actual(r0); + __ mov(r0, Operand(r0, ASR, kSmiTagSize)); + __ ldr(r1, MemOperand(fp, kFunctionOffset)); + __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); + __ b(ne, &call_proxy); + __ InvokeFunction(r1, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + + frame_scope.GenerateLeaveFrame(); + __ add(sp, sp, Operand(3 * kPointerSize)); + __ Jump(lr); + + // Invoke the function proxy. + __ bind(&call_proxy); + __ push(r1); // add function proxy as last argument + __ add(r0, r0, Operand(1)); + __ mov(r2, Operand(0, RelocInfo::NONE)); + __ SetCallKind(r5, CALL_AS_METHOD); + __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY); + __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); + + // Tear down the internal frame and remove function, receiver and args. + } __ add(sp, sp, Operand(3 * kPointerSize)); __ Jump(lr); } diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index e65f6d9b69..e2a313372e 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -189,6 +189,72 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { } +void FastNewBlockContextStub::Generate(MacroAssembler* masm) { + // Stack layout on entry: + // + // [sp]: function. + // [sp + kPointerSize]: serialized scope info + + // Try to allocate the context in new space. + Label gc; + int length = slots_ + Context::MIN_CONTEXT_SLOTS; + __ AllocateInNewSpace(FixedArray::SizeFor(length), + r0, r1, r2, &gc, TAG_OBJECT); + + // Load the function from the stack. + __ ldr(r3, MemOperand(sp, 0)); + + // Load the serialized scope info from the stack. + __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); + + // Setup the object header. + __ LoadRoot(r2, Heap::kBlockContextMapRootIndex); + __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ mov(r2, Operand(Smi::FromInt(length))); + __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset)); + + // If this block context is nested in the global context we get a smi + // sentinel instead of a function. The block context should get the + // canonical empty function of the global context as its closure which + // we still have to look up. + Label after_sentinel; + __ JumpIfNotSmi(r3, &after_sentinel); + if (FLAG_debug_code) { + const char* message = "Expected 0 as a Smi sentinel"; + __ cmp(r3, Operand::Zero()); + __ Assert(eq, message); + } + __ ldr(r3, GlobalObjectOperand()); + __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset)); + __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX)); + __ bind(&after_sentinel); + + // Setup the fixed slots. 
+ __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX)); + __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX)); + __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX)); + + // Copy the global object from the previous context. + __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX)); + __ str(r1, ContextOperand(r0, Context::GLOBAL_INDEX)); + + // Initialize the rest of the slots to the hole value. + __ LoadRoot(r1, Heap::kTheHoleValueRootIndex); + for (int i = 0; i < slots_; i++) { + __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS)); + } + + // Remove the on-stack argument and return. + __ mov(cp, r0); + __ add(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + // Need to collect. Call into runtime system. + __ bind(&gc); + __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1); +} + + void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { // Stack layout on entry: // @@ -838,9 +904,11 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( __ vmov(d0, r0, r1); __ vmov(d1, r2, r3); } - // Call C routine that may not cause GC or other trouble. - __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()), - 0, 2); + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction( + ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); + } // Store answer in the overwritable heap number. Double returned in // registers r0 and r1 or in d0. if (masm->use_eabi_hardfloat()) { @@ -857,6 +925,29 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( } +bool WriteInt32ToHeapNumberStub::IsPregenerated() { + // These variants are compiled ahead of time. See next method. + if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) { + return true; + } + if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) { + return true; + } + // Other register combinations are generated as and when they are needed, + // so it is unsafe to call them from stubs (we can't generate a stub while + // we are generating a stub). + return false; +} + + +void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() { + WriteInt32ToHeapNumberStub stub1(r1, r0, r2); + WriteInt32ToHeapNumberStub stub2(r2, r0, r3); + stub1.GetCode()->set_is_pregenerated(true); + stub2.GetCode()->set_is_pregenerated(true); +} + + // See comment for class. void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { Label max_negative_int; @@ -1197,6 +1288,8 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, __ vmov(d0, r0, r1); __ vmov(d1, r2, r3); } + + AllowExternalCallThatCantCauseGC scope(masm); __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 0, 2); __ pop(pc); // Return. @@ -1214,7 +1307,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, // If either operand is a JS object or an oddball value, then they are // not equal since their pointers are different. // There is no test for undetectability in strict equality. - STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); + STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); Label first_non_object; // Get the type of the first operand into r2 and compare it with // FIRST_SPEC_OBJECT_TYPE. @@ -1606,6 +1699,8 @@ void CompareStub::Generate(MacroAssembler* masm) { // The stub expects its argument in the tos_ register and returns its result in // it, too: zero for false, and a non-zero value for true. void ToBooleanStub::Generate(MacroAssembler* masm) { + // This stub overrides SometimesSetsUpAFrame() to return false. 
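FastNewBlockContextStub above lays out a block context: the block-context map and length in the header, then closure, previous context, scope-info extension and global object in the fixed slots, with every remaining slot pre-filled with the hole. A layout sketch using an illustrative struct, not V8's Context:

#include <cstdio>
#include <vector>

// Indices mirror the order the stub stores into, followed by one slot per
// block-scoped variable.
enum FixedSlot { CLOSURE = 0, PREVIOUS, EXTENSION, GLOBAL, MIN_CONTEXT_SLOTS };

struct BlockContext {
  std::vector<const void*> slots;
  BlockContext(const void* closure, const void* previous,
               const void* scope_info, const void* global, int var_count) {
    static const int kHole = 0;        // stand-in for the hole value
    slots.resize(MIN_CONTEXT_SLOTS + var_count, &kHole);
    slots[CLOSURE] = closure;          // or the global context's closure when
                                       // the smi-0 sentinel was pushed
    slots[PREVIOUS] = previous;        // the context we were entered from
    slots[EXTENSION] = scope_info;     // serialized scope info
    slots[GLOBAL] = global;            // copied from the previous context
  }
};

int main() {
  int fn, prev, info, glob;
  BlockContext ctx(&fn, &prev, &info, &glob, /*var_count=*/2);
  std::printf("context length: %zu\n", ctx.slots.size());
}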
That means + // we cannot call anything that could cause a GC from this stub. // This stub uses VFP3 instructions. CpuFeatures::Scope scope(VFP3); @@ -1713,6 +1808,41 @@ void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) { } +void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { + // We don't allow a GC during a store buffer overflow so there is no need to + // store the registers in any particular way, but we do have to store and + // restore them. + __ stm(db_w, sp, kCallerSaved | lr.bit()); + if (save_doubles_ == kSaveFPRegs) { + CpuFeatures::Scope scope(VFP3); + __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters)); + for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { + DwVfpRegister reg = DwVfpRegister::from_code(i); + __ vstr(reg, MemOperand(sp, i * kDoubleSize)); + } + } + const int argument_count = 1; + const int fp_argument_count = 0; + const Register scratch = r1; + + AllowExternalCallThatCantCauseGC scope(masm); + __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); + __ mov(r0, Operand(ExternalReference::isolate_address())); + __ CallCFunction( + ExternalReference::store_buffer_overflow_function(masm->isolate()), + argument_count); + if (save_doubles_ == kSaveFPRegs) { + CpuFeatures::Scope scope(VFP3); + for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { + DwVfpRegister reg = DwVfpRegister::from_code(i); + __ vldr(reg, MemOperand(sp, i * kDoubleSize)); + } + __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters)); + } + __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0). +} + + void UnaryOpStub::PrintName(StringStream* stream) { const char* op_name = Token::Name(op_); const char* overwrite_name = NULL; // Make g++ happy. @@ -1866,12 +1996,13 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - __ EnterInternalFrame(); - __ push(r0); - __ CallRuntime(Runtime::kNumberAlloc, 0); - __ mov(r1, Operand(r0)); - __ pop(r0); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(r0); + __ CallRuntime(Runtime::kNumberAlloc, 0); + __ mov(r1, Operand(r0)); + __ pop(r0); + } __ bind(&heapnumber_allocated); __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); @@ -1912,13 +2043,14 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot( __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - __ EnterInternalFrame(); - __ push(r0); // Push the heap number, not the untagged int32. - __ CallRuntime(Runtime::kNumberAlloc, 0); - __ mov(r2, r0); // Move the new heap number into r2. - // Get the heap number into r0, now that the new heap number is in r2. - __ pop(r0); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(r0); // Push the heap number, not the untagged int32. + __ CallRuntime(Runtime::kNumberAlloc, 0); + __ mov(r2, r0); // Move the new heap number into r2. + // Get the heap number into r0, now that the new heap number is in r2. + __ pop(r0); + } // Convert the heap number in r0 to an untagged integer in r1. // This can't go slow-case because it's the same number we already @@ -2028,6 +2160,10 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( void BinaryOpStub::Generate(MacroAssembler* masm) { + // Explicitly allow generation of nested stubs. It is safe here because + // generation code does not use any raw pointers. 
+ AllowStubCallsScope allow_stub_calls(masm, true); + switch (operands_type_) { case BinaryOpIC::UNINITIALIZED: GenerateTypeTransition(masm); @@ -3133,10 +3269,11 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex); __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache); __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); - __ EnterInternalFrame(); - __ push(r0); - __ CallRuntime(RuntimeFunction(), 1); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(r0); + __ CallRuntime(RuntimeFunction(), 1); + } __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); __ Ret(); @@ -3149,14 +3286,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // We return the value in d2 without adding it to the cache, but // we cause a scavenging GC so that future allocations will succeed. - __ EnterInternalFrame(); - - // Allocate an aligned object larger than a HeapNumber. - ASSERT(4 * kPointerSize >= HeapNumber::kSize); - __ mov(scratch0, Operand(4 * kPointerSize)); - __ push(scratch0); - __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Allocate an aligned object larger than a HeapNumber. + ASSERT(4 * kPointerSize >= HeapNumber::kSize); + __ mov(scratch0, Operand(4 * kPointerSize)); + __ push(scratch0); + __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); + } __ Ret(); } } @@ -3173,6 +3311,7 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, } else { __ vmov(r0, r1, d2); } + AllowExternalCallThatCantCauseGC scope(masm); switch (type_) { case TranscendentalCache::SIN: __ CallCFunction(ExternalReference::math_sin_double_function(isolate), @@ -3268,11 +3407,14 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ push(lr); __ PrepareCallCFunction(1, 1, scratch); __ SetCallCDoubleArguments(double_base, exponent); - __ CallCFunction( - ExternalReference::power_double_int_function(masm->isolate()), - 1, 1); - __ pop(lr); - __ GetCFunctionDoubleResult(double_result); + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction( + ExternalReference::power_double_int_function(masm->isolate()), + 1, 1); + __ pop(lr); + __ GetCFunctionDoubleResult(double_result); + } __ vstr(double_result, FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); __ mov(r0, heapnumber); @@ -3298,11 +3440,14 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ push(lr); __ PrepareCallCFunction(0, 2, scratch); __ SetCallCDoubleArguments(double_base, double_exponent); - __ CallCFunction( - ExternalReference::power_double_double_function(masm->isolate()), - 0, 2); - __ pop(lr); - __ GetCFunctionDoubleResult(double_result); + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction( + ExternalReference::power_double_double_function(masm->isolate()), + 0, 2); + __ pop(lr); + __ GetCFunctionDoubleResult(double_result); + } __ vstr(double_result, FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); __ mov(r0, heapnumber); @@ -3319,6 +3464,37 @@ bool CEntryStub::NeedsImmovableCode() { } +bool CEntryStub::IsPregenerated() { + return (!save_doubles_ || ISOLATE->fp_stubs_generated()) && + result_size_ == 1; +} + + +void CodeStub::GenerateStubsAheadOfTime() { + CEntryStub::GenerateAheadOfTime(); + WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(); + StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(); + 
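Several call sites above are wrapped in AllowExternalCallThatCantCauseGC (and BinaryOpStub::Generate in AllowStubCallsScope). These read like assertion scopes: they whitelist calls known not to trigger GC when no frame has been set up. A sketch of such a scope, assuming a toy assembler flag rather than V8's actual bookkeeping:

#include <cassert>
#include <cstdio>

struct ToyAssembler {
  bool has_frame = false;
  bool allow_no_frame_external_calls = false;
  void CallCFunction(const char* what) {
    // Only legal with a frame, or when the caller promised no GC can happen.
    assert(has_frame || allow_no_frame_external_calls);
    std::printf("call C function: %s\n", what);
  }
};

class AllowExternalCallThatCantCauseGC {
 public:
  explicit AllowExternalCallThatCantCauseGC(ToyAssembler* masm)
      : masm_(masm), old_(masm->allow_no_frame_external_calls) {
    masm_->allow_no_frame_external_calls = true;
  }
  ~AllowExternalCallThatCantCauseGC() {
    masm_->allow_no_frame_external_calls = old_;
  }
 private:
  ToyAssembler* masm_;
  bool old_;
};

int main() {
  ToyAssembler masm;                            // no frame set up
  AllowExternalCallThatCantCauseGC scope(&masm);
  masm.CallCFunction("double_fp_operation");    // fine inside the scope
}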
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(); +} + + +void CodeStub::GenerateFPStubs() { + CEntryStub save_doubles(1, kSaveFPRegs); + Handle code = save_doubles.GetCode(); + code->set_is_pregenerated(true); + StoreBufferOverflowStub stub(kSaveFPRegs); + stub.GetCode()->set_is_pregenerated(true); + code->GetIsolate()->set_fp_stubs_generated(true); +} + + +void CEntryStub::GenerateAheadOfTime() { + CEntryStub stub(1, kDontSaveFPRegs); + Handle code = stub.GetCode(); + code->set_is_pregenerated(true); +} + + void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { __ Throw(r0); } @@ -3430,8 +3606,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ b(eq, throw_out_of_memory_exception); // Retrieve the pending exception and clear the variable. - __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate))); - __ ldr(r3, MemOperand(ip)); + __ mov(r3, Operand(isolate->factory()->the_hole_value())); __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, isolate))); __ ldr(r0, MemOperand(ip)); @@ -3469,6 +3644,7 @@ void CEntryStub::Generate(MacroAssembler* masm) { __ sub(r6, r6, Operand(kPointerSize)); // Enter the exit frame that transitions from JavaScript to C++. + FrameScope scope(masm, StackFrame::MANUAL); __ EnterExitFrame(save_doubles_); // Setup argc and the builtin function in callee-saved registers. @@ -3613,8 +3789,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // saved values before returning a failure to C. // Clear any pending exceptions. - __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate))); - __ ldr(r5, MemOperand(ip)); + __ mov(r5, Operand(isolate->factory()->the_hole_value())); __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, isolate))); __ str(r5, MemOperand(ip)); @@ -3851,10 +4026,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) { } __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); } else { - __ EnterInternalFrame(); - __ Push(r0, r1); - __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(r0, r1); + __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); + } __ cmp(r0, Operand::Zero()); __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq); __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne); @@ -4480,8 +4656,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // For arguments 4 and 3 get string length, calculate start of string data and // calculate the shift of the index (0 for ASCII and 1 for two byte). - STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); - __ add(r8, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); __ eor(r3, r3, Operand(1)); // Load the length from the original subject string from the previous stack // frame. Therefore we have to use fp, which points exactly to two pointer @@ -4532,8 +4707,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // stack overflow (on the backtrack stack) was detected in RegExp code but // haven't created the exception yet. Handle that in the runtime system. // TODO(592): Rerunning the RegExp to get the stack overflow exception. 
- __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate))); - __ ldr(r1, MemOperand(r1, 0)); + __ mov(r1, Operand(isolate->factory()->the_hole_value())); __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, isolate))); __ ldr(r0, MemOperand(r2, 0)); @@ -4575,16 +4749,25 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ str(r2, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastCaptureCountOffset)); // Store last subject and last input. - __ mov(r3, last_match_info_elements); // Moved up to reduce latency. __ str(subject, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastSubjectOffset)); - __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7); + __ mov(r2, subject); + __ RecordWriteField(last_match_info_elements, + RegExpImpl::kLastSubjectOffset, + r2, + r7, + kLRHasNotBeenSaved, + kDontSaveFPRegs); __ str(subject, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastInputOffset)); - __ mov(r3, last_match_info_elements); - __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7); + __ RecordWriteField(last_match_info_elements, + RegExpImpl::kLastInputOffset, + subject, + r7, + kLRHasNotBeenSaved, + kDontSaveFPRegs); // Get the static offsets vector filled by the native regexp code. ExternalReference address_of_static_offsets_vector = @@ -4712,6 +4895,22 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { } +void CallFunctionStub::FinishCode(Code* code) { + code->set_has_function_cache(false); +} + + +void CallFunctionStub::Clear(Heap* heap, Address address) { + UNREACHABLE(); +} + + +Object* CallFunctionStub::GetCachedValue(Address address) { + UNREACHABLE(); + return NULL; +} + + void CallFunctionStub::Generate(MacroAssembler* masm) { Label slow, non_function; @@ -6425,12 +6624,13 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) { // Call the runtime system in a fresh internal frame. ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); - __ EnterInternalFrame(); - __ Push(r1, r0); - __ mov(ip, Operand(Smi::FromInt(op_))); - __ push(ip); - __ CallExternalReference(miss, 3); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(r1, r0); + __ mov(ip, Operand(Smi::FromInt(op_))); + __ push(ip); + __ CallExternalReference(miss, 3); + } // Compute the entry point of the rewritten stub. __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); // Restore registers. @@ -6613,6 +6813,8 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { + // This stub overrides SometimesSetsUpAFrame() to return false. That means + // we cannot call anything that could cause a GC from this stub. // Registers: // result: StringDictionary to probe // r1: key @@ -6702,6 +6904,267 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { } +struct AheadOfTimeWriteBarrierStubList { + Register object, value, address; + RememberedSetAction action; +}; + + +struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { + // Used in RegExpExecStub. + { r6, r4, r7, EMIT_REMEMBERED_SET }, + { r6, r2, r7, EMIT_REMEMBERED_SET }, + // Used in CompileArrayPushCall. + // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore. + // Also used in KeyedStoreIC::GenerateGeneric. + { r3, r4, r5, EMIT_REMEMBERED_SET }, + // Used in CompileStoreGlobal. 
+ { r4, r1, r2, OMIT_REMEMBERED_SET }, + // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField. + { r1, r2, r3, EMIT_REMEMBERED_SET }, + { r3, r2, r1, EMIT_REMEMBERED_SET }, + // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField. + { r2, r1, r3, EMIT_REMEMBERED_SET }, + { r3, r1, r2, EMIT_REMEMBERED_SET }, + // KeyedStoreStubCompiler::GenerateStoreFastElement. + { r4, r2, r3, EMIT_REMEMBERED_SET }, + // Null termination. + { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET} +}; + + +bool RecordWriteStub::IsPregenerated() { + for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; + !entry->object.is(no_reg); + entry++) { + if (object_.is(entry->object) && + value_.is(entry->value) && + address_.is(entry->address) && + remembered_set_action_ == entry->action && + save_fp_regs_mode_ == kDontSaveFPRegs) { + return true; + } + } + return false; +} + + +bool StoreBufferOverflowStub::IsPregenerated() { + return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated(); +} + + +void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() { + StoreBufferOverflowStub stub1(kDontSaveFPRegs); + stub1.GetCode()->set_is_pregenerated(true); +} + + +void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() { + for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; + !entry->object.is(no_reg); + entry++) { + RecordWriteStub stub(entry->object, + entry->value, + entry->address, + entry->action, + kDontSaveFPRegs); + stub.GetCode()->set_is_pregenerated(true); + } +} + + +// Takes the input in 3 registers: address_ value_ and object_. A pointer to +// the value has just been written into the object, now this stub makes sure +// we keep the GC informed. The word in the object where the value has been +// written is in the address register. +void RecordWriteStub::Generate(MacroAssembler* masm) { + Label skip_to_incremental_noncompacting; + Label skip_to_incremental_compacting; + + // The first two instructions are generated with labels so as to get the + // offset fixed up correctly by the bind(Label*) call. We patch it back and + // forth between a compare instructions (a nop in this position) and the + // real branch when we start and stop incremental heap marking. + // See RecordWriteStub::Patch for details. + __ b(&skip_to_incremental_noncompacting); + __ b(&skip_to_incremental_compacting); + + if (remembered_set_action_ == EMIT_REMEMBERED_SET) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } + __ Ret(); + + __ bind(&skip_to_incremental_noncompacting); + GenerateIncremental(masm, INCREMENTAL); + + __ bind(&skip_to_incremental_compacting); + GenerateIncremental(masm, INCREMENTAL_COMPACTION); + + // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. + // Will be checked in IncrementalMarking::ActivateGeneratedStub. + ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12)); + ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12)); + PatchBranchIntoNop(masm, 0); + PatchBranchIntoNop(masm, Assembler::kInstrSize); +} + + +void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { + regs_.Save(masm); + + if (remembered_set_action_ == EMIT_REMEMBERED_SET) { + Label dont_need_remembered_set; + + __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0)); + __ JumpIfNotInNewSpace(regs_.scratch0(), // Value. 
+ regs_.scratch0(), + &dont_need_remembered_set); + + __ CheckPageFlag(regs_.object(), + regs_.scratch0(), + 1 << MemoryChunk::SCAN_ON_SCAVENGE, + ne, + &dont_need_remembered_set); + + // First notify the incremental marker if necessary, then update the + // remembered set. + CheckNeedsToInformIncrementalMarker( + masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode); + InformIncrementalMarker(masm, mode); + regs_.Restore(masm); + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + + __ bind(&dont_need_remembered_set); + } + + CheckNeedsToInformIncrementalMarker( + masm, kReturnOnNoNeedToInformIncrementalMarker, mode); + InformIncrementalMarker(masm, mode); + regs_.Restore(masm); + __ Ret(); +} + + +void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { + regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); + int argument_count = 3; + __ PrepareCallCFunction(argument_count, regs_.scratch0()); + Register address = + r0.is(regs_.address()) ? regs_.scratch0() : regs_.address(); + ASSERT(!address.is(regs_.object())); + ASSERT(!address.is(r0)); + __ Move(address, regs_.address()); + __ Move(r0, regs_.object()); + if (mode == INCREMENTAL_COMPACTION) { + __ Move(r1, address); + } else { + ASSERT(mode == INCREMENTAL); + __ ldr(r1, MemOperand(address, 0)); + } + __ mov(r2, Operand(ExternalReference::isolate_address())); + + AllowExternalCallThatCantCauseGC scope(masm); + if (mode == INCREMENTAL_COMPACTION) { + __ CallCFunction( + ExternalReference::incremental_evacuation_record_write_function( + masm->isolate()), + argument_count); + } else { + ASSERT(mode == INCREMENTAL); + __ CallCFunction( + ExternalReference::incremental_marking_record_write_function( + masm->isolate()), + argument_count); + } + regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); +} + + +void RecordWriteStub::CheckNeedsToInformIncrementalMarker( + MacroAssembler* masm, + OnNoNeedToInformIncrementalMarker on_no_need, + Mode mode) { + Label on_black; + Label need_incremental; + Label need_incremental_pop_scratch; + + // Let's look at the color of the object: If it is not black we don't have + // to inform the incremental marker. + __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); + + regs_.Restore(masm); + if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } else { + __ Ret(); + } + + __ bind(&on_black); + + // Get the value from the slot. + __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0)); + + if (mode == INCREMENTAL_COMPACTION) { + Label ensure_not_white; + + __ CheckPageFlag(regs_.scratch0(), // Contains value. + regs_.scratch1(), // Scratch. + MemoryChunk::kEvacuationCandidateMask, + eq, + &ensure_not_white); + + __ CheckPageFlag(regs_.object(), + regs_.scratch1(), // Scratch. + MemoryChunk::kSkipEvacuationSlotsRecordingMask, + eq, + &need_incremental); + + __ bind(&ensure_not_white); + } + + // We need extra registers for this, so we push the object and the address + // register temporarily. + __ Push(regs_.object(), regs_.address()); + __ EnsureNotWhite(regs_.scratch0(), // The value. + regs_.scratch1(), // Scratch. + regs_.object(), // Scratch. + regs_.address(), // Scratch. 
+ &need_incremental_pop_scratch); + __ Pop(regs_.object(), regs_.address()); + + regs_.Restore(masm); + if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } else { + __ Ret(); + } + + __ bind(&need_incremental_pop_scratch); + __ Pop(regs_.object(), regs_.address()); + + __ bind(&need_incremental); + + // Fall through when we need to inform the incremental marker. +} + + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h index 557f7e6d41..3ba75bab13 100644 --- a/deps/v8/src/arm/code-stubs-arm.h +++ b/deps/v8/src/arm/code-stubs-arm.h @@ -58,6 +58,25 @@ class TranscendentalCacheStub: public CodeStub { }; +class StoreBufferOverflowStub: public CodeStub { + public: + explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) + : save_doubles_(save_fp) { } + + void Generate(MacroAssembler* masm); + + virtual bool IsPregenerated(); + static void GenerateFixedRegStubsAheadOfTime(); + virtual bool SometimesSetsUpAFrame() { return false; } + + private: + SaveFPRegsMode save_doubles_; + + Major MajorKey() { return StoreBufferOverflow; } + int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } +}; + + class UnaryOpStub: public CodeStub { public: UnaryOpStub(Token::Value op, @@ -323,6 +342,9 @@ class WriteInt32ToHeapNumberStub : public CodeStub { the_heap_number_(the_heap_number), scratch_(scratch) { } + bool IsPregenerated(); + static void GenerateFixedRegStubsAheadOfTime(); + private: Register the_int_; Register the_heap_number_; @@ -371,6 +393,225 @@ class NumberToStringStub: public CodeStub { }; +class RecordWriteStub: public CodeStub { + public: + RecordWriteStub(Register object, + Register value, + Register address, + RememberedSetAction remembered_set_action, + SaveFPRegsMode fp_mode) + : object_(object), + value_(value), + address_(address), + remembered_set_action_(remembered_set_action), + save_fp_regs_mode_(fp_mode), + regs_(object, // An input reg. + address, // An input reg. + value) { // One scratch reg. 
+ } + + enum Mode { + STORE_BUFFER_ONLY, + INCREMENTAL, + INCREMENTAL_COMPACTION + }; + + virtual bool IsPregenerated(); + static void GenerateFixedRegStubsAheadOfTime(); + virtual bool SometimesSetsUpAFrame() { return false; } + + static void PatchBranchIntoNop(MacroAssembler* masm, int pos) { + masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20)); + ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos))); + } + + static void PatchNopIntoBranch(MacroAssembler* masm, int pos) { + masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27); + ASSERT(Assembler::IsBranch(masm->instr_at(pos))); + } + + static Mode GetMode(Code* stub) { + Instr first_instruction = Assembler::instr_at(stub->instruction_start()); + Instr second_instruction = Assembler::instr_at(stub->instruction_start() + + Assembler::kInstrSize); + + if (Assembler::IsBranch(first_instruction)) { + return INCREMENTAL; + } + + ASSERT(Assembler::IsTstImmediate(first_instruction)); + + if (Assembler::IsBranch(second_instruction)) { + return INCREMENTAL_COMPACTION; + } + + ASSERT(Assembler::IsTstImmediate(second_instruction)); + + return STORE_BUFFER_ONLY; + } + + static void Patch(Code* stub, Mode mode) { + MacroAssembler masm(NULL, + stub->instruction_start(), + stub->instruction_size()); + switch (mode) { + case STORE_BUFFER_ONLY: + ASSERT(GetMode(stub) == INCREMENTAL || + GetMode(stub) == INCREMENTAL_COMPACTION); + PatchBranchIntoNop(&masm, 0); + PatchBranchIntoNop(&masm, Assembler::kInstrSize); + break; + case INCREMENTAL: + ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + PatchNopIntoBranch(&masm, 0); + break; + case INCREMENTAL_COMPACTION: + ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + PatchNopIntoBranch(&masm, Assembler::kInstrSize); + break; + } + ASSERT(GetMode(stub) == mode); + CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize); + } + + private: + // This is a helper class for freeing up 3 scratch registers. The input is + // two registers that must be preserved and one scratch register provided by + // the caller. + class RegisterAllocation { + public: + RegisterAllocation(Register object, + Register address, + Register scratch0) + : object_(object), + address_(address), + scratch0_(scratch0) { + ASSERT(!AreAliased(scratch0, object, address, no_reg)); + scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_); + } + + void Save(MacroAssembler* masm) { + ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_)); + // We don't have to save scratch0_ because it was given to us as + // a scratch register. + masm->push(scratch1_); + } + + void Restore(MacroAssembler* masm) { + masm->pop(scratch1_); + } + + // If we have to call into C then we need to save and restore all caller- + // saved registers that were not already preserved. The scratch registers + // will be restored by other means so we don't bother pushing them here. + void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { + masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit()); + if (mode == kSaveFPRegs) { + CpuFeatures::Scope scope(VFP3); + masm->sub(sp, + sp, + Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1))); + // Save all VFP registers except d0. 
+ for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) { + DwVfpRegister reg = DwVfpRegister::from_code(i); + masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize)); + } + } + } + + inline void RestoreCallerSaveRegisters(MacroAssembler*masm, + SaveFPRegsMode mode) { + if (mode == kSaveFPRegs) { + CpuFeatures::Scope scope(VFP3); + // Restore all VFP registers except d0. + for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) { + DwVfpRegister reg = DwVfpRegister::from_code(i); + masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize)); + } + masm->add(sp, + sp, + Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1))); + } + masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit()); + } + + inline Register object() { return object_; } + inline Register address() { return address_; } + inline Register scratch0() { return scratch0_; } + inline Register scratch1() { return scratch1_; } + + private: + Register object_; + Register address_; + Register scratch0_; + Register scratch1_; + + Register GetRegThatIsNotOneOf(Register r1, + Register r2, + Register r3) { + for (int i = 0; i < Register::kNumAllocatableRegisters; i++) { + Register candidate = Register::FromAllocationIndex(i); + if (candidate.is(r1)) continue; + if (candidate.is(r2)) continue; + if (candidate.is(r3)) continue; + return candidate; + } + UNREACHABLE(); + return no_reg; + } + friend class RecordWriteStub; + }; + + enum OnNoNeedToInformIncrementalMarker { + kReturnOnNoNeedToInformIncrementalMarker, + kUpdateRememberedSetOnNoNeedToInformIncrementalMarker + }; + + void Generate(MacroAssembler* masm); + void GenerateIncremental(MacroAssembler* masm, Mode mode); + void CheckNeedsToInformIncrementalMarker( + MacroAssembler* masm, + OnNoNeedToInformIncrementalMarker on_no_need, + Mode mode); + void InformIncrementalMarker(MacroAssembler* masm, Mode mode); + + Major MajorKey() { return RecordWrite; } + + int MinorKey() { + return ObjectBits::encode(object_.code()) | + ValueBits::encode(value_.code()) | + AddressBits::encode(address_.code()) | + RememberedSetActionBits::encode(remembered_set_action_) | + SaveFPRegsModeBits::encode(save_fp_regs_mode_); + } + + bool MustBeInStubCache() { + // All stubs must be registered in the stub cache + // otherwise IncrementalMarker would not be able to find + // and patch it. + return true; + } + + void Activate(Code* code) { + code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code); + } + + class ObjectBits: public BitField {}; + class ValueBits: public BitField {}; + class AddressBits: public BitField {}; + class RememberedSetActionBits: public BitField {}; + class SaveFPRegsModeBits: public BitField {}; + + Register object_; + Register value_; + Register address_; + RememberedSetAction remembered_set_action_; + SaveFPRegsMode save_fp_regs_mode_; + Label slow_; + RegisterAllocation regs_; +}; + + // Enter C code from generated RegExp code in a way that allows // the C code to fix the return address in case of a GC. // Currently only needed on ARM. 
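[Editorial note — not part of the upstream patch.] The RecordWriteStub declared above keeps its current mode in its own first two instructions: each of the two patchable slots is either a tst-immediate (which acts as a nop in that position) or a branch, GetMode() decodes the pair into STORE_BUFFER_ONLY, INCREMENTAL, or INCREMENTAL_COMPACTION, and Patch() flips the slots when incremental marking starts or stops. Below is a minimal standalone C++ sketch of just that decoding, under the stated assumption about the slot encoding; DecodeMode, slot0_is_branch, and slot1_is_branch are names invented for this illustration and are not V8 identifiers.

    #include <cassert>

    // Mirrors RecordWriteStub::Mode from the header above.
    enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

    // Each flag models one of the two patchable instruction slots at the start
    // of the stub: true = the slot currently holds a branch, false = the slot
    // holds the tst-immediate that behaves as a nop.
    Mode DecodeMode(bool slot0_is_branch, bool slot1_is_branch) {
      if (slot0_is_branch) return INCREMENTAL;             // first branch is live
      if (slot1_is_branch) return INCREMENTAL_COMPACTION;  // second branch is live
      return STORE_BUFFER_ONLY;                            // both slots are nops
    }

    int main() {
      // Freshly generated stub: Generate() emits both branches, then patches
      // them into nops, so the initial state reads as STORE_BUFFER_ONLY.
      assert(DecodeMode(false, false) == STORE_BUFFER_ONLY);
      // Patch(stub, INCREMENTAL) turns only the first slot back into a branch.
      assert(DecodeMode(true, false) == INCREMENTAL);
      // Patch(stub, INCREMENTAL_COMPACTION) turns only the second slot back.
      assert(DecodeMode(false, true) == INCREMENTAL_COMPACTION);
      return 0;
    }

The point of the two fixed-position slots is that IncrementalMarking can switch an already generated (and possibly pregenerated) stub between these behaviours by patching two instructions and flushing the instruction cache, instead of regenerating the stub.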
@@ -575,6 +816,8 @@ class StringDictionaryLookupStub: public CodeStub { Register r0, Register r1); + virtual bool SometimesSetsUpAFrame() { return false; } + private: static const int kInlinedProbes = 4; static const int kTotalProbes = 20; @@ -587,7 +830,7 @@ class StringDictionaryLookupStub: public CodeStub { StringDictionary::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize; - Major MajorKey() { return StringDictionaryNegativeLookup; } + Major MajorKey() { return StringDictionaryLookup; } int MinorKey() { return LookupModeBits::encode(mode_); diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index bf748a9b6a..3993ed02be 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -38,12 +38,16 @@ namespace internal { // Platform-specific RuntimeCallHelper functions. void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { - masm->EnterInternalFrame(); + masm->EnterFrame(StackFrame::INTERNAL); + ASSERT(!masm->has_frame()); + masm->set_has_frame(true); } void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { - masm->LeaveInternalFrame(); + masm->LeaveFrame(StackFrame::INTERNAL); + ASSERT(masm->has_frame()); + masm->set_has_frame(false); } diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index d27982abac..1c0d508d2d 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -69,16 +69,6 @@ class CodeGenerator: public AstVisitor { int pos, bool right_here = false); - // Constants related to patching of inlined load/store. - static int GetInlinedKeyedLoadInstructionsAfterPatch() { - return FLAG_debug_code ? 32 : 13; - } - static const int kInlinedKeyedStoreInstructionsAfterPatch = 8; - static int GetInlinedNamedStoreInstructionsAfterPatch() { - ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1); - return Isolate::Current()->inlined_write_barrier_size() + 4; - } - private: DISALLOW_COPY_AND_ASSIGN(CodeGenerator); }; diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc index 07a22722c8..b866f9cc8d 100644 --- a/deps/v8/src/arm/debug-arm.cc +++ b/deps/v8/src/arm/debug-arm.cc @@ -132,55 +132,57 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() { static void Generate_DebugBreakCallHelper(MacroAssembler* masm, RegList object_regs, RegList non_object_regs) { - __ EnterInternalFrame(); - - // Store the registers containing live values on the expression stack to - // make sure that these are correctly updated during GC. Non object values - // are stored as a smi causing it to be untouched by GC. - ASSERT((object_regs & ~kJSCallerSaved) == 0); - ASSERT((non_object_regs & ~kJSCallerSaved) == 0); - ASSERT((object_regs & non_object_regs) == 0); - if ((object_regs | non_object_regs) != 0) { - for (int i = 0; i < kNumJSCallerSaved; i++) { - int r = JSCallerSavedCode(i); - Register reg = { r }; - if ((non_object_regs & (1 << r)) != 0) { - if (FLAG_debug_code) { - __ tst(reg, Operand(0xc0000000)); - __ Assert(eq, "Unable to encode value as smi"); + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Store the registers containing live values on the expression stack to + // make sure that these are correctly updated during GC. Non object values + // are stored as a smi causing it to be untouched by GC. 
+ ASSERT((object_regs & ~kJSCallerSaved) == 0); + ASSERT((non_object_regs & ~kJSCallerSaved) == 0); + ASSERT((object_regs & non_object_regs) == 0); + if ((object_regs | non_object_regs) != 0) { + for (int i = 0; i < kNumJSCallerSaved; i++) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if ((non_object_regs & (1 << r)) != 0) { + if (FLAG_debug_code) { + __ tst(reg, Operand(0xc0000000)); + __ Assert(eq, "Unable to encode value as smi"); + } + __ mov(reg, Operand(reg, LSL, kSmiTagSize)); } - __ mov(reg, Operand(reg, LSL, kSmiTagSize)); } + __ stm(db_w, sp, object_regs | non_object_regs); } - __ stm(db_w, sp, object_regs | non_object_regs); - } #ifdef DEBUG - __ RecordComment("// Calling from debug break to runtime - come in - over"); + __ RecordComment("// Calling from debug break to runtime - come in - over"); #endif - __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments - __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate()))); - - CEntryStub ceb(1); - __ CallStub(&ceb); - - // Restore the register values from the expression stack. - if ((object_regs | non_object_regs) != 0) { - __ ldm(ia_w, sp, object_regs | non_object_regs); - for (int i = 0; i < kNumJSCallerSaved; i++) { - int r = JSCallerSavedCode(i); - Register reg = { r }; - if ((non_object_regs & (1 << r)) != 0) { - __ mov(reg, Operand(reg, LSR, kSmiTagSize)); - } - if (FLAG_debug_code && - (((object_regs |non_object_regs) & (1 << r)) == 0)) { - __ mov(reg, Operand(kDebugZapValue)); + __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments + __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate()))); + + CEntryStub ceb(1); + __ CallStub(&ceb); + + // Restore the register values from the expression stack. + if ((object_regs | non_object_regs) != 0) { + __ ldm(ia_w, sp, object_regs | non_object_regs); + for (int i = 0; i < kNumJSCallerSaved; i++) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if ((non_object_regs & (1 << r)) != 0) { + __ mov(reg, Operand(reg, LSR, kSmiTagSize)); + } + if (FLAG_debug_code && + (((object_regs |non_object_regs) & (1 << r)) == 0)) { + __ mov(reg, Operand(kDebugZapValue)); + } } } - } - __ LeaveInternalFrame(); + // Leave the internal frame. + } // Now that the break point has been handled, resume normal execution by // jumping to the target address intended by the caller and that was diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index 00357f76db..bb03d740d1 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -112,12 +112,19 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { } #endif + Isolate* isolate = code->GetIsolate(); + // Add the deoptimizing code to the list. DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); - DeoptimizerData* data = code->GetIsolate()->deoptimizer_data(); + DeoptimizerData* data = isolate->deoptimizer_data(); node->set_next(data->deoptimizing_code_list_); data->deoptimizing_code_list_ = node; + // We might be in the middle of incremental marking with compaction. + // Tell collector to treat this code object in a special way and + // ignore all slots that might have been recorded on it. + isolate->heap()->mark_compact_collector()->InvalidateCode(code); + // Set the code for the function to non-optimized version. 
function->ReplaceCode(function->shared()->code()); @@ -134,7 +141,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { } -void Deoptimizer::PatchStackCheckCodeAt(Address pc_after, +void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, + Address pc_after, Code* check_code, Code* replacement_code) { const int kInstrSize = Assembler::kInstrSize; @@ -169,6 +177,13 @@ void Deoptimizer::PatchStackCheckCodeAt(Address pc_after, reinterpret_cast(check_code->entry())); Memory::uint32_at(stack_check_address_pointer) = reinterpret_cast(replacement_code->entry()); + + RelocInfo rinfo(pc_after - 2 * kInstrSize, + RelocInfo::CODE_TARGET, + 0, + unoptimized_code); + unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode( + unoptimized_code, &rinfo, replacement_code); } @@ -193,6 +208,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after, reinterpret_cast(replacement_code->entry())); Memory::uint32_at(stack_check_address_pointer) = reinterpret_cast(check_code->entry()); + + check_code->GetHeap()->incremental_marking()-> + RecordCodeTargetPatch(pc_after - 2 * kInstrSize, check_code); } @@ -632,7 +650,10 @@ void Deoptimizer::EntryGenerator::Generate() { __ mov(r5, Operand(ExternalReference::isolate_address())); __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate. // Call Deoptimizer::New(). - __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); + { + AllowExternalCallThatCantCauseGC scope(masm()); + __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); + } // Preserve "deoptimizer" object in register r0 and get the input // frame descriptor pointer to r1 (deoptimizer->input_); @@ -686,8 +707,11 @@ void Deoptimizer::EntryGenerator::Generate() { // r0: deoptimizer object; r1: scratch. __ PrepareCallCFunction(1, r1); // Call Deoptimizer::ComputeOutputFrames(). - __ CallCFunction( - ExternalReference::compute_output_frames_function(isolate), 1); + { + AllowExternalCallThatCantCauseGC scope(masm()); + __ CallCFunction( + ExternalReference::compute_output_frames_function(isolate), 1); + } __ pop(r0); // Restore deoptimizer object (class Deoptimizer). // Replace the current (input) frame with the output frames. diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h index 26bbd82d00..c66ceee931 100644 --- a/deps/v8/src/arm/frames-arm.h +++ b/deps/v8/src/arm/frames-arm.h @@ -70,6 +70,16 @@ static const RegList kCalleeSaved = 1 << 10 | // r10 v7 1 << 11; // r11 v8 (fp in JavaScript code) +// When calling into C++ (only for C++ calls that can't cause a GC). +// The call code will take care of lr, fp, etc. +static const RegList kCallerSaved = + 1 << 0 | // r0 + 1 << 1 | // r1 + 1 << 2 | // r2 + 1 << 3 | // r3 + 1 << 9; // r9 + + static const int kNumCalleeSaved = 7 + kR9Available; // Double registers d8 to d15 are callee-saved. diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index 50ed8b1da7..f9a880f56b 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -39,6 +39,7 @@ #include "stub-cache.h" #include "arm/code-stubs-arm.h" +#include "arm/macro-assembler-arm.h" namespace v8 { namespace internal { @@ -155,6 +156,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { __ bind(&ok); } + // Open a frame scope to indicate that there is a frame on the stack. The + // MANUAL indicates that the scope shouldn't actually generate code to set up + // the frame (that is done below). 
+ FrameScope frame_scope(masm_, StackFrame::MANUAL); + int locals_count = info->scope()->num_stack_slots(); __ Push(lr, fp, cp, r1); @@ -200,13 +206,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // Load parameter from stack. __ ldr(r0, MemOperand(fp, parameter_offset)); // Store it in the context. - __ mov(r1, Operand(Context::SlotOffset(var->index()))); - __ str(r0, MemOperand(cp, r1)); - // Update the write barrier. This clobbers all involved - // registers, so we have to use two more registers to avoid - // clobbering cp. - __ mov(r2, Operand(cp)); - __ RecordWrite(r2, Operand(r1), r3, r0); + MemOperand target = ContextOperand(cp, var->index()); + __ str(r0, target); + + // Update the write barrier. + __ RecordWriteContextSlot( + cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs); } } } @@ -665,12 +670,15 @@ void FullCodeGenerator::SetVar(Variable* var, ASSERT(!scratch1.is(src)); MemOperand location = VarOperand(var, scratch0); __ str(src, location); + // Emit the write barrier code if the location is in the heap. if (var->IsContextSlot()) { - __ RecordWrite(scratch0, - Operand(Context::SlotOffset(var->index())), - scratch1, - src); + __ RecordWriteContextSlot(scratch0, + location.offset(), + src, + scratch1, + kLRHasBeenSaved, + kDontSaveFPRegs); } } @@ -746,8 +754,14 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, __ str(result_register(), ContextOperand(cp, variable->index())); int offset = Context::SlotOffset(variable->index()); // We know that we have written a function, which is not a smi. - __ mov(r1, Operand(cp)); - __ RecordWrite(r1, Operand(offset), r2, result_register()); + __ RecordWriteContextSlot(cp, + offset, + result_register(), + r2, + kLRHasBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); PrepareForBailoutForId(proxy->id(), NO_REGISTERS); } else if (mode == Variable::CONST || mode == Variable::LET) { Comment cmnt(masm_, "[ Declaration"); @@ -1211,9 +1225,17 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var, } else if (var->mode() == Variable::DYNAMIC_LOCAL) { Variable* local = var->local_if_not_shadowed(); __ ldr(r0, ContextSlotOperandCheckExtensions(local, slow)); - if (local->mode() == Variable::CONST) { + if (local->mode() == Variable::CONST || + local->mode() == Variable::LET) { __ CompareRoot(r0, Heap::kTheHoleValueRootIndex); - __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); + if (local->mode() == Variable::CONST) { + __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); + } else { // Variable::LET + __ b(ne, done); + __ mov(r0, Operand(var->name())); + __ push(r0); + __ CallRuntime(Runtime::kThrowReferenceError, 1); + } } __ jmp(done); } @@ -1490,14 +1512,23 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { VisitForAccumulatorValue(subexpr); // Store the subexpression value in the array's elements. - __ ldr(r1, MemOperand(sp)); // Copy of array literal. - __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset)); + __ ldr(r6, MemOperand(sp)); // Copy of array literal. + __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset)); int offset = FixedArray::kHeaderSize + (i * kPointerSize); __ str(result_register(), FieldMemOperand(r1, offset)); + Label no_map_change; + __ JumpIfSmi(result_register(), &no_map_change); // Update the write barrier for the array store with r0 as the scratch // register. 
- __ RecordWrite(r1, Operand(offset), r2, result_register()); + __ RecordWriteField( + r1, offset, result_register(), r2, kLRHasBeenSaved, kDontSaveFPRegs, + EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ CheckFastSmiOnlyElements(r3, r2, &no_map_change); + __ push(r6); // Copy of array literal. + __ CallRuntime(Runtime::kNonSmiElementStored, 1); + __ bind(&no_map_change); PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS); } @@ -1869,7 +1900,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, // RecordWrite may destroy all its register arguments. __ mov(r3, result_register()); int offset = Context::SlotOffset(var->index()); - __ RecordWrite(r1, Operand(offset), r2, r3); + __ RecordWriteContextSlot( + r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs); } } @@ -1887,7 +1919,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ str(r0, location); if (var->IsContextSlot()) { __ mov(r3, r0); - __ RecordWrite(r1, Operand(Context::SlotOffset(var->index())), r2, r3); + int offset = Context::SlotOffset(var->index()); + __ RecordWriteContextSlot( + r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs); } } else { ASSERT(var->IsLookupSlot()); @@ -2662,20 +2696,24 @@ void FullCodeGenerator::EmitClassOf(ZoneList* args) { // Check that the object is a JS object but take special care of JS // functions to make sure they have 'Function' as their class. + // Assume that there are only two callable types, and one of them is at + // either end of the type range for JS object types. Saves extra comparisons. + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE); // Map is now in r0. __ b(lt, &null); - - // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and - // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after - // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter. - STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); - STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE == - LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1); - __ cmp(r1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE)); - __ b(ge, &function); - - // Check if the constructor in the map is a function. + STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == + FIRST_SPEC_OBJECT_TYPE + 1); + __ b(eq, &function); + + __ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE)); + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == + LAST_SPEC_OBJECT_TYPE - 1); + __ b(eq, &function); + // Assume that there is no larger type. + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1); + + // Check if the constructor in the map is a JS function. __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset)); __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE); __ b(ne, &non_function_constructor); @@ -2853,7 +2891,9 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList* args) { __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset)); // Update the write barrier. Save the value as it will be // overwritten by the write barrier code and is needed afterward. 
- __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3); + __ mov(r2, r0); + __ RecordWriteField( + r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs); __ bind(&done); context()->Plug(r0); @@ -3141,16 +3181,31 @@ void FullCodeGenerator::EmitSwapElements(ZoneList* args) { __ str(scratch1, MemOperand(index2, 0)); __ str(scratch2, MemOperand(index1, 0)); - Label new_space; - __ InNewSpace(elements, scratch1, eq, &new_space); + Label no_remembered_set; + __ CheckPageFlag(elements, + scratch1, + 1 << MemoryChunk::SCAN_ON_SCAVENGE, + ne, + &no_remembered_set); // Possible optimization: do a check that both values are Smis // (or them and test against Smi mask.) - __ mov(scratch1, elements); - __ RecordWriteHelper(elements, index1, scratch2); - __ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements. + // We are swapping two objects in an array and the incremental marker never + // pauses in the middle of scanning a single object. Therefore the + // incremental marker is not disturbed, so we don't need to call the + // RecordWrite stub that notifies the incremental marker. + __ RememberedSetHelper(elements, + index1, + scratch2, + kDontSaveFPRegs, + MacroAssembler::kFallThroughAtEnd); + __ RememberedSetHelper(elements, + index2, + scratch2, + kDontSaveFPRegs, + MacroAssembler::kFallThroughAtEnd); - __ bind(&new_space); + __ bind(&no_remembered_set); // We are done. Drop elements from the stack, and return undefined. __ Drop(3); __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); @@ -3898,10 +3953,14 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, - Handle check, - Label* if_true, - Label* if_false, - Label* fall_through) { + Handle check) { + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + { AccumulatorValueContext context(this); VisitForTypeofValue(expr); } @@ -3942,9 +4001,11 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, } else if (check->Equals(isolate()->heap()->function_symbol())) { __ JumpIfSmi(r0, if_false); - __ CompareObjectType(r0, r1, r0, FIRST_CALLABLE_SPEC_OBJECT_TYPE); - Split(ge, if_true, if_false, fall_through); - + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); + __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE); + __ b(eq, if_true); + __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE)); + Split(eq, if_true, if_false, fall_through); } else if (check->Equals(isolate()->heap()->object_symbol())) { __ JumpIfSmi(r0, if_false); if (!FLAG_harmony_typeof) { @@ -3963,18 +4024,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, } else { if (if_false != fall_through) __ jmp(if_false); } -} - - -void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr, - Label* if_true, - Label* if_false, - Label* fall_through) { - VisitForAccumulatorValue(expr); - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - - __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); - Split(eq, if_true, if_false, fall_through); + context()->Plug(if_true, if_false); } @@ -3982,9 +4032,12 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { Comment cmnt(masm_, "[ CompareOperation"); SetSourcePosition(expr->position()); + // First we try a fast inlined version of the compare when one of + // the operands is a 
literal. + if (TryLiteralCompare(expr)) return; + // Always perform the comparison for its control flow. Pack the result // into the expression's context after the comparison is performed. - Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; @@ -3992,13 +4045,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); - // First we try a fast inlined version of the compare when one of - // the operands is a literal. - if (TryLiteralCompare(expr, if_true, if_false, fall_through)) { - context()->Plug(if_true, if_false); - return; - } - Token::Value op = expr->op(); VisitForStackValue(expr->left()); switch (op) { @@ -4085,8 +4131,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { } -void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) { - Comment cmnt(masm_, "[ CompareToNull"); +void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, + Expression* sub_expr, + NilValue nil) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; @@ -4094,15 +4141,21 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) { context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); - VisitForAccumulatorValue(expr->expression()); + VisitForAccumulatorValue(sub_expr); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - __ LoadRoot(r1, Heap::kNullValueRootIndex); + Heap::RootListIndex nil_value = nil == kNullValue ? + Heap::kNullValueRootIndex : + Heap::kUndefinedValueRootIndex; + __ LoadRoot(r1, nil_value); __ cmp(r0, r1); - if (expr->is_strict()) { + if (expr->op() == Token::EQ_STRICT) { Split(eq, if_true, if_false, fall_through); } else { + Heap::RootListIndex other_nil_value = nil == kNullValue ? + Heap::kUndefinedValueRootIndex : + Heap::kNullValueRootIndex; __ b(eq, if_true); - __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r1, other_nil_value); __ cmp(r0, r1); __ b(eq, if_true); __ JumpIfSmi(r0, if_false); diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index 2e49cae928..6e0badca1d 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -208,7 +208,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm, // Update the write barrier. Make sure not to clobber the value. __ mov(scratch1, value); - __ RecordWrite(elements, scratch2, scratch1); + __ RecordWrite( + elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs); } @@ -504,21 +505,22 @@ static void GenerateCallMiss(MacroAssembler* masm, // Get the receiver of the function from the stack. __ ldr(r3, MemOperand(sp, argc * kPointerSize)); - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Push the receiver and the name of the function. - __ Push(r3, r2); + // Push the receiver and the name of the function. + __ Push(r3, r2); - // Call the entry. - __ mov(r0, Operand(2)); - __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate))); + // Call the entry. + __ mov(r0, Operand(2)); + __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate))); - CEntryStub stub(1); - __ CallStub(&stub); + CEntryStub stub(1); + __ CallStub(&stub); - // Move result to r1 and leave the internal frame. - __ mov(r1, Operand(r0)); - __ LeaveInternalFrame(); + // Move result to r1 and leave the internal frame. 
+ __ mov(r1, Operand(r0)); + } // Check if the receiver is a global object of some sort. // This can happen only for regular CallIC but not KeyedCallIC. @@ -650,12 +652,13 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { // This branch is taken when calling KeyedCallIC_Miss is neither required // nor beneficial. __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3); - __ EnterInternalFrame(); - __ push(r2); // save the key - __ Push(r1, r2); // pass the receiver and the key - __ CallRuntime(Runtime::kKeyedGetProperty, 2); - __ pop(r2); // restore the key - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(r2); // save the key + __ Push(r1, r2); // pass the receiver and the key + __ CallRuntime(Runtime::kKeyedGetProperty, 2); + __ pop(r2); // restore the key + } __ mov(r1, r0); __ jmp(&do_call); @@ -908,7 +911,8 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, ¬in, &slow); __ str(r0, mapped_location); __ add(r6, r3, r5); - __ RecordWrite(r3, r6, r9); + __ mov(r9, r0); + __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs); __ Ret(); __ bind(¬in); // The unmapped lookup expects that the parameter map is in r3. @@ -916,7 +920,8 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow); __ str(r0, unmapped_location); __ add(r6, r3, r4); - __ RecordWrite(r3, r6, r9); + __ mov(r9, r0); + __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs); __ Ret(); __ bind(&slow); GenerateMiss(masm, false); @@ -1267,13 +1272,17 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // -- r2 : receiver // -- lr : return address // ----------------------------------- - Label slow, fast, array, extra; + Label slow, array, extra, check_if_double_array; + Label fast_object_with_map_check, fast_object_without_map_check; + Label fast_double_with_map_check, fast_double_without_map_check; // Register usage. Register value = r0; Register key = r1; Register receiver = r2; Register elements = r3; // Elements array of the receiver. + Register elements_map = r6; + Register receiver_map = r7; // r4 and r5 are used as general scratch registers. // Check that the key is a smi. @@ -1281,35 +1290,26 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // Check that the object isn't a smi. __ JumpIfSmi(receiver, &slow); // Get the map of the object. - __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); // Check that the receiver does not require access checks. We need // to do this because this generic stub does not perform map checks. - __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset)); + __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset)); __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded)); __ b(ne, &slow); // Check if the object is a JS array or not. - __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); + __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset)); __ cmp(r4, Operand(JS_ARRAY_TYPE)); __ b(eq, &array); // Check that the object is some kind of JSObject. 
- __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE)); + __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); __ b(lt, &slow); - __ cmp(r4, Operand(JS_PROXY_TYPE)); - __ b(eq, &slow); - __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE)); - __ b(eq, &slow); // Object case: Check key against length in the elements array. __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); - // Check that the object is in fast mode and writable. - __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); - __ cmp(r4, ip); - __ b(ne, &slow); // Check array bounds. Both the key and the length of FixedArray are smis. __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); __ cmp(key, Operand(ip)); - __ b(lo, &fast); + __ b(lo, &fast_object_with_map_check); // Slow case, handle jump to runtime. __ bind(&slow); @@ -1330,21 +1330,31 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); __ cmp(key, Operand(ip)); __ b(hs, &slow); + __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); + __ cmp(elements_map, + Operand(masm->isolate()->factory()->fixed_array_map())); + __ b(ne, &check_if_double_array); // Calculate key + 1 as smi. STATIC_ASSERT(kSmiTag == 0); __ add(r4, key, Operand(Smi::FromInt(1))); __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ b(&fast); + __ b(&fast_object_without_map_check); + + __ bind(&check_if_double_array); + __ cmp(elements_map, + Operand(masm->isolate()->factory()->fixed_double_array_map())); + __ b(ne, &slow); + // Add 1 to key, and go to common element store code for doubles. + STATIC_ASSERT(kSmiTag == 0); + __ add(r4, key, Operand(Smi::FromInt(1))); + __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); + __ jmp(&fast_double_without_map_check); // Array case: Get the length and the elements array from the JS // array. Check that the array is in fast mode (and writable); if it // is the length is always a smi. __ bind(&array); __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); - __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); - __ cmp(r4, ip); - __ b(ne, &slow); // Check the key against the length in the array. __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset)); @@ -1352,18 +1362,57 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ b(hs, &extra); // Fall through to fast case. - __ bind(&fast); - // Fast case, store the value to the elements backing store. - __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ str(value, MemOperand(r5)); - // Skip write barrier if the written value is a smi. - __ tst(value, Operand(kSmiTagMask)); - __ Ret(eq); + __ bind(&fast_object_with_map_check); + Register scratch_value = r4; + Register address = r5; + __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); + __ cmp(elements_map, + Operand(masm->isolate()->factory()->fixed_array_map())); + __ b(ne, &fast_double_with_map_check); + __ bind(&fast_object_without_map_check); + // Smi stores don't require further checks. + Label non_smi_value; + __ JumpIfNotSmi(value, &non_smi_value); + // It's irrelevant whether array is smi-only or not when writing a smi. 
+ __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ str(value, MemOperand(address)); + __ Ret(); + + __ bind(&non_smi_value); + // Escape to slow case when writing non-smi into smi-only array. + __ CheckFastObjectElements(receiver_map, scratch_value, &slow); + // Fast elements array, store the value to the elements backing store. + __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ str(value, MemOperand(address)); // Update write barrier for the elements array address. - __ sub(r4, r5, Operand(elements)); - __ RecordWrite(elements, Operand(r4), r5, r6); + __ mov(scratch_value, value); // Preserve the value which is returned. + __ RecordWrite(elements, + address, + scratch_value, + kLRHasNotBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + __ Ret(); + __ bind(&fast_double_with_map_check); + // Check for fast double array case. If this fails, call through to the + // runtime. + __ cmp(elements_map, + Operand(masm->isolate()->factory()->fixed_double_array_map())); + __ b(ne, &slow); + __ bind(&fast_double_without_map_check); + __ StoreNumberToDoubleElements(value, + key, + receiver, + elements, + r4, + r5, + r6, + r7, + &slow); __ Ret(); } diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index 30ccd05bee..84959397b6 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -212,10 +212,11 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) { } -void LIsNullAndBranch::PrintDataTo(StringStream* stream) { +void LIsNilAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if "); InputAt(0)->PrintTo(stream); - stream->Add(is_strict() ? " === null" : " == null"); + stream->Add(kind() == kStrictEquality ? " === " : " == "); + stream->Add(nil() == kNullValue ? 
"null" : "undefined"); stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); } @@ -711,7 +712,9 @@ LInstruction* LChunkBuilder::DefineFixedDouble( LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { HEnvironment* hydrogen_env = current_block_->last_environment(); - instr->set_environment(CreateEnvironment(hydrogen_env)); + int argument_index_accumulator = 0; + instr->set_environment(CreateEnvironment(hydrogen_env, + &argument_index_accumulator)); return instr; } @@ -994,10 +997,13 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { } -LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) { +LEnvironment* LChunkBuilder::CreateEnvironment( + HEnvironment* hydrogen_env, + int* argument_index_accumulator) { if (hydrogen_env == NULL) return NULL; - LEnvironment* outer = CreateEnvironment(hydrogen_env->outer()); + LEnvironment* outer = + CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator); int ast_id = hydrogen_env->ast_id(); ASSERT(ast_id != AstNode::kNoNumber); int value_count = hydrogen_env->length(); @@ -1007,7 +1013,6 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) { argument_count_, value_count, outer); - int argument_index = 0; for (int i = 0; i < value_count; ++i) { if (hydrogen_env->is_special_index(i)) continue; @@ -1016,7 +1021,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) { if (value->IsArgumentsObject()) { op = NULL; } else if (value->IsPushArgument()) { - op = new LArgument(argument_index++); + op = new LArgument((*argument_index_accumulator)++); } else { op = UseAny(value); } @@ -1444,9 +1449,9 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch( } -LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) { +LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) { ASSERT(instr->value()->representation().IsTagged()); - return new LIsNullAndBranch(UseRegisterAtStart(instr->value())); + return new LIsNilAndBranch(UseRegisterAtStart(instr->value())); } @@ -1734,7 +1739,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) { LLoadGlobalCell* result = new LLoadGlobalCell; - return instr->check_hole_value() + return instr->RequiresHoleCheck() ? 
AssignEnvironment(DefineAsRegister(result)) : DefineAsRegister(result); } @@ -1748,14 +1753,11 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) { - if (instr->check_hole_value()) { - LOperand* temp = TempRegister(); - LOperand* value = UseRegister(instr->value()); - return AssignEnvironment(new LStoreGlobalCell(value, temp)); - } else { - LOperand* value = UseRegisterAtStart(instr->value()); - return new LStoreGlobalCell(value, NULL); - } + LOperand* temp = TempRegister(); + LOperand* value = UseTempRegister(instr->value()); + LInstruction* result = new LStoreGlobalCell(value, temp); + if (instr->RequiresHoleCheck()) result = AssignEnvironment(result); + return result; } diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index 8c18760fd1..73c7e459c3 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -107,7 +107,7 @@ class LCodeGen; V(Integer32ToDouble) \ V(InvokeFunction) \ V(IsConstructCallAndBranch) \ - V(IsNullAndBranch) \ + V(IsNilAndBranch) \ V(IsObjectAndBranch) \ V(IsSmiAndBranch) \ V(IsUndetectableAndBranch) \ @@ -627,16 +627,17 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> { }; -class LIsNullAndBranch: public LControlInstruction<1, 0> { +class LIsNilAndBranch: public LControlInstruction<1, 0> { public: - explicit LIsNullAndBranch(LOperand* value) { + explicit LIsNilAndBranch(LOperand* value) { inputs_[0] = value; } - DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch) + DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch") + DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch) - bool is_strict() const { return hydrogen()->is_strict(); } + EqualityKind kind() const { return hydrogen()->kind(); } + NilValue nil() const { return hydrogen()->nil(); } virtual void PrintDataTo(StringStream* stream); }; @@ -2159,7 +2160,8 @@ class LChunkBuilder BASE_EMBEDDED { LInstruction* instr, int ast_id); void ClearInstructionPendingDeoptimizationEnvironment(); - LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env); + LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env, + int* argument_index_accumulator); void VisitInstruction(HInstruction* current); diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index f5d7449149..70ef884816 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -82,6 +82,14 @@ bool LCodeGen::GenerateCode() { status_ = GENERATING; CpuFeatures::Scope scope1(VFP3); CpuFeatures::Scope scope2(ARMv7); + + CodeStub::GenerateFPStubs(); + + // Open a frame scope to indicate that there is a frame on the stack. The + // NONE indicates that the scope shouldn't actually generate code to set up + // the frame (that is done in GeneratePrologue). + FrameScope frame_scope(masm_, StackFrame::NONE); + return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && @@ -206,13 +214,11 @@ bool LCodeGen::GeneratePrologue() { // Load parameter from stack. __ ldr(r0, MemOperand(fp, parameter_offset)); // Store it in the context. - __ mov(r1, Operand(Context::SlotOffset(var->index()))); - __ str(r0, MemOperand(cp, r1)); - // Update the write barrier. This clobbers all involved - // registers, so we have to use two more registers to avoid - // clobbering cp. 
- __ mov(r2, Operand(cp)); - __ RecordWrite(r2, Operand(r1), r3, r0); + MemOperand target = ContextOperand(cp, var->index()); + __ str(r0, target); + // Update the write barrier. This clobbers r3 and r0. + __ RecordWriteContextSlot( + cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs); } } Comment(";;; End allocate local context"); @@ -262,6 +268,9 @@ bool LCodeGen::GenerateDeferredCode() { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; __ bind(code->entry()); + Comment(";;; Deferred code @%d: %s.", + code->instruction_index(), + code->instr()->Mnemonic()); code->Generate(); __ jmp(code->exit()); } @@ -739,7 +748,7 @@ void LCodeGen::RecordSafepoint( int deoptimization_index) { ASSERT(expected_safepoint_kind_ == kind); - const ZoneList* operands = pointers->operands(); + const ZoneList* operands = pointers->GetNormalizedOperands(); Safepoint safepoint = safepoints_.DefineSafepoint(masm(), kind, arguments, deoptimization_index); for (int i = 0; i < operands->length(); i++) { @@ -1032,6 +1041,7 @@ void LCodeGen::DoDivI(LDivI* instr) { virtual void Generate() { codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV); } + virtual LInstruction* instr() { return instr_; } private: LDivI* instr_; }; @@ -1743,25 +1753,35 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) { } -void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) { +void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) { Register scratch = scratch0(); Register reg = ToRegister(instr->InputAt(0)); + int false_block = chunk_->LookupDestination(instr->false_block_id()); - // TODO(fsc): If the expression is known to be a smi, then it's - // definitely not null. Jump to the false block. + // If the expression is known to be untagged or a smi, then it's definitely + // not null, and it can't be a an undetectable object. + if (instr->hydrogen()->representation().IsSpecialization() || + instr->hydrogen()->type().IsSmi()) { + EmitGoto(false_block); + return; + } int true_block = chunk_->LookupDestination(instr->true_block_id()); - int false_block = chunk_->LookupDestination(instr->false_block_id()); - - __ LoadRoot(ip, Heap::kNullValueRootIndex); + Heap::RootListIndex nil_value = instr->nil() == kNullValue ? + Heap::kNullValueRootIndex : + Heap::kUndefinedValueRootIndex; + __ LoadRoot(ip, nil_value); __ cmp(reg, ip); - if (instr->is_strict()) { + if (instr->kind() == kStrictEquality) { EmitBranch(true_block, false_block, eq); } else { + Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ? + Heap::kUndefinedValueRootIndex : + Heap::kNullValueRootIndex; Label* true_label = chunk_->GetAssemblyLabel(true_block); Label* false_label = chunk_->GetAssemblyLabel(false_block); __ b(eq, true_label); - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ LoadRoot(ip, other_nil_value); __ cmp(reg, ip); __ b(eq, true_label); __ JumpIfSmi(reg, false_label); @@ -1918,28 +1938,36 @@ void LCodeGen::EmitClassOfTest(Label* is_true, ASSERT(!input.is(temp)); ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register. __ JumpIfSmi(input, is_false); - __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE); - __ b(lt, is_false); - // Map is now in temp. - // Functions have class 'Function'. 
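The code generated for the non-strict case checks the cheap possibilities first: compare against the requested nil root, then the other nil root, then rule out smis, and only then consult the map's undetectable bit. A rough C++ model of that ordering, with tag handling simplified and `is_undetectable` standing in for the bit the real code reads from the object's map:

    #include <cstdint>

    bool NonStrictNilBranch(uintptr_t value, uintptr_t null_root,
                            uintptr_t undefined_root, bool is_undetectable) {
      if (value == null_root) return true;       // cmp against the requested nil
      if (value == undefined_root) return true;  // cmp against the other nil
      if ((value & 1) == 0) return false;        // smis (low tag bit clear) are never nil
      return is_undetectable;                    // undetectable objects compare equal to nil
    }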
- __ CompareInstanceType(temp, temp2, FIRST_CALLABLE_SPEC_OBJECT_TYPE); if (class_name->IsEqualTo(CStrVector("Function"))) { - __ b(ge, is_true); + // Assuming the following assertions, we can use the same compares to test + // for both being a function type and being in the object type range. + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); + STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == + FIRST_SPEC_OBJECT_TYPE + 1); + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == + LAST_SPEC_OBJECT_TYPE - 1); + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE); + __ b(lt, is_false); + __ b(eq, is_true); + __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE)); + __ b(eq, is_true); } else { - __ b(ge, is_false); + // Faster code path to avoid two compares: subtract lower bound from the + // actual type and do a signed compare with the width of the type range. + __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset)); + __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset)); + __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - + FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ b(gt, is_false); } + // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. // Check if the constructor in the map is a function. __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset)); - // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type and - // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after - // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter. - STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); - STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE == - LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1); - // Objects with a non-function constructor have class 'Object'. __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE); if (class_name->IsEqualTo(CStrVector("Object"))) { @@ -2016,9 +2044,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { virtual void Generate() { codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_); } - + virtual LInstruction* instr() { return instr_; } Label* map_check() { return &map_check_; } - private: LInstanceOfKnownGlobal* instr_; Label map_check_; @@ -2180,7 +2207,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { Register result = ToRegister(instr->result()); __ mov(ip, Operand(Handle(instr->hydrogen()->cell()))); __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); - if (instr->hydrogen()->check_hole_value()) { + if (instr->hydrogen()->RequiresHoleCheck()) { __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ cmp(result, ip); DeoptimizeIf(eq, instr->environment()); @@ -2203,6 +2230,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { Register value = ToRegister(instr->InputAt(0)); Register scratch = scratch0(); + Register scratch2 = ToRegister(instr->TempAt(0)); // Load the cell. __ mov(scratch, Operand(Handle(instr->hydrogen()->cell()))); @@ -2211,8 +2239,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { // been deleted from the property dictionary. In that case, we need // to update the property details in the property dictionary to mark // it as no longer deleted. 
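The faster code path mentioned in the comment above rests on the classic "subtract the lower bound, then do one compare against the width" idiom, which folds `lower <= t && t <= upper` into a single comparison. A generic sketch with placeholder bounds, not V8's instance-type constants:

    #include <cstdint>

    constexpr int kFirstNonCallableType = 100;  // illustrative lower bound
    constexpr int kLastNonCallableType = 120;   // illustrative upper bound

    bool InNonCallableRange(int instance_type) {
      // One unsigned compare covers both bounds: values below the lower bound
      // wrap around to large unsigned numbers and fail the test.
      uint32_t adjusted =
          static_cast<uint32_t>(instance_type - kFirstNonCallableType);
      return adjusted <=
             static_cast<uint32_t>(kLastNonCallableType - kFirstNonCallableType);
    }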
- if (instr->hydrogen()->check_hole_value()) { - Register scratch2 = ToRegister(instr->TempAt(0)); + if (instr->hydrogen()->RequiresHoleCheck()) { __ ldr(scratch2, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); @@ -2222,6 +2249,15 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { // Store the value. __ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); + + // Cells are always in the remembered set. + __ RecordWriteField(scratch, + JSGlobalPropertyCell::kValueOffset, + value, + scratch2, + kLRHasBeenSaved, + kSaveFPRegs, + OMIT_REMEMBERED_SET); } @@ -2247,10 +2283,15 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { Register context = ToRegister(instr->context()); Register value = ToRegister(instr->value()); - __ str(value, ContextOperand(context, instr->slot_index())); + MemOperand target = ContextOperand(context, instr->slot_index()); + __ str(value, target); if (instr->needs_write_barrier()) { - int offset = Context::SlotOffset(instr->slot_index()); - __ RecordWrite(context, Operand(offset), value, scratch0()); + __ RecordWriteContextSlot(context, + target.offset(), + value, + scratch0(), + kLRHasBeenSaved, + kSaveFPRegs); } } @@ -2500,13 +2541,9 @@ void LCodeGen::DoLoadKeyedFastDoubleElement( Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); } - if (instr->hydrogen()->RequiresHoleCheck()) { - // TODO(danno): If no hole check is required, there is no need to allocate - // elements into a temporary register, instead scratch can be used. - __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); - __ cmp(scratch, Operand(kHoleNanUpper32)); - DeoptimizeIf(eq, instr->environment()); - } + __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); + __ cmp(scratch, Operand(kHoleNanUpper32)); + DeoptimizeIf(eq, instr->environment()); __ vldr(result, elements, 0); } @@ -2577,6 +2614,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( case EXTERNAL_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -2906,6 +2944,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { virtual void Generate() { codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); } + virtual LInstruction* instr() { return instr_; } private: LUnaryMathOperation* instr_; }; @@ -3202,7 +3241,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) { ASSERT(ToRegister(instr->result()).is(r0)); int arity = instr->arity(); - CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT); + CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); __ Drop(1); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -3262,7 +3301,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { __ str(value, FieldMemOperand(object, offset)); if (instr->needs_write_barrier()) { // Update the write barrier for the object for in-object properties. - __ RecordWrite(object, Operand(offset), value, scratch); + __ RecordWriteField( + object, offset, value, scratch, kLRHasBeenSaved, kSaveFPRegs); } } else { __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); @@ -3270,7 +3310,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { if (instr->needs_write_barrier()) { // Update the write barrier for the properties array. 
// object is used as a scratch register. - __ RecordWrite(scratch, Operand(offset), value, object); + __ RecordWriteField( + scratch, offset, value, object, kLRHasBeenSaved, kSaveFPRegs); } } } @@ -3301,6 +3342,13 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; Register scratch = scratch0(); + // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS + // conversion, so it deopts in that case. + if (instr->hydrogen()->ValueNeedsSmiCheck()) { + __ tst(value, Operand(kSmiTagMask)); + DeoptimizeIf(ne, instr->environment()); + } + // Do the store. if (instr->key()->IsConstantOperand()) { ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); @@ -3315,8 +3363,8 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { if (instr->hydrogen()->NeedsWriteBarrier()) { // Compute address of modified element and store it into key register. - __ add(key, scratch, Operand(FixedArray::kHeaderSize)); - __ RecordWrite(elements, key, value); + __ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ RecordWrite(elements, key, value, kLRHasBeenSaved, kSaveFPRegs); } } @@ -3417,6 +3465,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( case EXTERNAL_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3452,6 +3501,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } + virtual LInstruction* instr() { return instr_; } private: LStringCharCodeAt* instr_; }; @@ -3575,6 +3625,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); } + virtual LInstruction* instr() { return instr_; } private: LStringCharFromCode* instr_; }; @@ -3646,6 +3697,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) { DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); } + virtual LInstruction* instr() { return instr_; } private: LNumberTagI* instr_; }; @@ -3711,6 +3763,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } + virtual LInstruction* instr() { return instr_; } private: LNumberTagD* instr_; }; @@ -3819,16 +3872,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, } -class DeferredTaggedToI: public LDeferredCode { - public: - DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) - : LDeferredCode(codegen), instr_(instr) { } - virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); } - private: - LTaggedToI* instr_; -}; - - void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { Register input_reg = ToRegister(instr->InputAt(0)); Register scratch1 = scratch0(); @@ -3911,6 +3954,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { void LCodeGen::DoTaggedToI(LTaggedToI* instr) { + class DeferredTaggedToI: public LDeferredCode { + public: 
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); } + virtual LInstruction* instr() { return instr_; } + private: + LTaggedToI* instr_; + }; + LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister()); ASSERT(input->Equals(instr->result())); @@ -4343,10 +4396,12 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, final_branch_condition = ne; } else if (type_name->Equals(heap()->function_symbol())) { + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ JumpIfSmi(input, false_label); - __ CompareObjectType(input, input, scratch, - FIRST_CALLABLE_SPEC_OBJECT_TYPE); - final_branch_condition = ge; + __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE); + __ b(eq, true_label); + __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE)); + final_branch_condition = eq; } else if (type_name->Equals(heap()->object_symbol())) { __ JumpIfSmi(input, false_label); @@ -4468,6 +4523,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); } + virtual LInstruction* instr() { return instr_; } private: LStackCheck* instr_; }; diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index ead8489034..711e4595e7 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -376,16 +376,20 @@ class LCodeGen BASE_EMBEDDED { class LDeferredCode: public ZoneObject { public: explicit LDeferredCode(LCodeGen* codegen) - : codegen_(codegen), external_exit_(NULL) { + : codegen_(codegen), + external_exit_(NULL), + instruction_index_(codegen->current_instruction_) { codegen->AddDeferredCode(this); } virtual ~LDeferredCode() { } virtual void Generate() = 0; + virtual LInstruction* instr() = 0; void SetExit(Label *exit) { external_exit_ = exit; } Label* entry() { return &entry_; } Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } + int instruction_index() const { return instruction_index_; } protected: LCodeGen* codegen() const { return codegen_; } @@ -396,6 +400,7 @@ class LDeferredCode: public ZoneObject { Label entry_; Label exit_; Label* external_exit_; + int instruction_index_; }; } } // namespace v8::internal diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index f37f310218..b274b2fe42 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -42,7 +42,8 @@ namespace internal { MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) : Assembler(arg_isolate, buffer, size), generating_stub_(false), - allow_stub_calls_(true) { + allow_stub_calls_(true), + has_frame_(false) { if (isolate() != NULL) { code_object_ = Handle(isolate()->heap()->undefined_value(), isolate()); @@ -406,32 +407,6 @@ void MacroAssembler::StoreRoot(Register source, } -void MacroAssembler::RecordWriteHelper(Register object, - Register address, - Register scratch) { - if (emit_debug_code()) { - // Check that the object is not in new space. - Label not_in_new_space; - InNewSpace(object, scratch, ne, ¬_in_new_space); - Abort("new-space object passed to RecordWriteHelper"); - bind(¬_in_new_space); - } - - // Calculate page address. - Bfc(object, 0, kPageSizeBits); - - // Calculate region number. 
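The typeof check above now accepts two instance types because callable proxies must also report "function". A simplified decision table covering only the cases this hunk touches (other typeof results and the undetectable-object case are not modeled):

    enum class InstanceType { kJSFunction, kJSFunctionProxy, kJSObject, kOther };

    const char* TypeofForSpecObject(InstanceType type) {
      if (type == InstanceType::kJSFunction ||
          type == InstanceType::kJSFunctionProxy) {
        return "function";
      }
      return "object";  // simplified: remaining spec-object cases
    }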
- Ubfx(address, address, Page::kRegionSizeLog2, - kPageSizeBits - Page::kRegionSizeLog2); - - // Mark region dirty. - ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset)); - mov(ip, Operand(1)); - orr(scratch, scratch, Operand(ip, LSL, address)); - str(scratch, MemOperand(object, Page::kDirtyFlagOffset)); -} - - void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cond, @@ -443,38 +418,52 @@ void MacroAssembler::InNewSpace(Register object, } -// Will clobber 4 registers: object, offset, scratch, ip. The -// register 'object' contains a heap object pointer. The heap object -// tag is shifted away. -void MacroAssembler::RecordWrite(Register object, - Operand offset, - Register scratch0, - Register scratch1) { - // The compiled code assumes that record write doesn't change the - // context register, so we check that none of the clobbered - // registers are cp. - ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp)); - +void MacroAssembler::RecordWriteField( + Register object, + int offset, + Register value, + Register dst, + LinkRegisterStatus lr_status, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action, + SmiCheck smi_check) { + // First, check if a write barrier is even needed. The tests below + // catch stores of Smis. Label done; - // First, test that the object is not in the new space. We cannot set - // region marks for new space pages. - InNewSpace(object, scratch0, eq, &done); + // Skip barrier if writing a smi. + if (smi_check == INLINE_SMI_CHECK) { + JumpIfSmi(value, &done); + } - // Add offset into the object. - add(scratch0, object, offset); + // Although the object register is tagged, the offset is relative to the start + // of the object, so so offset must be a multiple of kPointerSize. + ASSERT(IsAligned(offset, kPointerSize)); - // Record the actual write. - RecordWriteHelper(object, scratch0, scratch1); + add(dst, object, Operand(offset - kHeapObjectTag)); + if (emit_debug_code()) { + Label ok; + tst(dst, Operand((1 << kPointerSizeLog2) - 1)); + b(eq, &ok); + stop("Unaligned cell in write barrier"); + bind(&ok); + } + + RecordWrite(object, + dst, + value, + lr_status, + save_fp, + remembered_set_action, + OMIT_SMI_CHECK); bind(&done); - // Clobber all input registers when running with the debug-code flag + // Clobber clobbered input registers when running with the debug-code flag // turned on to provoke errors. if (emit_debug_code()) { - mov(object, Operand(BitCast(kZapValue))); - mov(scratch0, Operand(BitCast(kZapValue))); - mov(scratch1, Operand(BitCast(kZapValue))); + mov(value, Operand(BitCast(kZapValue + 4))); + mov(dst, Operand(BitCast(kZapValue + 8))); } } @@ -484,29 +473,94 @@ void MacroAssembler::RecordWrite(Register object, // tag is shifted away. void MacroAssembler::RecordWrite(Register object, Register address, - Register scratch) { + Register value, + LinkRegisterStatus lr_status, + SaveFPRegsMode fp_mode, + RememberedSetAction remembered_set_action, + SmiCheck smi_check) { // The compiled code assumes that record write doesn't change the // context register, so we check that none of the clobbered // registers are cp. - ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp)); + ASSERT(!address.is(cp) && !value.is(cp)); Label done; - // First, test that the object is not in the new space. We cannot set - // region marks for new space pages. 
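For context on what is being deleted: the old RecordWriteHelper implemented a card-marking style barrier, locating the page that contains the written slot and setting one bit per fixed-size region in that page's dirty-flags word. A plain C++ rendering of that computation, with placeholder constants rather than the old Page layout:

    #include <cstdint>

    constexpr uintptr_t kPageSizeBits = 13;    // illustrative: 8 KB pages
    constexpr uintptr_t kRegionSizeLog2 = 10;  // illustrative: 1 KB dirty regions

    struct RegionMark {
      uintptr_t page_start;  // page header holding the dirty-flags word
      uint32_t region_bit;   // bit to OR into that word
    };

    RegionMark OldDirtyRegionFor(uintptr_t slot_address) {
      RegionMark mark;
      mark.page_start = slot_address & ~((uintptr_t{1} << kPageSizeBits) - 1);
      uintptr_t region = (slot_address >> kRegionSizeLog2) &
                         ((uintptr_t{1} << (kPageSizeBits - kRegionSizeLog2)) - 1);
      mark.region_bit = uint32_t{1} << region;
      return mark;
    }

The new garbage collector drops this scheme in favour of a store buffer plus incremental-marking barrier, which is why the helper disappears entirely.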
- InNewSpace(object, scratch, eq, &done); + if (smi_check == INLINE_SMI_CHECK) { + ASSERT_EQ(0, kSmiTag); + tst(value, Operand(kSmiTagMask)); + b(eq, &done); + } + + CheckPageFlag(value, + value, // Used as scratch. + MemoryChunk::kPointersToHereAreInterestingMask, + eq, + &done); + CheckPageFlag(object, + value, // Used as scratch. + MemoryChunk::kPointersFromHereAreInterestingMask, + eq, + &done); // Record the actual write. - RecordWriteHelper(object, address, scratch); + if (lr_status == kLRHasNotBeenSaved) { + push(lr); + } + RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); + CallStub(&stub); + if (lr_status == kLRHasNotBeenSaved) { + pop(lr); + } bind(&done); - // Clobber all input registers when running with the debug-code flag + // Clobber clobbered registers when running with the debug-code flag // turned on to provoke errors. if (emit_debug_code()) { - mov(object, Operand(BitCast(kZapValue))); - mov(address, Operand(BitCast(kZapValue))); - mov(scratch, Operand(BitCast(kZapValue))); + mov(address, Operand(BitCast(kZapValue + 12))); + mov(value, Operand(BitCast(kZapValue + 16))); + } +} + + +void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. + Register address, + Register scratch, + SaveFPRegsMode fp_mode, + RememberedSetFinalAction and_then) { + Label done; + if (FLAG_debug_code) { + Label ok; + JumpIfNotInNewSpace(object, scratch, &ok); + stop("Remembered set pointer is in new space"); + bind(&ok); + } + // Load store buffer top. + ExternalReference store_buffer = + ExternalReference::store_buffer_top(isolate()); + mov(ip, Operand(store_buffer)); + ldr(scratch, MemOperand(ip)); + // Store pointer to buffer and increment buffer top. + str(address, MemOperand(scratch, kPointerSize, PostIndex)); + // Write back new top of buffer. + str(scratch, MemOperand(ip)); + // Call stub on end of buffer. + // Check for end of buffer. + tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit)); + if (and_then == kFallThroughAtEnd) { + b(eq, &done); + } else { + ASSERT(and_then == kReturnAtEnd); + Ret(ne); + } + push(lr); + StoreBufferOverflowStub store_buffer_overflow = + StoreBufferOverflowStub(fp_mode); + CallStub(&store_buffer_overflow); + pop(lr); + bind(&done); + if (and_then == kReturnAtEnd) { + Ret(); } } @@ -961,6 +1015,9 @@ void MacroAssembler::InvokeCode(Register code, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { + // You can't call a function without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + Label done; InvokePrologue(expected, actual, Handle::null(), code, &done, flag, @@ -988,6 +1045,9 @@ void MacroAssembler::InvokeCode(Handle code, RelocInfo::Mode rmode, InvokeFlag flag, CallKind call_kind) { + // You can't call a function without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + Label done; InvokePrologue(expected, actual, code, no_reg, &done, flag, @@ -1011,6 +1071,9 @@ void MacroAssembler::InvokeFunction(Register fun, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { + // You can't call a function without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + // Contract with called JS functions requires that function is passed in r1. ASSERT(fun.is(r1)); @@ -1035,6 +1098,9 @@ void MacroAssembler::InvokeFunction(JSFunction* function, const ParameterCount& actual, InvokeFlag flag, CallKind call_kind) { + // You can't call a function without a valid frame. 
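RememberedSetHelper is the software half of the new barrier: append the slot address to the store buffer, bump the buffer top, and only call out to a stub when an overflow bit in the top pointer becomes set. A minimal host-side model; the struct and field names are illustrative, not V8's:

    #include <cstdint>

    struct StoreBuffer {
      uintptr_t** top_location;   // address of the external "store buffer top" slot
      uintptr_t overflow_bit;     // becomes set in the top pointer once the buffer is full
      void (*handle_overflow)();  // stub that drains/compacts the buffer
    };

    void RememberSlot(StoreBuffer* sb, uintptr_t* slot) {
      uintptr_t*& top = *sb->top_location;
      *top++ = reinterpret_cast<uintptr_t>(slot);        // str address, [top], #4
      if (reinterpret_cast<uintptr_t>(top) & sb->overflow_bit) {
        sb->handle_overflow();                           // StoreBufferOverflowStub
      }
    }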
+ ASSERT(flag == JUMP_FUNCTION || has_frame()); + ASSERT(function->is_compiled()); // Get the function and setup the context. @@ -1090,10 +1156,10 @@ void MacroAssembler::IsObjectJSStringType(Register object, #ifdef ENABLE_DEBUGGER_SUPPORT void MacroAssembler::DebugBreak() { - ASSERT(allow_stub_calls()); mov(r0, Operand(0, RelocInfo::NONE)); mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); CEntryStub ces(1); + ASSERT(AllowThisStubCall(&ces)); Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); } #endif @@ -1793,13 +1859,127 @@ void MacroAssembler::CompareRoot(Register obj, void MacroAssembler::CheckFastElements(Register map, Register scratch, Label* fail) { - STATIC_ASSERT(FAST_ELEMENTS == 0); + STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); + STATIC_ASSERT(FAST_ELEMENTS == 1); ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue)); b(hi, fail); } +void MacroAssembler::CheckFastObjectElements(Register map, + Register scratch, + Label* fail) { + STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); + STATIC_ASSERT(FAST_ELEMENTS == 1); + ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); + cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue)); + b(ls, fail); + cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue)); + b(hi, fail); +} + + +void MacroAssembler::CheckFastSmiOnlyElements(Register map, + Register scratch, + Label* fail) { + STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); + ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); + cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue)); + b(hi, fail); +} + + +void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, + Register key_reg, + Register receiver_reg, + Register elements_reg, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Label* fail) { + Label smi_value, maybe_nan, have_double_value, is_nan, done; + Register mantissa_reg = scratch2; + Register exponent_reg = scratch3; + + // Handle smi values specially. + JumpIfSmi(value_reg, &smi_value); + + // Ensure that the object is a heap number + CheckMap(value_reg, + scratch1, + isolate()->factory()->heap_number_map(), + fail, + DONT_DO_SMI_CHECK); + + // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000 + // in the exponent. + mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32)); + ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset)); + cmp(exponent_reg, scratch1); + b(ge, &maybe_nan); + + ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); + + bind(&have_double_value); + add(scratch1, elements_reg, + Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); + str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize)); + uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); + str(exponent_reg, FieldMemOperand(scratch1, offset)); + jmp(&done); + + bind(&maybe_nan); + // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise + // it's an Infinity, and the non-NaN code path applies. + b(gt, &is_nan); + ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); + cmp(mantissa_reg, Operand(0)); + b(eq, &have_double_value); + bind(&is_nan); + // Load canonical NaN for storing into the double array. 
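The canonical NaN loaded in the statement that follows, and the hole check in the keyed double load earlier in this file, exist for the same reason: "the hole" in a FixedDoubleArray is a reserved NaN bit pattern, recognizable from its upper 32 bits alone, so every NaN that gets stored is first rewritten to one canonical payload that can never collide with it. A host-side sketch; the bit patterns are illustrative, not copied from V8:

    #include <cstdint>
    #include <cstring>

    constexpr uint32_t kHoleNanUpper32 = 0x7FF7FFFF;           // illustrative
    constexpr uint64_t kCanonicalNan = 0x7FF8000000000000ULL;  // a fixed quiet NaN

    bool IsTheHole(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;  // upper word only
    }

    double CanonicalizeNan(double d) {
      if (d != d) {  // NaN is the only value unequal to itself
        std::memcpy(&d, &kCanonicalNan, sizeof d);
      }
      return d;
    }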
+ uint64_t nan_int64 = BitCast( + FixedDoubleArray::canonical_not_the_hole_nan_as_double()); + mov(mantissa_reg, Operand(static_cast(nan_int64))); + mov(exponent_reg, Operand(static_cast(nan_int64 >> 32))); + jmp(&have_double_value); + + bind(&smi_value); + add(scratch1, elements_reg, + Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + add(scratch1, scratch1, + Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); + // scratch1 is now effective address of the double element + + FloatingPointHelper::Destination destination; + if (CpuFeatures::IsSupported(VFP3)) { + destination = FloatingPointHelper::kVFPRegisters; + } else { + destination = FloatingPointHelper::kCoreRegisters; + } + + Register untagged_value = receiver_reg; + SmiUntag(untagged_value, value_reg); + FloatingPointHelper::ConvertIntToDouble(this, + untagged_value, + destination, + d0, + mantissa_reg, + exponent_reg, + scratch4, + s2); + if (destination == FloatingPointHelper::kVFPRegisters) { + CpuFeatures::Scope scope(VFP3); + vstr(d0, scratch1, 0); + } else { + str(mantissa_reg, MemOperand(scratch1, 0)); + str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes)); + } + bind(&done); +} + + void MacroAssembler::CheckMap(Register obj, Register scratch, Handle map, @@ -1895,13 +2075,13 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, void MacroAssembler::CallStub(CodeStub* stub, Condition cond) { - ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. + ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond); } MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) { - ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. + ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. Object* result; { MaybeObject* maybe_result = stub->TryGetCode(); if (!maybe_result->ToObject(&result)) return maybe_result; @@ -1913,13 +2093,12 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) { void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { - ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. + ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe()); Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); } MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) { - ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. 
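The ASSERTs switched to AllowThisStubCall encode the new frame discipline: when the macro assembler has not set up a frame, only stubs that never build one themselves may be called; otherwise the usual allowance applies. As a plain predicate, mirroring the helper added in this patch:

    bool AllowThisStubCall(bool has_frame,
                           bool stub_sometimes_sets_up_a_frame,
                           bool allow_stub_calls,
                           bool stub_calls_are_gc_safe) {
      if (!has_frame && stub_sometimes_sets_up_a_frame) return false;
      return allow_stub_calls || stub_calls_are_gc_safe;
    }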
Object* result; { MaybeObject* maybe_result = stub->TryGetCode(); if (!maybe_result->ToObject(&result)) return maybe_result; @@ -2022,6 +2201,12 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn( } +bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { + if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false; + return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(); +} + + void MacroAssembler::IllegalOperation(int num_arguments) { if (num_arguments > 0) { add(sp, sp, Operand(num_arguments * kPointerSize)); @@ -2417,8 +2602,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { const Runtime::Function* function = Runtime::FunctionForId(id); mov(r0, Operand(function->nargs)); mov(r1, Operand(ExternalReference(function, isolate()))); - CEntryStub stub(1); - stub.SaveDoubles(); + CEntryStub stub(1, kSaveFPRegs); CallStub(&stub); } @@ -2491,6 +2675,9 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference( void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, const CallWrapper& call_wrapper) { + // You can't call a builtin without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + GetBuiltinEntry(r2, id); if (flag == CALL_FUNCTION) { call_wrapper.BeforeCall(CallSize(r2)); @@ -2622,14 +2809,20 @@ void MacroAssembler::Abort(const char* msg) { RecordComment(msg); } #endif - // Disable stub call restrictions to always allow calls to abort. - AllowStubCallsScope allow_scope(this, true); mov(r0, Operand(p0)); push(r0); mov(r0, Operand(Smi::FromInt(p1 - p0))); push(r0); - CallRuntime(Runtime::kAbort, 2); + // Disable stub call restrictions to always allow calls to abort. + if (!has_frame_) { + // We don't actually want to generate a pile of code for this, so just + // claim there is a stack frame, without generating one. + FrameScope scope(this, StackFrame::NONE); + CallRuntime(Runtime::kAbort, 2); + } else { + CallRuntime(Runtime::kAbort, 2); + } // will not return here if (is_const_pool_blocked()) { // If the calling code cares about the exact number of @@ -2930,6 +3123,19 @@ void MacroAssembler::CopyBytes(Register src, } +void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, + Register end_offset, + Register filler) { + Label loop, entry; + b(&entry); + bind(&loop); + str(filler, MemOperand(start_offset, kPointerSize, PostIndex)); + bind(&entry); + cmp(start_offset, end_offset); + b(lt, &loop); +} + + void MacroAssembler::CountLeadingZeros(Register zeros, // Answer. Register source, // Input. 
Register scratch) { @@ -3089,23 +3295,15 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg, void MacroAssembler::CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments) { - CallCFunctionHelper(no_reg, - function, - ip, - num_reg_arguments, - num_double_arguments); + mov(ip, Operand(function)); + CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments); } void MacroAssembler::CallCFunction(Register function, - Register scratch, - int num_reg_arguments, - int num_double_arguments) { - CallCFunctionHelper(function, - ExternalReference::the_hole_value_location(isolate()), - scratch, - num_reg_arguments, - num_double_arguments); + int num_reg_arguments, + int num_double_arguments) { + CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); } @@ -3116,17 +3314,15 @@ void MacroAssembler::CallCFunction(ExternalReference function, void MacroAssembler::CallCFunction(Register function, - Register scratch, int num_arguments) { - CallCFunction(function, scratch, num_arguments, 0); + CallCFunction(function, num_arguments, 0); } void MacroAssembler::CallCFunctionHelper(Register function, - ExternalReference function_reference, - Register scratch, int num_reg_arguments, int num_double_arguments) { + ASSERT(has_frame()); // Make sure that the stack is aligned before calling a C function unless // running in the simulator. The simulator has its own alignment check which // provides more information. @@ -3150,10 +3346,6 @@ void MacroAssembler::CallCFunctionHelper(Register function, // Just call directly. The function called cannot cause a GC, or // allow preemption, so the return address in the link register // stays correct. - if (function.is(no_reg)) { - mov(scratch, Operand(function_reference)); - function = scratch; - } Call(function); int stack_passed_arguments = CalculateStackPassedWords( num_reg_arguments, num_double_arguments); @@ -3185,6 +3377,185 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location, } +void MacroAssembler::CheckPageFlag( + Register object, + Register scratch, + int mask, + Condition cc, + Label* condition_met) { + and_(scratch, object, Operand(~Page::kPageAlignmentMask)); + ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); + tst(scratch, Operand(mask)); + b(cc, condition_met); +} + + +void MacroAssembler::JumpIfBlack(Register object, + Register scratch0, + Register scratch1, + Label* on_black) { + HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. + ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); +} + + +void MacroAssembler::HasColor(Register object, + Register bitmap_scratch, + Register mask_scratch, + Label* has_color, + int first_bit, + int second_bit) { + ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg)); + + GetMarkBits(object, bitmap_scratch, mask_scratch); + + Label other_color, word_boundary; + ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); + tst(ip, Operand(mask_scratch)); + b(first_bit == 1 ? eq : ne, &other_color); + // Shift left 1 by adding. + add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC); + b(eq, &word_boundary); + tst(ip, Operand(mask_scratch)); + b(second_bit == 1 ? ne : eq, has_color); + jmp(&other_color); + + bind(&word_boundary); + ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize)); + tst(ip, Operand(1)); + b(second_bit == 1 ? ne : eq, has_color); + bind(&other_color); +} + + +// Detect some, but not all, common pointer-free objects. 
This is used by the +// incremental write barrier which doesn't care about oddballs (they are always +// marked black immediately so this code is not hit). +void MacroAssembler::JumpIfDataObject(Register value, + Register scratch, + Label* not_data_object) { + Label is_data_object; + ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); + CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); + b(eq, &is_data_object); + ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); + ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); + // If it's a string and it's not a cons string then it's an object containing + // no GC pointers. + ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask)); + b(ne, not_data_object); + bind(&is_data_object); +} + + +void MacroAssembler::GetMarkBits(Register addr_reg, + Register bitmap_reg, + Register mask_reg) { + ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); + and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask)); + Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); + const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; + Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits); + add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2)); + mov(ip, Operand(1)); + mov(mask_reg, Operand(ip, LSL, mask_reg)); +} + + +void MacroAssembler::EnsureNotWhite( + Register value, + Register bitmap_scratch, + Register mask_scratch, + Register load_scratch, + Label* value_is_white_and_not_data) { + ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip)); + GetMarkBits(value, bitmap_scratch, mask_scratch); + + // If the value is black or grey we don't need to do anything. + ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); + ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); + ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); + ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); + + Label done; + + // Since both black and grey have a 1 in the first position and white does + // not have a 1 there we only need to check one bit. + ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); + tst(mask_scratch, load_scratch); + b(ne, &done); + + if (FLAG_debug_code) { + // Check for impossible bit pattern. + Label ok; + // LSL may overflow, making the check conservative. + tst(load_scratch, Operand(mask_scratch, LSL, 1)); + b(eq, &ok); + stop("Impossible marking bit pattern"); + bind(&ok); + } + + // Value is white. We check whether it is data that doesn't need scanning. + // Currently only checks for HeapNumber and non-cons strings. + Register map = load_scratch; // Holds map while checking type. + Register length = load_scratch; // Holds length of object after testing type. + Label is_data_object; + + // Check for heap-number + ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); + CompareRoot(map, Heap::kHeapNumberMapRootIndex); + mov(length, Operand(HeapNumber::kSize), LeaveCC, eq); + b(eq, &is_data_object); + + // Check for strings. + ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); + ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); + // If it's a string and it's not a cons string then it's an object containing + // no GC pointers. 
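GetMarkBits uses the same page-masking idea as CheckPageFlag: because pages are aligned, masking the low bits of any address inside a page yields the page header, and the remaining address bits index the marking bitmap, one bit per pointer-sized word grouped into 32-bit cells. A sketch of the address arithmetic, with illustrative layout constants:

    #include <cstdint>

    constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << 20) - 1;  // 1 MB pages
    constexpr int kPointerSizeLog2 = 2;                                 // 32-bit ARM
    constexpr int kBitsPerCellLog2 = 5;                                 // 32-bit bitmap cells

    struct MarkBitLocation {
      uintptr_t cell_address;  // word in the bitmap holding this object's mark bits
      uint32_t mask;           // single bit within that word
    };

    MarkBitLocation GetMarkBits(uintptr_t object_address, uintptr_t bitmap_offset) {
      uintptr_t page = object_address & ~kPageAlignmentMask;
      uint32_t bit =
          (object_address >> kPointerSizeLog2) & ((1u << kBitsPerCellLog2) - 1);
      uintptr_t cell = (object_address & kPageAlignmentMask) >>
                       (kPointerSizeLog2 + kBitsPerCellLog2);
      return { page + bitmap_offset + cell * sizeof(uint32_t),
               uint32_t{1} << bit };
    }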
+ Register instance_type = load_scratch; + ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset)); + tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask)); + b(ne, value_is_white_and_not_data); + // It's a non-indirect (non-cons and non-slice) string. + // If it's external, the length is just ExternalString::kSize. + // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). + // External strings are the only ones with the kExternalStringTag bit + // set. + ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); + ASSERT_EQ(0, kConsStringTag & kExternalStringTag); + tst(instance_type, Operand(kExternalStringTag)); + mov(length, Operand(ExternalString::kSize), LeaveCC, ne); + b(ne, &is_data_object); + + // Sequential string, either ASCII or UC16. + // For ASCII (char-size of 1) we shift the smi tag away to get the length. + // For UC16 (char-size of 2) we just leave the smi tag in place, thereby + // getting the length multiplied by 2. + ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4); + ASSERT(kSmiTag == 0 && kSmiTagSize == 1); + ldr(ip, FieldMemOperand(value, String::kLengthOffset)); + tst(instance_type, Operand(kStringEncodingMask)); + mov(ip, Operand(ip, LSR, 1), LeaveCC, ne); + add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask)); + and_(length, length, Operand(~kObjectAlignmentMask)); + + bind(&is_data_object); + // Value is a data object, and it is white. Mark it black. Since we know + // that the object is white we can make it black by flipping one bit. + ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); + orr(ip, ip, Operand(mask_scratch)); + str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); + + and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask)); + ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); + add(ip, ip, Operand(length)); + str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); + + bind(&done); +} + + void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { Usat(output_reg, 8, Operand(input_reg)); } @@ -3234,6 +3605,17 @@ void MacroAssembler::LoadInstanceDescriptors(Register map, } +bool AreAliased(Register r1, Register r2, Register r3, Register r4) { + if (r1.is(r2)) return true; + if (r1.is(r3)) return true; + if (r1.is(r4)) return true; + if (r2.is(r3)) return true; + if (r2.is(r4)) return true; + if (r3.is(r4)) return true; + return false; +} + + CodePatcher::CodePatcher(byte* address, int instructions) : address_(address), instructions_(instructions), diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 6084fde2d3..8ee468a917 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -29,6 +29,7 @@ #define V8_ARM_MACRO_ASSEMBLER_ARM_H_ #include "assembler.h" +#include "frames.h" #include "v8globals.h" namespace v8 { @@ -79,6 +80,14 @@ enum ObjectToDoubleFlags { }; +enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; +enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; +enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved }; + + +bool AreAliased(Register r1, Register r2, Register r3, Register r4); + + // MacroAssembler implements a collection of frequently used macros. 
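The size computation in EnsureNotWhite feeds the page's live-bytes counter when a white data-only string is marked black: external strings have a fixed size, sequential strings are header plus one or two bytes per character, rounded up to the allocation alignment. Reformulated in C++ with illustrative sizes:

    constexpr int kSeqStringHeaderSize = 12;   // illustrative
    constexpr int kExternalStringSize = 20;    // illustrative: fixed-size wrapper
    constexpr int kObjectAlignmentMask = 3;    // word alignment

    int StringSizeForLiveBytes(bool is_external, bool is_two_byte, int length) {
      if (is_external) return kExternalStringSize;
      int payload = is_two_byte ? 2 * length : length;
      return (kSeqStringHeaderSize + payload + kObjectAlignmentMask) &
             ~kObjectAlignmentMask;
    }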
class MacroAssembler: public Assembler { public: @@ -157,40 +166,126 @@ class MacroAssembler: public Assembler { Heap::RootListIndex index, Condition cond = al); + // --------------------------------------------------------------------------- + // GC Support + + void IncrementalMarkingRecordWriteHelper(Register object, + Register value, + Register address); + + enum RememberedSetFinalAction { + kReturnAtEnd, + kFallThroughAtEnd + }; + + // Record in the remembered set the fact that we have a pointer to new space + // at the address pointed to by the addr register. Only works if addr is not + // in new space. + void RememberedSetHelper(Register object, // Used for debug code. + Register addr, + Register scratch, + SaveFPRegsMode save_fp, + RememberedSetFinalAction and_then); + + void CheckPageFlag(Register object, + Register scratch, + int mask, + Condition cc, + Label* condition_met); + + // Check if object is in new space. Jumps if the object is not in new space. + // The register scratch can be object itself, but scratch will be clobbered. + void JumpIfNotInNewSpace(Register object, + Register scratch, + Label* branch) { + InNewSpace(object, scratch, ne, branch); + } - // Check if object is in new space. - // scratch can be object itself, but it will be clobbered. - void InNewSpace(Register object, - Register scratch, - Condition cond, // eq for new space, ne otherwise - Label* branch); - + // Check if object is in new space. Jumps if the object is in new space. + // The register scratch can be object itself, but it will be clobbered. + void JumpIfInNewSpace(Register object, + Register scratch, + Label* branch) { + InNewSpace(object, scratch, eq, branch); + } - // For the page containing |object| mark the region covering [address] - // dirty. The object address must be in the first 8K of an allocated page. - void RecordWriteHelper(Register object, - Register address, - Register scratch); + // Check if an object has a given incremental marking color. + void HasColor(Register object, + Register scratch0, + Register scratch1, + Label* has_color, + int first_bit, + int second_bit); - // For the page containing |object| mark the region covering - // [object+offset] dirty. The object address must be in the first 8K - // of an allocated page. The 'scratch' registers are used in the - // implementation and all 3 registers are clobbered by the - // operation, as well as the ip register. RecordWrite updates the - // write barrier even when storing smis. - void RecordWrite(Register object, - Operand offset, + void JumpIfBlack(Register object, Register scratch0, - Register scratch1); + Register scratch1, + Label* on_black); + + // Checks the color of an object. If the object is already grey or black + // then we just fall through, since it is already live. If it is white and + // we can determine that it doesn't need to be scanned, then we just mark it + // black and fall through. For the rest we jump to the label so the + // incremental marker can fix its assumptions. + void EnsureNotWhite(Register object, + Register scratch1, + Register scratch2, + Register scratch3, + Label* object_is_white_and_not_data); - // For the page containing |object| mark the region covering - // [address] dirty. The object address must be in the first 8K of an - // allocated page. All 3 registers are clobbered by the operation, - // as well as the ip register. RecordWrite updates the write barrier - // even when storing smis. 
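HasColor, JumpIfBlack and EnsureNotWhite, declared below, all read the same two-bit-per-object encoding used by the incremental marker: white is 00, black is 10, grey is 11, and 01 never occurs. A tiny decoder for reference, with illustrative naming:

    enum class MarkColor { kWhite, kGrey, kBlack };

    // first_bit/second_bit correspond to the two mark bits tested by HasColor.
    MarkColor DecodeMarkBits(unsigned first_bit, unsigned second_bit) {
      if (first_bit == 0) return MarkColor::kWhite;  // "01" is an impossible pattern
      return second_bit ? MarkColor::kGrey : MarkColor::kBlack;
    }

    bool NeedsNoMarkingWork(MarkColor c) { return c != MarkColor::kWhite; }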
- void RecordWrite(Register object, - Register address, - Register scratch); + // Detects conservatively whether an object is data-only, ie it does need to + // be scanned by the garbage collector. + void JumpIfDataObject(Register value, + Register scratch, + Label* not_data_object); + + // Notify the garbage collector that we wrote a pointer into an object. + // |object| is the object being stored into, |value| is the object being + // stored. value and scratch registers are clobbered by the operation. + // The offset is the offset from the start of the object, not the offset from + // the tagged HeapObject pointer. For use with FieldOperand(reg, off). + void RecordWriteField( + Register object, + int offset, + Register value, + Register scratch, + LinkRegisterStatus lr_status, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK); + + // As above, but the offset has the tag presubtracted. For use with + // MemOperand(reg, off). + inline void RecordWriteContextSlot( + Register context, + int offset, + Register value, + Register scratch, + LinkRegisterStatus lr_status, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK) { + RecordWriteField(context, + offset + kHeapObjectTag, + value, + scratch, + lr_status, + save_fp, + remembered_set_action, + smi_check); + } + + // For a given |object| notify the garbage collector that the slot |address| + // has been written. |value| is the object being stored. The value and + // address registers are clobbered by the operation. + void RecordWrite( + Register object, + Register address, + Register value, + LinkRegisterStatus lr_status, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK); // Push a handle. void Push(Handle handle); @@ -318,16 +413,6 @@ class MacroAssembler: public Assembler { const double imm, const Condition cond = al); - - // --------------------------------------------------------------------------- - // Activation frames - - void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); } - void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); } - - void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); } - void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); } - // Enter exit frame. // stack_space - extra stack space, used for alignment before call to C. void EnterExitFrame(bool save_doubles, int stack_space = 0); @@ -569,6 +654,13 @@ class MacroAssembler: public Assembler { Register length, Register scratch); + // Initialize fields with filler values. Fields starting at |start_offset| + // not including end_offset are overwritten with the value in |filler|. At + // the end the loop, |start_offset| takes the value of |end_offset|. + void InitializeFieldsWithFiller(Register start_offset, + Register end_offset, + Register filler); + // --------------------------------------------------------------------------- // Support functions. @@ -608,6 +700,31 @@ class MacroAssembler: public Assembler { Register scratch, Label* fail); + // Check if a map for a JSObject indicates that the object can have both smi + // and HeapObject elements. Jump to the specified label if it does not. + void CheckFastObjectElements(Register map, + Register scratch, + Label* fail); + + // Check if a map for a JSObject indicates that the object has fast smi only + // elements. 
Jump to the specified label if it does not. + void CheckFastSmiOnlyElements(Register map, + Register scratch, + Label* fail); + + // Check to see if maybe_number can be stored as a double in + // FastDoubleElements. If it can, store it at the index specified by key in + // the FastDoubleElements array elements, otherwise jump to fail. + void StoreNumberToDoubleElements(Register value_reg, + Register key_reg, + Register receiver_reg, + Register elements_reg, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Label* fail); + // Check if the map of an object is equal to a specified map (either // given directly or as an index into the root list) and branch to // label if not. Skip the smi check if not required (object is known @@ -830,11 +947,11 @@ class MacroAssembler: public Assembler { // return address (unless this is somehow accounted for by the called // function). void CallCFunction(ExternalReference function, int num_arguments); - void CallCFunction(Register function, Register scratch, int num_arguments); + void CallCFunction(Register function, int num_arguments); void CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments); - void CallCFunction(Register function, Register scratch, + void CallCFunction(Register function, int num_reg_arguments, int num_double_arguments); @@ -902,6 +1019,9 @@ class MacroAssembler: public Assembler { bool generating_stub() { return generating_stub_; } void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; } bool allow_stub_calls() { return allow_stub_calls_; } + void set_has_frame(bool value) { has_frame_ = value; } + bool has_frame() { return has_frame_; } + inline bool AllowThisStubCall(CodeStub* stub); // EABI variant for double arguments in use. bool use_eabi_hardfloat() { @@ -1048,10 +1168,12 @@ class MacroAssembler: public Assembler { void LoadInstanceDescriptors(Register map, Register descriptors); + // Activation support. + void EnterFrame(StackFrame::Type type); + void LeaveFrame(StackFrame::Type type); + private: void CallCFunctionHelper(Register function, - ExternalReference function_reference, - Register scratch, int num_reg_arguments, int num_double_arguments); @@ -1067,16 +1189,25 @@ class MacroAssembler: public Assembler { const CallWrapper& call_wrapper, CallKind call_kind); - // Activation support. - void EnterFrame(StackFrame::Type type); - void LeaveFrame(StackFrame::Type type); - void InitializeNewString(Register string, Register length, Heap::RootListIndex map_index, Register scratch1, Register scratch2); + // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace. + void InNewSpace(Register object, + Register scratch, + Condition cond, // eq for new space, ne otherwise. + Label* branch); + + // Helper for finding the mark bits for an address. Afterwards, the + // bitmap register points at the word with the mark bits and the mask + // the position of the first bit. Leaves addr_reg unchanged. + inline void GetMarkBits(Register addr_reg, + Register bitmap_reg, + Register mask_reg); + // Compute memory operands for safepoint stack slots. static int SafepointRegisterStackIndex(int reg_code); MemOperand SafepointRegisterSlot(Register reg); @@ -1084,6 +1215,7 @@ class MacroAssembler: public Assembler { bool generating_stub_; bool allow_stub_calls_; + bool has_frame_; // This handle will be patched with the code object on installation. 
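The inline RecordWriteContextSlot above only translates between two offset conventions: context slots are addressed with raw MemOperand offsets, while RecordWriteField expects a field offset relative to the tagged pointer, so the heap-object tag is added before delegating and subtracted again when the address is formed. In arithmetic form:

    #include <cstdint>

    constexpr intptr_t kHeapObjectTag = 1;  // heap pointers carry a one-byte tag

    // FieldMemOperand(obj, field_offset): tagged pointer plus offset minus tag.
    uintptr_t FieldAddress(uintptr_t tagged_object, intptr_t field_offset) {
      return tagged_object + field_offset - kHeapObjectTag;
    }

    // MemOperand(cp, raw_offset) addresses the same slot as
    // FieldMemOperand(cp, raw_offset + kHeapObjectTag).
    uintptr_t ContextSlotAddress(uintptr_t tagged_context, intptr_t raw_offset) {
      return FieldAddress(tagged_context, raw_offset + kHeapObjectTag);
    }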
Handle code_object_; diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index cd76edbf15..c876467938 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -371,9 +371,12 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase( // Isolate. __ mov(r3, Operand(ExternalReference::isolate_address())); - ExternalReference function = - ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); - __ CallCFunction(function, argument_count); + { + AllowExternalCallThatCantCauseGC scope(masm_); + ExternalReference function = + ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); + __ CallCFunction(function, argument_count); + } // Check if function returned non-zero for success or zero for failure. __ cmp(r0, Operand(0, RelocInfo::NONE)); @@ -611,6 +614,12 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { // Entry code: __ bind(&entry_label_); + + // Tell the system that we have a stack frame. Because the type is MANUAL, no + // is generated. + FrameScope scope(masm_, StackFrame::MANUAL); + + // Actually emit code to start a new stack frame. // Push arguments // Save callee-save registers. // Start new stack frame. diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index 6af535553f..5704202622 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -1618,6 +1618,8 @@ void Simulator::HandleRList(Instruction* instr, bool load) { ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address); intptr_t* address = reinterpret_cast(start_address); + // Catch null pointers a little earlier. + ASSERT(start_address > 8191 || start_address < 0); int reg = 0; while (rlist != 0) { if ((rlist & 1) != 0) { diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index f8565924b1..09ecc798c5 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -431,7 +431,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Pass the now unused name_reg as a scratch register. - __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch); + __ mov(name_reg, r0); + __ RecordWriteField(receiver_reg, + offset, + name_reg, + scratch, + kLRHasNotBeenSaved, + kDontSaveFPRegs); } else { // Write to the properties array. int offset = index * kPointerSize + FixedArray::kHeaderSize; @@ -444,7 +450,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Ok to clobber receiver_reg and name_reg, since we return. - __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg); + __ mov(name_reg, r0); + __ RecordWriteField(scratch, + offset, + name_reg, + receiver_reg, + kLRHasNotBeenSaved, + kDontSaveFPRegs); } // Return the value (register r0). 
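The new simulator assertion rejects load/store-multiple addresses that land in the first 8 KB of the signed address space, catching null and near-null pointers before they silently read simulator memory; negative values are tolerated because the address has already been computed with signed offsets. The equivalent host-side check:

    #include <cassert>
    #include <cstdint>

    void AssertNotNearNull(intptr_t start_address) {
      // Anything in [0, 8191] is treated as a dereference of a null-ish pointer.
      assert(start_address > 8191 || start_address < 0);
    }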
@@ -553,9 +565,10 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) { } -static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm, - const CallOptimization& optimization, - int argc) { +static MaybeObject* GenerateFastApiDirectCall( + MacroAssembler* masm, + const CallOptimization& optimization, + int argc) { // ----------- S t a t e ------------- // -- sp[0] : holder (set by CheckPrototypes) // -- sp[4] : callee js function @@ -591,6 +604,8 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm, ApiFunction fun(api_function_address); const int kApiStackSpace = 4; + + FrameScope frame_scope(masm, StackFrame::MANUAL); __ EnterExitFrame(false, kApiStackSpace); // r0 = v8::Arguments& @@ -616,9 +631,11 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm, ExternalReference ref = ExternalReference(&fun, ExternalReference::DIRECT_API_CALL, masm->isolate()); + AllowExternalCallThatCantCauseGC scope(masm); return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace); } + class CallInterceptorCompiler BASE_EMBEDDED { public: CallInterceptorCompiler(StubCompiler* stub_compiler, @@ -794,7 +811,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { miss_label); // Call a runtime function to load the interceptor property. - __ EnterInternalFrame(); + FrameScope scope(masm, StackFrame::INTERNAL); // Save the name_ register across the call. __ push(name_); @@ -811,7 +828,8 @@ class CallInterceptorCompiler BASE_EMBEDDED { // Restore the name_ register. __ pop(name_); - __ LeaveInternalFrame(); + + // Leave the internal frame. } void LoadWithInterceptor(MacroAssembler* masm, @@ -820,18 +838,19 @@ class CallInterceptorCompiler BASE_EMBEDDED { JSObject* holder_obj, Register scratch, Label* interceptor_succeeded) { - __ EnterInternalFrame(); - __ Push(holder, name_); - - CompileCallLoadPropertyWithInterceptor(masm, - receiver, - holder, - name_, - holder_obj); - - __ pop(name_); // Restore the name. - __ pop(receiver); // Restore the holder. - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(holder, name_); + + CompileCallLoadPropertyWithInterceptor(masm, + receiver, + holder, + name_, + holder_obj); + + __ pop(name_); // Restore the name. + __ pop(receiver); // Restore the holder. + } // If interceptor returns no-result sentinel, call the constant function. __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex); @@ -1228,7 +1247,10 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object, ApiFunction fun(getter_address); const int kApiStackSpace = 1; + + FrameScope frame_scope(masm(), StackFrame::MANUAL); __ EnterExitFrame(false, kApiStackSpace); + // Create AccessorInfo instance on the stack above the exit frame with // scratch2 (internal::Object **args_) as the data. __ str(scratch2, MemOperand(sp, 1 * kPointerSize)); @@ -1288,41 +1310,43 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, // Save necessary data before invoking an interceptor. // Requires a frame to make GC aware of pushed pointers. - __ EnterInternalFrame(); + { + FrameScope frame_scope(masm(), StackFrame::INTERNAL); - if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { - // CALLBACKS case needs a receiver to be passed into C++ callback. - __ Push(receiver, holder_reg, name_reg); - } else { - __ Push(holder_reg, name_reg); - } + if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { + // CALLBACKS case needs a receiver to be passed into C++ callback. 
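Throughout the stub compiler the paired EnterInternalFrame/LeaveInternalFrame calls are replaced by a FrameScope object, so leaving the frame is tied to scope exit rather than to remembering a second call; the MANUAL and NONE variants just record that a frame exists without emitting code. A stripped-down model of the RAII pattern, not the real class:

    class FrameScopeModel {
     public:
      enum Type { kInternal, kManual, kNone };

      explicit FrameScopeModel(Type type) : type_(type) {
        if (type_ == kInternal) EmitEnterFrame();  // MANUAL/NONE only mark state
      }
      ~FrameScopeModel() {
        if (type_ == kInternal) EmitLeaveFrame();
      }

     private:
      void EmitEnterFrame() { /* push fp, lr, context, frame marker ... */ }
      void EmitLeaveFrame() { /* restore sp/fp back to the caller's frame */ }
      Type type_;
    };

The GenerateLeaveFrame() call that appears a little further down exists for paths that return to the caller before the scope ends.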
+ __ Push(receiver, holder_reg, name_reg); + } else { + __ Push(holder_reg, name_reg); + } - // Invoke an interceptor. Note: map checks from receiver to - // interceptor's holder has been compiled before (see a caller - // of this method.) - CompileCallLoadPropertyWithInterceptor(masm(), - receiver, - holder_reg, - name_reg, - interceptor_holder); - - // Check if interceptor provided a value for property. If it's - // the case, return immediately. - Label interceptor_failed; - __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex); - __ cmp(r0, scratch1); - __ b(eq, &interceptor_failed); - __ LeaveInternalFrame(); - __ Ret(); + // Invoke an interceptor. Note: map checks from receiver to + // interceptor's holder has been compiled before (see a caller + // of this method.) + CompileCallLoadPropertyWithInterceptor(masm(), + receiver, + holder_reg, + name_reg, + interceptor_holder); + + // Check if interceptor provided a value for property. If it's + // the case, return immediately. + Label interceptor_failed; + __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex); + __ cmp(r0, scratch1); + __ b(eq, &interceptor_failed); + frame_scope.GenerateLeaveFrame(); + __ Ret(); - __ bind(&interceptor_failed); - __ pop(name_reg); - __ pop(holder_reg); - if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { - __ pop(receiver); - } + __ bind(&interceptor_failed); + __ pop(name_reg); + __ pop(holder_reg); + if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { + __ pop(receiver); + } - __ LeaveInternalFrame(); + // Leave the internal frame. + } // Check that the maps from interceptor's holder to lookup's holder // haven't changed. And load lookup's holder into |holder| register. @@ -1556,7 +1580,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, DONT_DO_SMI_CHECK); if (argc == 1) { // Otherwise fall through to call the builtin. - Label exit, with_write_barrier, attempt_to_grow_elements; + Label attempt_to_grow_elements; // Get the array's length into r0 and calculate new length. __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); @@ -1571,11 +1595,15 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ cmp(r0, r4); __ b(gt, &attempt_to_grow_elements); + // Check if value is a smi. + Label with_write_barrier; + __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); + __ JumpIfNotSmi(r4, &with_write_barrier); + // Save new length. __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); // Push the element. - __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); // We may need a register containing the address end_elements below, // so write back the value in end_elements. __ add(end_elements, elements, @@ -1585,14 +1613,31 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); // Check for a smi. - __ JumpIfNotSmi(r4, &with_write_barrier); - __ bind(&exit); __ Drop(argc + 1); __ Ret(); __ bind(&with_write_barrier); - __ InNewSpace(elements, r4, eq, &exit); - __ RecordWriteHelper(elements, end_elements, r4); + + __ ldr(r6, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ CheckFastSmiOnlyElements(r6, r6, &call_builtin); + + // Save new length. + __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); + + // Push the element. + // We may need a register containing the address end_elements below, + // so write back the value in end_elements. 
+ __ add(end_elements, elements, + Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); + + __ RecordWrite(elements, + end_elements, + r4, + kLRHasNotBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); __ Drop(argc + 1); __ Ret(); @@ -1604,6 +1649,15 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ b(&call_builtin); } + __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize)); + // Growing elements that are SMI-only requires special handling in case + // the new element is non-Smi. For now, delegate to the builtin. + Label no_fast_elements_check; + __ JumpIfSmi(r2, &no_fast_elements_check); + __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ CheckFastObjectElements(r7, r7, &call_builtin); + __ bind(&no_fast_elements_check); + Isolate* isolate = masm()->isolate(); ExternalReference new_space_allocation_top = ExternalReference::new_space_allocation_top_address(isolate); @@ -1630,8 +1684,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, // Update new_space_allocation_top. __ str(r6, MemOperand(r7)); // Push the argument. - __ ldr(r6, MemOperand(sp, (argc - 1) * kPointerSize)); - __ str(r6, MemOperand(end_elements)); + __ str(r2, MemOperand(end_elements)); // Fill the rest with holes. __ LoadRoot(r6, Heap::kTheHoleValueRootIndex); for (int i = 1; i < kAllocationDelta; i++) { @@ -2713,6 +2766,15 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object, // Store the value in the cell. __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset)); + __ mov(r1, r0); + __ RecordWriteField(r4, + JSGlobalPropertyCell::kValueOffset, + r1, + r2, + kLRHasNotBeenSaved, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET); + Counters* counters = masm()->isolate()->counters(); __ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3); __ Ret(); @@ -3454,6 +3516,7 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) { case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -3540,6 +3603,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( } break; case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -3880,6 +3944,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( } break; case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -3943,6 +4008,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -4082,6 +4148,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -4234,8 +4301,10 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( } -void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, - bool is_js_array) { +void KeyedStoreStubCompiler::GenerateStoreFastElement( + MacroAssembler* masm, + bool is_js_array, + ElementsKind elements_kind) { // 
----------- S t a t e ------------- // -- r0 : value // -- r1 : key @@ -4277,15 +4346,33 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, __ cmp(key_reg, scratch); __ b(hs, &miss_force_generic); - __ add(scratch, - elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); - __ str(value_reg, - MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ RecordWrite(scratch, - Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize), - receiver_reg , elements_reg); - + if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + __ JumpIfNotSmi(value_reg, &miss_force_generic); + __ add(scratch, + elements_reg, + Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); + __ add(scratch, + scratch, + Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ str(value_reg, MemOperand(scratch)); + } else { + ASSERT(elements_kind == FAST_ELEMENTS); + __ add(scratch, + elements_reg, + Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); + __ add(scratch, + scratch, + Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ str(value_reg, MemOperand(scratch)); + __ mov(receiver_reg, value_reg); + __ RecordWrite(elements_reg, // Object. + scratch, // Address. + receiver_reg, // Value. + kLRHasNotBeenSaved, + kDontSaveFPRegs); + } // value_reg (r0) is preserved. // Done. __ Ret(); @@ -4309,15 +4396,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // -- r4 : scratch // -- r5 : scratch // ----------------------------------- - Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value; + Label miss_force_generic; Register value_reg = r0; Register key_reg = r1; Register receiver_reg = r2; - Register scratch = r3; - Register elements_reg = r4; - Register mantissa_reg = r5; - Register exponent_reg = r6; + Register elements_reg = r3; + Register scratch1 = r4; + Register scratch2 = r5; + Register scratch3 = r6; Register scratch4 = r7; // This stub is meant to be tail-jumped to, the receiver must already @@ -4329,90 +4416,25 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // Check that the key is within bounds. if (is_js_array) { - __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); + __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); } else { - __ ldr(scratch, + __ ldr(scratch1, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); } // Compare smis, unsigned compare catches both negative and out-of-bound // indexes. - __ cmp(key_reg, scratch); + __ cmp(key_reg, scratch1); __ b(hs, &miss_force_generic); - // Handle smi values specially. - __ JumpIfSmi(value_reg, &smi_value); - - // Ensure that the object is a heap number - __ CheckMap(value_reg, - scratch, - masm->isolate()->factory()->heap_number_map(), - &miss_force_generic, - DONT_DO_SMI_CHECK); - - // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000 - // in the exponent. 
- __ mov(scratch, Operand(kNaNOrInfinityLowerBoundUpper32)); - __ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset)); - __ cmp(exponent_reg, scratch); - __ b(ge, &maybe_nan); - - __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); - - __ bind(&have_double_value); - __ add(scratch, elements_reg, - Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); - __ str(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize)); - uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); - __ str(exponent_reg, FieldMemOperand(scratch, offset)); - __ Ret(); - - __ bind(&maybe_nan); - // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise - // it's an Infinity, and the non-NaN code path applies. - __ b(gt, &is_nan); - __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); - __ cmp(mantissa_reg, Operand(0)); - __ b(eq, &have_double_value); - __ bind(&is_nan); - // Load canonical NaN for storing into the double array. - uint64_t nan_int64 = BitCast( - FixedDoubleArray::canonical_not_the_hole_nan_as_double()); - __ mov(mantissa_reg, Operand(static_cast(nan_int64))); - __ mov(exponent_reg, Operand(static_cast(nan_int64 >> 32))); - __ jmp(&have_double_value); - - __ bind(&smi_value); - __ add(scratch, elements_reg, - Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); - __ add(scratch, scratch, - Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); - // scratch is now effective address of the double element - - FloatingPointHelper::Destination destination; - if (CpuFeatures::IsSupported(VFP3)) { - destination = FloatingPointHelper::kVFPRegisters; - } else { - destination = FloatingPointHelper::kCoreRegisters; - } - - Register untagged_value = receiver_reg; - __ SmiUntag(untagged_value, value_reg); - FloatingPointHelper::ConvertIntToDouble( - masm, - untagged_value, - destination, - d0, - mantissa_reg, - exponent_reg, - scratch4, - s2); - if (destination == FloatingPointHelper::kVFPRegisters) { - CpuFeatures::Scope scope(VFP3); - __ vstr(d0, scratch, 0); - } else { - __ str(mantissa_reg, MemOperand(scratch, 0)); - __ str(exponent_reg, MemOperand(scratch, Register::kSizeInBytes)); - } + __ StoreNumberToDoubleElements(value_reg, + key_reg, + receiver_reg, + elements_reg, + scratch1, + scratch2, + scratch3, + scratch4, + &miss_force_generic); __ Ret(); // Handle store cache miss, replacing the ic with the generic stub. diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js index 4dd23c8bb4..e1d7c2064e 100644 --- a/deps/v8/src/array.js +++ b/deps/v8/src/array.js @@ -201,17 +201,14 @@ function ConvertToString(x) { function ConvertToLocaleString(e) { - if (e == null) { + if (IS_NULL_OR_UNDEFINED(e)) { return ''; } else { - // e_obj's toLocaleString might be overwritten, check if it is a function. - // Call ToString if toLocaleString is not a function. - // See issue 877615. + // According to ES5, seciton 15.4.4.3, the toLocaleString conversion + // must throw a TypeError if ToObject(e).toLocaleString isn't + // callable. 
var e_obj = ToObject(e); - if (IS_SPEC_FUNCTION(e_obj.toLocaleString)) - return ToString(e_obj.toLocaleString()); - else - return ToString(e); + return %ToString(e_obj.toLocaleString()); } } @@ -381,18 +378,31 @@ function SimpleMove(array, start_i, del_count, len, num_additional_args) { function ArrayToString() { - if (!IS_ARRAY(this)) { - throw new $TypeError('Array.prototype.toString is not generic'); + var array; + var func; + if (IS_ARRAY(this)) { + func = this.join; + if (func === ArrayJoin) { + return Join(this, this.length, ',', ConvertToString); + } + array = this; + } else { + array = ToObject(this); + func = array.join; } - return Join(this, this.length, ',', ConvertToString); + if (!IS_SPEC_FUNCTION(func)) { + return %_CallFunction(array, ObjectToString); + } + return %_CallFunction(array, func); } function ArrayToLocaleString() { - if (!IS_ARRAY(this)) { - throw new $TypeError('Array.prototype.toString is not generic'); - } - return Join(this, this.length, ',', ConvertToLocaleString); + var array = ToObject(this); + var arrayLen = array.length; + var len = TO_UINT32(arrayLen); + if (len === 0) return ""; + return Join(array, len, ',', ConvertToLocaleString); } @@ -993,21 +1003,24 @@ function ArrayFilter(f, receiver) { ["Array.prototype.filter"]); } + // Pull out the length so that modifications to the length in the + // loop will not affect the looping and side effects are visible. + var array = ToObject(this); + var length = ToUint32(array.length); + if (!IS_SPEC_FUNCTION(f)) { throw MakeTypeError('called_non_callable', [ f ]); } if (IS_NULL_OR_UNDEFINED(receiver)) { receiver = %GetDefaultReceiver(f) || receiver; } - // Pull out the length so that modifications to the length in the - // loop will not affect the looping. - var length = ToUint32(this.length); + var result = []; var result_length = 0; for (var i = 0; i < length; i++) { - var current = this[i]; - if (!IS_UNDEFINED(current) || i in this) { - if (%_CallFunction(receiver, current, i, this, f)) { + var current = array[i]; + if (!IS_UNDEFINED(current) || i in array) { + if (%_CallFunction(receiver, current, i, array, f)) { result[result_length++] = current; } } @@ -1022,19 +1035,22 @@ function ArrayForEach(f, receiver) { ["Array.prototype.forEach"]); } + // Pull out the length so that modifications to the length in the + // loop will not affect the looping and side effects are visible. + var array = ToObject(this); + var length = TO_UINT32(array.length); + if (!IS_SPEC_FUNCTION(f)) { throw MakeTypeError('called_non_callable', [ f ]); } if (IS_NULL_OR_UNDEFINED(receiver)) { receiver = %GetDefaultReceiver(f) || receiver; } - // Pull out the length so that modifications to the length in the - // loop will not affect the looping. - var length = TO_UINT32(this.length); + for (var i = 0; i < length; i++) { - var current = this[i]; - if (!IS_UNDEFINED(current) || i in this) { - %_CallFunction(receiver, current, i, this, f); + var current = array[i]; + if (!IS_UNDEFINED(current) || i in array) { + %_CallFunction(receiver, current, i, array, f); } } } @@ -1048,19 +1064,22 @@ function ArraySome(f, receiver) { ["Array.prototype.some"]); } + // Pull out the length so that modifications to the length in the + // loop will not affect the looping and side effects are visible. 
+ var array = ToObject(this); + var length = TO_UINT32(array.length); + if (!IS_SPEC_FUNCTION(f)) { throw MakeTypeError('called_non_callable', [ f ]); } if (IS_NULL_OR_UNDEFINED(receiver)) { receiver = %GetDefaultReceiver(f) || receiver; } - // Pull out the length so that modifications to the length in the - // loop will not affect the looping. - var length = TO_UINT32(this.length); + for (var i = 0; i < length; i++) { - var current = this[i]; - if (!IS_UNDEFINED(current) || i in this) { - if (%_CallFunction(receiver, current, i, this, f)) return true; + var current = array[i]; + if (!IS_UNDEFINED(current) || i in array) { + if (%_CallFunction(receiver, current, i, array, f)) return true; } } return false; @@ -1073,19 +1092,22 @@ function ArrayEvery(f, receiver) { ["Array.prototype.every"]); } + // Pull out the length so that modifications to the length in the + // loop will not affect the looping and side effects are visible. + var array = ToObject(this); + var length = TO_UINT32(array.length); + if (!IS_SPEC_FUNCTION(f)) { throw MakeTypeError('called_non_callable', [ f ]); } if (IS_NULL_OR_UNDEFINED(receiver)) { receiver = %GetDefaultReceiver(f) || receiver; } - // Pull out the length so that modifications to the length in the - // loop will not affect the looping. - var length = TO_UINT32(this.length); + for (var i = 0; i < length; i++) { - var current = this[i]; - if (!IS_UNDEFINED(current) || i in this) { - if (!%_CallFunction(receiver, current, i, this, f)) return false; + var current = array[i]; + if (!IS_UNDEFINED(current) || i in array) { + if (!%_CallFunction(receiver, current, i, array, f)) return false; } } return true; @@ -1097,21 +1119,24 @@ function ArrayMap(f, receiver) { ["Array.prototype.map"]); } + // Pull out the length so that modifications to the length in the + // loop will not affect the looping and side effects are visible. + var array = ToObject(this); + var length = TO_UINT32(array.length); + if (!IS_SPEC_FUNCTION(f)) { throw MakeTypeError('called_non_callable', [ f ]); } if (IS_NULL_OR_UNDEFINED(receiver)) { receiver = %GetDefaultReceiver(f) || receiver; } - // Pull out the length so that modifications to the length in the - // loop will not affect the looping. - var length = TO_UINT32(this.length); + var result = new $Array(); var accumulator = new InternalArray(length); for (var i = 0; i < length; i++) { - var current = this[i]; - if (!IS_UNDEFINED(current) || i in this) { - accumulator[i] = %_CallFunction(receiver, current, i, this, f); + var current = array[i]; + if (!IS_UNDEFINED(current) || i in array) { + accumulator[i] = %_CallFunction(receiver, current, i, array, f); } } %MoveArrayContents(accumulator, result); @@ -1245,19 +1270,20 @@ function ArrayReduce(callback, current) { ["Array.prototype.reduce"]); } + // Pull out the length so that modifications to the length in the + // loop will not affect the looping and side effects are visible. + var array = ToObject(this); + var length = ToUint32(array.length); + if (!IS_SPEC_FUNCTION(callback)) { throw MakeTypeError('called_non_callable', [callback]); } - // Pull out the length so that modifications to the length in the - // loop will not affect the looping. 
- var length = ToUint32(this.length); var i = 0; - find_initial: if (%_ArgumentsLength() < 2) { for (; i < length; i++) { - current = this[i]; - if (!IS_UNDEFINED(current) || i in this) { + current = array[i]; + if (!IS_UNDEFINED(current) || i in array) { i++; break find_initial; } @@ -1267,9 +1293,9 @@ function ArrayReduce(callback, current) { var receiver = %GetDefaultReceiver(callback); for (; i < length; i++) { - var element = this[i]; - if (!IS_UNDEFINED(element) || i in this) { - current = %_CallFunction(receiver, current, element, i, this, callback); + var element = array[i]; + if (!IS_UNDEFINED(element) || i in array) { + current = %_CallFunction(receiver, current, element, i, array, callback); } } return current; @@ -1281,15 +1307,20 @@ function ArrayReduceRight(callback, current) { ["Array.prototype.reduceRight"]); } + // Pull out the length so that side effects are visible before the + // callback function is checked. + var array = ToObject(this); + var length = ToUint32(array.length); + if (!IS_SPEC_FUNCTION(callback)) { throw MakeTypeError('called_non_callable', [callback]); } - var i = ToUint32(this.length) - 1; + var i = length - 1; find_initial: if (%_ArgumentsLength() < 2) { for (; i >= 0; i--) { - current = this[i]; - if (!IS_UNDEFINED(current) || i in this) { + current = array[i]; + if (!IS_UNDEFINED(current) || i in array) { i--; break find_initial; } @@ -1299,9 +1330,9 @@ function ArrayReduceRight(callback, current) { var receiver = %GetDefaultReceiver(callback); for (; i >= 0; i--) { - var element = this[i]; - if (!IS_UNDEFINED(element) || i in this) { - current = %_CallFunction(receiver, current, element, i, this, callback); + var element = array[i]; + if (!IS_UNDEFINED(element) || i in array) { + current = %_CallFunction(receiver, current, element, i, array, callback); } } return current; diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index ad5f350816..bda85e69de 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -38,6 +38,7 @@ #include "deoptimizer.h" #include "execution.h" #include "ic-inl.h" +#include "incremental-marking.h" #include "factory.h" #include "runtime.h" #include "runtime-profiler.h" @@ -47,6 +48,7 @@ #include "ast.h" #include "regexp-macro-assembler.h" #include "platform.h" +#include "store-buffer.h" // Include native regexp-macro-assembler. #ifndef V8_INTERPRETED_REGEXP #if V8_TARGET_ARCH_IA32 @@ -516,6 +518,7 @@ void RelocIterator::next() { RelocIterator::RelocIterator(Code* code, int mode_mask) { + rinfo_.host_ = code; rinfo_.pc_ = code->instruction_start(); rinfo_.data_ = 0; // Relocation info is read backwards. 
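A quick aside on the array.js rewrite above: Array.prototype.toString now dispatches through the receiver's own join, while toLocaleString and the iteration methods call ToObject(this) and read the length up front, so all of them behave generically on array-like objects as ES5 requires, and toLocaleString raises a TypeError for elements whose toLocaleString is not callable. A small, hypothetical JavaScript sketch of the observable behaviour (the object literals and names below are illustrative, not part of the patch):

// toString dispatches through join() and falls back to Object.prototype.toString
// for receivers without a callable join, instead of throwing.
var arrayLike = { 0: "a", 1: "b", length: 2, join: Array.prototype.join };
console.log(Array.prototype.toString.call(arrayLike));      // "a,b"
console.log(Array.prototype.toString.call({ length: 0 }));  // "[object Object]"

// The iteration methods read the length up front and index into ToObject(this),
// so generic array-like receivers work too.
var seen = [];
Array.prototype.forEach.call({ 0: 10, 1: 20, length: 2 }, function (x) {
  seen.push(x);
});
console.log(seen);  // [10, 20]

// Per ES5 section 15.4.4.3, toLocaleString throws when an element's
// toLocaleString is not callable.
try {
  [{ toLocaleString: 42 }].toLocaleString();
} catch (e) {
  console.log(e instanceof TypeError);  // true
}

The length is also captured before the callable check so that any side effects of reading it stay visible even when the callback argument turns out not to be callable.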
@@ -736,9 +739,38 @@ ExternalReference::ExternalReference(const SCTableReference& table_ref) : address_(table_ref.address()) {} +ExternalReference ExternalReference:: + incremental_marking_record_write_function(Isolate* isolate) { + return ExternalReference(Redirect( + isolate, + FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode))); +} + + +ExternalReference ExternalReference:: + incremental_evacuation_record_write_function(Isolate* isolate) { + return ExternalReference(Redirect( + isolate, + FUNCTION_ADDR(IncrementalMarking::RecordWriteForEvacuationFromCode))); +} + + +ExternalReference ExternalReference:: + store_buffer_overflow_function(Isolate* isolate) { + return ExternalReference(Redirect( + isolate, + FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow))); +} + + +ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) { + return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CPU::FlushICache))); +} + + ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) { - return ExternalReference(Redirect(isolate, - FUNCTION_ADDR(Runtime::PerformGC))); + return + ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::PerformGC))); } @@ -802,17 +834,6 @@ ExternalReference ExternalReference::keyed_lookup_cache_field_offsets( } -ExternalReference ExternalReference::the_hole_value_location(Isolate* isolate) { - return ExternalReference(isolate->factory()->the_hole_value().location()); -} - - -ExternalReference ExternalReference::arguments_marker_location( - Isolate* isolate) { - return ExternalReference(isolate->factory()->arguments_marker().location()); -} - - ExternalReference ExternalReference::roots_address(Isolate* isolate) { return ExternalReference(isolate->heap()->roots_address()); } @@ -840,9 +861,14 @@ ExternalReference ExternalReference::new_space_start(Isolate* isolate) { } +ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) { + return ExternalReference(isolate->heap()->store_buffer()->TopAddress()); +} + + ExternalReference ExternalReference::new_space_mask(Isolate* isolate) { - Address mask = reinterpret_cast
<Address>(isolate->heap()->NewSpaceMask()); - return ExternalReference(mask); + return ExternalReference(reinterpret_cast<Address>
( + isolate->heap()->NewSpaceMask())); } diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index d58034df0d..01c3a70c65 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -143,6 +143,9 @@ class Label BASE_EMBEDDED { }; +enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs }; + + // ----------------------------------------------------------------------------- // Relocation information @@ -216,8 +219,9 @@ class RelocInfo BASE_EMBEDDED { RelocInfo() {} - RelocInfo(byte* pc, Mode rmode, intptr_t data) - : pc_(pc), rmode_(rmode), data_(data) { + + RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host) + : pc_(pc), rmode_(rmode), data_(data), host_(host) { } static inline bool IsConstructCall(Mode mode) { @@ -258,6 +262,7 @@ class RelocInfo BASE_EMBEDDED { void set_pc(byte* pc) { pc_ = pc; } Mode rmode() const { return rmode_; } intptr_t data() const { return data_; } + Code* host() const { return host_; } // Apply a relocation by delta bytes INLINE(void apply(intptr_t delta)); @@ -353,6 +358,7 @@ class RelocInfo BASE_EMBEDDED { byte* pc_; Mode rmode_; intptr_t data_; + Code* host_; #ifdef V8_TARGET_ARCH_MIPS // Code and Embedded Object pointers in mips are stored split // across two consecutive 32-bit instructions. Heap management @@ -561,6 +567,13 @@ class ExternalReference BASE_EMBEDDED { // pattern. This means that they have to be added to the // ExternalReferenceTable in serialize.cc manually. + static ExternalReference incremental_marking_record_write_function( + Isolate* isolate); + static ExternalReference incremental_evacuation_record_write_function( + Isolate* isolate); + static ExternalReference store_buffer_overflow_function( + Isolate* isolate); + static ExternalReference flush_icache_function(Isolate* isolate); static ExternalReference perform_gc_function(Isolate* isolate); static ExternalReference fill_heap_number_with_random_function( Isolate* isolate); @@ -577,12 +590,6 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference keyed_lookup_cache_keys(Isolate* isolate); static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate); - // Static variable Factory::the_hole_value.location() - static ExternalReference the_hole_value_location(Isolate* isolate); - - // Static variable Factory::arguments_marker.location() - static ExternalReference arguments_marker_location(Isolate* isolate); - // Static variable Heap::roots_address() static ExternalReference roots_address(Isolate* isolate); @@ -606,6 +613,10 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference new_space_start(Isolate* isolate); static ExternalReference new_space_mask(Isolate* isolate); static ExternalReference heap_always_allocate_scope_depth(Isolate* isolate); + static ExternalReference new_space_mark_bits(Isolate* isolate); + + // Write barrier. + static ExternalReference store_buffer_top(Isolate* isolate); // Used for fast allocation in generated code. 
static ExternalReference new_space_allocation_top_address(Isolate* isolate); diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index 418cc432b6..d493814544 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -327,56 +327,77 @@ bool BinaryOperation::ResultOverwriteAllowed() { } -bool CompareOperation::IsLiteralCompareTypeof(Expression** expr, - Handle* check) { - if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return false; - - UnaryOperation* left_unary = left_->AsUnaryOperation(); - UnaryOperation* right_unary = right_->AsUnaryOperation(); - Literal* left_literal = left_->AsLiteral(); - Literal* right_literal = right_->AsLiteral(); - - // Check for the pattern: typeof == . - if (left_unary != NULL && left_unary->op() == Token::TYPEOF && - right_literal != NULL && right_literal->handle()->IsString()) { - *expr = left_unary->expression(); - *check = Handle::cast(right_literal->handle()); +static bool IsTypeof(Expression* expr) { + UnaryOperation* maybe_unary = expr->AsUnaryOperation(); + return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF; +} + + +// Check for the pattern: typeof equals . +static bool MatchLiteralCompareTypeof(Expression* left, + Token::Value op, + Expression* right, + Expression** expr, + Handle* check) { + if (IsTypeof(left) && right->IsStringLiteral() && Token::IsEqualityOp(op)) { + *expr = left->AsUnaryOperation()->expression(); + *check = Handle::cast(right->AsLiteral()->handle()); return true; } + return false; +} + - // Check for the pattern: == typeof . - if (right_unary != NULL && right_unary->op() == Token::TYPEOF && - left_literal != NULL && left_literal->handle()->IsString()) { - *expr = right_unary->expression(); - *check = Handle::cast(left_literal->handle()); +bool CompareOperation::IsLiteralCompareTypeof(Expression** expr, + Handle* check) { + return MatchLiteralCompareTypeof(left_, op_, right_, expr, check) || + MatchLiteralCompareTypeof(right_, op_, left_, expr, check); +} + + +static bool IsVoidOfLiteral(Expression* expr) { + UnaryOperation* maybe_unary = expr->AsUnaryOperation(); + return maybe_unary != NULL && + maybe_unary->op() == Token::VOID && + maybe_unary->expression()->AsLiteral() != NULL; +} + + +// Check for the pattern: void equals +static bool MatchLiteralCompareUndefined(Expression* left, + Token::Value op, + Expression* right, + Expression** expr) { + if (IsVoidOfLiteral(left) && Token::IsEqualityOp(op)) { + *expr = right; return true; } - return false; } bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) { - if (op_ != Token::EQ_STRICT) return false; + return MatchLiteralCompareUndefined(left_, op_, right_, expr) || + MatchLiteralCompareUndefined(right_, op_, left_, expr); +} - UnaryOperation* left_unary = left_->AsUnaryOperation(); - UnaryOperation* right_unary = right_->AsUnaryOperation(); - // Check for the pattern: === void . - if (right_unary != NULL && right_unary->op() == Token::VOID && - right_unary->expression()->AsLiteral() != NULL) { - *expr = left_; +// Check for the pattern: null equals +static bool MatchLiteralCompareNull(Expression* left, + Token::Value op, + Expression* right, + Expression** expr) { + if (left->IsNullLiteral() && Token::IsEqualityOp(op)) { + *expr = right; return true; } + return false; +} - // Check for the pattern: void === . 
- if (left_unary != NULL && left_unary->op() == Token::VOID && - left_unary->expression()->AsLiteral() != NULL) { - *expr = right_; - return true; - } - return false; +bool CompareOperation::IsLiteralCompareNull(Expression** expr) { + return MatchLiteralCompareNull(left_, op_, right_, expr) || + MatchLiteralCompareNull(right_, op_, left_, expr); } @@ -529,7 +550,9 @@ bool Conditional::IsInlineable() const { bool VariableProxy::IsInlineable() const { - return var()->IsUnallocated() || var()->IsStackAllocated(); + return var()->IsUnallocated() + || var()->IsStackAllocated() + || var()->IsContextSlot(); } @@ -598,11 +621,6 @@ bool CompareOperation::IsInlineable() const { } -bool CompareToNull::IsInlineable() const { - return expression()->IsInlineable(); -} - - bool CountOperation::IsInlineable() const { return expression()->IsInlineable(); } @@ -746,37 +764,41 @@ bool Call::ComputeGlobalTarget(Handle global, void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle, CallKind call_kind) { + is_monomorphic_ = oracle->CallIsMonomorphic(this); Property* property = expression()->AsProperty(); - ASSERT(property != NULL); - // Specialize for the receiver types seen at runtime. - Literal* key = property->key()->AsLiteral(); - ASSERT(key != NULL && key->handle()->IsString()); - Handle name = Handle::cast(key->handle()); - receiver_types_.Clear(); - oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_); + if (property == NULL) { + // Function call. Specialize for monomorphic calls. + if (is_monomorphic_) target_ = oracle->GetCallTarget(this); + } else { + // Method call. Specialize for the receiver types seen at runtime. + Literal* key = property->key()->AsLiteral(); + ASSERT(key != NULL && key->handle()->IsString()); + Handle name = Handle::cast(key->handle()); + receiver_types_.Clear(); + oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_); #ifdef DEBUG - if (FLAG_enable_slow_asserts) { - int length = receiver_types_.length(); - for (int i = 0; i < length; i++) { - Handle map = receiver_types_.at(i); - ASSERT(!map.is_null() && *map != NULL); + if (FLAG_enable_slow_asserts) { + int length = receiver_types_.length(); + for (int i = 0; i < length; i++) { + Handle map = receiver_types_.at(i); + ASSERT(!map.is_null() && *map != NULL); + } } - } #endif - is_monomorphic_ = oracle->CallIsMonomorphic(this); - check_type_ = oracle->GetCallCheckType(this); - if (is_monomorphic_) { - Handle map; - if (receiver_types_.length() > 0) { - ASSERT(check_type_ == RECEIVER_MAP_CHECK); - map = receiver_types_.at(0); - } else { - ASSERT(check_type_ != RECEIVER_MAP_CHECK); - holder_ = Handle( - oracle->GetPrototypeForPrimitiveCheck(check_type_)); - map = Handle(holder_->map()); + check_type_ = oracle->GetCallCheckType(this); + if (is_monomorphic_) { + Handle map; + if (receiver_types_.length() > 0) { + ASSERT(check_type_ == RECEIVER_MAP_CHECK); + map = receiver_types_.at(0); + } else { + ASSERT(check_type_ != RECEIVER_MAP_CHECK); + holder_ = Handle( + oracle->GetPrototypeForPrimitiveCheck(check_type_)); + map = Handle(holder_->map()); + } + is_monomorphic_ = ComputeTarget(map, name); } - is_monomorphic_ = ComputeTarget(map, name); } } diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index b56205f9a6..00cfd7fe6f 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -90,7 +90,6 @@ namespace internal { V(CountOperation) \ V(BinaryOperation) \ V(CompareOperation) \ - V(CompareToNull) \ V(ThisFunction) #define AST_NODE_LIST(V) \ @@ -289,6 +288,12 @@ class Expression: public AstNode { // True 
iff the expression is a literal represented as a smi. virtual bool IsSmiLiteral() { return false; } + // True iff the expression is a string literal. + virtual bool IsStringLiteral() { return false; } + + // True iff the expression is the null literal. + virtual bool IsNullLiteral() { return false; } + // Type feedback information for assignments and properties. virtual bool IsMonomorphic() { UNREACHABLE(); @@ -891,6 +896,8 @@ class Literal: public Expression { virtual bool IsTrivial() { return true; } virtual bool IsSmiLiteral() { return handle_->IsSmi(); } + virtual bool IsStringLiteral() { return handle_->IsString(); } + virtual bool IsNullLiteral() { return handle_->IsNull(); } // Check if this literal is identical to the other literal. bool IsIdenticalTo(const Literal* other) const { @@ -1465,6 +1472,7 @@ class CompareOperation: public Expression { // Match special cases. bool IsLiteralCompareTypeof(Expression** expr, Handle* check); bool IsLiteralCompareUndefined(Expression** expr); + bool IsLiteralCompareNull(Expression** expr); private: Token::Value op_; @@ -1477,25 +1485,6 @@ class CompareOperation: public Expression { }; -class CompareToNull: public Expression { - public: - CompareToNull(Isolate* isolate, bool is_strict, Expression* expression) - : Expression(isolate), is_strict_(is_strict), expression_(expression) { } - - DECLARE_NODE_TYPE(CompareToNull) - - virtual bool IsInlineable() const; - - bool is_strict() const { return is_strict_; } - Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; } - Expression* expression() const { return expression_; } - - private: - bool is_strict_; - Expression* expression_; -}; - - class Conditional: public Expression { public: Conditional(Isolate* isolate, diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index f07e625ec0..dc722cb749 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -34,6 +34,7 @@ #include "debug.h" #include "execution.h" #include "global-handles.h" +#include "isolate-inl.h" #include "macro-assembler.h" #include "natives.h" #include "objects-visiting.h" @@ -995,6 +996,26 @@ void Genesis::InitializeGlobal(Handle inner_global, initial_map->instance_size() + 5 * kPointerSize); initial_map->set_instance_descriptors(*descriptors); initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map)); + + // RegExp prototype object is itself a RegExp. + Handle proto_map = factory->CopyMapDropTransitions(initial_map); + proto_map->set_prototype(global_context()->initial_object_prototype()); + Handle proto = factory->NewJSObjectFromMap(proto_map); + proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, + heap->empty_string()); + proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex, + heap->false_value()); + proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex, + heap->false_value()); + proto->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex, + heap->false_value()); + proto->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex, + Smi::FromInt(0), + SKIP_WRITE_BARRIER); // It's a Smi. 
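Returning to the literal-compare refactoring in ast.cc above: the new MatchLiteralCompareTypeof, MatchLiteralCompareUndefined and MatchLiteralCompareNull helpers accept the literal on either side of an equality operator, and the null-literal form is newly recognised, so the compilers can emit a specialised check instead of a generic comparison. A hypothetical JavaScript illustration of the source patterns this covers (the function is made up):

function classify(x) {
  if (typeof x == "number") return "number";          // typeof <expr> vs. string literal
  if ("undefined" === typeof x) return "undefined";   // literal may sit on either side
  if (x === void 0) return "void-compare";            // unreachable here, shown for the pattern
  if (null == x) return "null-compare";               // the newly matched null-literal form
  return "other";
}
console.log(classify(42), classify(undefined), classify(null), classify("s"));
// -> number undefined null-compare other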
+ initial_map->set_prototype(*proto); + factory->SetRegExpIrregexpData(Handle::cast(proto), + JSRegExp::IRREGEXP, factory->empty_string(), + JSRegExp::Flags(0), 0); } { // -- J S O N @@ -1076,6 +1097,11 @@ void Genesis::InitializeGlobal(Handle inner_global, elements->set(0, *array); array = factory->NewFixedArray(0); elements->set(1, *array); + Handle non_strict_arguments_elements_map = + factory->GetElementsTransitionMap(result, + NON_STRICT_ARGUMENTS_ELEMENTS); + result->set_map(*non_strict_arguments_elements_map); + ASSERT(result->HasNonStrictArgumentsElements()); result->set_elements(*elements); global_context()->set_aliased_arguments_boilerplate(*result); } @@ -1327,6 +1353,8 @@ void Genesis::InstallNativeFunctions() { configure_instance_fun); INSTALL_NATIVE(JSFunction, "GetStackTraceLine", get_stack_trace_line_fun); INSTALL_NATIVE(JSObject, "functionCache", function_cache); + INSTALL_NATIVE(JSFunction, "ToCompletePropertyDescriptor", + to_complete_property_descriptor); } void Genesis::InstallExperimentalNativeFunctions() { @@ -1555,6 +1583,18 @@ bool Genesis::InstallNatives() { isolate()->builtins()->builtin(Builtins::kArrayConstructCode)); array_function->shared()->DontAdaptArguments(); + // InternalArrays should not use Smi-Only array optimizations. There are too + // many places in the C++ runtime code (e.g. RegEx) that assume that + // elements in InternalArrays can be set to non-Smi values without going + // through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT + // transition easy to trap. Moreover, they rarely are smi-only. + MaybeObject* maybe_map = + array_function->initial_map()->CopyDropTransitions(); + Map* new_map; + if (!maybe_map->To(&new_map)) return maybe_map; + new_map->set_elements_kind(FAST_ELEMENTS); + array_function->set_initial_map(new_map); + // Make "length" magic on instances. Handle array_descriptors = factory()->CopyAppendForeignDescriptor( @@ -1938,14 +1978,15 @@ bool Genesis::InstallExtension(v8::RegisteredExtension* current) { if (!InstallExtension(extension->dependencies()[i])) return false; } Isolate* isolate = Isolate::Current(); - Vector source = CStrVector(extension->source()); - Handle source_code = isolate->factory()->NewStringFromAscii(source); - bool result = CompileScriptCached(CStrVector(extension->name()), - source_code, - isolate->bootstrapper()->extensions_cache(), - extension, - Handle(isolate->context()), - false); + Handle source_code = + isolate->factory()->NewExternalStringFromAscii(extension->source()); + bool result = CompileScriptCached( + CStrVector(extension->name()), + source_code, + isolate->bootstrapper()->extensions_cache(), + extension, + Handle(isolate->context()), + false); ASSERT(isolate->has_pending_exception() != result); if (!result) { isolate->clear_pending_exception(); diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index e6a0699f07..31fcd68177 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -33,6 +33,7 @@ #include "builtins.h" #include "gdb-jit.h" #include "ic-inl.h" +#include "mark-compact.h" #include "vm-state-inl.h" namespace v8 { @@ -202,7 +203,7 @@ BUILTIN(ArrayCodeGeneric) { } // 'array' now contains the JSArray we should initialize. - ASSERT(array->HasFastElements()); + ASSERT(array->HasFastTypeElements()); // Optimize the case where there is one argument and the argument is a // small smi. 
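On the bootstrapper change above that makes the RegExp prototype object itself a RegExp: the prototype is created from a copy of the RegExp map and its in-object fields are filled with an empty source, cleared flags and a zero lastIndex. Roughly the behaviour this produces in a build of this V8 version (a hypothetical snippet; later ECMAScript revisions changed these semantics):

// RegExp.prototype now answers the flag and source accessors like an ordinary
// regular expression with an empty pattern and no flags.
console.log(RegExp.prototype.source);      // ""
console.log(RegExp.prototype.global);      // false
console.log(RegExp.prototype.ignoreCase);  // false
console.log(RegExp.prototype.multiline);   // false
console.log(RegExp.prototype.lastIndex);   // 0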
@@ -215,7 +216,8 @@ BUILTIN(ArrayCodeGeneric) { { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } - array->SetContent(FixedArray::cast(obj)); + MaybeObject* maybe_obj = array->SetContent(FixedArray::cast(obj)); + if (maybe_obj->IsFailure()) return maybe_obj; return array; } } @@ -239,6 +241,11 @@ BUILTIN(ArrayCodeGeneric) { if (!maybe_obj->ToObject(&obj)) return maybe_obj; } + // Set length and elements on the array. + MaybeObject* maybe_object = + array->EnsureCanContainElements(FixedArray::cast(obj)); + if (maybe_object->IsFailure()) return maybe_object; + AssertNoAllocation no_gc; FixedArray* elms = FixedArray::cast(obj); WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); @@ -247,7 +254,6 @@ BUILTIN(ArrayCodeGeneric) { elms->set(index, args[index+1], mode); } - // Set length and elements on the array. array->set_elements(FixedArray::cast(obj)); array->set_length(len); @@ -295,6 +301,7 @@ static void CopyElements(Heap* heap, if (mode == UPDATE_WRITE_BARRIER) { heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len); } + heap->incremental_marking()->RecordWrites(dst); } @@ -313,6 +320,7 @@ static void MoveElements(Heap* heap, if (mode == UPDATE_WRITE_BARRIER) { heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len); } + heap->incremental_marking()->RecordWrites(dst); } @@ -358,6 +366,14 @@ static FixedArray* LeftTrimFixedArray(Heap* heap, former_start[to_trim] = heap->fixed_array_map(); former_start[to_trim + 1] = Smi::FromInt(len - to_trim); + // Maintain marking consistency for HeapObjectIterator and + // IncrementalMarking. + int size_delta = to_trim * kPointerSize; + if (heap->marking()->TransferMark(elms->address(), + elms->address() + size_delta)) { + MemoryChunk::IncrementLiveBytes(elms->address(), -size_delta); + } + return FixedArray::cast(HeapObject::FromAddress( elms->address() + to_trim * kPointerSize)); } @@ -384,15 +400,36 @@ static bool ArrayPrototypeHasNoElements(Heap* heap, MUST_USE_RESULT static inline MaybeObject* EnsureJSArrayWithWritableFastElements( - Heap* heap, Object* receiver) { + Heap* heap, Object* receiver, Arguments* args, int first_added_arg) { if (!receiver->IsJSArray()) return NULL; JSArray* array = JSArray::cast(receiver); HeapObject* elms = array->elements(); - if (elms->map() == heap->fixed_array_map()) return elms; - if (elms->map() == heap->fixed_cow_array_map()) { - return array->EnsureWritableFastElements(); + Map* map = elms->map(); + if (map == heap->fixed_array_map()) { + if (args == NULL || !array->HasFastSmiOnlyElements()) { + return elms; + } + } else if (map == heap->fixed_cow_array_map()) { + MaybeObject* maybe_writable_result = array->EnsureWritableFastElements(); + if (args == NULL || !array->HasFastSmiOnlyElements() || + maybe_writable_result->IsFailure()) { + return maybe_writable_result; + } + } else { + return NULL; } - return NULL; + + // Need to ensure that the arguments passed in args can be contained in + // the array. 
+ int args_length = args->length(); + if (first_added_arg >= args_length) return array->elements(); + + MaybeObject* maybe_array = array->EnsureCanContainElements( + args, + first_added_arg, + args_length - first_added_arg); + if (maybe_array->IsFailure()) return maybe_array; + return array->elements(); } @@ -413,20 +450,18 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin( HandleScope handleScope(isolate); Handle js_builtin = - GetProperty(Handle( - isolate->global_context()->builtins()), - name); - ASSERT(js_builtin->IsJSFunction()); - Handle function(Handle::cast(js_builtin)); - ScopedVector argv(args.length() - 1); - int n_args = args.length() - 1; - for (int i = 0; i < n_args; i++) { - argv[i] = args.at(i + 1).location(); - } - bool pending_exception = false; + GetProperty(Handle(isolate->global_context()->builtins()), + name); + Handle function = Handle::cast(js_builtin); + int argc = args.length() - 1; + ScopedVector > argv(argc); + for (int i = 0; i < argc; ++i) { + argv[i] = args.at(i + 1); + } + bool pending_exception; Handle result = Execution::Call(function, args.receiver(), - n_args, + argc, argv.start(), &pending_exception); if (pending_exception) return Failure::Exception(); @@ -439,7 +474,7 @@ BUILTIN(ArrayPush) { Object* receiver = *args.receiver(); Object* elms_obj; { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver); + EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1); if (maybe_elms_obj == NULL) { return CallJsBuiltin(isolate, "ArrayPush", args); } @@ -475,7 +510,6 @@ BUILTIN(ArrayPush) { FillWithHoles(heap, new_elms, new_length, capacity); elms = new_elms; - array->set_elements(elms); } // Add the provided values. @@ -485,6 +519,10 @@ BUILTIN(ArrayPush) { elms->set(index + len, args[index + 1], mode); } + if (elms != array->elements()) { + array->set_elements(elms); + } + // Set the length. array->set_length(Smi::FromInt(new_length)); return Smi::FromInt(new_length); @@ -496,7 +534,7 @@ BUILTIN(ArrayPop) { Object* receiver = *args.receiver(); Object* elms_obj; { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver); + EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args); if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; } @@ -529,7 +567,7 @@ BUILTIN(ArrayShift) { Object* receiver = *args.receiver(); Object* elms_obj; { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver); + EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayShift", args); if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; @@ -539,7 +577,7 @@ BUILTIN(ArrayShift) { } FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ASSERT(array->HasFastElements()); + ASSERT(array->HasFastTypeElements()); int len = Smi::cast(array->length())->value(); if (len == 0) return heap->undefined_value(); @@ -551,9 +589,7 @@ BUILTIN(ArrayShift) { } if (!heap->lo_space()->Contains(elms)) { - // As elms still in the same space they used to be, - // there is no need to update region dirty mark. - array->set_elements(LeftTrimFixedArray(heap, elms, 1), SKIP_WRITE_BARRIER); + array->set_elements(LeftTrimFixedArray(heap, elms, 1)); } else { // Shift the elements. 
AssertNoAllocation no_gc; @@ -573,7 +609,7 @@ BUILTIN(ArrayUnshift) { Object* receiver = *args.receiver(); Object* elms_obj; { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver); + EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayUnshift", args); if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; @@ -583,7 +619,7 @@ BUILTIN(ArrayUnshift) { } FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ASSERT(array->HasFastElements()); + ASSERT(array->HasFastTypeElements()); int len = Smi::cast(array->length())->value(); int to_add = args.length() - 1; @@ -592,6 +628,10 @@ BUILTIN(ArrayUnshift) { // we should never hit this case. ASSERT(to_add <= (Smi::kMaxValue - len)); + MaybeObject* maybe_object = + array->EnsureCanContainElements(&args, 1, to_add); + if (maybe_object->IsFailure()) return maybe_object; + if (new_length > elms->length()) { // New backing storage is needed. int capacity = new_length + (new_length >> 1) + 16; @@ -600,13 +640,11 @@ BUILTIN(ArrayUnshift) { if (!maybe_obj->ToObject(&obj)) return maybe_obj; } FixedArray* new_elms = FixedArray::cast(obj); - AssertNoAllocation no_gc; if (len > 0) { CopyElements(heap, &no_gc, new_elms, to_add, elms, 0, len); } FillWithHoles(heap, new_elms, new_length, capacity); - elms = new_elms; array->set_elements(elms); } else { @@ -634,7 +672,7 @@ BUILTIN(ArraySlice) { int len = -1; if (receiver->IsJSArray()) { JSArray* array = JSArray::cast(receiver); - if (!array->HasFastElements() || + if (!array->HasFastTypeElements() || !IsJSArrayFastElementMovingAllowed(heap, array)) { return CallJsBuiltin(isolate, "ArraySlice", args); } @@ -650,7 +688,7 @@ BUILTIN(ArraySlice) { bool is_arguments_object_with_fast_elements = receiver->IsJSObject() && JSObject::cast(receiver)->map() == arguments_map - && JSObject::cast(receiver)->HasFastElements(); + && JSObject::cast(receiver)->HasFastTypeElements(); if (!is_arguments_object_with_fast_elements) { return CallJsBuiltin(isolate, "ArraySlice", args); } @@ -721,6 +759,10 @@ BUILTIN(ArraySlice) { } FixedArray* result_elms = FixedArray::cast(result); + MaybeObject* maybe_object = + result_array->EnsureCanContainElements(result_elms); + if (maybe_object->IsFailure()) return maybe_object; + AssertNoAllocation no_gc; CopyElements(heap, &no_gc, result_elms, 0, elms, k, result_len); @@ -738,7 +780,7 @@ BUILTIN(ArraySplice) { Object* receiver = *args.receiver(); Object* elms_obj; { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver); + EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3); if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArraySplice", args); if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; @@ -748,7 +790,7 @@ BUILTIN(ArraySplice) { } FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ASSERT(array->HasFastElements()); + ASSERT(array->HasFastTypeElements()); int len = Smi::cast(array->length())->value(); @@ -825,9 +867,9 @@ BUILTIN(ArraySplice) { } int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0; - int new_length = len - actual_delete_count + item_count; + bool elms_changed = false; if (item_count < actual_delete_count) { // Shrink the array. 
const bool trim_array = !heap->lo_space()->Contains(elms) && @@ -842,7 +884,8 @@ BUILTIN(ArraySplice) { } elms = LeftTrimFixedArray(heap, elms, delta); - array->set_elements(elms, SKIP_WRITE_BARRIER); + + elms_changed = true; } else { AssertNoAllocation no_gc; MoveElements(heap, &no_gc, @@ -882,7 +925,7 @@ BUILTIN(ArraySplice) { FillWithHoles(heap, new_elms, new_length, capacity); elms = new_elms; - array->set_elements(elms); + elms_changed = true; } else { AssertNoAllocation no_gc; MoveElements(heap, &no_gc, @@ -898,6 +941,10 @@ BUILTIN(ArraySplice) { elms->set(k, args[3 + k - actual_start], mode); } + if (elms_changed) { + array->set_elements(elms); + } + // Set the length. array->set_length(Smi::FromInt(new_length)); @@ -920,7 +967,7 @@ BUILTIN(ArrayConcat) { int result_len = 0; for (int i = 0; i < n_arguments; i++) { Object* arg = args[i]; - if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements() + if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements() || JSArray::cast(arg)->GetPrototype() != array_proto) { return CallJsBuiltin(isolate, "ArrayConcat", args); } @@ -956,6 +1003,17 @@ BUILTIN(ArrayConcat) { } FixedArray* result_elms = FixedArray::cast(result); + // Ensure element type transitions happen before copying elements in. + if (result_array->HasFastSmiOnlyElements()) { + for (int i = 0; i < n_arguments; i++) { + JSArray* array = JSArray::cast(args[i]); + if (!array->HasFastSmiOnlyElements()) { + result_array->EnsureCanContainNonSmiElements(); + break; + } + } + } + // Copy data. AssertNoAllocation no_gc; int start_pos = 0; @@ -1607,20 +1665,22 @@ void Builtins::Setup(bool create_heap_objects) { const BuiltinDesc* functions = BuiltinFunctionTable::functions(); // For now we generate builtin adaptor code into a stack-allocated - // buffer, before copying it into individual code objects. - byte buffer[4*KB]; + // buffer, before copying it into individual code objects. Be careful + // with alignment, some platforms don't like unaligned code. + union { int force_alignment; byte buffer[4*KB]; } u; // Traverse the list of builtins and generate an adaptor in a // separate code object for each one. for (int i = 0; i < builtin_count; i++) { if (create_heap_objects) { - MacroAssembler masm(isolate, buffer, sizeof buffer); + MacroAssembler masm(isolate, u.buffer, sizeof u.buffer); // Generate the code/adaptor. typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments); Generator g = FUNCTION_CAST(functions[i].generator); // We pass all arguments to the generator, but it may not use all of // them. This works because the first arguments are on top of the // stack. + ASSERT(!masm.has_frame()); g(&masm, functions[i].name, functions[i].extra_args); // Move the code into the object heap. CodeDesc desc; diff --git a/deps/v8/src/cached-powers.cc b/deps/v8/src/cached-powers.cc index 30a67a661b..9241d26582 100644 --- a/deps/v8/src/cached-powers.cc +++ b/deps/v8/src/cached-powers.cc @@ -134,14 +134,12 @@ static const CachedPower kCachedPowers[] = { }; static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers); -static const int kCachedPowersOffset = -kCachedPowers[0].decimal_exponent; +static const int kCachedPowersOffset = 348; // -1 * the first decimal_exponent. 
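Looking back at the array builtins above: the patch threads the new FAST_SMI_ONLY_ELEMENTS kind through push, shift, splice, concat and the ARM stubs, and every fast path now either proves the stored values are smis or bails out so the generic code can transition the backing store. A rough, hypothetical JavaScript sketch of when each path applies (the comments describe internal behaviour that is not observable from script):

var a = [1, 2, 3];      // starts out with smi-only elements
a.push(4);              // still smi-only, so the fast push path needs no write barrier
a.push("x");            // non-smi value: handled by the generic builtin, which
                        // transitions the backing store to FAST_ELEMENTS first

var b = [7, 8, 9];      // smi-only again
b[0] = {};              // a keyed store of a heap object on a smi-only array
                        // misses to the generic stub so the kind can transition
console.log(a.length, b[0]);  // 6 {}

Tracking arrays that have only ever held smis lets the fast paths skip the write barrier for those stores and gives later code more precise information about the element types.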
static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10) -const int PowersOfTenCache::kDecimalExponentDistance = - kCachedPowers[1].decimal_exponent - kCachedPowers[0].decimal_exponent; -const int PowersOfTenCache::kMinDecimalExponent = - kCachedPowers[0].decimal_exponent; -const int PowersOfTenCache::kMaxDecimalExponent = - kCachedPowers[kCachedPowersLength - 1].decimal_exponent; +// Difference between the decimal exponents in the table above. +const int PowersOfTenCache::kDecimalExponentDistance = 8; +const int PowersOfTenCache::kMinDecimalExponent = -348; +const int PowersOfTenCache::kMaxDecimalExponent = 340; void PowersOfTenCache::GetCachedPowerForBinaryExponentRange( int min_exponent, diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index 00da4cba62..4bc2603c53 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -52,11 +52,12 @@ void CodeStub::GenerateCode(MacroAssembler* masm) { // Update the static counter each time a new code stub is generated. masm->isolate()->counters()->code_stubs()->Increment(); - // Nested stubs are not allowed for leafs. - AllowStubCallsScope allow_scope(masm, AllowsStubCalls()); + // Nested stubs are not allowed for leaves. + AllowStubCallsScope allow_scope(masm, false); // Generate the code for the stub. masm->set_generating_stub(true); + NoCurrentFrameScope scope(masm); Generate(masm); } @@ -127,8 +128,10 @@ Handle CodeStub::GetCode() { GetKey(), new_object); heap->public_set_code_stubs(*dict); - code = *new_object; + Activate(code); + } else { + CHECK(IsPregenerated() == code->is_pregenerated()); } ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code)); @@ -166,7 +169,11 @@ MaybeObject* CodeStub::TryGetCode() { heap->code_stubs()->AtNumberPut(GetKey(), code); if (maybe_new_object->ToObject(&new_object)) { heap->public_set_code_stubs(NumberDictionary::cast(new_object)); + } else if (MustBeInStubCache()) { + return maybe_new_object; } + + Activate(code); } return code; @@ -188,6 +195,11 @@ const char* CodeStub::MajorName(CodeStub::Major major_key, } +void CodeStub::PrintName(StringStream* stream) { + stream->Add("%s", MajorName(MajorKey(), false)); +} + + int ICCompareStub::MinorKey() { return OpField::encode(op_ - Token::EQ) | StateField::encode(state_); } @@ -245,6 +257,7 @@ void InstanceofStub::PrintName(StringStream* stream) { void KeyedLoadElementStub::Generate(MacroAssembler* masm) { switch (elements_kind_) { case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: KeyedLoadStubCompiler::GenerateLoadFastElement(masm); break; case FAST_DOUBLE_ELEMENTS: @@ -274,7 +287,11 @@ void KeyedLoadElementStub::Generate(MacroAssembler* masm) { void KeyedStoreElementStub::Generate(MacroAssembler* masm) { switch (elements_kind_) { case FAST_ELEMENTS: - KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_); + case FAST_SMI_ONLY_ELEMENTS: { + KeyedStoreStubCompiler::GenerateStoreFastElement(masm, + is_js_array_, + elements_kind_); + } break; case FAST_DOUBLE_ELEMENTS: KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm, @@ -302,24 +319,20 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) { void ArgumentsAccessStub::PrintName(StringStream* stream) { - const char* type_name = NULL; // Make g++ happy. 
+ stream->Add("ArgumentsAccessStub_"); switch (type_) { - case READ_ELEMENT: type_name = "ReadElement"; break; - case NEW_NON_STRICT_FAST: type_name = "NewNonStrictFast"; break; - case NEW_NON_STRICT_SLOW: type_name = "NewNonStrictSlow"; break; - case NEW_STRICT: type_name = "NewStrict"; break; + case READ_ELEMENT: stream->Add("ReadElement"); break; + case NEW_NON_STRICT_FAST: stream->Add("NewNonStrictFast"); break; + case NEW_NON_STRICT_SLOW: stream->Add("NewNonStrictSlow"); break; + case NEW_STRICT: stream->Add("NewStrict"); break; } - stream->Add("ArgumentsAccessStub_%s", type_name); } void CallFunctionStub::PrintName(StringStream* stream) { - const char* flags_name = NULL; // Make g++ happy. - switch (flags_) { - case NO_CALL_FUNCTION_FLAGS: flags_name = ""; break; - case RECEIVER_MIGHT_BE_IMPLICIT: flags_name = "_Implicit"; break; - } - stream->Add("CallFunctionStub_Args%d%s", argc_, flags_name); + stream->Add("CallFunctionStub_Args%d", argc_); + if (ReceiverMightBeImplicit()) stream->Add("_Implicit"); + if (RecordCallTarget()) stream->Add("_Recording"); } diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h index 64c89b93d1..acfbd469f0 100644 --- a/deps/v8/src/code-stubs.h +++ b/deps/v8/src/code-stubs.h @@ -45,27 +45,23 @@ namespace internal { V(Compare) \ V(CompareIC) \ V(MathPow) \ + V(RecordWrite) \ + V(StoreBufferOverflow) \ + V(RegExpExec) \ V(TranscendentalCache) \ V(Instanceof) \ - /* All stubs above this line only exist in a few versions, which are */ \ - /* generated ahead of time. Therefore compiling a call to one of */ \ - /* them can't cause a new stub to be compiled, so compiling a call to */ \ - /* them is GC safe. The ones below this line exist in many variants */ \ - /* so code compiling a call to one can cause a GC. This means they */ \ - /* can't be called from other stubs, since stub generation code is */ \ - /* not GC safe. */ \ V(ConvertToDouble) \ V(WriteInt32ToHeapNumber) \ V(StackCheck) \ V(FastNewClosure) \ V(FastNewContext) \ + V(FastNewBlockContext) \ V(FastCloneShallowArray) \ V(RevertToNumber) \ V(ToBoolean) \ V(ToNumber) \ V(CounterOp) \ V(ArgumentsAccess) \ - V(RegExpExec) \ V(RegExpConstructResult) \ V(NumberToString) \ V(CEntry) \ @@ -73,7 +69,7 @@ namespace internal { V(KeyedLoadElement) \ V(KeyedStoreElement) \ V(DebuggerStatement) \ - V(StringDictionaryNegativeLookup) + V(StringDictionaryLookup) // List of code stubs only used on ARM platforms. #ifdef V8_TARGET_ARCH_ARM @@ -142,6 +138,27 @@ class CodeStub BASE_EMBEDDED { virtual ~CodeStub() {} + bool CompilingCallsToThisStubIsGCSafe() { + bool is_pregenerated = IsPregenerated(); + Code* code = NULL; + CHECK(!is_pregenerated || FindCodeInCache(&code)); + return is_pregenerated; + } + + // See comment above, where Instanceof is defined. + virtual bool IsPregenerated() { return false; } + + static void GenerateStubsAheadOfTime(); + static void GenerateFPStubs(); + + // Some stubs put untagged junk on the stack that cannot be scanned by the + // GC. This means that we must be statically sure that no GC can occur while + // they are running. If that is the case they should override this to return + // true, which will cause an assertion if we try to call something that can + // GC or if we try to put a stack frame on top of the junk, which would not + // result in a traversable stack. 
+ virtual bool SometimesSetsUpAFrame() { return true; } + protected: static const int kMajorBits = 6; static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits; @@ -164,6 +181,14 @@ class CodeStub BASE_EMBEDDED { // Finish the code object after it has been generated. virtual void FinishCode(Code* code) { } + // Returns true if TryGetCode should fail if it failed + // to register newly generated stub in the stub cache. + virtual bool MustBeInStubCache() { return false; } + + // Activate newly generated stub. Is called after + // registering stub in the stub cache. + virtual void Activate(Code* code) { } + // Returns information for computing the number key. virtual Major MajorKey() = 0; virtual int MinorKey() = 0; @@ -178,9 +203,7 @@ class CodeStub BASE_EMBEDDED { // Returns a name for logging/debugging purposes. SmartArrayPointer GetName(); - virtual void PrintName(StringStream* stream) { - stream->Add("%s", MajorName(MajorKey(), false)); - } + virtual void PrintName(StringStream* stream); // Returns whether the code generated for this stub needs to be allocated as // a fixed (non-moveable) code object. @@ -193,9 +216,6 @@ class CodeStub BASE_EMBEDDED { MajorKeyBits::encode(MajorKey()); } - // See comment above, where Instanceof is defined. - bool AllowsStubCalls() { return MajorKey() <= Instanceof; } - class MajorKeyBits: public BitField {}; class MinorKeyBits: public BitField {}; @@ -304,7 +324,7 @@ class FastNewContextStub : public CodeStub { static const int kMaximumSlots = 64; explicit FastNewContextStub(int slots) : slots_(slots) { - ASSERT(slots_ > 0 && slots <= kMaximumSlots); + ASSERT(slots_ > 0 && slots_ <= kMaximumSlots); } void Generate(MacroAssembler* masm); @@ -317,6 +337,24 @@ class FastNewContextStub : public CodeStub { }; +class FastNewBlockContextStub : public CodeStub { + public: + static const int kMaximumSlots = 64; + + explicit FastNewBlockContextStub(int slots) : slots_(slots) { + ASSERT(slots_ > 0 && slots_ <= kMaximumSlots); + } + + void Generate(MacroAssembler* masm); + + private: + int slots_; + + Major MajorKey() { return FastNewBlockContext; } + int MinorKey() { return slots_; } +}; + + class FastCloneShallowArrayStub : public CodeStub { public: // Maximum length of copied elements array. @@ -531,11 +569,18 @@ class CompareStub: public CodeStub { class CEntryStub : public CodeStub { public: - explicit CEntryStub(int result_size) - : result_size_(result_size), save_doubles_(false) { } + explicit CEntryStub(int result_size, + SaveFPRegsMode save_doubles = kDontSaveFPRegs) + : result_size_(result_size), save_doubles_(save_doubles) { } void Generate(MacroAssembler* masm); - void SaveDoubles() { save_doubles_ = true; } + + // The version of this stub that doesn't save doubles is generated ahead of + // time, so it's OK to call it from other stubs that can't cope with GC during + // their code generation. On machines that always have gp registers (x64) we + // can generate both variants ahead of time. + virtual bool IsPregenerated(); + static void GenerateAheadOfTime(); private: void GenerateCore(MacroAssembler* masm, @@ -550,7 +595,7 @@ class CEntryStub : public CodeStub { // Number of pointers/values returned. 
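The comment block above introduces SometimesSetsUpAFrame(): leaf stubs that keep untagged values on the stack must never trigger a GC or push a frame, and they advertise that by overriding the hook to return false, as ToBooleanStub does later in this file. Below is a toy, hedged sketch of the shape of that override, using placeholder classes rather than V8's CodeStub hierarchy.

#include <cassert>

// Placeholder classes, not V8's CodeStub hierarchy.
class FakeCodeStub {
 public:
  virtual ~FakeCodeStub() {}
  // Default: the stub may set up a frame, so calls that can GC are fine.
  virtual bool SometimesSetsUpAFrame() { return true; }
};

class FakeToBooleanStub : public FakeCodeStub {
 public:
  // Leaf stub that keeps untagged junk on the stack: it must never build a
  // frame or reach anything that can GC, and it says so via this override.
  virtual bool SometimesSetsUpAFrame() { return false; }
};

int main() {
  FakeToBooleanStub stub;
  FakeCodeStub* base = &stub;
  // A code generator would consult the hook before allowing such calls.
  assert(!base->SometimesSetsUpAFrame());
  return 0;
}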
const int result_size_; - bool save_doubles_; + SaveFPRegsMode save_doubles_; Major MajorKey() { return CEntry; } int MinorKey(); @@ -647,10 +692,32 @@ class CallFunctionStub: public CodeStub { void Generate(MacroAssembler* masm); + virtual void FinishCode(Code* code); + + static void Clear(Heap* heap, Address address); + + static Object* GetCachedValue(Address address); + static int ExtractArgcFromMinorKey(int minor_key) { return ArgcBits::decode(minor_key); } + // The object that indicates an uninitialized cache. + static Handle UninitializedSentinel(Isolate* isolate) { + return isolate->factory()->the_hole_value(); + } + + // A raw version of the uninitialized sentinel that's safe to read during + // garbage collection (e.g., for patching the cache). + static Object* RawUninitializedSentinel(Heap* heap) { + return heap->raw_unchecked_the_hole_value(); + } + + // The object that indicates a megamorphic state. + static Handle MegamorphicSentinel(Isolate* isolate) { + return isolate->factory()->undefined_value(); + } + private: int argc_; CallFunctionFlags flags_; @@ -658,8 +725,8 @@ class CallFunctionStub: public CodeStub { virtual void PrintName(StringStream* stream); // Minor key encoding in 32 bits with Bitfield . - class FlagBits: public BitField {}; - class ArgcBits: public BitField {}; + class FlagBits: public BitField {}; + class ArgcBits: public BitField {}; Major MajorKey() { return CallFunction; } int MinorKey() { @@ -670,6 +737,10 @@ class CallFunctionStub: public CodeStub { bool ReceiverMightBeImplicit() { return (flags_ & RECEIVER_MIGHT_BE_IMPLICIT) != 0; } + + bool RecordCallTarget() { + return (flags_ & RECORD_CALL_TARGET) != 0; + } }; @@ -934,6 +1005,8 @@ class ToBooleanStub: public CodeStub { virtual int GetCodeKind() { return Code::TO_BOOLEAN_IC; } virtual void PrintName(StringStream* stream); + virtual bool SometimesSetsUpAFrame() { return false; } + private: Major MajorKey() { return ToBoolean; } int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) | types_.ToByte(); } diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index cdc9ba1553..ceea7b9fea 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -218,8 +218,8 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) { int CEntryStub::MinorKey() { + int result = (save_doubles_ == kSaveFPRegs) ? 1 : 0; ASSERT(result_size_ == 1 || result_size_ == 2); - int result = save_doubles_ ? 1 : 0; #ifdef _WIN64 return result | ((result_size_ == 1) ? 0 : 2); #else diff --git a/deps/v8/src/compiler-intrinsics.h b/deps/v8/src/compiler-intrinsics.h new file mode 100644 index 0000000000..3b9c59ea53 --- /dev/null +++ b/deps/v8/src/compiler-intrinsics.h @@ -0,0 +1,77 @@ +// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
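CEntryStub's minor key now derives bit 0 from a SaveFPRegsMode instead of a bool, as the codegen.cc hunk above shows, and on Win64 the result size contributes bit 1. The sketch below packs the same two properties with stand-in types; the non-Win64 branch is not visible in the hunk, so returning only the save-doubles bit there is an assumption.

#include <cassert>

enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };

// Stand-in for CEntryStub::MinorKey(): bit 0 encodes whether double
// registers are saved; on Win64 bit 1 encodes the result size.
static int CEntryMinorKey(int result_size, SaveFPRegsMode save_doubles,
                          bool is_win64) {
  assert(result_size == 1 || result_size == 2);
  int result = (save_doubles == kSaveFPRegs) ? 1 : 0;
  if (is_win64) {
    return result | ((result_size == 1) ? 0 : 2);
  }
  return result;  // assumed: result size does not affect the key off Win64
}

int main() {
  assert(CEntryMinorKey(1, kDontSaveFPRegs, false) == 0);
  assert(CEntryMinorKey(1, kSaveFPRegs, false) == 1);
  assert(CEntryMinorKey(2, kSaveFPRegs, true) == 3);
  return 0;
}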
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_COMPILER_INTRINSICS_H_ +#define V8_COMPILER_INTRINSICS_H_ + +namespace v8 { +namespace internal { + +class CompilerIntrinsics { + public: + // Returns number of zero bits preceding least significant 1 bit. + // Undefined for zero value. + INLINE(static int CountTrailingZeros(uint32_t value)); + + // Returns number of zero bits following most significant 1 bit. + // Undefined for zero value. + INLINE(static int CountLeadingZeros(uint32_t value)); +}; + +#ifdef __GNUC__ +int CompilerIntrinsics::CountTrailingZeros(uint32_t value) { + return __builtin_ctz(value); +} + +int CompilerIntrinsics::CountLeadingZeros(uint32_t value) { + return __builtin_clz(value); +} + +#elif defined(_MSC_VER) + +#pragma intrinsic(_BitScanForward) +#pragma intrinsic(_BitScanReverse) + +int CompilerIntrinsics::CountTrailingZeros(uint32_t value) { + unsigned long result; //NOLINT + _BitScanForward(&result, static_cast(value)); //NOLINT + return static_cast(result); +} + +int CompilerIntrinsics::CountLeadingZeros(uint32_t value) { + unsigned long result; //NOLINT + _BitScanReverse(&result, static_cast(value)); //NOLINT + return 31 - static_cast(result); +} + +#else +#error Unsupported compiler +#endif + +} } // namespace v8::internal + +#endif // V8_COMPILER_INTRINSICS_H_ diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index ba6bb42bfa..7584fd25a0 100644 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -36,6 +36,7 @@ #include "full-codegen.h" #include "gdb-jit.h" #include "hydrogen.h" +#include "isolate-inl.h" #include "lithium.h" #include "liveedit.h" #include "parser.h" @@ -275,7 +276,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) { } Handle global_context(info->closure()->context()->global_context()); - TypeFeedbackOracle oracle(code, global_context); + TypeFeedbackOracle oracle(code, global_context, info->isolate()); HGraphBuilder builder(info, &oracle); HPhase phase(HPhase::kTotal); HGraph* graph = builder.CreateGraph(); diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc index 4f93abdff1..007d30de3f 100644 --- a/deps/v8/src/contexts.cc +++ b/deps/v8/src/contexts.cc @@ -86,14 +86,14 @@ void Context::set_global_proxy(JSObject* object) { Handle Context::Lookup(Handle name, ContextLookupFlags flags, - int* index_, + int* index, PropertyAttributes* attributes, BindingFlags* binding_flags) { Isolate* isolate = GetIsolate(); Handle context(this, isolate); bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0; - *index_ = -1; + *index = -1; *attributes = ABSENT; *binding_flags = MISSING_BINDING; @@ -110,70 +110,50 @@ Handle Context::Lookup(Handle name, PrintF("\n"); } - // Check extension/with/global object. 
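The new compiler-intrinsics.h above maps CountTrailingZeros/CountLeadingZeros onto __builtin_ctz/__builtin_clz for GCC and _BitScanForward/_BitScanReverse for MSVC. Here is a quick standalone probe of the documented contract, using the GCC/Clang builtins directly (so it only builds with those compilers, and it assumes a 32-bit unsigned int) rather than the header itself.

#include <cassert>
#include <cstdint>

// Mirrors the GCC/Clang arm of the new header; undefined for a zero input,
// exactly like the documented contract of the CompilerIntrinsics helpers.
static int CountTrailingZeros(uint32_t value) { return __builtin_ctz(value); }
static int CountLeadingZeros(uint32_t value) { return __builtin_clz(value); }

int main() {
  assert(CountTrailingZeros(1u) == 0);
  assert(CountTrailingZeros(8u) == 3);          // 0b1000: three trailing zeros
  assert(CountLeadingZeros(0x80000000u) == 0);  // top bit set: no leading zeros
  assert(CountLeadingZeros(1u) == 31);          // only bit 0 set
  return 0;
}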
- if (!context->IsBlockContext() && context->has_extension()) { - if (context->IsCatchContext()) { - // Catch contexts have the variable name in the extension slot. - if (name->Equals(String::cast(context->extension()))) { - if (FLAG_trace_contexts) { - PrintF("=> found in catch context\n"); - } - *index_ = Context::THROWN_OBJECT_INDEX; - *attributes = NONE; - *binding_flags = MUTABLE_IS_INITIALIZED; - return context; - } + // 1. Check global objects, subjects of with, and extension objects. + if (context->IsGlobalContext() || + context->IsWithContext() || + (context->IsFunctionContext() && context->has_extension())) { + Handle object(JSObject::cast(context->extension()), isolate); + // Context extension objects needs to behave as if they have no + // prototype. So even if we want to follow prototype chains, we need + // to only do a local lookup for context extension objects. + if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 || + object->IsJSContextExtensionObject()) { + *attributes = object->GetLocalPropertyAttribute(*name); } else { - ASSERT(context->IsGlobalContext() || - context->IsFunctionContext() || - context->IsWithContext()); - // Global, function, and with contexts may have an object in the - // extension slot. - Handle extension(JSObject::cast(context->extension()), - isolate); - // Context extension objects needs to behave as if they have no - // prototype. So even if we want to follow prototype chains, we - // need to only do a local lookup for context extension objects. - if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 || - extension->IsJSContextExtensionObject()) { - *attributes = extension->GetLocalPropertyAttribute(*name); - } else { - *attributes = extension->GetPropertyAttribute(*name); - } - if (*attributes != ABSENT) { - // property found - if (FLAG_trace_contexts) { - PrintF("=> found property in context object %p\n", - reinterpret_cast(*extension)); - } - return extension; + *attributes = object->GetPropertyAttribute(*name); + } + if (*attributes != ABSENT) { + if (FLAG_trace_contexts) { + PrintF("=> found property in context object %p\n", + reinterpret_cast(*object)); } + return object; } } - // Check serialized scope information of functions and blocks. Only - // functions can have parameters, and a function name. + // 2. Check the context proper if it has slots. if (context->IsFunctionContext() || context->IsBlockContext()) { - // We may have context-local slots. Check locals in the context. + // Use serialized scope information of functions and blocks to search + // for the context index. Handle scope_info; if (context->IsFunctionContext()) { scope_info = Handle( context->closure()->shared()->scope_info(), isolate); } else { - ASSERT(context->IsBlockContext()); scope_info = Handle( SerializedScopeInfo::cast(context->extension()), isolate); } - Variable::Mode mode; - int index = scope_info->ContextSlotIndex(*name, &mode); - ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS); - if (index >= 0) { + int slot_index = scope_info->ContextSlotIndex(*name, &mode); + ASSERT(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS); + if (slot_index >= 0) { if (FLAG_trace_contexts) { PrintF("=> found local in context slot %d (mode = %d)\n", - index, mode); + slot_index, mode); } - *index_ = index; + *index = slot_index; // Note: Fixed context slots are statically allocated by the compiler. 
// Statically allocated variables always have a statically known mode, // which is the mode with which they were declared when added to the @@ -206,22 +186,34 @@ Handle Context::Lookup(Handle name, // Check the slot corresponding to the intermediate context holding // only the function name variable. - if (follow_context_chain) { - int index = scope_info->FunctionContextSlotIndex(*name); - if (index >= 0) { + if (follow_context_chain && context->IsFunctionContext()) { + int function_index = scope_info->FunctionContextSlotIndex(*name); + if (function_index >= 0) { if (FLAG_trace_contexts) { PrintF("=> found intermediate function in context slot %d\n", - index); + function_index); } - *index_ = index; + *index = function_index; *attributes = READ_ONLY; *binding_flags = IMMUTABLE_IS_INITIALIZED; return context; } } + + } else if (context->IsCatchContext()) { + // Catch contexts have the variable name in the extension slot. + if (name->Equals(String::cast(context->extension()))) { + if (FLAG_trace_contexts) { + PrintF("=> found in catch context\n"); + } + *index = Context::THROWN_OBJECT_INDEX; + *attributes = NONE; + *binding_flags = MUTABLE_IS_INITIALIZED; + return context; + } } - // Proceed with the previous context. + // 3. Prepare to continue with the previous (next outermost) context. if (context->IsGlobalContext()) { follow_context_chain = false; } else { diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h index 505f86c8ca..b80475f0f7 100644 --- a/deps/v8/src/contexts.h +++ b/deps/v8/src/contexts.h @@ -134,6 +134,8 @@ enum BindingFlags { V(MAP_CACHE_INDEX, Object, map_cache) \ V(CONTEXT_DATA_INDEX, Object, data) \ V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \ + V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \ + to_complete_property_descriptor) \ V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \ V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \ V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) @@ -252,6 +254,7 @@ class Context: public FixedArray { OUT_OF_MEMORY_INDEX, CONTEXT_DATA_INDEX, ALLOW_CODE_GEN_FROM_STRINGS_INDEX, + TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, DERIVED_HAS_TRAP_INDEX, DERIVED_GET_TRAP_INDEX, DERIVED_SET_TRAP_INDEX, @@ -330,12 +333,6 @@ class Context: public FixedArray { // Mark the global context with out of memory. inline void mark_out_of_memory(); - // The exception holder is the object used as a with object in - // the implementation of a catch block. - bool is_exception_holder(Object* object) { - return IsCatchContext() && extension() == object; - } - // A global context hold a list of all functions which have been optimized. void AddOptimizedFunction(JSFunction* function); void RemoveOptimizedFunction(JSFunction* function); @@ -355,29 +352,25 @@ class Context: public FixedArray { #undef GLOBAL_CONTEXT_FIELD_ACCESSORS // Lookup the the slot called name, starting with the current context. - // There are 4 possible outcomes: - // - // 1) index_ >= 0 && result->IsContext(): - // most common case, the result is a Context, and index is the - // context slot index, and the slot exists. - // attributes == READ_ONLY for the function name variable, NONE otherwise. + // There are three possibilities: // - // 2) index_ >= 0 && result->IsJSObject(): - // the result is the JSObject arguments object, the index is the parameter - // index, i.e., key into the arguments object, and the property exists. - // attributes != ABSENT. + // 1) result->IsContext(): + // The binding was found in a context. 
*index is always the + // non-negative slot index. *attributes is NONE for var and let + // declarations, READ_ONLY for const declarations (never ABSENT). // - // 3) index_ < 0 && result->IsJSObject(): - // the result is the JSObject extension context or the global object, - // and the name is the property name, and the property exists. - // attributes != ABSENT. + // 2) result->IsJSObject(): + // The binding was found as a named property in a context extension + // object (i.e., was introduced via eval), as a property on the subject + // of with, or as a property of the global object. *index is -1 and + // *attributes is not ABSENT. // - // 4) index_ < 0 && result.is_null(): - // there was no context found with the corresponding property. - // attributes == ABSENT. + // 3) result.is_null(): + // There was no binding found, *index is always -1 and *attributes is + // always ABSENT. Handle Lookup(Handle name, ContextLookupFlags flags, - int* index_, + int* index, PropertyAttributes* attributes, BindingFlags* binding_flags); diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h index 41cf0d54c2..8bc11bf83d 100644 --- a/deps/v8/src/conversions-inl.h +++ b/deps/v8/src/conversions-inl.h @@ -47,7 +47,7 @@ namespace v8 { namespace internal { static inline double JunkStringValue() { - return std::numeric_limits::quiet_NaN(); + return BitCast(kQuietNaNMask); } diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h index e51ad6501c..31aaf6b737 100644 --- a/deps/v8/src/conversions.h +++ b/deps/v8/src/conversions.h @@ -28,8 +28,6 @@ #ifndef V8_CONVERSIONS_H_ #define V8_CONVERSIONS_H_ -#include - #include "utils.h" namespace v8 { diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc index 65490285e7..d74c034ac5 100644 --- a/deps/v8/src/cpu-profiler.cc +++ b/deps/v8/src/cpu-profiler.cc @@ -551,12 +551,12 @@ void CpuProfiler::StopProcessor() { sampler->Stop(); need_to_stop_sampler_ = false; } + NoBarrier_Store(&is_profiling_, false); processor_->Stop(); processor_->Join(); delete processor_; delete generator_; processor_ = NULL; - NoBarrier_Store(&is_profiling_, false); generator_ = NULL; logger->logging_nesting_ = saved_logging_nesting_; } diff --git a/deps/v8/src/d8-debug.cc b/deps/v8/src/d8-debug.cc index adefba7322..8fbc876dab 100644 --- a/deps/v8/src/d8-debug.cc +++ b/deps/v8/src/d8-debug.cc @@ -1,4 +1,4 @@ -// Copyright 2008 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -25,6 +25,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
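The rewritten Lookup() comment above pins down three mutually exclusive outcomes: a context slot (non-negative *index), a property on some object (*index == -1, attributes not ABSENT), or nothing found. The simplified dispatch below makes that contract concrete; the struct is a placeholder, since in V8 the same information comes back through the returned handle and the out-parameters.

#include <cassert>
#include <cstring>

struct FakeLookupResult {
  bool found_context;  // outcome 1: binding lives in a context slot
  bool found_object;   // outcome 2: binding is a property on some object
  int index;           // slot index for outcome 1, otherwise -1
};

static const char* Classify(const FakeLookupResult& r) {
  if (r.found_context) {
    assert(r.index >= 0);   // outcome 1: always a non-negative slot index
    return "context slot";
  }
  if (r.found_object) {
    assert(r.index == -1);  // outcome 2: found by property lookup, no slot
    return "object property";
  }
  assert(r.index == -1);    // outcome 3: nothing found anywhere
  return "absent";
}

int main() {
  FakeLookupResult in_slot = { true, false, 2 };
  FakeLookupResult on_object = { false, true, -1 };
  FakeLookupResult missing = { false, false, -1 };
  assert(strcmp(Classify(in_slot), "context slot") == 0);
  assert(strcmp(Classify(on_object), "object property") == 0);
  assert(strcmp(Classify(missing), "absent") == 0);
  return 0;
}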
+#ifdef ENABLE_DEBUGGER_SUPPORT #include "d8.h" #include "d8-debug.h" @@ -367,3 +368,5 @@ void KeyboardThread::Run() { } // namespace v8 + +#endif // ENABLE_DEBUGGER_SUPPORT diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc index 55f0d4c2ab..a516576faf 100644 --- a/deps/v8/src/d8.cc +++ b/deps/v8/src/d8.cc @@ -146,11 +146,11 @@ bool Shell::ExecuteString(Handle source, Handle name, bool print_result, bool report_exceptions) { -#ifndef V8_SHARED +#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT) bool FLAG_debugger = i::FLAG_debugger; #else bool FLAG_debugger = false; -#endif // V8_SHARED +#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT HandleScope handle_scope; TryCatch try_catch; options.script_executed = true; @@ -594,6 +594,7 @@ void Shell::InstallUtilityScript() { Context::Scope utility_scope(utility_context_); #ifdef ENABLE_DEBUGGER_SUPPORT + if (i::FLAG_debugger) printf("JavaScript debugger enabled\n"); // Install the debugger object in the utility scope i::Debug* debug = i::Isolate::Current()->debug(); debug->Load(); @@ -816,7 +817,7 @@ void Shell::OnExit() { static FILE* FOpen(const char* path, const char* mode) { -#if (defined(_WIN32) || defined(_WIN64)) +#if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64)) FILE* result; if (fopen_s(&result, path, mode) == 0) { return result; @@ -900,9 +901,6 @@ void Shell::RunShell() { #ifndef V8_SHARED console = LineEditor::Get(); printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name()); - if (i::FLAG_debugger) { - printf("JavaScript debugger enabled\n"); - } console->Open(); while (true) { i::SmartArrayPointer input = console->Prompt(Shell::kPrompt); @@ -1253,14 +1251,22 @@ int Shell::RunMain(int argc, char* argv[]) { Locker lock; HandleScope scope; Persistent context = CreateEvaluationContext(); + if (options.last_run) { + // Keep using the same context in the interactive shell. + evaluation_context_ = context; +#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT) + // If the interactive debugger is enabled make sure to activate + // it before running the files passed on the command line. + if (i::FLAG_debugger) { + InstallUtilityScript(); + } +#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT + } { Context::Scope cscope(context); options.isolate_sources[0].Execute(); } - if (options.last_run) { - // Keep using the same context in the interactive shell - evaluation_context_ = context; - } else { + if (!options.last_run) { context.Dispose(); } @@ -1331,9 +1337,11 @@ int Shell::Main(int argc, char* argv[]) { if (( options.interactive_shell || !options.script_executed ) && !options.test_shell ) { -#ifndef V8_SHARED - InstallUtilityScript(); -#endif // V8_SHARED +#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT) + if (!i::FLAG_debugger) { + InstallUtilityScript(); + } +#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT RunShell(); } diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index a229d39c3e..3d79485b57 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -40,6 +40,7 @@ #include "global-handles.h" #include "ic.h" #include "ic-inl.h" +#include "isolate-inl.h" #include "list.h" #include "messages.h" #include "natives.h" @@ -401,15 +402,15 @@ void BreakLocationIterator::PrepareStepIn() { // Step in can only be prepared if currently positioned on an IC call, // construct call or CallFunction stub call. 
Address target = rinfo()->target_address(); - Handle code(Code::GetCodeFromTargetAddress(target)); - if (code->is_call_stub() || code->is_keyed_call_stub()) { + Handle target_code(Code::GetCodeFromTargetAddress(target)); + if (target_code->is_call_stub() || target_code->is_keyed_call_stub()) { // Step in through IC call is handled by the runtime system. Therefore make // sure that the any current IC is cleared and the runtime system is // called. If the executing code has a debug break at the location change // the call in the original code as it is the code there that will be // executed in place of the debug break call. - Handle stub = ComputeCallDebugPrepareStepIn(code->arguments_count(), - code->kind()); + Handle stub = ComputeCallDebugPrepareStepIn( + target_code->arguments_count(), target_code->kind()); if (IsDebugBreak()) { original_rinfo()->set_target_address(stub->entry()); } else { @@ -419,7 +420,7 @@ void BreakLocationIterator::PrepareStepIn() { #ifdef DEBUG // All the following stuff is needed only for assertion checks so the code // is wrapped in ifdef. - Handle maybe_call_function_stub = code; + Handle maybe_call_function_stub = target_code; if (IsDebugBreak()) { Address original_target = original_rinfo()->target_address(); maybe_call_function_stub = @@ -436,8 +437,9 @@ void BreakLocationIterator::PrepareStepIn() { // Step in through CallFunction stub should also be prepared by caller of // this function (Debug::PrepareStep) which should flood target function // with breakpoints. - ASSERT(RelocInfo::IsConstructCall(rmode()) || code->is_inline_cache_stub() - || is_call_function_stub); + ASSERT(RelocInfo::IsConstructCall(rmode()) || + target_code->is_inline_cache_stub() || + is_call_function_stub); #endif } } @@ -474,11 +476,11 @@ void BreakLocationIterator::SetDebugBreakAtIC() { RelocInfo::Mode mode = rmode(); if (RelocInfo::IsCodeTarget(mode)) { Address target = rinfo()->target_address(); - Handle code(Code::GetCodeFromTargetAddress(target)); + Handle target_code(Code::GetCodeFromTargetAddress(target)); // Patch the code to invoke the builtin debug break function matching the // calling convention used by the call site. - Handle dbgbrk_code(Debug::FindDebugBreak(code, mode)); + Handle dbgbrk_code(Debug::FindDebugBreak(target_code, mode)); rinfo()->set_target_address(dbgbrk_code->entry()); } } @@ -772,7 +774,7 @@ bool Debug::CompileDebuggerScript(int index) { // Execute the shared function in the debugger context. Handle context = isolate->global_context(); - bool caught_exception = false; + bool caught_exception; Handle function = factory->NewFunctionFromSharedFunctionInfo(function_info, context); @@ -1103,14 +1105,13 @@ bool Debug::CheckBreakPoint(Handle break_point_object) { Handle break_id = factory->NewNumberFromInt(Debug::break_id()); // Call HandleBreakPointx. 
- bool caught_exception = false; - const int argc = 2; - Object** argv[argc] = { - break_id.location(), - reinterpret_cast(break_point_object.location()) - }; + bool caught_exception; + Handle argv[] = { break_id, break_point_object }; Handle result = Execution::TryCall(check_break_point, - isolate_->js_builtins_object(), argc, argv, &caught_exception); + isolate_->js_builtins_object(), + ARRAY_SIZE(argv), + argv, + &caught_exception); // If exception or non boolean result handle as not triggered if (caught_exception || !result->IsBoolean()) { @@ -1732,6 +1733,10 @@ void Debug::PrepareForBreakPoints() { if (!has_break_points_) { Deoptimizer::DeoptimizeAll(); + // We are going to iterate heap to find all functions without + // debug break slots. + isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask); + AssertNoAllocation no_allocation; Builtins* builtins = isolate_->builtins(); Code* lazy_compile = builtins->builtin(Builtins::kLazyCompile); @@ -1997,9 +2002,10 @@ void Debug::CreateScriptCache() { // Perform two GCs to get rid of all unreferenced scripts. The first GC gets // rid of all the cached script wrappers and the second gets rid of the - // scripts which are no longer referenced. - heap->CollectAllGarbage(false); - heap->CollectAllGarbage(false); + // scripts which are no longer referenced. The second also sweeps precisely, + // which saves us doing yet another GC to make the heap iterable. + heap->CollectAllGarbage(Heap::kNoGCFlags); + heap->CollectAllGarbage(Heap::kMakeHeapIterableMask); ASSERT(script_cache_ == NULL); script_cache_ = new ScriptCache(); @@ -2007,6 +2013,8 @@ void Debug::CreateScriptCache() { // Scan heap for Script objects. int count = 0; HeapIterator iterator; + AssertNoAllocation no_allocation; + for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { if (obj->IsScript() && Script::cast(obj)->HasValidSource()) { script_cache_->Add(Handle
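The debug.cc changes above follow a pattern that recurs throughout this patch: force a garbage collection with Heap::kMakeHeapIterableMask (or two precise-sweeping collections) before walking the heap, then hold an AssertNoAllocation scope while the HeapIterator runs. Below is a stand-in sketch of that discipline with fake types, since the real classes live inside the heap implementation and cannot be shown standalone.

#include <cassert>

class FakeHeap {
 public:
  FakeHeap() : allocation_forbidden_(0), iterable_(false) {}
  void CollectAllGarbage() { iterable_ = true; }            // kMakeHeapIterableMask analogue
  void Allocate() { assert(allocation_forbidden_ == 0); }   // allocation could move objects
  bool iterable() const { return iterable_; }
  int allocation_forbidden_;
 private:
  bool iterable_;
};

// RAII scope mirroring AssertNoAllocation.
class FakeAssertNoAllocation {
 public:
  explicit FakeAssertNoAllocation(FakeHeap* heap) : heap_(heap) {
    heap_->allocation_forbidden_++;
  }
  ~FakeAssertNoAllocation() { heap_->allocation_forbidden_--; }
 private:
  FakeHeap* heap_;
};

int main() {
  FakeHeap heap;
  heap.CollectAllGarbage();            // make the heap iterable first
  assert(heap.iterable());
  {
    FakeAssertNoAllocation no_allocation(&heap);
    // ... iterate heap objects here; heap.Allocate() would now assert ...
  }
  heap.Allocate();                     // allowed again after the scope ends
  return 0;
}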