From 50464cd4f49e40f4fe792ff46a81052319a222e9 Mon Sep 17 00:00:00 2001
From: Bert Belder
Date: Wed, 13 Jun 2012 15:34:45 +0200
Subject: [PATCH] v8: upgrade to v3.11.10

---
 deps/v8/AUTHORS | 1 +
 deps/v8/ChangeLog | 311 ++
 deps/v8/DEPS | 27 +
 deps/v8/Makefile | 69 +-
 deps/v8/SConstruct | 4 +-
 deps/v8/build/armu.gypi | 36 -
 deps/v8/build/common.gypi | 268 +-
 deps/v8/build/gyp_v8 | 2 +-
 deps/v8/build/mipsu.gypi | 33 -
 deps/v8/build/standalone.gypi | 12 +-
 deps/v8/include/v8-profiler.h | 85 +-
 deps/v8/include/v8.h | 251 +-
 deps/v8/samples/lineprocessor.cc | 6 +-
 deps/v8/samples/samples.gyp | 8 +-
 deps/v8/samples/shell.cc | 25 +-
 deps/v8/src/SConscript | 1 +
 deps/v8/src/allocation-inl.h | 4 +-
 deps/v8/src/allocation.h | 14 +-
 deps/v8/src/api.cc | 175 +-
 deps/v8/src/api.h | 11 +-
 deps/v8/src/apiutils.h | 9 +-
 deps/v8/src/arguments.h | 13 +-
 deps/v8/src/arm/builtins-arm.cc | 9 +-
 deps/v8/src/arm/code-stubs-arm.cc | 146 +-
 deps/v8/src/arm/codegen-arm.cc | 4 +-
 deps/v8/src/arm/debug-arm.cc | 4 +-
 deps/v8/src/arm/full-codegen-arm.cc | 293 +-
 deps/v8/src/arm/ic-arm.cc | 61 +-
 deps/v8/src/arm/lithium-arm.cc | 210 +-
 deps/v8/src/arm/lithium-arm.h | 116 +-
 deps/v8/src/arm/lithium-codegen-arm.cc | 548 +-
 deps/v8/src/arm/lithium-codegen-arm.h | 41 +-
 deps/v8/src/arm/lithium-gap-resolver-arm.cc | 4 +-
 deps/v8/src/arm/macro-assembler-arm.cc | 127 +-
 deps/v8/src/arm/macro-assembler-arm.h | 27 +-
 deps/v8/src/arm/regexp-macro-assembler-arm.cc | 244 +-
 deps/v8/src/arm/regexp-macro-assembler-arm.h | 24 +-
 deps/v8/src/arm/simulator-arm.h | 12 +-
 deps/v8/src/arm/stub-cache-arm.cc | 342 +-
 deps/v8/src/array.js | 166 +-
 deps/v8/src/assembler.cc | 95 +-
 deps/v8/src/assembler.h | 7 +
 deps/v8/src/ast.cc | 269 +-
 deps/v8/src/ast.h | 81 +-
 deps/v8/src/bootstrapper.cc | 38 +-
 deps/v8/src/builtins.cc | 153 +-
 deps/v8/src/builtins.h | 1 -
 deps/v8/src/bytecodes-irregexp.h | 35 +-
 deps/v8/src/code-stubs.cc | 51 +-
 deps/v8/src/code-stubs.h | 1 +
 deps/v8/src/codegen.h | 6 +-
 deps/v8/src/compiler-intrinsics.h | 17 +
 deps/v8/src/compiler.cc | 21 +-
 deps/v8/src/contexts.h | 22 +-
 deps/v8/src/conversions-inl.h | 4 +-
 deps/v8/src/d8.cc | 245 +-
 deps/v8/src/d8.h | 6 +-
 deps/v8/src/d8.js | 2 +-
 deps/v8/src/dateparser-inl.h | 3 +
 deps/v8/src/debug-agent.cc | 38 +-
 deps/v8/src/debug-debugger.js | 57 +-
 deps/v8/src/debug.cc | 86 +-
 deps/v8/src/debug.h | 55 +-
 deps/v8/src/deoptimizer.cc | 68 +-
 deps/v8/src/deoptimizer.h | 24 +-
 deps/v8/src/double.h | 6 -
 deps/v8/src/elements-kind.cc | 134 +
 deps/v8/src/elements-kind.h | 221 +
 deps/v8/src/elements.cc | 681 ++-
 deps/v8/src/elements.h | 65 +-
 .../externalize-string-extension.cc | 7 +-
 deps/v8/src/extensions/gc-extension.cc | 5 +-
 deps/v8/src/factory.cc | 80 +-
 deps/v8/src/factory.h | 30 +-
 deps/v8/src/flag-definitions.h | 16 +-
 deps/v8/src/frames.cc | 54 +-
 deps/v8/src/frames.h | 12 +-
 deps/v8/src/full-codegen.cc | 141 +-
 deps/v8/src/full-codegen.h | 54 +-
 deps/v8/src/func-name-inferrer.cc | 15 +-
 deps/v8/src/func-name-inferrer.h | 10 +-
 deps/v8/src/handles.cc | 6 +-
 deps/v8/src/hashmap.h | 102 +-
 deps/v8/src/heap-inl.h | 27 +-
 deps/v8/src/heap-profiler.cc | 50 +-
 deps/v8/src/heap-profiler.h | 13 +-
 deps/v8/src/heap.cc | 325 +-
 deps/v8/src/heap.h | 59 +-
 deps/v8/src/hydrogen-instructions.cc | 249 +-
 deps/v8/src/hydrogen-instructions.h | 466 +-
 deps/v8/src/hydrogen.cc | 2015 +++++--
 deps/v8/src/hydrogen.h | 147 +-
 deps/v8/src/ia32/assembler-ia32.h | 3 +
 deps/v8/src/ia32/builtins-ia32.cc | 20 +-
 deps/v8/src/ia32/code-stubs-ia32.cc | 150 +-
 deps/v8/src/ia32/codegen-ia32.cc | 22 +-
 deps/v8/src/ia32/debug-ia32.cc | 39 +-
 deps/v8/src/ia32/deoptimizer-ia32.cc | 98 +-
 deps/v8/src/ia32/frames-ia32.h | 6 +
 deps/v8/src/ia32/full-codegen-ia32.cc | 307 +-
 deps/v8/src/ia32/ic-ia32.cc | 202 +-
 deps/v8/src/ia32/lithium-codegen-ia32.cc | 582 +-
 deps/v8/src/ia32/lithium-codegen-ia32.h | 33 +-
 deps/v8/src/ia32/lithium-gap-resolver-ia32.cc | 4 +-
 deps/v8/src/ia32/lithium-ia32.cc | 141 +-
 deps/v8/src/ia32/lithium-ia32.h | 106 +-
 deps/v8/src/ia32/macro-assembler-ia32.cc | 174 +-
 deps/v8/src/ia32/macro-assembler-ia32.h | 26 +-
 .../src/ia32/regexp-macro-assembler-ia32.cc | 237 +-
 .../v8/src/ia32/regexp-macro-assembler-ia32.h | 24 +-
 deps/v8/src/ia32/simulator-ia32.h | 8 +-
 deps/v8/src/ia32/stub-cache-ia32.cc | 536 +-
 deps/v8/src/ic.cc | 251 +-
 deps/v8/src/ic.h | 28 +-
 deps/v8/src/incremental-marking-inl.h | 26 +-
 deps/v8/src/incremental-marking.cc | 56 +-
 deps/v8/src/incremental-marking.h | 15 +-
 deps/v8/src/interface.cc | 43 +-
 deps/v8/src/interface.h | 65 +-
 deps/v8/src/interpreter-irregexp.cc | 87 +-
 deps/v8/src/isolate.cc | 29 +-
 deps/v8/src/isolate.h | 49 +-
 deps/v8/src/json-parser.h | 16 +-
 deps/v8/src/jsregexp.cc | 2271 +++++---
 deps/v8/src/jsregexp.h | 590 +-
 deps/v8/src/lazy-instance.h | 10 +-
 deps/v8/src/list-inl.h | 73 +-
 deps/v8/src/list.h | 66 +-
 deps/v8/src/lithium-allocator.cc | 89 +-
 deps/v8/src/lithium-allocator.h | 1 +
 deps/v8/src/lithium.cc | 49 +-
 deps/v8/src/lithium.h | 86 +-
 deps/v8/src/liveedit-debugger.js | 5 +
 deps/v8/src/liveedit.cc | 105 +-
 deps/v8/src/liveedit.h | 4 +-
 deps/v8/src/log.cc | 15 +-
 deps/v8/src/log.h | 2 +
 deps/v8/src/macros.py | 10 +
 deps/v8/src/mark-compact-inl.h | 28 +-
 deps/v8/src/mark-compact.cc | 340 +-
 deps/v8/src/mark-compact.h | 66 +-
 deps/v8/src/math.js | 1 -
 deps/v8/src/messages.js | 230 +-
 deps/v8/src/mips/builtins-mips.cc | 9 +-
 deps/v8/src/mips/code-stubs-mips.cc | 99 +-
 deps/v8/src/mips/codegen-mips.cc | 4 +-
 deps/v8/src/mips/constants-mips.h | 5 -
 deps/v8/src/mips/debug-mips.cc | 4 +-
 deps/v8/src/mips/full-codegen-mips.cc | 297 +-
 deps/v8/src/mips/ic-mips.cc | 68 +-
 deps/v8/src/mips/lithium-codegen-mips.cc | 391 +-
 deps/v8/src/mips/lithium-codegen-mips.h | 30 +-
 deps/v8/src/mips/lithium-gap-resolver-mips.cc | 4 +-
 deps/v8/src/mips/lithium-mips.cc | 147 +-
 deps/v8/src/mips/lithium-mips.h | 96 +-
 deps/v8/src/mips/macro-assembler-mips.cc | 101 +-
 deps/v8/src/mips/macro-assembler-mips.h | 18 +-
 .../src/mips/regexp-macro-assembler-mips.cc | 275 +-
 .../v8/src/mips/regexp-macro-assembler-mips.h | 22 +-
 deps/v8/src/mips/simulator-mips.cc | 9 +-
 deps/v8/src/mips/simulator-mips.h | 10 +-
 deps/v8/src/mips/stub-cache-mips.cc | 389 +-
 deps/v8/src/mirror-debugger.js | 88 +-
 deps/v8/src/mksnapshot.cc | 6 +-
 deps/v8/src/objects-debug.cc | 102 +-
 deps/v8/src/objects-inl.h | 600 +-
 deps/v8/src/objects-printer.cc | 48 +-
 deps/v8/src/objects-visiting-inl.h | 4 +-
 deps/v8/src/objects-visiting.cc | 1 +
 deps/v8/src/objects-visiting.h | 17 +
 deps/v8/src/objects.cc | 1776 +++---
 deps/v8/src/objects.h | 549 +-
 deps/v8/src/parser.cc | 371 +-
 deps/v8/src/parser.h | 29 +-
 deps/v8/src/platform-cygwin.cc | 46 +-
 deps/v8/src/platform-freebsd.cc | 47 +-
 deps/v8/src/platform-linux.cc | 86 +-
 deps/v8/src/platform-macos.cc | 38 +-
 deps/v8/src/platform-nullos.cc | 5 +
 deps/v8/src/platform-openbsd.cc | 38 +-
 deps/v8/src/platform-posix.cc | 65 +-
 deps/v8/src/platform-posix.h | 5 +-
 deps/v8/src/platform-solaris.cc | 46 +-
 deps/v8/src/platform-win32.cc | 128 +-
 deps/v8/src/platform.h | 6 +-
 deps/v8/src/preparser.cc | 10 +-
 deps/v8/src/preparser.h | 15 +-
 deps/v8/src/profile-generator-inl.h | 29 +
 deps/v8/src/profile-generator.cc | 1976 +++---
 deps/v8/src/profile-generator.h | 371 +-
 deps/v8/src/property-details.h | 5 +-
 deps/v8/src/property.cc | 7 -
 deps/v8/src/property.h | 17 -
 .../src/regexp-macro-assembler-irregexp-inl.h | 10 +
 .../v8/src/regexp-macro-assembler-irregexp.cc | 50 +-
 deps/v8/src/regexp-macro-assembler-irregexp.h | 13 +-
 deps/v8/src/regexp-macro-assembler-tracer.cc | 132 +-
 deps/v8/src/regexp-macro-assembler-tracer.h | 10 +-
 deps/v8/src/regexp-macro-assembler.cc | 15 +-
 deps/v8/src/regexp-macro-assembler.h | 43 +-
 deps/v8/src/regexp.js | 28 +-
 deps/v8/src/rewriter.cc | 4 +-
 deps/v8/src/runtime-profiler.cc | 66 +-
 deps/v8/src/runtime-profiler.h | 10 +-
 deps/v8/src/runtime.cc | 1183 ++--
 deps/v8/src/runtime.h | 13 +-
 deps/v8/src/safepoint-table.cc | 19 +-
 deps/v8/src/safepoint-table.h | 19 +-
 deps/v8/src/scanner.cc | 18 +-
 deps/v8/src/scanner.h | 9 +-
 deps/v8/src/scopeinfo.cc | 16 +-
 deps/v8/src/scopes.cc | 196 +-
 deps/v8/src/scopes.h | 53 +-
 deps/v8/src/serialize.cc | 2 +-
 deps/v8/src/small-pointer-list.h | 32 +-
 deps/v8/src/spaces-inl.h | 20 +-
 deps/v8/src/spaces.cc | 60 +-
 deps/v8/src/spaces.h | 21 +-
 deps/v8/src/splay-tree-inl.h | 15 +-
 deps/v8/src/splay-tree.h | 23 +-
 deps/v8/src/string-stream.cc | 6 +-
 deps/v8/src/string.js | 151 +-
 deps/v8/src/stub-cache.cc | 61 +-
 deps/v8/src/stub-cache.h | 44 +-
 deps/v8/src/type-info.cc | 19 +-
 deps/v8/src/type-info.h | 6 +-
 deps/v8/src/utils.cc | 15 +
 deps/v8/src/utils.h | 26 +
 deps/v8/src/v8.cc | 16 +-
 deps/v8/src/v8.h | 1 +
 deps/v8/src/v8globals.h | 4 +
 deps/v8/src/v8natives.js | 4 +-
 deps/v8/src/version.cc | 6 +-
 deps/v8/src/x64/assembler-x64.h | 3 +-
 deps/v8/src/x64/builtins-x64.cc | 9 +-
 deps/v8/src/x64/code-stubs-x64.cc | 105 +-
 deps/v8/src/x64/codegen-x64.cc | 4 +-
 deps/v8/src/x64/debug-x64.cc | 15 +-
 deps/v8/src/x64/deoptimizer-x64.cc | 42 +-
 deps/v8/src/x64/disasm-x64.cc | 8 +-
 deps/v8/src/x64/full-codegen-x64.cc | 442 +-
 deps/v8/src/x64/ic-x64.cc | 36 +-
 deps/v8/src/x64/lithium-codegen-x64.cc | 489 +-
 deps/v8/src/x64/lithium-codegen-x64.h | 33 +-
 deps/v8/src/x64/lithium-gap-resolver-x64.cc | 4 +-
 deps/v8/src/x64/lithium-x64.cc | 139 +-
 deps/v8/src/x64/lithium-x64.h | 102 +-
 deps/v8/src/x64/macro-assembler-x64.cc | 108 +-
 deps/v8/src/x64/macro-assembler-x64.h | 11 +-
 deps/v8/src/x64/regexp-macro-assembler-x64.cc | 249 +-
 deps/v8/src/x64/regexp-macro-assembler-x64.h | 38 +-
 deps/v8/src/x64/simulator-x64.h | 8 +-
 deps/v8/src/x64/stub-cache-x64.cc | 326 +-
 deps/v8/src/zone-inl.h | 21 +-
 deps/v8/src/zone.h | 71 +-
 deps/v8/test/cctest/cctest.status | 15 +-
 deps/v8/test/cctest/test-accessors.cc | 9 +-
 deps/v8/test/cctest/test-alloc.cc | 36 +-
 deps/v8/test/cctest/test-api.cc | 522 +-
 deps/v8/test/cctest/test-dataflow.cc | 2 +-
 deps/v8/test/cctest/test-debug.cc | 5 +-
 deps/v8/test/cctest/test-decls.cc | 16 +-
 deps/v8/test/cctest/test-disasm-arm.cc | 8 +
 deps/v8/test/cctest/test-disasm-x64.cc | 1 +
 deps/v8/test/cctest/test-double.cc | 15 -
 .../test/cctest/test-func-name-inference.cc | 38 +
 deps/v8/test/cctest/test-heap-profiler.cc | 562 +-
 deps/v8/test/cctest/test-heap.cc | 243 +-
 deps/v8/test/cctest/test-list.cc | 14 +-
 deps/v8/test/cctest/test-liveedit.cc | 3 +-
 deps/v8/test/cctest/test-mark-compact.cc | 10 +-
 deps/v8/test/cctest/test-parsing.cc | 5 +-
 deps/v8/test/cctest/test-regexp.cc | 253 +-
 deps/v8/test/cctest/test-spaces.cc | 8 +-
 deps/v8/test/cctest/test-strings.cc | 118 +-
 .../v8/test/cctest/test-thread-termination.cc | 4 +
 deps/v8/test/cctest/test-weakmaps.cc | 80 +-
 deps/v8/test/message/message.status | 2 +-
 deps/v8/test/mjsunit/accessor-map-sharing.js | 176 +
 .../mjsunit/array-bounds-check-removal.js | 145 +
 .../mjsunit/array-construct-transition.js | 6 +-
 .../test/mjsunit/array-literal-transitions.js | 20 +-
 .../mjsunit/compiler/alloc-object-huge.js | 2 +-
 .../test/mjsunit/compiler/inline-arguments.js | 67 +
 .../test/mjsunit/compiler/inline-construct.js | 6 +-
 deps/v8/test/mjsunit/compiler/literals.js | 24 +-
 .../test/mjsunit/compiler/optimize-bitnot.js | 42 +
 deps/v8/test/mjsunit/date-parse.js | 3 +
 .../debug-evaluate-locals-optimized-double.js | 17 +-
 .../debug-evaluate-locals-optimized.js | 17 +-
 deps/v8/test/mjsunit/debug-function-scopes.js | 162 +
 .../mjsunit/debug-liveedit-stack-padding.js | 88 +
 deps/v8/test/mjsunit/debug-scripts-request.js | 6 +-
 .../mjsunit/debug-stepin-builtin-callback.js | 157 +
 deps/v8/test/mjsunit/declare-locally.js | 6 +-
 deps/v8/test/mjsunit/elements-kind.js | 12 +-
 .../mjsunit/elements-transition-hoisting.js | 46 +-
 deps/v8/test/mjsunit/elements-transition.js | 10 +-
 deps/v8/test/mjsunit/error-constructors.js | 101 +-
 deps/v8/test/mjsunit/external-array.js | 80 +
 deps/v8/test/mjsunit/fast-array-length.js | 37 +
 deps/v8/test/mjsunit/fast-non-keyed.js | 113 +
 deps/v8/test/mjsunit/fast-prototype.js | 113 +
 .../mjsunit/harmony/debug-function-scopes.js | 115 +
 .../v8/test/mjsunit/harmony/module-linking.js | 121 +
 .../v8/test/mjsunit/harmony/module-parsing.js | 10 +-
 .../test/mjsunit/harmony/module-resolution.js | 2 +-
 deps/v8/test/mjsunit/harmony/proxies.js | 48 +-
 deps/v8/test/mjsunit/math-floor-of-div.js | 216 +
 deps/v8/test/mjsunit/mjsunit.js | 2 +-
 deps/v8/test/mjsunit/mjsunit.status | 14 +-
 .../mjsunit/override-read-only-property.js | 10 +-
 deps/v8/test/mjsunit/packed-elements.js | 112 +
 deps/v8/test/mjsunit/readonly.js | 228 +
 deps/v8/test/mjsunit/regexp-capture-3.js | 191 +-
 deps/v8/test/mjsunit/regexp-capture.js | 2 +
 deps/v8/test/mjsunit/regexp-global.js | 141 +
 deps/v8/test/mjsunit/regexp.js | 11 +
 deps/v8/test/mjsunit/regress/regress-1119.js | 12 +-
 .../v8/test/mjsunit/regress/regress-115452.js | 19 +-
 deps/v8/test/mjsunit/regress/regress-1170.js | 64 +-
 .../v8/test/mjsunit/regress/regress-117409.js | 2 +-
 .../v8/test/mjsunit/regress/regress-119609.js | 71 +
 .../test/mjsunit/regress/regress-1199637.js | 4 +-
 .../v8/test/mjsunit/regress/regress-120099.js | 40 +
 deps/v8/test/mjsunit/regress/regress-1217.js | 2 +-
 .../v8/test/mjsunit/regress/regress-123512.js | 78 +
 .../v8/test/mjsunit/regress/regress-123919.js | 47 +
 .../v8/test/mjsunit/regress/regress-126412.js | 33 +
 .../v8/test/mjsunit/regress/regress-128146.js | 33 +
 .../v8/test/mjsunit/regress/regress-131923.js | 30 +
 .../v8/test/mjsunit/regress/regress-1639-2.js | 5 +-
 deps/v8/test/mjsunit/regress/regress-1639.js | 22 +-
 deps/v8/test/mjsunit/regress/regress-1849.js | 6 +-
 deps/v8/test/mjsunit/regress/regress-1878.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-2030.js | 53 +
 deps/v8/test/mjsunit/regress/regress-2032.js | 64 +
 deps/v8/test/mjsunit/regress/regress-2034.js | 46 +
 deps/v8/test/mjsunit/regress/regress-2054.js | 34 +
 deps/v8/test/mjsunit/regress/regress-2055.js | 48 +
 deps/v8/test/mjsunit/regress/regress-2058.js | 37 +
 deps/v8/test/mjsunit/regress/regress-2110.js | 53 +
 deps/v8/test/mjsunit/regress/regress-2153.js | 32 +
 deps/v8/test/mjsunit/regress/regress-2163.js | 70 +
 deps/v8/test/mjsunit/regress/regress-2170.js | 58 +
 deps/v8/test/mjsunit/regress/regress-334.js | 2 +-
 .../mjsunit/regress/regress-crbug-122271.js | 8 +-
 .../mjsunit/regress/regress-deep-proto.js | 45 +
 .../regress-fast-literal-transition.js | 62 +
 .../regress/regress-iteration-order.js | 42 +
 .../regress/regress-smi-only-concat.js | 4 +-
 .../mjsunit/regress/regress-transcendental.js | 49 +
 deps/v8/test/mjsunit/stack-traces.js | 14 +
 deps/v8/test/mjsunit/try-finally-continue.js | 72 +
 deps/v8/test/mjsunit/unbox-double-arrays.js | 7 +-
 .../mjsunit/unicodelctest-no-optimization.js | 4914 +++++++++++++++++
 deps/v8/test/mjsunit/unicodelctest.js | 4912 ++++++++++++++++
 deps/v8/test/mjsunit/with-readonly.js | 6 +-
 deps/v8/test/mozilla/mozilla.status | 28 +-
 deps/v8/test/mozilla/testcfg.py | 1 +
 deps/v8/test/sputnik/sputnik.status | 40 +-
 deps/v8/test/test262/README | 4 +-
 deps/v8/test/test262/test262.status | 23 +-
 deps/v8/test/test262/testcfg.py | 51 +-
 deps/v8/tools/fuzz-harness.sh | 92 +
 deps/v8/tools/grokdump.py | 728 ++-
 deps/v8/tools/gyp/v8.gyp | 63 +-
 deps/v8/tools/js2c.py | 6 +-
 deps/v8/tools/jsmin.py | 4 +-
 deps/v8/tools/presubmit.py | 5 +-
 deps/v8/tools/push-to-trunk.sh | 9 +
 deps/v8/tools/test-wrapper-gypbuild.py | 17 +-
 deps/v8/tools/test.py | 9 -
 373 files changed, 36026 insertions(+), 11559 deletions(-)
 create mode 100644 deps/v8/DEPS
 delete mode 100644 deps/v8/build/armu.gypi
 delete mode 100644 deps/v8/build/mipsu.gypi
 create mode 100644 deps/v8/src/elements-kind.cc
 create mode 100644 deps/v8/src/elements-kind.h
 create mode 100644 deps/v8/test/mjsunit/accessor-map-sharing.js
 create mode 100644 deps/v8/test/mjsunit/array-bounds-check-removal.js
 create mode 100644 deps/v8/test/mjsunit/compiler/optimize-bitnot.js
 create mode 100644 deps/v8/test/mjsunit/debug-function-scopes.js
 create mode 100644 deps/v8/test/mjsunit/debug-liveedit-stack-padding.js
 create mode 100644 deps/v8/test/mjsunit/debug-stepin-builtin-callback.js
 create mode 100644 deps/v8/test/mjsunit/fast-array-length.js
 create mode 100644 deps/v8/test/mjsunit/fast-non-keyed.js
 create mode 100644 deps/v8/test/mjsunit/fast-prototype.js
 create mode 100644 deps/v8/test/mjsunit/harmony/debug-function-scopes.js
 create mode 100644 deps/v8/test/mjsunit/harmony/module-linking.js
 create mode 100644 deps/v8/test/mjsunit/math-floor-of-div.js
 create mode 100644 deps/v8/test/mjsunit/packed-elements.js
 create mode 100644 deps/v8/test/mjsunit/readonly.js
 create mode 100644 deps/v8/test/mjsunit/regexp-global.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-119609.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-120099.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-123512.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-123919.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-126412.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-128146.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-131923.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-2030.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-2032.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-2034.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-2054.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-2055.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-2058.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-2110.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-2153.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-2163.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-2170.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-deep-proto.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-fast-literal-transition.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-iteration-order.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-transcendental.js
 create mode 100644 deps/v8/test/mjsunit/try-finally-continue.js
 create mode 100644 deps/v8/test/mjsunit/unicodelctest-no-optimization.js
 create mode 100644 deps/v8/test/mjsunit/unicodelctest.js
 create mode 100644 deps/v8/tools/fuzz-harness.sh

diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index dfefad129f..6e46b3d621 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -23,6 +23,7 @@ Daniel James
 Dineel D Sule
 Erich Ocean
 Fedor Indutny
+Filipe David Manana
 Ioseb Dzmanashvili
 Jan de Mooij
 Jay Freeman
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 2240ec0e68..fae15e58ee 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,314 @@
+2012-06-13: Version 3.11.10
+
+        Implemented heap profiler memory usage reporting.
+
+        Preserved error message during finally block in try..finally.
+        (Chromium issue 129171)
+
+        Fixed EnsureCanContainElements to properly handle double values.
+        (issue 2170)
+
+        Improved heuristics to keep objects in fast mode with inherited
+        constructors.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-06-06: Version 3.11.9
+
+        Implemented ES5-conformant semantics for inherited setters and read-only
+        properties. Currently behind --es5_readonly flag, because it breaks
+        WebKit bindings.
+
+        Exposed last seen heap object id via v8 public api.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-05-31: Version 3.11.8
+
+        Avoid overdeep recursion in regexp where a guarded expression with a
+        minimum repetition count is inside another quantifier.
+        (Chromium issue 129926)
+
+        Fixed missing write barrier in store field stub.
+        (issues 2143, 1465, Chromium issue 129355)
+
+        Proxies: Fixed receiver for setters inherited from proxies.
+        Proxies: Fixed ToStringArray function so that it does not reject some
+        keys.
+        (issue 1543)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-05-29: Version 3.11.7
+
+        Get better function names in stack traces.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-05-24: Version 3.11.6
+
+        Fixed RegExp.prototype.toString for incompatible receivers
+        (issue 1981).
+
+        Performance and stability improvements on all platforms.
+
+
+2012-05-23: Version 3.11.5
+
+        Performance and stability improvements on all platforms.
+
+
+2012-05-22: Version 3.11.4
+
+        Some cleanup to common.gypi. This fixes some host/target combinations
+        that weren't working in the Make build on Mac.
+
+        Handle EINTR in socket functions and continue incomplete sends.
+        (issue 2098)
+
+        Fixed python deprecations. (issue 1391)
+
+        Made socket send and receive more robust and return 0 on failure.
+        (Chromium issue 15719)
+
+        Fixed GCC 4.7 (C++11) compilation. (issue 2136)
+
+        Set '-m32' option for host and target platforms
+
+        Performance and stability improvements on all platforms.
+
+
+2012-05-18: Version 3.11.3
+
+        Disable optimization for functions that have scopes that cannot be
+        reconstructed from the context chain. (issue 2071)
+
+        Define V8_EXPORT to nothing for clients of v8. (Chromium issue 90078)
+
+        Correctly check for native error objects. (Chromium issue 2138)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-05-16: Version 3.11.2
+
+        Revert r11496. (Chromium issue 128146)
+
+        Implement map collection for incremental marking.
+        (issue 1465)
+
+        Add toString method to CallSite (which describes a frame of the
+        stack trace).
+
+
+2012-05-15: Version 3.11.1
+
+        Added a readbuffer function to d8 that reads a file into an ArrayBuffer.
+
+        Fix freebsd build. (V8 issue 2126)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-05-11: Version 3.11.0
+
+        Fixed compose-discard crasher from r11524 (issue 2123).
+
+        Activated new global semantics by default. Global variables can
+        now shadow properties of the global object (ES5.1 erratum).
+
+        Properly set ElementsKind of empty FAST_DOUBLE_ELEMENTS arrays when
+        transitioning (Chromium issue 117409).
+
+        Made Error.prototype.name writable again, as required by the spec and
+        the web (Chromium issue 69187).
+
+        Implemented map collection with incremental marking (issue 1465).
+
+        Regexp: Fixed overflow in min-match-length calculation
+        (Chromium issue 126412).
+
+        MIPS: Fixed illegal instruction use on Loongson in code for
+        Math.random() (issue 2115).
+
+        Fixed crash bug in VisitChoice (Chromium issue 126272).
+
+        Fixed unsigned-Smi check in MappedArgumentsLookup
+        (Chromium issue 126414).
+
+        Fixed LiveEdit for function with no locals (issue 825).
+
+        Fixed register clobbering in LoadIC for interceptors
+        (Chromium issue 125988).
+
+        Implemented clearing of CompareICs (issue 2102).
+
+        Performance and stability improvements on all platforms.
+
+
+2012-05-03: Version 3.10.8
+
+        Enabled MIPS cross-compilation.
+
+        Ensured reload of elements pointer in StoreFastDoubleElement stub.
+        (Chromium issue 125515)
+
+        Fixed corner cases in truncation behavior when storing to
+        TypedArrays. (issue 2110)
+
+        Fixed failure to properly recognize and report out-of-memory
+        conditions when allocating code space pages. (Chromium issue
+        118625)
+
+        Fixed idle notifications to perform a round of incremental GCs
+        after context disposal. (issue 2107)
+
+        Fixed preparser for try statement. (issue 2109)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-04-30: Version 3.10.7
+
+        Performance and stability improvements on all platforms.
+
+
+2012-04-26: Version 3.10.6
+
+        Fixed some bugs in accessing details of the last regexp match.
+
+        Fixed source property of empty RegExp objects. (issue 1982)
+
+        Enabled inlining some V8 API functions.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-04-23: Version 3.10.5
+
+        Put new global var semantics behind a flag until WebKit tests are
+        cleaned up.
+
+        Enabled stepping into callback passed to builtins.
+        (Chromium issue 109564)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-04-19: Version 3.10.4
+
+        Fixed issues when stressing compaction with WeakMaps.
+
+        Fixed missing GVN flag for new-space promotion. (Chromium issue 123919)
+
+        Simplify invocation sequence at monomorphic function invocation sites.
+        (issue 2079)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-04-17: Version 3.10.3
+
+        Fixed several bugs in heap profiles (including issue 2078).
+
+        Throw syntax errors on illegal escape sequences.
+
+        Implemented rudimentary module linking (behind --harmony flag)
+
+        Implemented ES5 erratum: Global declarations should shadow
+        inherited properties.
+
+        Made handling of const more consistent when combined with 'eval'
+        and 'with'.
+
+        Fixed V8 on MinGW-x64 (issue 2026).
+
+        Performance and stability improvements on all platforms.
+
+
+2012-04-13: Version 3.10.2
+
+        Fixed native ARM build (issues 1744, 539)
+
+        Return LOOKUP variable instead of CONTEXT for non-context allocated
+        outer scope parameters (Chromium issue 119609).
+
+        Fixed regular and ElementsKind transitions interfering with each other
+        (Chromium issue 122271).
+
+        Improved performance of keyed loads/stores which have a HeapNumber
+        index (issues 1388, 1295).
+
+        Fixed WeakMap processing for evacuation candidates (issue 2060).
+
+        Bailout on possible direct eval calls (Chromium issue 122681).
+
+        Do not assume that names of function expressions are context-allocated
+        (issue 2051).
+
+        Performance and stability improvements on all platforms.
+
+
+2012-04-10: Version 3.10.1
+
+        Fixed bug with arguments object in inlined functions (issue 2045).
+
+        Fixed performance bug with lazy initialization (Chromium issue
+        118686).
+
+        Added suppport for Mac OS X 64bit builds with GYP.
+        (Patch contributed by Filipe David Manana )
+
+        Fixed bug with hidden properties (issue 2034).
+
+        Fixed a performance bug when reloading pages (Chromium issue 117767,
+        V8 issue 1902).
+
+        Fixed bug when optimizing throw in top-level code (issue 2054).
+
+        Fixed two bugs with array literals (issue 2055, Chromium issue 121407).
+
+        Fixed bug with Math.min/Math.max with NaN inputs (issue 2056).
+
+        Fixed a bug with the new runtime profiler (Chromium issue 121147).
+
+        Fixed compilation of V8 using uClibc.
+
+        Optimized boot-up memory use.
+
+        Optimized regular expressions.
+
+
+2012-03-30: Version 3.10.0
+
+        Fixed store IC writability check in strict mode
+        (Chromium issue 120099).
+
+        Resynchronize timers if the Windows system time was changed.
+        (Chromium issue 119815)
+
+        Removed "-mfloat-abi=hard" from host compiler cflags when building for
+        hardfp ARM
+        (https://code.google.com/p/chrome-os-partner/issues/detail?id=8539)
+
+        Fixed edge case for case independent regexp character classes
+        (issue 2032).
+
+        Reset function info counters after context disposal.
+        (Chromium issue 117767, V8 issue 1902)
+
+        Fixed missing write barrier in CopyObjectToObjectElements.
+        (Chromium issue 119926)
+
+        Fixed missing bounds check in HasElementImpl.
+        (Chromium issue 119925)
+
+        Performance and stability improvements on all platforms.
+
+
 2012-03-23: Version 3.9.24
 
         Activated count-based profiler for ARM.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
new file mode 100644
index 0000000000..e50d1d20f6
--- /dev/null
+++ b/deps/v8/DEPS
@@ -0,0 +1,27 @@
+# Note: The buildbots evaluate this file with CWD set to the parent
+# directory and assume that the root of the checkout is in ./v8/, so
+# all paths in here must match this assumption.
+
+deps = {
+  # Remember to keep the revision in sync with the Makefile.
+  "v8/build/gyp":
+    "http://gyp.googlecode.com/svn/trunk@1282",
+}
+
+deps_os = {
+  "win": {
+    "v8/third_party/cygwin":
+      "http://src.chromium.org/svn/trunk/deps/third_party/cygwin@66844",
+
+    "v8/third_party/python_26":
+      "http://src.chromium.org/svn/trunk/tools/third_party/python_26@89111",
+  }
+}
+
+hooks = [
+  {
+    # A change to a .gyp, .gypi, or to GYP itself should run the generator.
+    "pattern": ".",
+    "action": ["python", "v8/build/gyp_v8"],
+  },
+]
diff --git a/deps/v8/Makefile b/deps/v8/Makefile
index 2f86c512e4..0d825c0795 100644
--- a/deps/v8/Makefile
+++ b/deps/v8/Makefile
@@ -150,21 +150,21 @@ $(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
 $(ARCHES): $(addprefix $$@.,$(MODES))
 
 # Defines how to build a particular target (e.g. ia32.release).
-$(BUILDS): $(OUTDIR)/Makefile-$$(basename $$@)
-	@$(MAKE) -C "$(OUTDIR)" -f Makefile-$(basename $@) \
+$(BUILDS): $(OUTDIR)/Makefile.$$(basename $$@)
+	@$(MAKE) -C "$(OUTDIR)" -f Makefile.$(basename $@) \
 	            CXX="$(CXX)" LINK="$(LINK)" \
 	            BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
 	                        python -c "print raw_input().capitalize()") \
 	            builddir="$(shell pwd)/$(OUTDIR)/$@"
 
-native: $(OUTDIR)/Makefile-native
-	@$(MAKE) -C "$(OUTDIR)" -f Makefile-native \
+native: $(OUTDIR)/Makefile.native
+	@$(MAKE) -C "$(OUTDIR)" -f Makefile.native \
 	            CXX="$(CXX)" LINK="$(LINK)" BUILDTYPE=Release \
 	            builddir="$(shell pwd)/$(OUTDIR)/$@"
 
 # TODO(jkummerow): add "android.debug" when we need it.
-android android.release: $(OUTDIR)/Makefile-android
-	@$(MAKE) -C "$(OUTDIR)" -f Makefile-android \
+android android.release: $(OUTDIR)/Makefile.android
+	@$(MAKE) -C "$(OUTDIR)" -f Makefile.android \
 	            CXX="$(ANDROID_TOOL_PREFIX)-g++" \
 	            AR="$(ANDROID_TOOL_PREFIX)-ar" \
 	            RANLIB="$(ANDROID_TOOL_PREFIX)-ranlib" \
@@ -197,55 +197,41 @@ native.check: native
 	    --arch-and-mode=. $(TESTFLAGS)
 
 # Clean targets. You can clean each architecture individually, or everything.
-$(addsuffix .clean,$(ARCHES)):
-	rm -f $(OUTDIR)/Makefile-$(basename $@)
+$(addsuffix .clean,$(ARCHES)) android.clean:
+	rm -f $(OUTDIR)/Makefile.$(basename $@)
 	rm -rf $(OUTDIR)/$(basename $@).release
 	rm -rf $(OUTDIR)/$(basename $@).debug
-	find $(OUTDIR) -regex '.*\(host\|target\)-$(basename $@)\.mk' -delete
+	find $(OUTDIR) -regex '.*\(host\|target\).$(basename $@)\.mk' -delete
 
 native.clean:
-	rm -f $(OUTDIR)/Makefile-native
+	rm -f $(OUTDIR)/Makefile.native
 	rm -rf $(OUTDIR)/native
-	find $(OUTDIR) -regex '.*\(host\|target\)-native\.mk' -delete
+	find $(OUTDIR) -regex '.*\(host\|target\).native\.mk' -delete
 
-android.clean:
-	rm -f $(OUTDIR)/Makefile-android
-	rm -rf $(OUTDIR)/android.release
-	find $(OUTDIR) -regex '.*\(host\|target\)-android\.mk' -delete
-
-clean: $(addsuffix .clean,$(ARCHES)) native.clean
+clean: $(addsuffix .clean,$(ARCHES)) native.clean android.clean
 
 # GYP file generation targets.
-$(OUTDIR)/Makefile-ia32: $(GYPFILES) $(ENVFILE)
-	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-	              -Ibuild/standalone.gypi --depth=. -Dtarget_arch=ia32 \
-	              -S-ia32 $(GYPFLAGS)
-
-$(OUTDIR)/Makefile-x64: $(GYPFILES) $(ENVFILE)
-	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-	              -Ibuild/standalone.gypi --depth=. -Dtarget_arch=x64 \
-	              -S-x64 $(GYPFLAGS)
-
-$(OUTDIR)/Makefile-arm: $(GYPFILES) $(ENVFILE) build/armu.gypi
-	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-	              -Ibuild/standalone.gypi --depth=. -Ibuild/armu.gypi \
-	              -S-arm $(GYPFLAGS)
-
-$(OUTDIR)/Makefile-mips: $(GYPFILES) $(ENVFILE) build/mipsu.gypi
+MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ARCHES))
+$(MAKEFILES): $(GYPFILES) $(ENVFILE)
+	GYP_GENERATORS=make \
 	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-	              -Ibuild/standalone.gypi --depth=. -Ibuild/mipsu.gypi \
-	              -S-mips $(GYPFLAGS)
+	              -Ibuild/standalone.gypi --depth=. \
+	              -Dv8_target_arch=$(subst .,,$(suffix $@)) \
+	              -S.$(subst .,,$(suffix $@)) $(GYPFLAGS)
 
-$(OUTDIR)/Makefile-native: $(GYPFILES) $(ENVFILE)
+$(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
+	GYP_GENERATORS=make \
 	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-	              -Ibuild/standalone.gypi --depth=. -S-native $(GYPFLAGS)
+	              -Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS)
 
-$(OUTDIR)/Makefile-android: $(GYPFILES) $(ENVFILE) build/android.gypi \
+$(OUTDIR)/Makefile.android: $(GYPFILES) $(ENVFILE) build/android.gypi \
                             must-set-ANDROID_NDK_ROOT
+	GYP_GENERATORS=make \
 	CC="${ANDROID_TOOL_PREFIX}-gcc" \
+	CXX="${ANDROID_TOOL_PREFIX}-g++" \
 	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
 	              -Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
-	              -S-android $(GYPFLAGS)
+	              -S.android $(GYPFLAGS)
 
 must-set-ANDROID_NDK_ROOT:
 ifndef ANDROID_NDK_ROOT
@@ -261,9 +247,10 @@ $(ENVFILE): $(ENVFILE).new
 # Stores current GYPFLAGS in a file.
 $(ENVFILE).new:
-	@mkdir -p $(OUTDIR); echo "GYPFLAGS=$(GYPFLAGS)" > $(ENVFILE).new;
+	@mkdir -p $(OUTDIR); echo "GYPFLAGS=$(GYPFLAGS)" > $(ENVFILE).new; \
+	echo "CXX=$(CXX)" >> $(ENVFILE).new
 
 # Dependencies.
 dependencies:
 	svn checkout --force http://gyp.googlecode.com/svn/trunk build/gyp \
-	    --revision 1026
+	    --revision 1282
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index b0d1344700..ebce7ff892 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -101,14 +101,14 @@ LIBRARY_FLAGS = {
     'os:linux': {
       'CCFLAGS': ['-ansi'] + GCC_EXTRA_CCFLAGS,
       'library:shared': {
-        'CPPDEFINES': ['V8_SHARED'],
+        'CPPDEFINES': ['V8_SHARED', 'BUILDING_V8_SHARED'],
         'LIBS': ['pthread']
      }
    },
    'os:macos': {
      'CCFLAGS': ['-ansi', '-mmacosx-version-min=10.4'],
      'library:shared': {
-        'CPPDEFINES': ['V8_SHARED']
+        'CPPDEFINES': ['V8_SHARED', 'BUILDING_V8_SHARED'],
      }
    },
    'os:freebsd': {
diff --git a/deps/v8/build/armu.gypi b/deps/v8/build/armu.gypi
deleted file mode 100644
index d15b8ab705..0000000000
--- a/deps/v8/build/armu.gypi
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-#       copyright notice, this list of conditions and the following
-#       disclaimer in the documentation and/or other materials provided
-#       with the distribution.
-#     * Neither the name of Google Inc. nor the names of its
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-{
-  'variables': {
-    'target_arch': 'ia32',
-    'v8_target_arch': 'arm',
-    'armv7': 1,
-    'arm_neon': 0,
-    'arm_fpu': 'vfpv3',
-  },
-}
diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi
index 5fa109b4d1..7f084b8c1d 100644
--- a/deps/v8/build/common.gypi
+++ b/deps/v8/build/common.gypi
@@ -110,133 +110,117 @@
     ['v8_enable_gdbjit==1', {
       'defines': ['ENABLE_GDB_JIT_INTERFACE',],
     }],
-    ['OS!="mac"', {
-      # TODO(mark): The OS!="mac" conditional is temporary. It can be
-      # removed once the Mac Chromium build stops setting target_arch to
-      # ia32 and instead sets it to mac. Other checks in this file for
-      # OS=="mac" can be removed at that time as well. This can be cleaned
-      # up once http://crbug.com/44205 is fixed.
+    ['v8_target_arch=="arm"', {
+      'defines': [
+        'V8_TARGET_ARCH_ARM',
+      ],
       'conditions': [
-        ['v8_target_arch=="arm"', {
+        [ 'v8_can_use_unaligned_accesses=="true"', {
           'defines': [
-            'V8_TARGET_ARCH_ARM',
+            'CAN_USE_UNALIGNED_ACCESSES=1',
           ],
-          'conditions': [
-            [ 'v8_can_use_unaligned_accesses=="true"', {
-              'defines': [
-                'CAN_USE_UNALIGNED_ACCESSES=1',
-              ],
-            }],
-            [ 'v8_can_use_unaligned_accesses=="false"', {
-              'defines': [
-                'CAN_USE_UNALIGNED_ACCESSES=0',
-              ],
-            }],
-            [ 'v8_can_use_vfp_instructions=="true"', {
-              'defines': [
-                'CAN_USE_VFP_INSTRUCTIONS',
-              ],
-            }],
-            [ 'v8_use_arm_eabi_hardfloat=="true"', {
-              'defines': [
-                'USE_EABI_HARDFLOAT=1',
-                'CAN_USE_VFP_INSTRUCTIONS',
-              ],
-              'cflags': [
-                '-mfloat-abi=hard',
-              ],
-            }, {
-              'defines': [
-                'USE_EABI_HARDFLOAT=0',
-              ],
-            }],
-            # The ARM assembler assumes the host is 32 bits,
-            # so force building 32-bit host tools.
-            ['host_arch=="x64" or OS=="android"', {
-              'target_conditions': [
-                ['_toolset=="host"', {
-                  'cflags': ['-m32'],
-                  'ldflags': ['-m32'],
-                }],
-              ],
-            }],
           ],
         }],
-        ['v8_target_arch=="ia32"', {
+        [ 'v8_can_use_unaligned_accesses=="false"', {
           'defines': [
-            'V8_TARGET_ARCH_IA32',
+            'CAN_USE_UNALIGNED_ACCESSES=0',
           ],
         }],
-        ['v8_target_arch=="mips"', {
+        [ 'v8_can_use_vfp_instructions=="true"', {
           'defines': [
-            'V8_TARGET_ARCH_MIPS',
+            'CAN_USE_VFP_INSTRUCTIONS',
          ],
-          'conditions': [
-            [ 'target_arch=="mips"', {
+        }],
+        [ 'v8_use_arm_eabi_hardfloat=="true"', {
+          'defines': [
+            'USE_EABI_HARDFLOAT=1',
+            'CAN_USE_VFP_INSTRUCTIONS',
+          ],
-          'target_conditions': [
-            ['_toolset=="target"', {
-              'cflags': ['-EL'],
-              'ldflags': ['-EL'],
-              'conditions': [
-                [ 'v8_use_mips_abi_hardfloat=="true"', {
-                  'cflags': ['-mhard-float'],
-                  'ldflags': ['-mhard-float'],
-                }, {
-                  'cflags': ['-msoft-float'],
-                  'ldflags': ['-msoft-float'],
-                }],
-                ['mips_arch_variant=="mips32r2"', {
-                  'cflags': ['-mips32r2', '-Wa,-mips32r2'],
-                }],
-                ['mips_arch_variant=="loongson"', {
-                  'cflags': ['-mips3', '-Wa,-mips3'],
-                }, {
-                  'cflags': ['-mips32', '-Wa,-mips32'],
-                }],
-              ],
-            }],
-          ],
-        }],
-        [ 'v8_can_use_fpu_instructions=="true"', {
-          'defines': [
-            'CAN_USE_FPU_INSTRUCTIONS',
-          ],
+          'target_conditions': [
+            ['_toolset=="target"', {
+              'cflags': ['-mfloat-abi=hard',],
            }],
-        [ 'v8_use_mips_abi_hardfloat=="true"', {
-          'defines': [
-            '__mips_hard_float=1',
-            'CAN_USE_FPU_INSTRUCTIONS',
-          ],
-        }, {
-          'defines': [
-            '__mips_soft_float=1'
-          ],
-        }],
-        ['mips_arch_variant=="mips32r2"', {
-          'defines': ['_MIPS_ARCH_MIPS32R2',],
-        }],
-        ['mips_arch_variant=="loongson"', {
-          'defines': ['_MIPS_ARCH_LOONGSON',],
-        }],
-        # The MIPS assembler assumes the host is 32 bits,
-        # so force building 32-bit host tools.
-        ['host_arch=="x64"', {
-          'target_conditions': [
-            ['_toolset=="host"', {
-              'cflags': ['-m32'],
-              'ldflags': ['-m32'],
+          ],
+        }, {
+          'defines': [
+            'USE_EABI_HARDFLOAT=0',
+          ],
+        }],
+      ],
+    }],  # v8_target_arch=="arm"
+    ['v8_target_arch=="ia32"', {
+      'defines': [
+        'V8_TARGET_ARCH_IA32',
+      ],
+    }],  # v8_target_arch=="ia32"
+    ['v8_target_arch=="mips"', {
+      'defines': [
+        'V8_TARGET_ARCH_MIPS',
+      ],
+      'variables': {
+        'mipscompiler': '<!($(echo ${CXX:-$(which g++)}) -v 2>&1 | grep -q "^Target: mips-" && echo "yes" || echo "no")',
+      },
+      'conditions': [
+        ['mipscompiler=="yes"', {
+          'target_conditions': [
+            ['_toolset=="target"', {
+              'cflags': ['-EL'],
+              'ldflags': ['-EL'],
+              'conditions': [
+                [ 'v8_use_mips_abi_hardfloat=="true"', {
+                  'cflags': ['-mhard-float'],
+                  'ldflags': ['-mhard-float'],
+                }, {
+                  'cflags': ['-msoft-float'],
+                  'ldflags': ['-msoft-float'],
+                }],
+                ['mips_arch_variant=="mips32r2"', {
+                  'cflags': ['-mips32r2', '-Wa,-mips32r2'],
+                }],
+                ['mips_arch_variant=="loongson"', {
+                  'cflags': ['-mips3', '-Wa,-mips3'],
+                }, {
+                  'cflags': ['-mips32', '-Wa,-mips32'],
+                }],
+              ],
+            }],
+          ],
+        }],
-        ['v8_target_arch=="x64"', {
+        [ 'v8_can_use_fpu_instructions=="true"', {
           'defines': [
-            'V8_TARGET_ARCH_X64',
+            'CAN_USE_FPU_INSTRUCTIONS',
           ],
         }],
+        [ 'v8_use_mips_abi_hardfloat=="true"', {
+          'defines': [
+            '__mips_hard_float=1',
+            'CAN_USE_FPU_INSTRUCTIONS',
+          ],
+        }, {
+          'defines': [
+            '__mips_soft_float=1'
+          ],
+        }],
+        ['mips_arch_variant=="mips32r2"', {
+          'defines': ['_MIPS_ARCH_MIPS32R2',],
+        }],
+        ['mips_arch_variant=="loongson"', {
+          'defines': ['_MIPS_ARCH_LOONGSON',],
+        }],
       ],
-    }],
+    }],  # v8_target_arch=="mips"
+    ['v8_target_arch=="x64"', {
+      'defines': [
+        'V8_TARGET_ARCH_X64',
+      ],
+      'xcode_settings': {
+        'ARCHS': [ 'x86_64' ],
+      },
+      'msvs_settings': {
+        'VCLinkerTool': {
+          'StackReserveSize': '2097152',
+        },
+      },
+    }],  # v8_target_arch=="x64"
     ['v8_use_liveobjectlist=="true"', {
       'defines': [
         'ENABLE_DEBUGGER_SUPPORT',
@@ -254,6 +238,11 @@
       'defines': [
         'WIN32',
       ],
+      'msvs_configuration_attributes': {
+        'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
+        'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
+        'CharacterSet': '1',
+      },
     }],
     ['OS=="win" and v8_enable_prof==1', {
       'msvs_settings': {
         'VCLinkerTool': {
           'GenerateMapFile': 'true',
@@ -262,20 +251,9 @@
         },
       },
     }],
-    ['OS=="win" and v8_target_arch=="x64"', {
-      'msvs_settings': {
-        'VCLinkerTool': {
-          'StackReserveSize': '2097152',
-        },
-      },
-    }],
     ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
        or OS=="netbsd"', {
       'conditions': [
-        [ 'target_arch=="ia32"', {
-          'cflags': [ '-m32' ],
-          'ldflags': [ '-m32' ],
-        }],
        [ 'v8_no_strict_aliasing==1', {
          'cflags': [ '-fno-strict-aliasing' ],
        }],
      ],  # conditions
    }],
    ['OS=="solaris"', {
      'defines': [ '__C99FEATURES__=1' ],  # isinf() etc.
    }],
+    ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
+       or OS=="netbsd" or OS=="mac" or OS=="android") and \
+      (v8_target_arch=="arm" or v8_target_arch=="ia32" or \
+       v8_target_arch=="mips")', {
+      # Check whether the host compiler and target compiler support the
+      # '-m32' option and set it if so.
+      'target_conditions': [
+        ['_toolset=="host"', {
+          'variables': {
+            'm32flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo -n "-m32" || true)',
+          },
+          'cflags': [ '<(m32flag)' ],
+          'ldflags': [ '<(m32flag)' ],
+          'xcode_settings': {
+            'ARCHS': [ 'i386' ],
+          },
+        }],
+        ['_toolset=="target"', {
+          'variables': {
+            'm32flag': '<!((echo | $(echo ${CXX_target:-<(CXX)}) -m32 -E - > /dev/null 2>&1) && echo -n "-m32" || true)',
+          },
+          'cflags': [ '<(m32flag)' ],
+          'ldflags': [ '<(m32flag)' ],
+          'xcode_settings': {
+            'ARCHS': [ 'i386' ],
+          },
+        }],
+      ],
+    }],
+    ['OS=="freebsd" or OS=="openbsd"', {
+      'cflags': [ '-I/usr/local/include' ],
+    }],
+    ['OS=="netbsd"', {
+      'cflags': [ '-I/usr/pkg/include' ],
+    }],
   ],  # conditions
   'configurations': {
     'Debug': {
@@ -310,14 +323,8 @@
         },
       },
       'conditions': [
-        ['OS=="freebsd" or OS=="openbsd"', {
-          'cflags': [ '-I/usr/local/include' ],
-        }],
-        ['OS=="netbsd"', {
-          'cflags': [ '-I/usr/pkg/include' ],
-        }],
         ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
-          'cflags': [ '-Wno-unused-parameter',
+          'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
                       '-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
        }],
      ],
@@ -345,12 +352,6 @@
            }],
          ],
        }],
-        ['OS=="freebsd" or OS=="openbsd"', {
-          'cflags': [ '-I/usr/local/include' ],
-        }],
-        ['OS=="netbsd"', {
-          'cflags': [ '-I/usr/pkg/include' ],
-        }],
        ['OS=="mac"', {
          'xcode_settings': {
            'GCC_OPTIMIZATION_LEVEL': '3',  # -O3
@@ -363,11 +364,6 @@
          },
        }],  # OS=="mac"
        ['OS=="win"', {
-          'msvs_configuration_attributes': {
-            'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
-            'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
-            'CharacterSet': '1',
-          },
          'msvs_settings': {
            'VCCLCompilerTool': {
              'Optimization': '2',
diff --git a/deps/v8/build/gyp_v8 b/deps/v8/build/gyp_v8
index 6d5c126844..345f777d79 100755
--- a/deps/v8/build/gyp_v8
+++ b/deps/v8/build/gyp_v8
@@ -1,6 +1,6 @@
 #!/usr/bin/python
 #
-# Copyright 2010 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
diff --git a/deps/v8/build/mipsu.gypi b/deps/v8/build/mipsu.gypi
deleted file mode 100644
index 637ff841e4..0000000000
--- a/deps/v8/build/mipsu.gypi
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-#       copyright notice, this list of conditions and the following
-#       disclaimer in the documentation and/or other materials provided
-#       with the distribution.
-#     * Neither the name of Google Inc. nor the names of its
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-{
-  'variables': {
-    'target_arch': 'ia32',
-    'v8_target_arch': 'mips',
-  },
-}
diff --git a/deps/v8/build/standalone.gypi b/deps/v8/build/standalone.gypi
index b5707800f8..ebdf557230 100644
--- a/deps/v8/build/standalone.gypi
+++ b/deps/v8/build/standalone.gypi
@@ -37,8 +37,9 @@
   'variables': {
     'variables': {
       'conditions': [
-        ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
-          # This handles the Linux platforms we generally deal with.
+        ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or \
+          OS=="netbsd" or OS=="mac"', {
+          # This handles the Unix platforms we generally deal with.
           # Anything else gets passed through, which probably won't work
           # very well; such hosts should pass an explicit target_arch
           # to gyp.
@@ -46,7 +47,8 @@
           'host_arch%':
             '<!(uname -m | sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mips/")',
-        }, {  # OS!="linux" and OS!="freebsd" and OS!="openbsd" and OS!="netbsd"
+        }, {  # OS!="linux" and OS!="freebsd" and OS!="openbsd" and
+              # OS!="netbsd" and OS!="mac"
           'host_arch%': 'ia32',
         }],
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ ... @@
+  static SnapshotObjectId GetSnapshotObjectId(Handle<Value> value);
+
+  /**
+   * A constant for invalid SnapshotObjectId. GetSnapshotObjectId will return
+   * it in case heap profiler cannot find id for the object passed as
+   * parameter. HeapSnapshot::GetNodeById will always return NULL for such id.
+   */
+  static const SnapshotObjectId kUnknownObjectId = 0;
+
   /**
    * Takes a heap snapshot and returns it.  Title may be an empty string.
    * See HeapSnapshot::Type for types description.
@@ -413,6 +434,34 @@ HeapSnapshot::Type type = HeapSnapshot::kFull,
                                          ActivityControl* control = NULL);
 
+  /**
+   * Starts tracking of heap objects population statistics. After calling
+   * this method, all heap objects relocations done by the garbage collector
+   * are being registered.
+   */
+  static void StartHeapObjectsTracking();
+
+  /**
+   * Adds a new time interval entry to the aggregated statistics array. The
+   * time interval entry contains information on the current heap objects
+   * population size. The method also updates aggregated statistics and
+   * reports updates for all previous time intervals via the OutputStream
+   * object. Updates on each time interval are provided as a stream of the
+   * HeapStatsUpdate structure instances.
+   * The return value of the function is the last seen heap object Id.
+   *
+   * StartHeapObjectsTracking must be called before the first call to this
+   * method.
+   */
+  static SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
+
+  /**
+   * Stops tracking of heap objects population statistics, cleans up all
+   * collected data. StartHeapObjectsTracking must be called again prior to
+   * calling PushHeapObjectsStats next time.
+   */
+  static void StopHeapObjectsTracking();
+
   /**
    * Deletes all snapshots taken. All previously returned pointers to
    * snapshots and their contents become invalid after this call.
@@ -433,6 +482,9 @@
 
   /** Returns the number of currently existing persistent handles. */
   static int GetPersistentHandleCount();
+
+  /** Returns memory used for profiler internal data and snapshots. */
+  static size_t GetMemorySizeUsedByProfiler();
 };
 
 
@@ -510,6 +562,19 @@ class V8EXPORT RetainedObjectInfo {  // NOLINT
 };
 
 
+/**
+ * A struct for exporting HeapStats data from V8, using "push" model.
+ * See HeapProfiler::PushHeapObjectsStats.
+ */
+struct HeapStatsUpdate {
+  HeapStatsUpdate(uint32_t index, uint32_t count, uint32_t size)
+    : index(index), count(count), size(size) { }
+  uint32_t index;  // Index of the time interval that was changed.
+  uint32_t count;  // New value of count field for the interval with this index.
+  uint32_t size;  // New value of size field for the interval with this index.
+};
+
+
 }  // namespace v8
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 33179f5bf0..77ffb385ab 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -62,11 +62,13 @@
 
 #else  // _WIN32
 
-// Setup for Linux shared library export. There is no need to distinguish
-// between building or using the V8 shared library, but we should not
-// export symbols when we are building a static library.
+// Setup for Linux shared library export.
 #if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
+#ifdef BUILDING_V8_SHARED
 #define V8EXPORT __attribute__ ((visibility("default")))
+#else
+#define V8EXPORT
+#endif
 #else  // defined(__GNUC__) && (__GNUC__ >= 4)
 #define V8EXPORT
 #endif  // defined(__GNUC__) && (__GNUC__ >= 4)
@@ -98,6 +100,7 @@ class Function;
 class Date;
 class ImplementationUtilities;
 class Signature;
+class AccessorSignature;
 template <class T> class Handle;
 template <class T> class Local;
 template <class T> class Persistent;
@@ -107,6 +110,7 @@ class Data;
 class AccessorInfo;
 class StackTrace;
 class StackFrame;
+class Isolate;
 
 namespace internal {
 
@@ -862,13 +866,13 @@ class Value : public Data {
    * Returns true if this value is the undefined value.  See ECMA-262
    * 4.3.10.
    */
-  V8EXPORT bool IsUndefined() const;
+  inline bool IsUndefined() const;
 
   /**
    * Returns true if this value is the null value.  See ECMA-262
    * 4.3.11.
    */
-  V8EXPORT bool IsNull() const;
+  inline bool IsNull() const;
 
   /**
    * Returns true if this value is true.
@@ -982,7 +986,11 @@ class Value : public Data {
   V8EXPORT bool StrictEquals(Handle<Value> that) const;
 
  private:
+  inline bool QuickIsUndefined() const;
+  inline bool QuickIsNull() const;
   inline bool QuickIsString() const;
+  V8EXPORT bool FullIsUndefined() const;
+  V8EXPORT bool FullIsNull() const;
   V8EXPORT bool FullIsString() const;
 };
 
@@ -1079,6 +1087,7 @@ class String : public Primitive {
    * A zero length string.
    */
   V8EXPORT static v8::Local<String> Empty();
+  inline static v8::Local<String> Empty(Isolate* isolate);
 
   /**
    * Returns true if the string is external
@@ -1236,8 +1245,7 @@ class String : public Primitive {
    * this function should not otherwise delete or modify the resource. Neither
    * should the underlying buffer be deallocated or modified except through the
    * destructor of the external string resource.
-   */
-  V8EXPORT static Local<String> NewExternal(
+   */ V8EXPORT static Local<String> NewExternal(
       ExternalAsciiStringResource* resource);
 
   /**
@@ -1968,10 +1976,13 @@ class Arguments {
   inline Local<Object> Holder() const;
   inline bool IsConstructCall() const;
   inline Local<Value> Data() const;
+  inline Isolate* GetIsolate() const;
+
  private:
-  static const int kDataIndex = 0;
-  static const int kCalleeIndex = -1;
-  static const int kHolderIndex = -2;
+  static const int kIsolateIndex = 0;
+  static const int kDataIndex = -1;
+  static const int kCalleeIndex = -2;
+  static const int kHolderIndex = -3;
 
   friend class ImplementationUtilities;
   inline Arguments(internal::Object** implicit_args,
@@ -1993,9 +2004,11 @@ class V8EXPORT AccessorInfo {
 public:
  inline AccessorInfo(internal::Object** args)
      : args_(args) { }
+  inline Isolate* GetIsolate() const;
  inline Local<Value> Data() const;
  inline Local<Object> This() const;
  inline Local<Object> Holder() const;
+
 private:
  internal::Object** args_;
};
@@ -2277,7 +2290,8 @@ class V8EXPORT FunctionTemplate : public Template {
                    AccessorSetter setter,
                    Handle<Value> data,
                    AccessControl settings,
-                   PropertyAttribute attributes);
+                   PropertyAttribute attributes,
+                   Handle<AccessorSignature> signature);
   void SetNamedInstancePropertyHandler(NamedPropertyGetter getter,
                                        NamedPropertySetter setter,
                                        NamedPropertyQuery query,
@@ -2335,13 +2349,20 @@ class V8EXPORT ObjectTemplate : public Template {
    *   cross-context access.
    * \param attribute The attributes of the property for which an accessor
    *   is added.
+   * \param signature The signature describes valid receivers for the accessor
+   *   and is used to perform implicit instance checks against them. If the
+   *   receiver is incompatible (i.e. is not an instance of the constructor as
+   *   defined by FunctionTemplate::HasInstance()), an implicit TypeError is
+   *   thrown and no callback is invoked.
    */
   void SetAccessor(Handle<String> name,
                    AccessorGetter getter,
                    AccessorSetter setter = 0,
                    Handle<Value> data = Handle<Value>(),
                    AccessControl settings = DEFAULT,
-                   PropertyAttribute attribute = None);
+                   PropertyAttribute attribute = None,
+                   Handle<AccessorSignature> signature =
+                       Handle<AccessorSignature>());
 
   /**
    * Sets a named property handler on the object template.
@@ -2445,8 +2466,8 @@ class V8EXPORT ObjectTemplate : public Template {
 
 
 /**
- * A Signature specifies which receivers and arguments a function can
- * legally be called with.
+ * A Signature specifies which receivers and arguments are valid
+ * parameters to a function.
  */
 class V8EXPORT Signature : public Data {
  public:
@@ -2459,6 +2480,19 @@ class V8EXPORT Signature : public Data {
 };
 
 
+/**
+ * An AccessorSignature specifies which receivers are valid parameters
+ * to an accessor callback.
+ */
+class V8EXPORT AccessorSignature : public Data {
+ public:
+  static Local<AccessorSignature> New(Handle<FunctionTemplate> receiver =
+                                          Handle<FunctionTemplate>());
+ private:
+  AccessorSignature();
+};
+
+
 /**
  * A utility for determining the type of objects based on the template
  * they were constructed from.
@@ -2552,6 +2586,11 @@ Handle<Primitive> V8EXPORT Null();
 Handle<Boolean> V8EXPORT True();
 Handle<Boolean> V8EXPORT False();
 
+inline Handle<Primitive> Undefined(Isolate* isolate);
+inline Handle<Primitive> Null(Isolate* isolate);
+inline Handle<Boolean> True(Isolate* isolate);
+inline Handle<Boolean> False(Isolate* isolate);
+
 
 /**
  * A set of constraints that specifies the limits of the runtime's memory use.
@@ -2802,13 +2841,13 @@ class V8EXPORT Isolate {
   /**
    * Associate embedder-specific data with the isolate
    */
-  void SetData(void* data);
+  inline void SetData(void* data);
 
   /**
-   * Retrive embedder-specific data from the isolate.
+   * Retrieve embedder-specific data from the isolate.
    * Returns NULL if SetData has never been called.
    */
-  void* GetData();
+  inline void* GetData();
 
  private:
   Isolate();
@@ -3153,7 +3192,8 @@ class V8EXPORT V8 {
    * that is kept alive by JavaScript objects.
    * \returns the adjusted value.
    */
-  static int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
+  static intptr_t AdjustAmountOfExternalAllocatedMemory(
+      intptr_t change_in_bytes);
 
   /**
    * Suspends recording of tick samples in the profiler.
@@ -3735,6 +3775,12 @@ class V8EXPORT Locker {
 };
 
 
+/**
+ * A struct for exporting HeapStats data from V8, using "push" model.
+ */
+struct HeapStatsUpdate;
+
+
 /**
  * An interface for exporting data from V8, using "push" model.
 */
@@ -3760,6 +3806,14 @@ class V8EXPORT OutputStream {  // NOLINT
   * will not be called in case writing was aborted.
   */
  virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
+  /**
+   * Writes the next chunk of heap stats data into the stream. Writing
+   * can be stopped by returning kAbort as function result. EndOfStream
+   * will not be called in case writing was aborted.
+   */
+  virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) {
+    return kAbort;
+  };
};
 
 
@@ -3848,18 +3902,6 @@ const uintptr_t kEncodablePointerMask =
     PlatformSmiTagging::kEncodablePointerMask;
 const int kPointerToSmiShift = PlatformSmiTagging::kPointerToSmiShift;
 
-template <size_t ptr_size> struct InternalConstants;
-
-// Internal constants for 32-bit systems.
-template <> struct InternalConstants<4> {
-  static const int kStringResourceOffset = 3 * kApiPointerSize;
-};
-
-// Internal constants for 64-bit systems.
-template <> struct InternalConstants<8> {
-  static const int kStringResourceOffset = 3 * kApiPointerSize;
-};
-
 /**
  * This class exports constants and functionality from within v8 that
 * is necessary to implement inline functions in the v8 api.  Don't
@@ -3871,18 +3913,31 @@ class Internals {
   // the implementation of v8.
static const int kHeapObjectMapOffset = 0; static const int kMapInstanceTypeOffset = 1 * kApiPointerSize + kApiIntSize; - static const int kStringResourceOffset = - InternalConstants::kStringResourceOffset; + static const int kStringResourceOffset = 3 * kApiPointerSize; + static const int kOddballKindOffset = 3 * kApiPointerSize; static const int kForeignAddressOffset = kApiPointerSize; static const int kJSObjectHeaderSize = 3 * kApiPointerSize; static const int kFullStringRepresentationMask = 0x07; static const int kExternalTwoByteRepresentationTag = 0x02; + static const int kIsolateStateOffset = 0; + static const int kIsolateEmbedderDataOffset = 1 * kApiPointerSize; + static const int kIsolateRootsOffset = 3 * kApiPointerSize; + static const int kUndefinedValueRootIndex = 5; + static const int kNullValueRootIndex = 7; + static const int kTrueValueRootIndex = 8; + static const int kFalseValueRootIndex = 9; + static const int kEmptySymbolRootIndex = 128; + static const int kJSObjectType = 0xaa; static const int kFirstNonstringType = 0x80; + static const int kOddballType = 0x82; static const int kForeignType = 0x85; + static const int kUndefinedOddballKind = 5; + static const int kNullOddballKind = 3; + static inline bool HasHeapObjectTag(internal::Object* value) { return ((reinterpret_cast(value) & kHeapObjectTagMask) == kHeapObjectTag); @@ -3902,6 +3957,11 @@ class Internals { return ReadField(map, kMapInstanceTypeOffset); } + static inline int GetOddballKind(internal::Object* obj) { + typedef internal::Object O; + return SmiValue(ReadField(obj, kOddballKindOffset)); + } + static inline void* GetExternalPointerFromSmi(internal::Object* value) { const uintptr_t address = reinterpret_cast(value); return reinterpret_cast(address >> kPointerToSmiShift); @@ -3922,6 +3982,28 @@ class Internals { return representation == kExternalTwoByteRepresentationTag; } + static inline bool IsInitialized(v8::Isolate* isolate) { + uint8_t* addr = reinterpret_cast(isolate) + kIsolateStateOffset; + return *reinterpret_cast(addr) == 1; + } + + static inline void SetEmbedderData(v8::Isolate* isolate, void* data) { + uint8_t* addr = reinterpret_cast(isolate) + + kIsolateEmbedderDataOffset; + *reinterpret_cast(addr) = data; + } + + static inline void* GetEmbedderData(v8::Isolate* isolate) { + uint8_t* addr = reinterpret_cast(isolate) + + kIsolateEmbedderDataOffset; + return *reinterpret_cast(addr); + } + + static inline internal::Object** GetRoot(v8::Isolate* isolate, int index) { + uint8_t* addr = reinterpret_cast(isolate) + kIsolateRootsOffset; + return reinterpret_cast(addr + index * kApiPointerSize); + } + template static inline T ReadField(Object* ptr, int offset) { uint8_t* addr = reinterpret_cast(ptr) + offset - kHeapObjectTag; @@ -4048,6 +4130,11 @@ Local Arguments::Data() const { } +Isolate* Arguments::GetIsolate() const { + return *reinterpret_cast(&implicit_args_[kIsolateIndex]); +} + + bool Arguments::IsConstructCall() const { return is_construct_call_; } @@ -4160,6 +4247,15 @@ String* String::Cast(v8::Value* value) { } +Local String::Empty(Isolate* isolate) { + typedef internal::Object* S; + typedef internal::Internals I; + if (!I::IsInitialized(isolate)) return Empty(); + S* slot = I::GetRoot(isolate, I::kEmptySymbolRootIndex); + return Local(reinterpret_cast(slot)); +} + + String::ExternalStringResource* String::GetExternalStringResource() const { typedef internal::Object O; typedef internal::Internals I; @@ -4178,6 +4274,42 @@ String::ExternalStringResource* String::GetExternalStringResource() const 
 }
 
 
+bool Value::IsUndefined() const {
+#ifdef V8_ENABLE_CHECKS
+  return FullIsUndefined();
+#else
+  return QuickIsUndefined();
+#endif
+}
+
+bool Value::QuickIsUndefined() const {
+  typedef internal::Object O;
+  typedef internal::Internals I;
+  O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
+  if (!I::HasHeapObjectTag(obj)) return false;
+  if (I::GetInstanceType(obj) != I::kOddballType) return false;
+  return (I::GetOddballKind(obj) == I::kUndefinedOddballKind);
+}
+
+
+bool Value::IsNull() const {
+#ifdef V8_ENABLE_CHECKS
+  return FullIsNull();
+#else
+  return QuickIsNull();
+#endif
+}
+
+bool Value::QuickIsNull() const {
+  typedef internal::Object O;
+  typedef internal::Internals I;
+  O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
+  if (!I::HasHeapObjectTag(obj)) return false;
+  if (I::GetInstanceType(obj) != I::kOddballType) return false;
+  return (I::GetOddballKind(obj) == I::kNullOddballKind);
+}
+
+
 bool Value::IsString() const {
 #ifdef V8_ENABLE_CHECKS
   return FullIsString();
@@ -4283,6 +4415,11 @@ External* External::Cast(v8::Value* value) {
 }
 
 
+Isolate* AccessorInfo::GetIsolate() const {
+  return *reinterpret_cast<Isolate**>(&args_[-3]);
+}
+
+
 Local<Value> AccessorInfo::Data() const {
   return Local<Value>(reinterpret_cast<Value*>(&args_[-2]));
 }
@@ -4298,6 +4435,54 @@ Local<Object> AccessorInfo::Holder() const {
 }
 
 
+Handle<Primitive> Undefined(Isolate* isolate) {
+  typedef internal::Object* S;
+  typedef internal::Internals I;
+  if (!I::IsInitialized(isolate)) return Undefined();
+  S* slot = I::GetRoot(isolate, I::kUndefinedValueRootIndex);
+  return Handle<Primitive>(reinterpret_cast<Primitive*>(slot));
+}
+
+
+Handle<Primitive> Null(Isolate* isolate) {
+  typedef internal::Object* S;
+  typedef internal::Internals I;
+  if (!I::IsInitialized(isolate)) return Null();
+  S* slot = I::GetRoot(isolate, I::kNullValueRootIndex);
+  return Handle<Primitive>(reinterpret_cast<Primitive*>(slot));
+}
+
+
+Handle<Boolean> True(Isolate* isolate) {
+  typedef internal::Object* S;
+  typedef internal::Internals I;
+  if (!I::IsInitialized(isolate)) return True();
+  S* slot = I::GetRoot(isolate, I::kTrueValueRootIndex);
+  return Handle<Boolean>(reinterpret_cast<Boolean*>(slot));
+}
+
+
+Handle<Boolean> False(Isolate* isolate) {
+  typedef internal::Object* S;
+  typedef internal::Internals I;
+  if (!I::IsInitialized(isolate)) return False();
+  S* slot = I::GetRoot(isolate, I::kFalseValueRootIndex);
+  return Handle<Boolean>(reinterpret_cast<Boolean*>(slot));
+}
+
+
+void Isolate::SetData(void* data) {
+  typedef internal::Internals I;
+  I::SetEmbedderData(this, data);
+}
+
+
+void* Isolate::GetData() {
+  typedef internal::Internals I;
+  return I::GetEmbedderData(this);
+}
+
+
 /**
  * \example shell.cc
  * A simple shell that takes a list of expressions on the
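The v8.h hunks above are the heart of this upgrade's API change: per-isolate statics (Undefined(isolate), String::Empty(isolate) and friends read a root slot straight out of the isolate), an inline embedder-data slot behind Isolate::SetData()/GetData(), and release-mode fast paths that reduce Value::IsUndefined()/IsNull() to a heap-object tag check, an oddball instance-type check and an oddball-kind compare. A minimal embedder sketch of these entry points; EmbedderState and the counter are illustrative names, not part of V8:

  #include <v8.h>

  // Illustrative per-isolate state; any embedder-defined type works here.
  struct EmbedderState { int undefined_seen; };

  void CountUndefined(v8::Isolate* isolate, v8::Handle<v8::Value> value) {
    // SetData()/GetData() now compile to a direct read/write of the
    // isolate's embedder-data slot; no call into the V8 runtime.
    if (isolate->GetData() == NULL) isolate->SetData(new EmbedderState());

    // In release builds IsUndefined() is the inline QuickIsUndefined():
    // tag check, oddball instance-type check, oddball-kind compare.
    if (value->IsUndefined()) {
      static_cast<EmbedderState*>(isolate->GetData())->undefined_seen++;
    }

    // Undefined(isolate) reads kUndefinedValueRootIndex out of the
    // isolate's root array instead of resolving the isolate via TLS.
    v8::Handle<v8::Primitive> undef = v8::Undefined(isolate);
    (void)undef;
  }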
diff --git a/deps/v8/samples/lineprocessor.cc b/deps/v8/samples/lineprocessor.cc
index 1606a8f99c..7a84a2a0ff 100644
--- a/deps/v8/samples/lineprocessor.cc
+++ b/deps/v8/samples/lineprocessor.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -434,9 +434,9 @@ v8::Handle<v8::String> ReadLine() {
   }
   if (res == NULL) {
     v8::Handle<v8::Primitive> t = v8::Undefined();
-    return reinterpret_cast<v8::Handle<v8::String>&>(t);
+    return v8::Handle<v8::String>(v8::String::Cast(*t));
   }
-  // remove newline char
+  // Remove newline char
   for (char* pos = buffer; *pos != '\0'; pos++) {
     if (*pos == '\n') {
       *pos = '\0';
diff --git a/deps/v8/samples/samples.gyp b/deps/v8/samples/samples.gyp
index 55b2a98acd..3c720a748a 100644
--- a/deps/v8/samples/samples.gyp
+++ b/deps/v8/samples/samples.gyp
@@ -1,4 +1,4 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -48,6 +48,12 @@
       'sources': [
         'process.cc',
       ],
+    },
+    {
+      'target_name': 'lineprocessor',
+      'sources': [
+        'lineprocessor.cc',
+      ],
     }
   ],
 }
diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc
index b40eca2f7c..db0cc1a930 100644
--- a/deps/v8/samples/shell.cc
+++ b/deps/v8/samples/shell.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -67,17 +67,20 @@ static bool run_shell;
 int main(int argc, char* argv[]) {
   v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
   run_shell = (argc == 1);
-  v8::HandleScope handle_scope;
-  v8::Persistent<v8::Context> context = CreateShellContext();
-  if (context.IsEmpty()) {
-    printf("Error creating context\n");
-    return 1;
+  int result;
+  {
+    v8::HandleScope handle_scope;
+    v8::Persistent<v8::Context> context = CreateShellContext();
+    if (context.IsEmpty()) {
+      printf("Error creating context\n");
+      return 1;
+    }
+    context->Enter();
+    result = RunMain(argc, argv);
+    if (run_shell) RunShell(context);
+    context->Exit();
+    context.Dispose();
   }
-  context->Enter();
-  int result = RunMain(argc, argv);
-  if (run_shell) RunShell(context);
-  context->Exit();
-  context.Dispose();
   v8::V8::Dispose();
   return result;
 }
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index 0d0b5357d5..2482b379ac 100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -68,6 +68,7 @@ SOURCES = {
     diy-fp.cc
     dtoa.cc
     elements.cc
+    elements-kind.cc
     execution.cc
     factory.cc
     flags.cc
diff --git a/deps/v8/src/allocation-inl.h b/deps/v8/src/allocation-inl.h
index 04a3fe667a..d32db4b17f 100644
--- a/deps/v8/src/allocation-inl.h
+++ b/deps/v8/src/allocation-inl.h
@@ -34,12 +34,12 @@
 namespace v8 {
 namespace internal {
 
 
-void* PreallocatedStorage::New(size_t size) {
+void* PreallocatedStorageAllocationPolicy::New(size_t size) {
   return Isolate::Current()->PreallocatedStorageNew(size);
 }
 
 
-void PreallocatedStorage::Delete(void* p) {
+void PreallocatedStorageAllocationPolicy::Delete(void* p) {
   return Isolate::Current()->PreallocatedStorageDelete(p);
 }
diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h
index 31067dda81..45bde4c4cb 100644
--- a/deps/v8/src/allocation.h
+++ b/deps/v8/src/allocation.h
@@ -104,7 +104,7 @@ char* StrNDup(const char* str, int n);
 // and free. Used as the default policy for lists.
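Before the allocation.h hunk continues below, a note on the shell.cc change above: it is a shutdown-ordering fix, not a cosmetic one. Every handle, including the persistent context, must be gone before V8::Dispose() tears the VM down, so the sample now scopes them in a block. The same pattern in isolation, assuming some CreateShellContext()-style setup stands behind Context::New():

  #include <v8.h>

  int RunEmbedder() {
    int result;
    {
      // Block scope: handle_scope and context die before V8::Dispose().
      v8::HandleScope handle_scope;
      v8::Persistent<v8::Context> context = v8::Context::New();
      if (context.IsEmpty()) return 1;
      context->Enter();
      // ... compile and run scripts here ...
      result = 0;
      context->Exit();
      context.Dispose();  // release the last persistent handle
    }
    v8::V8::Dispose();    // safe now: no live handles or contexts remain
    return result;
  }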
class FreeStoreAllocationPolicy { public: - INLINE(static void* New(size_t size)) { return Malloced::New(size); } + INLINE(void* New(size_t size)) { return Malloced::New(size); } INLINE(static void Delete(void* p)) { Malloced::Delete(p); } }; @@ -117,12 +117,6 @@ class PreallocatedStorage { explicit PreallocatedStorage(size_t size); size_t size() { return size_; } - // TODO(isolates): Get rid of these-- we'll have to change the allocator - // interface to include a pointer to an isolate to do this - // efficiently. - static inline void* New(size_t size); - static inline void Delete(void* p); - private: size_t size_; PreallocatedStorage* previous_; @@ -137,6 +131,12 @@ class PreallocatedStorage { }; +struct PreallocatedStorageAllocationPolicy { + INLINE(void* New(size_t size)); + INLINE(static void Delete(void* ptr)); +}; + + } } // namespace v8::internal #endif // V8_ALLOCATION_H_ diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 4e731fbec8..0d88047aa2 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -512,6 +512,16 @@ void RegisteredExtension::Register(RegisteredExtension* that) { } +void RegisteredExtension::UnregisterAll() { + RegisteredExtension* re = first_extension_; + while (re != NULL) { + RegisteredExtension* next = re->next(); + delete re; + re = next; + } +} + + void RegisterExtension(Extension* that) { RegisteredExtension* extension = new RegisteredExtension(that); RegisteredExtension::Register(extension); @@ -980,6 +990,12 @@ Local Signature::New(Handle receiver, } +Local AccessorSignature::New( + Handle receiver) { + return Utils::AccessorSignatureToLocal(Utils::OpenHandle(*receiver)); +} + + Local TypeSwitch::New(Handle type) { Handle types[1] = { type }; return TypeSwitch::New(1, types); @@ -1047,7 +1063,8 @@ static i::Handle MakeAccessorInfo( AccessorSetter setter, v8::Handle data, v8::AccessControl settings, - v8::PropertyAttribute attributes) { + v8::PropertyAttribute attributes, + v8::Handle signature) { i::Handle obj = FACTORY->NewAccessorInfo(); ASSERT(getter != NULL); SET_FIELD_WRAPPED(obj, set_getter, getter); @@ -1059,6 +1076,9 @@ static i::Handle MakeAccessorInfo( if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true); if (settings & PROHIBITS_OVERWRITING) obj->set_prohibits_overwriting(true); obj->set_property_attributes(static_cast(attributes)); + if (!signature.IsEmpty()) { + obj->set_expected_receiver_type(*Utils::OpenHandle(*signature)); + } return obj; } @@ -1069,7 +1089,8 @@ void FunctionTemplate::AddInstancePropertyAccessor( AccessorSetter setter, v8::Handle data, v8::AccessControl settings, - v8::PropertyAttribute attributes) { + v8::PropertyAttribute attributes, + v8::Handle signature) { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); if (IsDeadCheck(isolate, "v8::FunctionTemplate::AddInstancePropertyAccessor()")) { @@ -1078,9 +1099,9 @@ void FunctionTemplate::AddInstancePropertyAccessor( ENTER_V8(isolate); i::HandleScope scope(isolate); - i::Handle obj = MakeAccessorInfo(name, - getter, setter, data, - settings, attributes); + i::Handle obj = MakeAccessorInfo(name, getter, setter, data, + settings, attributes, + signature); i::Handle list(Utils::OpenHandle(this)->property_accessors()); if (list->IsUndefined()) { list = NeanderArray().value(); @@ -1265,7 +1286,8 @@ void ObjectTemplate::SetAccessor(v8::Handle name, AccessorSetter setter, v8::Handle data, AccessControl settings, - PropertyAttribute attribute) { + PropertyAttribute attribute, + v8::Handle signature) { i::Isolate* isolate = 
Utils::OpenHandle(this)->GetIsolate(); if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return; ENTER_V8(isolate); @@ -1279,7 +1301,8 @@ void ObjectTemplate::SetAccessor(v8::Handle name, setter, data, settings, - attribute); + attribute, + signature); } @@ -2091,17 +2114,21 @@ bool StackFrame::IsConstructor() const { // --- D a t a --- -bool Value::IsUndefined() const { +bool Value::FullIsUndefined() const { if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsUndefined()")) { return false; } - return Utils::OpenHandle(this)->IsUndefined(); + bool result = Utils::OpenHandle(this)->IsUndefined(); + ASSERT_EQ(result, QuickIsUndefined()); + return result; } -bool Value::IsNull() const { +bool Value::FullIsNull() const { if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsNull()")) return false; - return Utils::OpenHandle(this)->IsNull(); + bool result = Utils::OpenHandle(this)->IsNull(); + ASSERT_EQ(result, QuickIsNull()); + return result; } @@ -2799,9 +2826,13 @@ bool v8::Object::ForceDelete(v8::Handle key) { i::Handle self = Utils::OpenHandle(this); i::Handle key_obj = Utils::OpenHandle(*key); - // When turning on access checks for a global object deoptimize all functions - // as optimized code does not always handle access checks. - i::Deoptimizer::DeoptimizeGlobalObject(*self); + // When deleting a property on the global object using ForceDelete + // deoptimize all functions as optimized code does not check for the hole + // value with DontDelete properties. We have to deoptimize all contexts + // because of possible cross-context inlined functions. + if (self->IsJSGlobalProxy() || self->IsGlobalObject()) { + i::Deoptimizer::DeoptimizeAll(); + } EXCEPTION_PREAMBLE(isolate); i::Handle obj = i::ForceDeleteProperty(self, key_obj); @@ -3061,9 +3092,10 @@ bool Object::SetAccessor(Handle name, ON_BAILOUT(isolate, "v8::Object::SetAccessor()", return false); ENTER_V8(isolate); i::HandleScope scope(isolate); - i::Handle info = MakeAccessorInfo(name, - getter, setter, data, - settings, attributes); + v8::Handle signature; + i::Handle info = MakeAccessorInfo(name, getter, setter, data, + settings, attributes, + signature); bool fast = Utils::OpenHandle(this)->HasFastProperties(); i::Handle result = i::SetAccessor(Utils::OpenHandle(this), info); if (result.is_null() || result->IsUndefined()) return false; @@ -4612,7 +4644,9 @@ void* External::Value() const { Local v8::String::Empty() { i::Isolate* isolate = i::Isolate::Current(); - EnsureInitializedForIsolate(isolate, "v8::String::Empty()"); + if (!EnsureInitializedForIsolate(isolate, "v8::String::Empty()")) { + return v8::Local(); + } LOG_API(isolate, "String::Empty()"); return Utils::ToLocal(isolate->factory()->empty_symbol()); } @@ -5020,7 +5054,7 @@ Local Array::CloneElementAt(uint32_t index) { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); ON_BAILOUT(isolate, "v8::Array::CloneElementAt()", return Local()); i::Handle self = Utils::OpenHandle(this); - if (!self->HasFastElements()) { + if (!self->HasFastObjectElements()) { return Local(); } i::FixedArray* elms = i::FixedArray::cast(self->elements()); @@ -5198,7 +5232,7 @@ void V8::AddImplicitReferences(Persistent parent, } -int V8::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) { +intptr_t V8::AdjustAmountOfExternalAllocatedMemory(intptr_t change_in_bytes) { i::Isolate* isolate = i::Isolate::Current(); if (IsDeadCheck(isolate, "v8::V8::AdjustAmountOfExternalAllocatedMemory()")) { return 0; @@ -5378,17 +5412,6 @@ void Isolate::Exit() { } -void 
Isolate::SetData(void* data) { - i::Isolate* isolate = reinterpret_cast(this); - isolate->SetData(data); -} - -void* Isolate::GetData() { - i::Isolate* isolate = reinterpret_cast(this); - return isolate->GetData(); -} - - String::Utf8Value::Utf8Value(v8::Handle obj) : str_(NULL), length_(0) { i::Isolate* isolate = i::Isolate::Current(); @@ -5988,7 +6011,7 @@ Handle HeapGraphEdge::GetName() const { const HeapGraphNode* HeapGraphEdge::GetFromNode() const { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapGraphEdge::GetFromNode"); - const i::HeapEntry* from = ToInternal(this)->From(); + const i::HeapEntry* from = ToInternal(this)->from(); return reinterpret_cast(from); } @@ -6022,7 +6045,7 @@ Handle HeapGraphNode::GetName() const { } -uint64_t HeapGraphNode::GetId() const { +SnapshotObjectId HeapGraphNode::GetId() const { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapGraphNode::GetId"); return ToInternal(this)->id(); @@ -6036,13 +6059,6 @@ int HeapGraphNode::GetSelfSize() const { } -int HeapGraphNode::GetRetainedSize() const { - i::Isolate* isolate = i::Isolate::Current(); - IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainedSize"); - return ToInternal(this)->retained_size(); -} - - int HeapGraphNode::GetChildrenCount() const { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::GetChildrenCount"); @@ -6054,29 +6070,7 @@ const HeapGraphEdge* HeapGraphNode::GetChild(int index) const { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::GetChild"); return reinterpret_cast( - &ToInternal(this)->children()[index]); -} - - -int HeapGraphNode::GetRetainersCount() const { - i::Isolate* isolate = i::Isolate::Current(); - IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainersCount"); - return ToInternal(this)->retainers().length(); -} - - -const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const { - i::Isolate* isolate = i::Isolate::Current(); - IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainer"); - return reinterpret_cast( - ToInternal(this)->retainers()[index]); -} - - -const HeapGraphNode* HeapGraphNode::GetDominatorNode() const { - i::Isolate* isolate = i::Isolate::Current(); - IsDeadCheck(isolate, "v8::HeapSnapshot::GetDominatorNode"); - return reinterpret_cast(ToInternal(this)->dominator()); + ToInternal(this)->children()[index]); } @@ -6137,18 +6131,18 @@ const HeapGraphNode* HeapSnapshot::GetRoot() const { } -const HeapGraphNode* HeapSnapshot::GetNodeById(uint64_t id) const { +const HeapGraphNode* HeapSnapshot::GetNodeById(SnapshotObjectId id) const { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodeById"); return reinterpret_cast( - ToInternal(this)->GetEntryById(static_cast(id))); + ToInternal(this)->GetEntryById(id)); } int HeapSnapshot::GetNodesCount() const { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodesCount"); - return ToInternal(this)->entries()->length(); + return ToInternal(this)->entries().length(); } @@ -6156,7 +6150,14 @@ const HeapGraphNode* HeapSnapshot::GetNode(int index) const { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::GetNode"); return reinterpret_cast( - ToInternal(this)->entries()->at(index)); + &ToInternal(this)->entries().at(index)); +} + + +SnapshotObjectId HeapSnapshot::GetMaxSnapshotJSObjectId() const { + i::Isolate* isolate = i::Isolate::Current(); + IsDeadCheck(isolate, 
"v8::HeapSnapshot::GetMaxSnapshotJSObjectId"); + return ToInternal(this)->max_snapshot_js_object_id(); } @@ -6201,6 +6202,14 @@ const HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) { } +SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle value) { + i::Isolate* isolate = i::Isolate::Current(); + IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshotObjectId"); + i::Handle obj = Utils::OpenHandle(*value); + return i::HeapProfiler::GetSnapshotObjectId(obj); +} + + const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle title, HeapSnapshot::Type type, ActivityControl* control) { @@ -6220,6 +6229,27 @@ const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle title, } +void HeapProfiler::StartHeapObjectsTracking() { + i::Isolate* isolate = i::Isolate::Current(); + IsDeadCheck(isolate, "v8::HeapProfiler::StartHeapObjectsTracking"); + i::HeapProfiler::StartHeapObjectsTracking(); +} + + +void HeapProfiler::StopHeapObjectsTracking() { + i::Isolate* isolate = i::Isolate::Current(); + IsDeadCheck(isolate, "v8::HeapProfiler::StopHeapObjectsTracking"); + i::HeapProfiler::StopHeapObjectsTracking(); +} + + +SnapshotObjectId HeapProfiler::PushHeapObjectsStats(OutputStream* stream) { + i::Isolate* isolate = i::Isolate::Current(); + IsDeadCheck(isolate, "v8::HeapProfiler::PushHeapObjectsStats"); + return i::HeapProfiler::PushHeapObjectsStats(stream); +} + + void HeapProfiler::DeleteAllSnapshots() { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapProfiler::DeleteAllSnapshots"); @@ -6240,6 +6270,11 @@ int HeapProfiler::GetPersistentHandleCount() { } +size_t HeapProfiler::GetMemorySizeUsedByProfiler() { + return i::HeapProfiler::GetMemorySizeUsedByProfiler(); +} + + v8::Testing::StressType internal::Testing::stress_type_ = v8::Testing::kStressTypeOpt; @@ -6267,7 +6302,11 @@ static void SetFlagsFromString(const char* flags) { void Testing::PrepareStressRun(int run) { static const char* kLazyOptimizations = - "--prepare-always-opt --nolimit-inlining --noalways-opt"; + "--prepare-always-opt " + "--max-inlined-source-size=999999 " + "--max-inlined-nodes=999999 " + "--max-inlined-nodes-cumulative=999999 " + "--noalways-opt"; static const char* kForcedOptimizations = "--always-opt"; // If deoptimization stressed turn on frequent deoptimization. 
If no value diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h index 89cf0c864c..58e6a6e410 100644 --- a/deps/v8/src/api.h +++ b/deps/v8/src/api.h @@ -105,13 +105,13 @@ NeanderArray::NeanderArray(v8::internal::Handle obj) v8::internal::Object* NeanderObject::get(int offset) { - ASSERT(value()->HasFastElements()); + ASSERT(value()->HasFastObjectElements()); return v8::internal::FixedArray::cast(value()->elements())->get(offset); } void NeanderObject::set(int offset, v8::internal::Object* value) { - ASSERT(value_->HasFastElements()); + ASSERT(value_->HasFastObjectElements()); v8::internal::FixedArray::cast(value_->elements())->set(offset, value); } @@ -146,6 +146,7 @@ class RegisteredExtension { public: explicit RegisteredExtension(Extension* extension); static void Register(RegisteredExtension* that); + static void UnregisterAll(); Extension* extension() { return extension_; } RegisteredExtension* next() { return next_; } RegisteredExtension* next_auto() { return next_auto_; } @@ -199,6 +200,8 @@ class Utils { v8::internal::Handle obj); static inline Local ToLocal( v8::internal::Handle obj); + static inline Local AccessorSignatureToLocal( + v8::internal::Handle obj); static inline Local ToLocal( v8::internal::Handle obj); @@ -232,6 +235,8 @@ class Utils { OpenHandle(const v8::Context* context); static inline v8::internal::Handle OpenHandle(const v8::Signature* sig); + static inline v8::internal::Handle + OpenHandle(const v8::AccessorSignature* sig); static inline v8::internal::Handle OpenHandle(const v8::TypeSwitch* that); static inline v8::internal::Handle @@ -275,6 +280,7 @@ MAKE_TO_LOCAL(ToLocal, Foreign, External) MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate) MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate) MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature) +MAKE_TO_LOCAL(AccessorSignatureToLocal, FunctionTemplateInfo, AccessorSignature) MAKE_TO_LOCAL(ToLocal, TypeSwitchInfo, TypeSwitch) MAKE_TO_LOCAL(MessageToLocal, Object, Message) MAKE_TO_LOCAL(StackTraceToLocal, JSArray, StackTrace) @@ -299,6 +305,7 @@ MAKE_OPEN_HANDLE(Template, TemplateInfo) MAKE_OPEN_HANDLE(FunctionTemplate, FunctionTemplateInfo) MAKE_OPEN_HANDLE(ObjectTemplate, ObjectTemplateInfo) MAKE_OPEN_HANDLE(Signature, SignatureInfo) +MAKE_OPEN_HANDLE(AccessorSignature, FunctionTemplateInfo) MAKE_OPEN_HANDLE(TypeSwitch, TypeSwitchInfo) MAKE_OPEN_HANDLE(Data, Object) MAKE_OPEN_HANDLE(RegExp, JSRegExp) diff --git a/deps/v8/src/apiutils.h b/deps/v8/src/apiutils.h index 68579af1b3..71c0e1c2c4 100644 --- a/deps/v8/src/apiutils.h +++ b/deps/v8/src/apiutils.h @@ -1,4 +1,4 @@ -// Copyright 2009 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -40,14 +40,17 @@ class ImplementationUtilities { } // Packs additional parameters for the NewArguments function. |implicit_args| - // is a pointer to the last element of 3-elements array controlled by GC. + // is a pointer to the last element of 4-elements array controlled by GC. 
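Stepping back to the api.cc hunks above: the heap profiler grows an object-tracking mode (StartHeapObjectsTracking(), PushHeapObjectsStats(), GetSnapshotObjectId()) built on the new SnapshotObjectId type. A hedged sketch of the flow, assuming the OutputStream interface from this upgrade's v8-profiler.h; DeltaStream is an illustrative name. The apiutils.h hunk resumes right after this sketch.

  #include <v8.h>
  #include <v8-profiler.h>

  // Receives the incremental heap statistics pushed between snapshots.
  class DeltaStream : public v8::OutputStream {
   public:
    virtual void EndOfStream() {}
    virtual WriteResult WriteAsciiChunk(char* data, int size) {
      return kContinue;
    }
    virtual WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* data,
                                            int count) {
      return kContinue;  // per-interval (index, count, size) updates
    }
  };

  void TrackHeap(v8::Handle<v8::Value> suspect) {
    v8::HeapProfiler::StartHeapObjectsTracking();
    // ... run some JavaScript ...
    DeltaStream stream;
    // Push stats accumulated so far; returns the last assigned object id.
    v8::SnapshotObjectId last =
        v8::HeapProfiler::PushHeapObjectsStats(&stream);
    // A stable id for one object, comparable across snapshots.
    v8::SnapshotObjectId id = v8::HeapProfiler::GetSnapshotObjectId(suspect);
    v8::HeapProfiler::StopHeapObjectsTracking();
    (void)last; (void)id;
  }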
static void PrepareArgumentsData(internal::Object** implicit_args, + internal::Isolate* isolate, internal::Object* data, internal::JSFunction* callee, internal::Object* holder) { implicit_args[v8::Arguments::kDataIndex] = data; implicit_args[v8::Arguments::kCalleeIndex] = callee; implicit_args[v8::Arguments::kHolderIndex] = holder; + implicit_args[v8::Arguments::kIsolateIndex] = + reinterpret_cast(isolate); } static v8::Arguments NewArguments(internal::Object** implicit_args, @@ -55,6 +58,8 @@ class ImplementationUtilities { bool is_construct_call) { ASSERT(implicit_args[v8::Arguments::kCalleeIndex]->IsJSFunction()); ASSERT(implicit_args[v8::Arguments::kHolderIndex]->IsHeapObject()); + // The implicit isolate argument is not tagged and looks like a SMI. + ASSERT(implicit_args[v8::Arguments::kIsolateIndex]->IsSmi()); return v8::Arguments(implicit_args, argv, argc, is_construct_call); } diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h index e9a32702cf..f8fb00c575 100644 --- a/deps/v8/src/arguments.h +++ b/deps/v8/src/arguments.h @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -91,9 +91,11 @@ class CustomArguments : public Relocatable { Object* data, Object* self, JSObject* holder) : Relocatable(isolate) { - values_[2] = self; - values_[1] = holder; - values_[0] = data; + ASSERT(reinterpret_cast(isolate)->IsSmi()); + values_[3] = self; + values_[2] = holder; + values_[1] = data; + values_[0] = reinterpret_cast(isolate); } inline explicit CustomArguments(Isolate* isolate) : Relocatable(isolate) { @@ -106,8 +108,9 @@ class CustomArguments : public Relocatable { void IterateInstance(ObjectVisitor* v); Object** end() { return values_ + ARRAY_SIZE(values_) - 1; } + private: - Object* values_[3]; + Object* values_[4]; }; diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index c99e778a7f..578bd810d4 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -114,7 +114,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm, Label* gc_required) { const int initial_capacity = JSArray::kPreallocatedArrayElements; STATIC_ASSERT(initial_capacity >= 0); - __ LoadInitialArrayMap(array_function, scratch2, scratch1); + __ LoadInitialArrayMap(array_function, scratch2, scratch1, false); // Allocate the JSArray object together with space for a fixed array with the // requested elements. @@ -208,7 +208,8 @@ static void AllocateJSArray(MacroAssembler* masm, bool fill_with_hole, Label* gc_required) { // Load the initial map from the array function. - __ LoadInitialArrayMap(array_function, scratch2, elements_array_storage); + __ LoadInitialArrayMap(array_function, scratch2, + elements_array_storage, fill_with_hole); if (FLAG_debug_code) { // Assert that array size is not zero. __ tst(array_size, array_size); @@ -440,10 +441,10 @@ static void ArrayNativeCode(MacroAssembler* masm, __ b(call_generic_code); __ bind(¬_double); - // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS. + // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS. 
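The apiutils.h and arguments.h hunks above widen the implicit-argument block from three entries to four so every callback can reach its isolate directly; the pointer is stored untagged, which is why it deliberately passes the IsSmi() assertion. Together with the AccessorSignature support added in api.cc/api.h earlier, the embedder-visible result looks roughly like this sketch (GetX, Echo and point_template are illustrative names):

  #include <v8.h>

  // Accessor restricted to receivers created from point_template; V8
  // checks the expected receiver type recorded by MakeAccessorInfo().
  static v8::Handle<v8::Value> GetX(v8::Local<v8::String> name,
                                    const v8::AccessorInfo& info) {
    v8::Isolate* isolate = info.GetIsolate();  // new: read from args_[-3]
    (void)isolate;
    return v8::Integer::New(42);
  }

  // Invocation callback using the fourth implicit-args slot filled in by
  // PrepareArgumentsData(); no Isolate::Current() TLS lookup required.
  static v8::Handle<v8::Value> Echo(const v8::Arguments& args) {
    if (args.Length() < 1) return v8::Undefined(args.GetIsolate());
    return args[0];
  }

  void Install(v8::Handle<v8::FunctionTemplate> point_template) {
    v8::Handle<v8::AccessorSignature> sig =
        v8::AccessorSignature::New(point_template);
    point_template->InstanceTemplate()->SetAccessor(
        v8::String::New("x"), GetX, 0, v8::Handle<v8::Value>(),
        v8::DEFAULT, v8::None, sig);
    point_template->InstanceTemplate()->Set(v8::String::New("echo"),
        v8::FunctionTemplate::New(Echo));
  }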
// r3: JSArray __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); - __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, r2, r9, diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index f772db9be2..761123f639 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -3737,9 +3737,13 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // Compute the return address in lr to return to after the jump below. Pc is // already at '+ 8' from the current instruction but return is after three // instructions so add another 4 to pc to get the return address. - masm->add(lr, pc, Operand(4)); - __ str(lr, MemOperand(sp, 0)); - masm->Jump(r5); + { + // Prevent literal pool emission before return address. + Assembler::BlockConstPoolScope block_const_pool(masm); + masm->add(lr, pc, Operand(4)); + __ str(lr, MemOperand(sp, 0)); + masm->Jump(r5); + } if (always_allocate) { // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1 @@ -3956,14 +3960,21 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // Jump to a faked try block that does the invoke, with a faked catch // block that sets the pending exception. __ jmp(&invoke); - __ bind(&handler_entry); - handler_offset_ = handler_entry.pos(); - // Caught exception: Store result (exception) in the pending exception - // field in the JSEnv and return a failure sentinel. Coming in here the - // fp will be invalid because the PushTryHandler below sets it to 0 to - // signal the existence of the JSEntry frame. - __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, - isolate))); + + // Block literal pool emission whilst taking the position of the handler + // entry. This avoids making the assumption that literal pools are always + // emitted after an instruction is emitted, rather than before. + { + Assembler::BlockConstPoolScope block_const_pool(masm); + __ bind(&handler_entry); + handler_offset_ = handler_entry.pos(); + // Caught exception: Store result (exception) in the pending exception + // field in the JSEnv and return a failure sentinel. Coming in here the + // fp will be invalid because the PushTryHandler below sets it to 0 to + // signal the existence of the JSEntry frame. + __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, + isolate))); + } __ str(r0, MemOperand(ip)); __ mov(r0, Operand(reinterpret_cast(Failure::Exception()))); __ b(&exit); @@ -4006,9 +4017,13 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // Branch and link to JSEntryTrampoline. We don't use the double underscore // macro for the add instruction because we don't want the coverage tool - // inserting instructions here after we read the pc. - __ mov(lr, Operand(pc)); - masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); + // inserting instructions here after we read the pc. We block literal pool + // emission for the same reason. + { + Assembler::BlockConstPoolScope block_const_pool(masm); + __ mov(lr, Operand(pc)); + masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); + } // Unlink this frame from the handler chain. __ PopTryHandler(); @@ -4824,27 +4839,32 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2); // Isolates: note we add an additional parameter here (isolate pointer). 
- const int kRegExpExecuteArguments = 8; + const int kRegExpExecuteArguments = 9; const int kParameterRegisters = 4; __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); // Stack pointer now points to cell where return address is to be written. // Arguments are before that on the stack or in registers. - // Argument 8 (sp[16]): Pass current isolate address. + // Argument 9 (sp[20]): Pass current isolate address. __ mov(r0, Operand(ExternalReference::isolate_address())); - __ str(r0, MemOperand(sp, 4 * kPointerSize)); + __ str(r0, MemOperand(sp, 5 * kPointerSize)); - // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript. + // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript. __ mov(r0, Operand(1)); - __ str(r0, MemOperand(sp, 3 * kPointerSize)); + __ str(r0, MemOperand(sp, 4 * kPointerSize)); - // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area. + // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area. __ mov(r0, Operand(address_of_regexp_stack_memory_address)); __ ldr(r0, MemOperand(r0, 0)); __ mov(r2, Operand(address_of_regexp_stack_memory_size)); __ ldr(r2, MemOperand(r2, 0)); __ add(r0, r0, Operand(r2)); + __ str(r0, MemOperand(sp, 3 * kPointerSize)); + + // Argument 6: Set the number of capture registers to zero to force global + // regexps to behave as non-global. This does not affect non-global regexps. + __ mov(r0, Operand(0)); __ str(r0, MemOperand(sp, 2 * kPointerSize)); // Argument 5 (sp[4]): static offsets vector buffer. @@ -4893,7 +4913,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Check the result. Label success; - __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS)); + __ cmp(r0, Operand(1)); + // We expect exactly one result since we force the called regexp to behave + // as non-global. __ b(eq, &success); Label failure; __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE)); @@ -5169,9 +5191,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { __ CompareRoot(r4, Heap::kTheHoleValueRootIndex); __ b(ne, &call); // Patch the receiver on the stack with the global receiver object. - __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset)); - __ str(r2, MemOperand(sp, argc_ * kPointerSize)); + __ ldr(r3, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset)); + __ str(r3, MemOperand(sp, argc_ * kPointerSize)); __ bind(&call); } @@ -5179,9 +5201,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { // r1: pushed function (to be verified) __ JumpIfSmi(r1, &non_function); // Get the map of the function object. - __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); + __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE); __ b(ne, &slow); + if (RecordCallTarget()) { + GenerateRecordCallTarget(masm); + } + // Fast-case: Invoke the function now. // r1: pushed function ParameterCount actual(argc_); @@ -5205,8 +5231,17 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { // Slow-case: Non-function called. __ bind(&slow); + if (RecordCallTarget()) { + // If there is a call target cache, mark it megamorphic in the + // non-function case. MegamorphicSentinel is an immortal immovable + // object (undefined) so no write barrier is needed. 
+ ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()), + masm->isolate()->heap()->undefined_value()); + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset)); + } // Check for function proxy. - __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE)); + __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE)); __ b(ne, &non_function); __ push(r1); // put proxy as additional argument __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE)); @@ -5873,36 +5908,12 @@ void SubStringStub::Generate(MacroAssembler* masm) { // r2: result string length __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset)); __ cmp(r2, Operand(r4, ASR, 1)); + // Return original string. __ b(eq, &return_r0); + // Longer than original string's length or negative: unsafe arguments. + __ b(hi, &runtime); + // Shorter than original string's length: an actual substring. - Label result_longer_than_two; - // Check for special case of two character ASCII string, in which case - // we do a lookup in the symbol table first. - __ cmp(r2, Operand(2)); - __ b(gt, &result_longer_than_two); - __ b(lt, &runtime); - - __ JumpIfInstanceTypeIsNotSequentialAscii(r1, r1, &runtime); - - // Get the two characters forming the sub string. - __ add(r0, r0, Operand(r3)); - __ ldrb(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); - __ ldrb(r4, FieldMemOperand(r0, SeqAsciiString::kHeaderSize + 1)); - - // Try to lookup two character string in symbol table. - Label make_two_character_string; - StringHelper::GenerateTwoCharacterSymbolTableProbe( - masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string); - __ jmp(&return_r0); - - // r2: result string length. - // r3: two characters combined into halfword in little endian byte order. - __ bind(&make_two_character_string); - __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime); - __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); - __ jmp(&return_r0); - - __ bind(&result_longer_than_two); // Deal with different string types: update the index if necessary // and put the underlying string into r5. // r0: original string @@ -6816,6 +6827,10 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) { __ mov(lr, Operand(reinterpret_cast(GetCode().location()), RelocInfo::CODE_TARGET)); + + // Prevent literal pool emission during calculation of return address. + Assembler::BlockConstPoolScope block_const_pool(masm); + // Push return address (accessible to GC through exit frame pc). // Note that using pc with str is deprecated. Label start; @@ -7106,8 +7121,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { // KeyedStoreStubCompiler::GenerateStoreFastElement. { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET }, { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET }, - // ElementsTransitionGenerator::GenerateSmiOnlyToObject - // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble + // ElementsTransitionGenerator::GenerateMapChangeElementTransition + // and ElementsTransitionGenerator::GenerateSmiToDouble // and ElementsTransitionGenerator::GenerateDoubleToObject { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET }, { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET }, @@ -7176,8 +7191,13 @@ void RecordWriteStub::Generate(MacroAssembler* masm) { // forth between a compare instructions (a nop in this position) and the // real branch when we start and stop incremental heap marking. // See RecordWriteStub::Patch for details. 
- __ b(&skip_to_incremental_noncompacting); - __ b(&skip_to_incremental_compacting); + { + // Block literal pool emission, as the position of these two instructions + // is assumed by the patching code. + Assembler::BlockConstPoolScope block_const_pool(masm); + __ b(&skip_to_incremental_noncompacting); + __ b(&skip_to_incremental_compacting); + } if (remembered_set_action_ == EMIT_REMEMBERED_SET) { __ RememberedSetHelper(object_, @@ -7370,9 +7390,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { Label fast_elements; __ CheckFastElements(r2, r5, &double_elements); - // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS + // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS __ JumpIfSmi(r0, &smi_element); - __ CheckFastSmiOnlyElements(r2, r5, &fast_elements); + __ CheckFastSmiElements(r2, r5, &fast_elements); // Store into the array literal requires a elements transition. Call into // the runtime. @@ -7384,7 +7404,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { __ Push(r5, r4); __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); - // Array literal has ElementsKind of FAST_ELEMENTS and value is an object. + // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. __ bind(&fast_elements); __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); @@ -7395,8 +7415,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); __ Ret(); - // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or - // FAST_ELEMENTS, and value is Smi. + // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS, + // and value is Smi. __ bind(&smi_element); __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index befd8f2de7..e00afb9035 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -73,7 +73,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { // ------------------------------------------------------------------------- // Code generators -void ElementsTransitionGenerator::GenerateSmiOnlyToObject( +void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( MacroAssembler* masm) { // ----------- S t a t e ------------- // -- r0 : value @@ -96,7 +96,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToObject( } -void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( +void ElementsTransitionGenerator::GenerateSmiToDouble( MacroAssembler* masm, Label* fail) { // ----------- S t a t e ------------- // -- r0 : value diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc index 96139a2597..3e7a1e9d0e 100644 --- a/deps/v8/src/arm/debug-arm.cc +++ b/deps/v8/src/arm/debug-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
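The FAST_SMI_ONLY_ELEMENTS to FAST_SMI_ELEMENTS renamings running through the ARM stubs above, and comments like FAST_*_SMI_ELEMENTS, reflect the finer-grained kind lattice in the newly added elements-kind.h, where each fast kind gains a holey variant. An illustrative sketch of the naming, not the actual header; the debug-arm.cc diff continues below.

  // Illustrative only; the real enum lives in src/elements-kind.h.
  enum ElementsKindSketch {
    FAST_SMI_ELEMENTS,          // packed Smis (was FAST_SMI_ONLY_ELEMENTS)
    FAST_HOLEY_SMI_ELEMENTS,    // Smis, may contain the hole
    FAST_ELEMENTS,              // packed tagged values
    FAST_HOLEY_ELEMENTS,        // tagged values, may contain the hole
    FAST_DOUBLE_ELEMENTS,       // packed unboxed doubles
    FAST_HOLEY_DOUBLE_ELEMENTS  // unboxed doubles, may contain the hole
  };
  // Transitions only generalize: smi -> double -> object, packed -> holey.
  // GenerateSmiToDouble and GenerateMapChangeElementsTransition above
  // implement two such edges.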
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -125,6 +125,8 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() { Assembler::kDebugBreakSlotInstructions); } +const bool Debug::FramePaddingLayout::kIsSupported = false; + #define __ ACCESS_MASM(masm) diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index 69b12ce5ee..ff7c3c139e 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -73,9 +73,6 @@ class JumpPatchSite BASE_EMBEDDED { Assembler::BlockConstPoolScope block_const_pool(masm_); __ bind(&patch_site_); __ cmp(reg, Operand(reg)); - // Don't use b(al, ...) as that might emit the constant pool right after the - // branch. After patching when the branch is no longer unconditional - // execution can continue into the constant pool. __ b(eq, target); // Always taken before patched. } @@ -90,6 +87,8 @@ class JumpPatchSite BASE_EMBEDDED { } void EmitPatchInfo() { + // Block literal pool emission whilst recording patch site information. + Assembler::BlockConstPoolScope block_const_pool(masm_); if (patch_site_.is_bound()) { int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_); Register reg; @@ -112,13 +111,6 @@ class JumpPatchSite BASE_EMBEDDED { }; -// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove. -int FullCodeGenerator::self_optimization_header_size() { - UNREACHABLE(); - return 24; -} - - // Generate code for a JS function. On entry to the function the receiver // and arguments have been pushed on the stack left to right. The actual // argument count matches the formal parameter count expected by the @@ -275,11 +267,11 @@ void FullCodeGenerator::Generate() { // For named function expressions, declare the function name as a // constant. if (scope()->is_function_scope() && scope()->function() != NULL) { - VariableProxy* proxy = scope()->function(); - ASSERT(proxy->var()->mode() == CONST || - proxy->var()->mode() == CONST_HARMONY); - ASSERT(proxy->var()->location() != Variable::UNALLOCATED); - EmitDeclaration(proxy, proxy->var()->mode(), NULL); + VariableDeclaration* function = scope()->function(); + ASSERT(function->proxy()->var()->mode() == CONST || + function->proxy()->var()->mode() == CONST_HARMONY); + ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED); + VisitVariableDeclaration(function); } VisitDeclarations(scope()->declarations()); } @@ -351,6 +343,8 @@ static const int kBackEdgeDistanceDivisor = 142; void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt, Label* back_edge_target) { Comment cmnt(masm_, "[ Stack check"); + // Block literal pools whilst emitting stack check code. + Assembler::BlockConstPoolScope block_const_pool(masm_); Label ok; if (FLAG_count_based_interrupts) { @@ -789,62 +783,52 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr, } -void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, - VariableMode mode, - FunctionLiteral* function) { +void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) { + // The variable in the declaration always resides in the current function + // context. + ASSERT_EQ(0, scope()->ContextChainLength(variable->scope())); + if (FLAG_debug_code) { + // Check that we're not inside a with or catch context. 
+ __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset)); + __ CompareRoot(r1, Heap::kWithContextMapRootIndex); + __ Check(ne, "Declaration in with context."); + __ CompareRoot(r1, Heap::kCatchContextMapRootIndex); + __ Check(ne, "Declaration in catch context."); + } +} + + +void FullCodeGenerator::VisitVariableDeclaration( + VariableDeclaration* declaration) { // If it was not possible to allocate the variable at compile time, we // need to "declare" it at runtime to make sure it actually exists in the // local context. + VariableProxy* proxy = declaration->proxy(); + VariableMode mode = declaration->mode(); Variable* variable = proxy->var(); - bool binding_needs_init = (function == NULL) && - (mode == CONST || mode == CONST_HARMONY || mode == LET); + bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET; switch (variable->location()) { case Variable::UNALLOCATED: - ++global_count_; + globals_->Add(variable->name(), zone()); + globals_->Add(variable->binding_needs_init() + ? isolate()->factory()->the_hole_value() + : isolate()->factory()->undefined_value(), + zone()); break; case Variable::PARAMETER: case Variable::LOCAL: - if (function != NULL) { - Comment cmnt(masm_, "[ Declaration"); - VisitForAccumulatorValue(function); - __ str(result_register(), StackOperand(variable)); - } else if (binding_needs_init) { - Comment cmnt(masm_, "[ Declaration"); + if (hole_init) { + Comment cmnt(masm_, "[ VariableDeclaration"); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ str(ip, StackOperand(variable)); } break; case Variable::CONTEXT: - // The variable in the decl always resides in the current function - // context. - ASSERT_EQ(0, scope()->ContextChainLength(variable->scope())); - if (FLAG_debug_code) { - // Check that we're not inside a with or catch context. - __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset)); - __ CompareRoot(r1, Heap::kWithContextMapRootIndex); - __ Check(ne, "Declaration in with context."); - __ CompareRoot(r1, Heap::kCatchContextMapRootIndex); - __ Check(ne, "Declaration in catch context."); - } - if (function != NULL) { - Comment cmnt(masm_, "[ Declaration"); - VisitForAccumulatorValue(function); - __ str(result_register(), ContextOperand(cp, variable->index())); - int offset = Context::SlotOffset(variable->index()); - // We know that we have written a function, which is not a smi. - __ RecordWriteContextSlot(cp, - offset, - result_register(), - r2, - kLRHasBeenSaved, - kDontSaveFPRegs, - EMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); - PrepareForBailoutForId(proxy->id(), NO_REGISTERS); - } else if (binding_needs_init) { - Comment cmnt(masm_, "[ Declaration"); + if (hole_init) { + Comment cmnt(masm_, "[ VariableDeclaration"); + EmitDebugCheckDeclarationContext(variable); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ str(ip, ContextOperand(cp, variable->index())); // No write barrier since the_hole_value is in old space. @@ -853,13 +837,11 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, break; case Variable::LOOKUP: { - Comment cmnt(masm_, "[ Declaration"); + Comment cmnt(masm_, "[ VariableDeclaration"); __ mov(r2, Operand(variable->name())); // Declaration nodes are always introduced in one of four modes. - ASSERT(mode == VAR || - mode == CONST || - mode == CONST_HARMONY || - mode == LET); + ASSERT(mode == VAR || mode == LET || + mode == CONST || mode == CONST_HARMONY); PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY) ? 
READ_ONLY : NONE; __ mov(r1, Operand(Smi::FromInt(attr))); @@ -867,11 +849,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, // Note: For variables we must not push an initial value (such as // 'undefined') because we may have a (legal) redeclaration and we // must not destroy the current value. - if (function != NULL) { - __ Push(cp, r2, r1); - // Push initial value for function declaration. - VisitForStackValue(function); - } else if (binding_needs_init) { + if (hole_init) { __ LoadRoot(r0, Heap::kTheHoleValueRootIndex); __ Push(cp, r2, r1, r0); } else { @@ -885,6 +863,122 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, } +void FullCodeGenerator::VisitFunctionDeclaration( + FunctionDeclaration* declaration) { + VariableProxy* proxy = declaration->proxy(); + Variable* variable = proxy->var(); + switch (variable->location()) { + case Variable::UNALLOCATED: { + globals_->Add(variable->name(), zone()); + Handle function = + Compiler::BuildFunctionInfo(declaration->fun(), script()); + // Check for stack-overflow exception. + if (function.is_null()) return SetStackOverflow(); + globals_->Add(function, zone()); + break; + } + + case Variable::PARAMETER: + case Variable::LOCAL: { + Comment cmnt(masm_, "[ FunctionDeclaration"); + VisitForAccumulatorValue(declaration->fun()); + __ str(result_register(), StackOperand(variable)); + break; + } + + case Variable::CONTEXT: { + Comment cmnt(masm_, "[ FunctionDeclaration"); + EmitDebugCheckDeclarationContext(variable); + VisitForAccumulatorValue(declaration->fun()); + __ str(result_register(), ContextOperand(cp, variable->index())); + int offset = Context::SlotOffset(variable->index()); + // We know that we have written a function, which is not a smi. + __ RecordWriteContextSlot(cp, + offset, + result_register(), + r2, + kLRHasBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + PrepareForBailoutForId(proxy->id(), NO_REGISTERS); + break; + } + + case Variable::LOOKUP: { + Comment cmnt(masm_, "[ FunctionDeclaration"); + __ mov(r2, Operand(variable->name())); + __ mov(r1, Operand(Smi::FromInt(NONE))); + __ Push(cp, r2, r1); + // Push initial value for function declaration. 
+ VisitForStackValue(declaration->fun()); + __ CallRuntime(Runtime::kDeclareContextSlot, 4); + break; + } + } +} + + +void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) { + VariableProxy* proxy = declaration->proxy(); + Variable* variable = proxy->var(); + Handle instance = declaration->module()->interface()->Instance(); + ASSERT(!instance.is_null()); + + switch (variable->location()) { + case Variable::UNALLOCATED: { + Comment cmnt(masm_, "[ ModuleDeclaration"); + globals_->Add(variable->name(), zone()); + globals_->Add(instance, zone()); + Visit(declaration->module()); + break; + } + + case Variable::CONTEXT: { + Comment cmnt(masm_, "[ ModuleDeclaration"); + EmitDebugCheckDeclarationContext(variable); + __ mov(r1, Operand(instance)); + __ str(r1, ContextOperand(cp, variable->index())); + Visit(declaration->module()); + break; + } + + case Variable::PARAMETER: + case Variable::LOCAL: + case Variable::LOOKUP: + UNREACHABLE(); + } +} + + +void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) { + VariableProxy* proxy = declaration->proxy(); + Variable* variable = proxy->var(); + switch (variable->location()) { + case Variable::UNALLOCATED: + // TODO(rossberg) + break; + + case Variable::CONTEXT: { + Comment cmnt(masm_, "[ ImportDeclaration"); + EmitDebugCheckDeclarationContext(variable); + // TODO(rossberg) + break; + } + + case Variable::PARAMETER: + case Variable::LOCAL: + case Variable::LOOKUP: + UNREACHABLE(); + } +} + + +void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) { + // TODO(rossberg) +} + + void FullCodeGenerator::DeclareGlobals(Handle pairs) { // Call the runtime to declare the globals. // The context is the first argument. @@ -1511,7 +1605,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { // Mark all computed expressions that are bound to a key that // is shadowed by a later occurrence of the same key. For the // marked expressions, no store code is emitted. - expr->CalculateEmitStore(); + expr->CalculateEmitStore(zone()); AccessorTable accessor_table(isolate()->zone()); for (int i = 0; i < expr->properties()->length(); i++) { @@ -1609,7 +1703,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { ASSERT_EQ(2, constant_elements->length()); ElementsKind constant_elements_kind = static_cast(Smi::cast(constant_elements->get(0))->value()); - bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS; + bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind); Handle constant_elements_values( FixedArrayBase::cast(constant_elements->get(1))); @@ -1630,8 +1724,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3); } else { - ASSERT(constant_elements_kind == FAST_ELEMENTS || - constant_elements_kind == FAST_SMI_ONLY_ELEMENTS || + ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) || FLAG_smi_only_arrays); FastCloneShallowArrayStub::Mode mode = has_fast_elements ? FastCloneShallowArrayStub::CLONE_ELEMENTS @@ -1659,7 +1752,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { } VisitForAccumulatorValue(subexpr); - if (constant_elements_kind == FAST_ELEMENTS) { + if (IsFastObjectElementsKind(constant_elements_kind)) { int offset = FixedArray::kHeaderSize + (i * kPointerSize); __ ldr(r6, MemOperand(sp)); // Copy of array literal. 
__ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset)); @@ -2271,6 +2364,18 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) { } // Record source position for debugger. SetSourcePosition(expr->position()); + + // Record call targets in unoptimized code, but not in the snapshot. + if (!Serializer::enabled()) { + flags = static_cast(flags | RECORD_CALL_TARGET); + Handle uninitialized = + TypeFeedbackCells::UninitializedSentinel(isolate()); + Handle cell = + isolate()->factory()->NewJSGlobalPropertyCell(uninitialized); + RecordTypeFeedbackCell(expr->id(), cell); + __ mov(r2, Operand(cell)); + } + CallFunctionStub stub(arg_count, flags); __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); __ CallStub(&stub); @@ -3564,7 +3669,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout); __ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset)); - __ add(string_length, string_length, Operand(scratch1)); + __ add(string_length, string_length, Operand(scratch1), SetCC); __ b(vs, &bailout); __ cmp(element, elements_end); __ b(lt, &loop); @@ -3601,7 +3706,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ b(ne, &bailout); __ tst(scratch2, Operand(0x80000000)); __ b(ne, &bailout); - __ add(string_length, string_length, Operand(scratch2)); + __ add(string_length, string_length, Operand(scratch2), SetCC); __ b(vs, &bailout); __ SmiUntag(string_length); @@ -4357,7 +4462,8 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) { void FullCodeGenerator::PushFunctionArgumentForContextAllocation() { Scope* declaration_scope = scope()->DeclarationScope(); - if (declaration_scope->is_global_scope()) { + if (declaration_scope->is_global_scope() || + declaration_scope->is_module_scope()) { // Contexts nested in the global context have a canonical empty function // as their closure, not the anonymous closure containing the global // code. Pass a smi sentinel and let the runtime look up the empty @@ -4388,14 +4494,55 @@ void FullCodeGenerator::EnterFinallyBlock() { ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize); STATIC_ASSERT(kSmiTag == 0); __ add(r1, r1, Operand(r1)); // Convert to smi. + + // Store result register while executing finally block. + __ push(r1); + + // Store pending message while executing finally block. + ExternalReference pending_message_obj = + ExternalReference::address_of_pending_message_obj(isolate()); + __ mov(ip, Operand(pending_message_obj)); + __ ldr(r1, MemOperand(ip)); + __ push(r1); + + ExternalReference has_pending_message = + ExternalReference::address_of_has_pending_message(isolate()); + __ mov(ip, Operand(has_pending_message)); + __ ldr(r1, MemOperand(ip)); + __ push(r1); + + ExternalReference pending_message_script = + ExternalReference::address_of_pending_message_script(isolate()); + __ mov(ip, Operand(pending_message_script)); + __ ldr(r1, MemOperand(ip)); __ push(r1); } void FullCodeGenerator::ExitFinallyBlock() { ASSERT(!result_register().is(r1)); + // Restore pending message from stack. 
+ __ pop(r1); + ExternalReference pending_message_script = + ExternalReference::address_of_pending_message_script(isolate()); + __ mov(ip, Operand(pending_message_script)); + __ str(r1, MemOperand(ip)); + + __ pop(r1); + ExternalReference has_pending_message = + ExternalReference::address_of_has_pending_message(isolate()); + __ mov(ip, Operand(has_pending_message)); + __ str(r1, MemOperand(ip)); + + __ pop(r1); + ExternalReference pending_message_obj = + ExternalReference::address_of_pending_message_obj(isolate()); + __ mov(ip, Operand(pending_message_obj)); + __ str(r1, MemOperand(ip)); + // Restore result register from stack. __ pop(r1); + // Uncook return address and return. __ pop(result_register()); ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize); diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index c88c257092..fd93480986 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -1249,7 +1249,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) { // Must return the modified receiver in r0. if (!FLAG_trace_elements_transitions) { Label fail; - ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail); __ mov(r0, r2); __ Ret(); __ bind(&fail); @@ -1462,27 +1462,27 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex); __ b(ne, &non_double_value); - // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS -> + // Value is a double. Transition FAST_SMI_ELEMENTS -> // FAST_DOUBLE_ELEMENTS and complete the store. - __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r4, &slow); ASSERT(receiver_map.is(r3)); // Transition code expects map in r3 - ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow); __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&fast_double_without_map_check); __ bind(&non_double_value); - // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS - __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, receiver_map, r4, &slow); ASSERT(receiver_map.is(r3)); // Transition code expects map in r3 - ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm); + ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm); __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&finish_object_store); @@ -1690,12 +1690,12 @@ void CompareIC::UpdateCaches(Handle x, Handle y) { // Activate inlined smi code. 
@@ -1690,12 +1690,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) { // Activate inlined smi code. if (previous_state == UNINITIALIZED) { - PatchInlinedSmiCode(address()); + PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK); } } -void PatchInlinedSmiCode(Address address) { +void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { Address cmp_instruction_address = address + Assembler::kCallTargetAddressOffset; @@ -1729,34 +1729,31 @@ void PatchInlinedSmiCode(Address address) { Instr instr_at_patch = Assembler::instr_at(patch_address); Instr branch_instr = Assembler::instr_at(patch_address + Instruction::kInstrSize); - ASSERT(Assembler::IsCmpRegister(instr_at_patch)); - ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(), - Assembler::GetRm(instr_at_patch).code()); + // This is patching a conditional "jump if not smi/jump if smi" site. + // Enabling by changing from + // cmp rx, rx + // b eq/ne, <target> + // to + // tst rx, #kSmiTagMask + // b ne/eq, <target> + // and vice-versa to be disabled again. + CodePatcher patcher(patch_address, 2); + Register reg = Assembler::GetRn(instr_at_patch); + if (check == ENABLE_INLINED_SMI_CHECK) { + ASSERT(Assembler::IsCmpRegister(instr_at_patch)); + ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(), + Assembler::GetRm(instr_at_patch).code()); + patcher.masm()->tst(reg, Operand(kSmiTagMask)); + } else { + ASSERT(check == DISABLE_INLINED_SMI_CHECK); + ASSERT(Assembler::IsTstImmediate(instr_at_patch)); + patcher.masm()->cmp(reg, reg); + } ASSERT(Assembler::IsBranch(branch_instr)); if (Assembler::GetCondition(branch_instr) == eq) { - // This is patching a "jump if not smi" site to be active. - // Changing - // cmp rx, rx - // b eq, <target> - // to - // tst rx, #kSmiTagMask - // b ne, <target> - CodePatcher patcher(patch_address, 2); - Register reg = Assembler::GetRn(instr_at_patch); - patcher.masm()->tst(reg, Operand(kSmiTagMask)); patcher.EmitCondition(ne); } else { ASSERT(Assembler::GetCondition(branch_instr) == ne); - // This is patching a "jump if smi" site to be active. - // Changing - // cmp rx, rx - // b ne, <target> - // to - // tst rx, #kSmiTagMask - // b eq, <target> - CodePatcher patcher(patch_address, 2); - Register reg = Assembler::GetRn(instr_at_patch); - patcher.masm()->tst(reg, Operand(kSmiTagMask)); patcher.EmitCondition(eq); } }
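The patch toggles the site between cmp rx, rx (always equal, check disabled) and tst rx, #kSmiTagMask (check enabled). The tst works because of V8's 32-bit smi encoding (kSmiTag == 0, one tag bit), sketched here in C++ (sketch, not V8 API):

  #include <cstdint>

  const intptr_t kSmiTagMaskSketch = 1;  // low bit holds the tag

  intptr_t SmiTagValue(int32_t value) { return (intptr_t)value << 1; }  // tag bit stays 0
  int32_t SmiUntagValue(intptr_t tagged) { return (int32_t)(tagged >> 1); }
  bool IsSmi(intptr_t tagged) { return (tagged & kSmiTagMaskSketch) == 0; }

  // "tst rx, #kSmiTagMask" computes rx & 1, so Z is set exactly for smis:
  // "b eq" is then "jump if smi" and "b ne" is "jump if not smi".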
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index c3dd1cbaa2..283862c787 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -108,22 +108,17 @@ void LInstruction::PrintTo(StringStream* stream) { } -template<int R, int I, int T> -void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) { +void LInstruction::PrintDataTo(StringStream* stream) { stream->Add("= "); - for (int i = 0; i < inputs_.length(); i++) { + for (int i = 0; i < InputCount(); i++) { if (i > 0) stream->Add(" "); - inputs_[i]->PrintTo(stream); + InputAt(i)->PrintTo(stream); } } -template<int R, int I, int T> -void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) { - for (int i = 0; i < results_.length(); i++) { - if (i > 0) stream->Add(" "); - results_[i]->PrintTo(stream); - } +void LInstruction::PrintOutputOperandTo(StringStream* stream) { + if (HasResult()) result()->PrintTo(stream); } @@ -416,9 +411,9 @@ LChunk::LChunk(CompilationInfo* info, HGraph* graph) : spill_slot_count_(0), info_(info), graph_(graph), - instructions_(32), - pointer_maps_(8), - inlined_closures_(1) { + instructions_(32, graph->zone()), + pointer_maps_(8, graph->zone()), + inlined_closures_(1, graph->zone()) { } @@ -432,9 +427,9 @@ int LChunk::GetNextSpillIndex(bool is_double) { LOperand* LChunk::GetNextSpillSlot(bool is_double) { int index = GetNextSpillIndex(is_double); if (is_double) { - return LDoubleStackSlot::Create(index); + return LDoubleStackSlot::Create(index, zone()); } else { - return LStackSlot::Create(index); + return LStackSlot::Create(index, zone()); } } @@ -479,23 +474,23 @@ void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) { LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block); int index = -1; if (instr->IsControl()) { - instructions_.Add(gap); + instructions_.Add(gap, zone()); index = instructions_.length(); - instructions_.Add(instr); + instructions_.Add(instr, zone()); } else { index = instructions_.length(); - instructions_.Add(instr); - instructions_.Add(gap); + instructions_.Add(instr, zone()); + instructions_.Add(gap, zone()); } if (instr->HasPointerMap()) { - pointer_maps_.Add(instr->pointer_map()); + pointer_maps_.Add(instr->pointer_map(), zone()); instr->pointer_map()->set_lithium_position(index); } } LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) { - return LConstantOperand::Create(constant->id()); + return LConstantOperand::Create(constant->id(), zone()); } @@ -534,7 +529,8 @@ int LChunk::NearestGapPos(int index) const { void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) { - GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to); + GetGapAt(index)->GetOrCreateParallelMove( + LGap::START, zone())->AddMove(from, to, zone()); } @@ -732,22 +728,6 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { } -LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment( - LInstruction* instr, int ast_id) { - ASSERT(instruction_pending_deoptimization_environment_ == NULL); - ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber); - instruction_pending_deoptimization_environment_ = instr; - pending_deoptimization_ast_id_ = ast_id; - return instr; -} - - -void
LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() { - instruction_pending_deoptimization_environment_ = NULL; - pending_deoptimization_ast_id_ = AstNode::kNoNumber; -} - - LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, HInstruction* hinstr, CanDeoptimize can_deoptimize) { @@ -760,8 +740,10 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, if (hinstr->HasObservableSideEffects()) { ASSERT(hinstr->next()->IsSimulate()); HSimulate* sim = HSimulate::cast(hinstr->next()); - instr = SetInstructionPendingDeoptimizationEnvironment( - instr, sim->ast_id()); + ASSERT(instruction_pending_deoptimization_environment_ == NULL); + ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber); + instruction_pending_deoptimization_environment_ = instr; + pending_deoptimization_ast_id_ = sim->ast_id(); } // If instruction does not have side-effects lazy deoptimization @@ -779,15 +761,9 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, } -LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) { - instr->MarkAsSaveDoubles(); - return instr; -} - - LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { ASSERT(!instr->HasPointerMap()); - instr->set_pointer_map(new(zone()) LPointerMap(position_)); + instr->set_pointer_map(new(zone()) LPointerMap(position_, zone())); return instr; } @@ -1010,7 +986,8 @@ LEnvironment* LChunkBuilder::CreateEnvironment( hydrogen_env->parameter_count(), argument_count_, value_count, - outer); + outer, + zone()); int argument_index = *argument_index_accumulator; for (int i = 0; i < value_count; ++i) { if (hydrogen_env->is_special_index(i)) continue; @@ -1295,6 +1272,7 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) { LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) { ASSERT(instr->value()->representation().IsInteger32()); ASSERT(instr->representation().IsInteger32()); + if (instr->HasNoUses()) return NULL; LOperand* value = UseRegisterAtStart(instr->value()); return DefineAsRegister(new(zone()) LBitNotI(value)); } @@ -1319,6 +1297,76 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { } +bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) { + uint32_t divisor_abs = abs(divisor); + // Dividing by 0, 1, and powers of 2 is easy. + // Note that IsPowerOf2(0) returns true; + ASSERT(IsPowerOf2(0) == true); + if (IsPowerOf2(divisor_abs)) return true; + + // We have magic numbers for a few specific divisors. + // Details and proofs can be found in: + // - Hacker's Delight, Henry S. Warren, Jr. + // - The PowerPC Compiler Writer’s Guide + // and probably many others. + // + // We handle + // <divisor with magic numbers> * <power of 2> + // but not + // <divisor without magic numbers> * <power of 2> + int32_t power_of_2_factor = + CompilerIntrinsics::CountTrailingZeros(divisor_abs); + DivMagicNumbers magic_numbers = + DivMagicNumberFor(divisor_abs >> power_of_2_factor); + if (magic_numbers.M != InvalidDivMagicNumber.M) return true; + + return false; +}
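For reference, the magic-number technique this predicate gates replaces division by a constant with a multiply and shifts (see the Hacker's Delight citation above). A worked C++ example for the single divisor 5; the pair (M, s) is hard-coded for illustration, whereas the code above looks such pairs up via DivMagicNumberFor:

  #include <cstdint>

  int32_t DivideBy5(int32_t n) {
    const int64_t M = 0x66666667;  // magic multiplier for divisor 5
    const int s = 1;               // post-shift for divisor 5
    int32_t q = (int32_t)((n * M) >> (32 + s));  // high half of the product, shifted
    return q + ((uint32_t)n >> 31);              // bias negative dividends toward zero
  }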
+ + +HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) { + // A value with an integer representation does not need to be transformed. + if (dividend->representation().IsInteger32()) { + return dividend; + // A change from an integer32 can be replaced by the integer32 value. + } else if (dividend->IsChange() && + HChange::cast(dividend)->from().IsInteger32()) { + return HChange::cast(dividend)->value(); + } + return NULL; +} + + +HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) { + // Only optimize when we have magic numbers for the divisor. + // The standard integer division routine is usually slower than transitioning + // to VFP. + if (divisor->IsConstant() && + HConstant::cast(divisor)->HasInteger32Value()) { + HConstant* constant_val = HConstant::cast(divisor); + int32_t int32_val = constant_val->Integer32Value(); + if (LChunkBuilder::HasMagicNumberForDivisor(int32_val)) { + return constant_val->CopyToRepresentation(Representation::Integer32(), + divisor->block()->zone()); + } + } + return NULL; +} + + +LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { + HValue* right = instr->right(); + LOperand* dividend = UseRegister(instr->left()); + LOperand* divisor = UseRegisterOrConstant(right); + LOperand* remainder = TempRegister(); + ASSERT(right->IsConstant() && + HConstant::cast(right)->HasInteger32Value() && + HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value())); + return AssignEnvironment(DefineAsRegister( + new(zone()) LMathFloorOfDiv(dividend, divisor, remainder))); +} + + LInstruction* LChunkBuilder::DoMod(HMod* instr) { if (instr->representation().IsInteger32()) { ASSERT(instr->left()->representation().IsInteger32()); @@ -1612,7 +1660,8 @@ LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) { LInstruction* LChunkBuilder::DoDateField(HDateField* instr) { LOperand* object = UseFixed(instr->value(), r0); - LDateField* result = new LDateField(object, FixedTemp(r1), instr->index()); + LDateField* result = + new(zone()) LDateField(object, FixedTemp(r1), instr->index()); return MarkAsCall(DefineFixed(result, r0), instr); } @@ -1661,10 +1710,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { } else { ASSERT(to.IsInteger32()); LOperand* value = UseRegisterAtStart(instr->value()); - bool needs_check = !instr->value()->type().IsSmi(); LInstruction* res = NULL; - if (!needs_check) { - res = DefineAsRegister(new(zone()) LSmiUntag(value, needs_check)); + if (instr->value()->type().IsSmi()) { + res = DefineAsRegister(new(zone()) LSmiUntag(value, false)); } else { LOperand* temp1 = TempRegister(); LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister() @@ -1753,9 +1801,9 @@ LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) { } -LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) { +LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) { LOperand* value = UseRegisterAtStart(instr->value()); - LInstruction* result = new(zone()) LCheckMap(value); + LInstruction* result = new(zone()) LCheckMaps(value); return AssignEnvironment(result); } @@ -2037,8 +2085,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LInstruction* LChunkBuilder::DoTransitionElementsKind( HTransitionElementsKind* instr) { - if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS && - instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) { + ElementsKind from_kind = instr->original_map()->elements_kind(); + ElementsKind to_kind = instr->transitioned_map()->elements_kind(); + if (IsSimpleMapChangeTransition(from_kind, to_kind)) { LOperand* object = UseRegister(instr->object()); LOperand* new_map_reg = TempRegister(); LTransitionElementsKind* result = @@ -2059,16 +2108,28 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind( LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { bool needs_write_barrier = instr->NeedsWriteBarrier(); - - LOperand* obj = needs_write_barrier - ?
UseTempRegister(instr->object()) - : UseRegisterAtStart(instr->object()); + bool needs_write_barrier_for_map = !instr->transition().is_null() && + instr->NeedsWriteBarrierForMap(); + + LOperand* obj; + if (needs_write_barrier) { + obj = instr->is_in_object() + ? UseRegister(instr->object()) + : UseTempRegister(instr->object()); + } else { + obj = needs_write_barrier_for_map + ? UseRegister(instr->object()) + : UseRegisterAtStart(instr->object()); + } LOperand* val = needs_write_barrier ? UseTempRegister(instr->value()) : UseRegister(instr->value()); - return new(zone()) LStoreNamedField(obj, val); + // We need a temporary register for write barrier of the map field. + LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL; + + return new(zone()) LStoreNamedField(obj, val, temp); } @@ -2111,7 +2172,8 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) { LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) { - LAllocateObject* result = new LAllocateObject(TempRegister(), TempRegister()); + LAllocateObject* result = + new(zone()) LAllocateObject(TempRegister(), TempRegister()); return AssignPointerMap(DefineAsRegister(result)); } @@ -2242,9 +2304,12 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { if (pending_deoptimization_ast_id_ == instr->ast_id()) { LInstruction* result = new(zone()) LLazyBailout; result = AssignEnvironment(result); + // Store the lazy deopt environment with the instruction if needed. Right + // now it is only used for LInstanceOfKnownGlobal. instruction_pending_deoptimization_environment_-> - set_deoptimization_environment(result->environment()); - ClearInstructionPendingDeoptimizationEnvironment(); + SetDeferredLazyDeoptimizationEnvironment(result->environment()); + instruction_pending_deoptimization_environment_ = NULL; + pending_deoptimization_ast_id_ = AstNode::kNoNumber; return result; } @@ -2271,8 +2336,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { undefined, instr->call_kind(), instr->is_construct()); - if (instr->arguments() != NULL) { - inner->Bind(instr->arguments(), graph()->GetArgumentsObject()); + if (instr->arguments_var() != NULL) { + inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject()); } current_block_->UpdateEnvironment(inner); chunk_->AddInlinedClosure(instr->closure()); @@ -2281,10 +2346,21 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { + LInstruction* pop = NULL; + + HEnvironment* env = current_block_->last_environment(); + + if (instr->arguments_pushed()) { + int argument_count = env->arguments_environment()->parameter_count(); + pop = new(zone()) LDrop(argument_count); + argument_count_ -= argument_count; + } + HEnvironment* outer = current_block_->last_environment()-> DiscardInlined(false); current_block_->UpdateEnvironment(outer); - return NULL; + + return pop; } diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index 62cde6e249..869a80a280 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -72,7 +72,7 @@ class LCodeGen; V(CheckFunction) \ V(CheckInstanceType) \ V(CheckNonSmi) \ - V(CheckMap) \ + V(CheckMaps) \ V(CheckPrototypeMaps) \ V(CheckSmi) \ V(ClampDToUint8) \ @@ -132,6 +132,7 @@ class LCodeGen; V(LoadNamedField) \ V(LoadNamedFieldPolymorphic) \ V(LoadNamedGeneric) \ + V(MathFloorOfDiv) \ V(ModI) \ V(MulI) \ V(NumberTagD) \ @@ -179,7 +180,8 @@ class LCodeGen; V(CheckMapValue) \ 
V(LoadFieldByIndex) \ V(DateField) \ - V(WrapReceiver) + V(WrapReceiver) \ + V(Drop) #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ @@ -203,15 +205,14 @@ class LInstruction: public ZoneObject { LInstruction() : environment_(NULL), hydrogen_value_(NULL), - is_call_(false), - is_save_doubles_(false) { } + is_call_(false) { } virtual ~LInstruction() { } virtual void CompileToNative(LCodeGen* generator) = 0; virtual const char* Mnemonic() const = 0; virtual void PrintTo(StringStream* stream); - virtual void PrintDataTo(StringStream* stream) = 0; - virtual void PrintOutputOperandTo(StringStream* stream) = 0; + virtual void PrintDataTo(StringStream* stream); + virtual void PrintOutputOperandTo(StringStream* stream); enum Opcode { // Declare a unique enum value for each instruction. @@ -246,22 +247,12 @@ class LInstruction: public ZoneObject { void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; } HValue* hydrogen_value() const { return hydrogen_value_; } - void set_deoptimization_environment(LEnvironment* env) { - deoptimization_environment_.set(env); - } - LEnvironment* deoptimization_environment() const { - return deoptimization_environment_.get(); - } - bool HasDeoptimizationEnvironment() const { - return deoptimization_environment_.is_set(); - } + virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { } void MarkAsCall() { is_call_ = true; } - void MarkAsSaveDoubles() { is_save_doubles_ = true; } // Interface to the register allocator and iterators. bool IsMarkedAsCall() const { return is_call_; } - bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; } virtual bool HasResult() const = 0; virtual LOperand* result() = 0; @@ -282,9 +273,7 @@ class LInstruction: public ZoneObject { LEnvironment* environment_; SetOncePointer<LPointerMap> pointer_map_; HValue* hydrogen_value_; - SetOncePointer<LEnvironment> deoptimization_environment_; bool is_call_; - bool is_save_doubles_; }; @@ -306,9 +295,6 @@ class LTemplateInstruction: public LInstruction { int TempCount() { return T; } LOperand* TempAt(int i) { return temps_[i]; } - virtual void PrintDataTo(StringStream* stream); - virtual void PrintOutputOperandTo(StringStream* stream); - protected: EmbeddedContainer<LOperand*, R> results_; EmbeddedContainer<LOperand*, I> inputs_; @@ -347,8 +333,10 @@ class LGap: public LTemplateInstruction<0, 0, 0> { LAST_INNER_POSITION = AFTER }; - LParallelMove* GetOrCreateParallelMove(InnerPosition pos) { - if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove; + LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) { + if (parallel_moves_[pos] == NULL) { + parallel_moves_[pos] = new(zone) LParallelMove(zone); + } return parallel_moves_[pos]; } @@ -534,9 +522,8 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> { class LArgumentsElements: public LTemplateInstruction<1, 0, 0> { public: - LArgumentsElements() { } - DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements") + DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements) }; @@ -582,6 +569,21 @@ class LDivI: public LTemplateInstruction<1, 2, 0> { }; +class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> { + public: + LMathFloorOfDiv(LOperand* left, + LOperand* right, + LOperand* temp = NULL) { + inputs_[0] = left; + inputs_[1] = right; + temps_[0] = temp; + } + + DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div") + DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv) +}; + + class LMulI: public LTemplateInstruction<1, 2, 1> { public: LMulI(LOperand* left, LOperand* right, LOperand* temp) { @@ -834,6 +836,15
@@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> { DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal) Handle<JSFunction> function() const { return hydrogen()->function(); } + LEnvironment* GetDeferredLazyDeoptimizationEnvironment() { + return lazy_deopt_env_; + } + virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { + lazy_deopt_env_ = env; + } + + private: + LEnvironment* lazy_deopt_env_; }; @@ -1227,6 +1238,7 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> { LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1243,13 +1255,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> { LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> { public: - LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, - LOperand* key) { + LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) { inputs_[0] = external_pointer; inputs_[1] = key; } @@ -1263,6 +1275,7 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> { ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1378,6 +1391,19 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> { }; +class LDrop: public LTemplateInstruction<0, 0, 0> { + public: + explicit LDrop(int count) : count_(count) { } + + int count() const { return count_; } + + DECLARE_CONCRETE_INSTRUCTION(Drop, "drop") + + private: + int count_; +}; + + class LThisFunction: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function") @@ -1460,6 +1486,7 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> { virtual void PrintDataTo(StringStream* stream); int arity() const { return hydrogen()->argument_count() - 1; } + Handle<JSFunction> known_function() { return hydrogen()->known_function(); } }; @@ -1659,11 +1686,12 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> { }; -class LStoreNamedField: public LTemplateInstruction<0, 2, 0> { +class LStoreNamedField: public LTemplateInstruction<0, 2, 1> { public: - LStoreNamedField(LOperand* obj, LOperand* val) { + LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp) { inputs_[0] = obj; inputs_[1] = val; + temps_[0] = temp; } DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") @@ -1717,6 +1745,7 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> { LOperand* object() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1739,6 +1768,9 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> { LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } + + bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } }; @@ -1781,6 +1813,7 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> { ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } + uint32_t additional_index() const
{ return hydrogen()->index_offset(); } }; @@ -1889,14 +1922,14 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> { }; -class LCheckMap: public LTemplateInstruction<0, 1, 0> { +class LCheckMaps: public LTemplateInstruction<0, 1, 0> { public: - explicit LCheckMap(LOperand* value) { + explicit LCheckMaps(LOperand* value) { inputs_[0] = value; } - DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map") - DECLARE_HYDROGEN_ACCESSOR(CheckMap) + DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps") + DECLARE_HYDROGEN_ACCESSOR(CheckMaps) }; @@ -2236,9 +2269,11 @@ class LChunk: public ZoneObject { } void AddInlinedClosure(Handle<JSFunction> closure) { - inlined_closures_.Add(closure); + inlined_closures_.Add(closure, zone()); } + Zone* zone() const { return graph_->zone(); } + private: int spill_slot_count_; CompilationInfo* info_; @@ -2255,7 +2290,7 @@ class LChunkBuilder BASE_EMBEDDED { : chunk_(NULL), info_(info), graph_(graph), - zone_(graph->isolate()->zone()), + zone_(graph->zone()), status_(UNUSED), current_instruction_(NULL), current_block_(NULL), @@ -2274,6 +2309,10 @@ class LChunkBuilder BASE_EMBEDDED { HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) #undef DECLARE_DO + static bool HasMagicNumberForDivisor(int32_t divisor); + static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val); + static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val); + private: enum Status { UNUSED, @@ -2369,11 +2408,6 @@ class LChunkBuilder BASE_EMBEDDED { LInstruction* instr, HInstruction* hinstr, CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY); - LInstruction* MarkAsSaveDoubles(LInstruction* instr); - - LInstruction* SetInstructionPendingDeoptimizationEnvironment( - LInstruction* instr, int ast_id); - void ClearInstructionPendingDeoptimizationEnvironment(); LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env, int* argument_index_accumulator); diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index 82b80a2b80..256d180f2f 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -571,6 +571,9 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code, LInstruction* instr, SafepointMode safepoint_mode) { ASSERT(instr != NULL); + // Block literal pool emission to ensure nop indicating no inlined smi code + // is in the correct position. + Assembler::BlockConstPoolScope block_const_pool(masm()); LPointerMap* pointers = instr->pointer_map(); RecordPosition(pointers->position()); __ Call(code, mode); @@ -631,14 +634,15 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, ++jsframe_count; } } - Translation translation(&translations_, frame_count, jsframe_count); + Translation translation(&translations_, frame_count, jsframe_count, + zone()); WriteTranslation(environment, &translation); int deoptimization_index = deoptimizations_.length(); int pc_offset = masm()->pc_offset(); environment->Register(deoptimization_index, translation.index(), (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); - deoptimizations_.Add(environment); + deoptimizations_.Add(environment, zone()); } } @@ -670,7 +674,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { // jump entry if this is the case.
if (deopt_jump_table_.is_empty() || (deopt_jump_table_.last().address != entry)) { - deopt_jump_table_.Add(JumpTableEntry(entry)); + deopt_jump_table_.Add(JumpTableEntry(entry), zone()); } __ b(cc, &deopt_jump_table_.last().label); } @@ -715,7 +719,7 @@ int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) { for (int i = 0; i < deoptimization_literals_.length(); ++i) { if (deoptimization_literals_[i].is_identical_to(literal)) return i; } - deoptimization_literals_.Add(literal); + deoptimization_literals_.Add(literal, zone()); return result; } @@ -761,14 +765,14 @@ void LCodeGen::RecordSafepoint( for (int i = 0; i < operands->length(); i++) { LOperand* pointer = operands->at(i); if (pointer->IsStackSlot()) { - safepoint.DefinePointerSlot(pointer->index()); + safepoint.DefinePointerSlot(pointer->index(), zone()); } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { - safepoint.DefinePointerRegister(ToRegister(pointer)); + safepoint.DefinePointerRegister(ToRegister(pointer), zone()); } } if (kind & Safepoint::kWithRegisters) { // Register cp always contains a pointer to the context. - safepoint.DefinePointerRegister(cp); + safepoint.DefinePointerRegister(cp, zone()); } } @@ -780,7 +784,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers, void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { - LPointerMap empty_pointers(RelocInfo::kNoPosition); + LPointerMap empty_pointers(RelocInfo::kNoPosition, zone()); RecordSafepoint(&empty_pointers, deopt_mode); } @@ -1034,6 +1038,100 @@ void LCodeGen::DoModI(LModI* instr) { } +void LCodeGen::EmitSignedIntegerDivisionByConstant( + Register result, + Register dividend, + int32_t divisor, + Register remainder, + Register scratch, + LEnvironment* environment) { + ASSERT(!AreAliased(dividend, scratch, ip)); + ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor)); + + uint32_t divisor_abs = abs(divisor); + + int32_t power_of_2_factor = + CompilerIntrinsics::CountTrailingZeros(divisor_abs); + + switch (divisor_abs) { + case 0: + DeoptimizeIf(al, environment); + return; + + case 1: + if (divisor > 0) { + __ Move(result, dividend); + } else { + __ rsb(result, dividend, Operand(0), SetCC); + DeoptimizeIf(vs, environment); + } + // Compute the remainder. + __ mov(remainder, Operand(0)); + return; + + default: + if (IsPowerOf2(divisor_abs)) { + // Branch and condition free code for integer division by a power + // of two. + int32_t power = WhichPowerOf2(divisor_abs); + if (power > 1) { + __ mov(scratch, Operand(dividend, ASR, power - 1)); + } + __ add(scratch, dividend, Operand(scratch, LSR, 32 - power)); + __ mov(result, Operand(scratch, ASR, power)); + // Negate if necessary. + // We don't need to check for overflow because the case '-1' is + // handled separately. + if (divisor < 0) { + ASSERT(divisor != -1); + __ rsb(result, result, Operand(0)); + } + // Compute the remainder. + if (divisor > 0) { + __ sub(remainder, dividend, Operand(result, LSL, power)); + } else { + __ add(remainder, dividend, Operand(result, LSL, power)); + } + return; + } else { + // Use magic numbers for a few specific divisors. + // Details and proofs can be found in: + // - Hacker's Delight, Henry S. Warren, Jr. + // - The PowerPC Compiler Writer’s Guide + // and probably many others. + // + // We handle + // <divisor with magic numbers> * <power of 2> + // but not + // <divisor without magic numbers> * <power of 2> + DivMagicNumbers magic_numbers = + DivMagicNumberFor(divisor_abs >> power_of_2_factor); + // Branch and condition free code for integer division by a power + // of two. + const int32_t M = magic_numbers.M; + const int32_t s = magic_numbers.s + power_of_2_factor; + + __ mov(ip, Operand(M)); + __ smull(ip, scratch, dividend, ip); + if (M < 0) { + __ add(scratch, scratch, Operand(dividend)); + } + if (s > 0) { + __ mov(scratch, Operand(scratch, ASR, s)); + } + __ add(result, scratch, Operand(dividend, LSR, 31)); + if (divisor < 0) __ rsb(result, result, Operand(0)); + // Compute the remainder. + __ mov(ip, Operand(divisor)); + // This sequence could be replaced with 'mls' when + // it gets implemented. + __ mul(scratch, result, ip); + __ sub(remainder, dividend, scratch); + } + } +}
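The power-of-two arm above emits the standard branch-free signed division: a negative dividend is biased by divisor - 1 so that the arithmetic shift truncates toward zero rather than toward minus infinity. Equivalent C++ (sketch; 1 <= power <= 31):

  #include <cstdint>

  int32_t DivideByPowerOfTwo(int32_t n, int power) {
    // n >> 31 is all ones for negative n; keep only its low 'power' bits as the bias.
    int32_t bias = (int32_t)((uint32_t)(n >> 31) >> (32 - power));
    return (n + bias) >> power;  // truncates toward zero, like the emitted code
  }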
void LCodeGen::DoDivI(LDivI* instr) { class DeferredDivI: public LDeferredCode { public: @@ -1096,7 +1194,7 @@ void LCodeGen::DoDivI(LDivI* instr) { // Call the stub. The numbers in r0 and r1 have // to be tagged to Smis. If that is not possible, deoptimize. - DeferredDivI* deferred = new DeferredDivI(this, instr); + DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr); __ TrySmiTag(left, &deoptimize, scratch); __ TrySmiTag(right, &deoptimize, scratch); @@ -1115,6 +1213,34 @@ void LCodeGen::DoDivI(LDivI* instr) { } +void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) { + const Register result = ToRegister(instr->result()); + const Register left = ToRegister(instr->InputAt(0)); + const Register remainder = ToRegister(instr->TempAt(0)); + const Register scratch = scratch0(); + + // We only optimize this for division by constants, because the standard + // integer division routine is usually slower than transitioning to VFP. + // This could be optimized on processors with SDIV available. + ASSERT(instr->InputAt(1)->IsConstantOperand()); + int32_t divisor = ToInteger32(LConstantOperand::cast(instr->InputAt(1))); + if (divisor < 0) { + __ cmp(left, Operand(0)); + DeoptimizeIf(eq, instr->environment()); + } + EmitSignedIntegerDivisionByConstant(result, + left, + divisor, + remainder, + scratch, + instr->environment()); + // We operated a truncating division. Correct the result if necessary. + __ cmp(remainder, Operand(0)); + __ teq(remainder, Operand(divisor), ne); + __ sub(result, result, Operand(1), LeaveCC, mi); +} + +
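DoMathFloorOfDiv needs a flooring quotient while EmitSignedIntegerDivisionByConstant produces a truncating one; the cmp/teq/sub tail above applies the classic fix-up, equivalent to this C++ sketch (divisor != 0 assumed):

  #include <cstdint>

  int32_t FloorDiv(int32_t n, int32_t d) {
    int32_t q = n / d;  // C++ division truncates toward zero
    int32_t r = n - q * d;
    // A non-zero remainder whose sign differs from the divisor's (the "mi"
    // condition after the teq above) means the true quotient must be one lower.
    if (r != 0 && ((r ^ d) < 0)) q -= 1;
    return q;
  }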
template<int T> void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr, Token::Value op) { @@ -1562,6 +1688,9 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { ASSERT(ToRegister(instr->result()).is(r0)); BinaryOpStub stub(instr->op(), NO_OVERWRITE); + // Block literal pool emission to ensure nop indicating no inlined smi code + // is in the correct position. + Assembler::BlockConstPoolScope block_const_pool(masm()); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); __ nop(); // Signals no inlined code. } @@ -2174,7 +2303,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { }; DeferredInstanceOfKnownGlobal* deferred; - deferred = new DeferredInstanceOfKnownGlobal(this, instr); + deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr); Label done, false_result; Register object = ToRegister(instr->InputAt(0)); @@ -2193,20 +2322,25 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { Label cache_miss; Register map = temp; __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); - __ bind(deferred->map_check()); // Label for calculating code patching. - // We use Factory::the_hole_value() on purpose instead of loading from the - // root array to force relocation to be able to later patch with - // the cached map. - Handle<JSGlobalPropertyCell> cell = - factory()->NewJSGlobalPropertyCell(factory()->the_hole_value()); - __ mov(ip, Operand(Handle<Object>(cell))); - __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); - __ cmp(map, Operand(ip)); - __ b(ne, &cache_miss); - // We use Factory::the_hole_value() on purpose instead of loading from the - // root array to force relocation to be able to later patch - // with true or false. - __ mov(result, Operand(factory()->the_hole_value())); + { + // Block constant pool emission to ensure the positions of instructions are + // as expected by the patcher. See InstanceofStub::Generate(). + Assembler::BlockConstPoolScope block_const_pool(masm()); + __ bind(deferred->map_check()); // Label for calculating code patching. + // We use Factory::the_hole_value() on purpose instead of loading from the + // root array to force relocation to be able to later patch with + // the cached map. + Handle<JSGlobalPropertyCell> cell = + factory()->NewJSGlobalPropertyCell(factory()->the_hole_value()); + __ mov(ip, Operand(Handle<Object>(cell))); + __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); + __ cmp(map, Operand(ip)); + __ b(ne, &cache_miss); + // We use Factory::the_hole_value() on purpose instead of loading from the + // root array to force relocation to be able to later patch + // with true or false. + __ mov(result, Operand(factory()->the_hole_value())); + } __ b(&done); // The inlined call site cache did not match. Check null and string before @@ -2267,8 +2401,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, RelocInfo::CODE_TARGET, instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); - ASSERT(instr->HasDeoptimizationEnvironment()); - LEnvironment* env = instr->deoptimization_environment(); + LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); // Put the result value into the result register slot and // restore all registers. @@ -2438,12 +2571,12 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { void LCodeGen::EmitLoadFieldOrConstantFunction(Register result, Register object, Handle<Map> type, - Handle<String> name) { + Handle<String> name, + LEnvironment* env) { LookupResult lookup(isolate()); type->LookupInDescriptors(NULL, *name, &lookup); - ASSERT(lookup.IsFound() && - (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION)); - if (lookup.type() == FIELD) { + ASSERT(lookup.IsFound() || lookup.IsCacheable()); + if (lookup.IsFound() && lookup.type() == FIELD) { int index = lookup.GetLocalFieldIndexFromMap(*type); int offset = index * kPointerSize; if (index < 0) { @@ -2455,9 +2588,23 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result, __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize)); } - } else { + } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) { Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type)); __ LoadHeapObject(result, function); + } else { + // Negative lookup. + // Check prototypes.
+ HeapObject* current = HeapObject::cast((*type)->prototype()); + Heap* heap = type->GetHeap(); + while (current != heap->null_value()) { + Handle<HeapObject> link(current); + __ LoadHeapObject(result, link); + __ ldr(result, FieldMemOperand(result, HeapObject::kMapOffset)); + __ cmp(result, Operand(Handle<Map>(JSObject::cast(current)->map()))); + DeoptimizeIf(ne, env); + current = HeapObject::cast(current->map()->prototype()); + } + __ LoadRoot(result, Heap::kUndefinedValueRootIndex); } } @@ -2465,43 +2612,45 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) { Register object = ToRegister(instr->object()); Register result = ToRegister(instr->result()); - Register scratch = scratch0(); + Register object_map = scratch0(); + int map_count = instr->hydrogen()->types()->length(); + bool need_generic = instr->hydrogen()->need_generic(); + + if (map_count == 0 && !need_generic) { + DeoptimizeIf(al, instr->environment()); + return; + } Handle<String> name = instr->hydrogen()->name(); - if (map_count == 0) { - ASSERT(instr->hydrogen()->need_generic()); - __ mov(r2, Operand(name)); - Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); - CallCode(ic, RelocInfo::CODE_TARGET, instr); - } else { - Label done; - __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); - for (int i = 0; i < map_count - 1; ++i) { - Handle<Map> map = instr->hydrogen()->types()->at(i); + Label done; + __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); + for (int i = 0; i < map_count; ++i) { + bool last = (i == map_count - 1); + Handle<Map> map = instr->hydrogen()->types()->at(i); + Label check_passed; + __ CompareMap( + object_map, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS); + if (last && !need_generic) { + DeoptimizeIf(ne, instr->environment()); + __ bind(&check_passed); + EmitLoadFieldOrConstantFunction( + result, object, map, name, instr->environment()); + } else { Label next; - __ cmp(scratch, Operand(map)); __ b(ne, &next); - EmitLoadFieldOrConstantFunction(result, object, map, name); + __ bind(&check_passed); + EmitLoadFieldOrConstantFunction( + result, object, map, name, instr->environment()); __ b(&done); __ bind(&next); } - Handle<Map> map = instr->hydrogen()->types()->last(); - __ cmp(scratch, Operand(map)); - if (instr->hydrogen()->need_generic()) { - Label generic; - __ b(ne, &generic); - EmitLoadFieldOrConstantFunction(result, object, map, name); - __ b(&done); - __ bind(&generic); - __ mov(r2, Operand(name)); - Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); - CallCode(ic, RelocInfo::CODE_TARGET, instr); - } else { - DeoptimizeIf(ne, instr->environment()); - EmitLoadFieldOrConstantFunction(result, object, map, name); - } - __ bind(&done); } + if (need_generic) { + __ mov(r2, Operand(name)); + Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); + CallCode(ic, RelocInfo::CODE_TARGET, instr); + } + __ bind(&done); } @@ -2579,8 +2728,10 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) { __ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset)); __ ubfx(scratch, scratch, Map::kElementsKindShift, Map::kElementsKindBitCount); - __ cmp(scratch, Operand(FAST_ELEMENTS)); - __ b(eq, &done); + __ cmp(scratch, Operand(GetInitialFastElementsKind())); + __ b(lt, &fail); + __ cmp(scratch, Operand(TERMINAL_FAST_ELEMENTS_KIND)); + __ b(le, &done); __ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND)); __ b(lt, &fail); __ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND)); @@ -2627,13 +2778,20 @@
void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { // Load the result. __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); - __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize)); + uint32_t offset = FixedArray::kHeaderSize + + (instr->additional_index() << kPointerSizeLog2); + __ ldr(result, FieldMemOperand(scratch, offset)); // Check for the hole value. if (instr->hydrogen()->RequiresHoleCheck()) { - __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); - __ cmp(result, scratch); - DeoptimizeIf(eq, instr->environment()); + if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { + __ tst(result, Operand(kSmiTagMask)); + DeoptimizeIf(ne, instr->environment()); + } else { + __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); + __ cmp(result, scratch); + DeoptimizeIf(eq, instr->environment()); + } } } @@ -2659,18 +2817,21 @@ void LCodeGen::DoLoadKeyedFastDoubleElement( } Operand operand = key_is_constant - ? Operand(constant_key * (1 << shift_size) + + ? Operand(((constant_key + instr->additional_index()) << shift_size) + FixedDoubleArray::kHeaderSize - kHeapObjectTag) : Operand(key, LSL, shift_size); __ add(elements, elements, operand); if (!key_is_constant) { __ add(elements, elements, - Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) + + (instr->additional_index() << shift_size))); } - __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); - __ cmp(scratch, Operand(kHoleNanUpper32)); - DeoptimizeIf(eq, instr->environment()); + if (instr->hydrogen()->RequiresHoleCheck()) { + __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); + __ cmp(scratch, Operand(kHoleNanUpper32)); + DeoptimizeIf(eq, instr->environment()); + } __ vldr(result, elements, 0); } @@ -2692,26 +2853,33 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( key = ToRegister(instr->key()); } int shift_size = ElementsKindToShiftSize(elements_kind); + int additional_offset = instr->additional_index() << shift_size; if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { CpuFeatures::Scope scope(VFP3); DwVfpRegister result = ToDoubleRegister(instr->result()); Operand operand = key_is_constant - ? Operand(constant_key * (1 << shift_size)) + ? Operand(constant_key << shift_size) : Operand(key, LSL, shift_size); __ add(scratch0(), external_pointer, operand); if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - __ vldr(result.low(), scratch0(), 0); + __ vldr(result.low(), scratch0(), additional_offset); __ vcvt_f64_f32(result, result.low()); } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS - __ vldr(result, scratch0(), 0); + __ vldr(result, scratch0(), additional_offset); } } else { Register result = ToRegister(instr->result()); + if (instr->additional_index() != 0 && !key_is_constant) { + __ add(scratch0(), key, Operand(instr->additional_index())); + } MemOperand mem_operand(key_is_constant - ? MemOperand(external_pointer, constant_key * (1 << shift_size)) - : MemOperand(external_pointer, key, LSL, shift_size)); + ? MemOperand(external_pointer, + (constant_key << shift_size) + additional_offset) + : (instr->additional_index() == 0 + ? 
MemOperand(external_pointer, key, LSL, shift_size) + : MemOperand(external_pointer, scratch0(), LSL, shift_size))); switch (elements_kind) { case EXTERNAL_BYTE_ELEMENTS: __ ldrsb(result, mem_operand); @@ -2739,9 +2907,12 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( break; case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -2764,16 +2935,20 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { Register scratch = scratch0(); Register result = ToRegister(instr->result()); - // Check if the calling frame is an arguments adaptor frame. - Label done, adapted; - __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset)); - __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + if (instr->hydrogen()->from_inlined()) { + __ sub(result, sp, Operand(2 * kPointerSize)); + } else { + // Check if the calling frame is an arguments adaptor frame. + Label done, adapted; + __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset)); + __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); - // Result is the frame pointer for the frame if not adapted and for the real - // frame below the adaptor frame if adapted. - __ mov(result, fp, LeaveCC, ne); - __ mov(result, scratch, LeaveCC, eq); + // Result is the frame pointer for the frame if not adapted and for the real + // frame below the adaptor frame if adapted. + __ mov(result, fp, LeaveCC, ne); + __ mov(result, scratch, LeaveCC, eq); + } } @@ -2882,7 +3057,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { __ b(ne, &loop); __ bind(&invoke); - ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); + ASSERT(instr->HasPointerMap()); LPointerMap* pointers = instr->pointer_map(); RecordPosition(pointers->position()); SafepointGenerator safepoint_generator( @@ -2907,6 +3082,11 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) { } +void LCodeGen::DoDrop(LDrop* instr) { + __ Drop(instr->count()); +} + + void LCodeGen::DoThisFunction(LThisFunction* instr) { Register result = ToRegister(instr->result()); __ LoadHeapObject(result, instr->hydrogen()->closure()); @@ -2953,7 +3133,8 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) { void LCodeGen::CallKnownFunction(Handle<JSFunction> function, int arity, LInstruction* instr, - CallKind call_kind) { + CallKind call_kind, + R1State r1_state) { bool can_invoke_directly = !function->NeedsArgumentsAdaption() || function->shared()->formal_parameter_count() == arity; @@ -2961,7 +3142,10 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, RecordPosition(pointers->position()); if (can_invoke_directly) { - __ LoadHeapObject(r1, function); + if (r1_state == R1_UNINITIALIZED) { + __ LoadHeapObject(r1, function); + } + + // Change context if needed.
bool change_context = (info()->closure()->context() != function->context()) || @@ -3000,7 +3184,8 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) { CallKnownFunction(instr->function(), instr->arity(), instr, - CALL_AS_METHOD); + CALL_AS_METHOD, + R1_UNINITIALIZED); } @@ -3109,7 +3294,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { } else { // Representation is tagged. DeferredMathAbsTaggedHeapNumber* deferred = - new DeferredMathAbsTaggedHeapNumber(this, instr); + new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); Register input = ToRegister(instr->InputAt(0)); // Smi check. __ JumpIfNotSmi(input, deferred->entry()); @@ -3286,7 +3471,7 @@ void LCodeGen::DoRandom(LRandom* instr) { LRandom* instr_; }; - DeferredDoRandom* deferred = new DeferredDoRandom(this, instr); + DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr); // Having marked this instruction as a call we can use any // registers. @@ -3424,13 +3609,21 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { ASSERT(ToRegister(instr->function()).is(r1)); ASSERT(instr->HasPointerMap()); - ASSERT(instr->HasDeoptimizationEnvironment()); - LPointerMap* pointers = instr->pointer_map(); - RecordPosition(pointers->position()); - SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); - ParameterCount count(instr->arity()); - __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD); - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + + if (instr->known_function().is_null()) { + LPointerMap* pointers = instr->pointer_map(); + RecordPosition(pointers->position()); + SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); + ParameterCount count(instr->arity()); + __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD); + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + } else { + CallKnownFunction(instr->known_function(), + instr->arity(), + instr, + CALL_AS_METHOD, + R1_CONTAINS_TARGET); + } } @@ -3485,7 +3678,11 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) { void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) { ASSERT(ToRegister(instr->result()).is(r0)); - CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION); + CallKnownFunction(instr->target(), + instr->arity(), + instr, + CALL_AS_FUNCTION, + R1_UNINITIALIZED); } @@ -3515,6 +3712,18 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { if (!instr->transition().is_null()) { __ mov(scratch, Operand(instr->transition())); __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); + if (instr->hydrogen()->NeedsWriteBarrierForMap()) { + Register temp = ToRegister(instr->TempAt(0)); + // Update the write barrier for the map field. + __ RecordWriteField(object, + HeapObject::kMapOffset, + scratch, + temp, + kLRHasBeenSaved, + kSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + } } // Do the store. 
@@ -3583,10 +3792,16 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); int offset = - ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize; + (ToInteger32(const_operand) + instr->additional_index()) * kPointerSize + + FixedArray::kHeaderSize; __ str(value, FieldMemOperand(elements, offset)); } else { __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); + if (instr->additional_index() != 0) { + __ add(scratch, + scratch, + Operand(instr->additional_index() << kPointerSizeLog2)); + } __ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize)); } @@ -3615,7 +3830,6 @@ void LCodeGen::DoStoreKeyedFastDoubleElement( Register scratch = scratch0(); bool key_is_constant = instr->key()->IsConstantOperand(); int constant_key = 0; - Label not_nan; // Calculate the effective address of the slot in the array to store the // double value. @@ -3629,7 +3843,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement( } int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); Operand operand = key_is_constant - ? Operand(constant_key * (1 << shift_size) + + ? Operand((constant_key << shift_size) + FixedDoubleArray::kHeaderSize - kHeapObjectTag) : Operand(key, LSL, shift_size); __ add(scratch, elements, operand); @@ -3638,14 +3852,16 @@ void LCodeGen::DoStoreKeyedFastDoubleElement( Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); } - // Check for NaN. All NaNs must be canonicalized. - __ VFPCompareAndSetFlags(value, value); - - // Only load canonical NaN if the comparison above set the overflow. - __ Vmov(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double(), vs); + if (instr->NeedsCanonicalization()) { + // Check for NaN. All NaNs must be canonicalized. + __ VFPCompareAndSetFlags(value, value); + // Only load canonical NaN if the comparison above set the overflow. + __ Vmov(value, + FixedDoubleArray::canonical_not_the_hole_nan_as_double(), + vs); + } - __ bind(¬_nan); - __ vstr(value, scratch, 0); + __ vstr(value, scratch, instr->additional_index() << shift_size); } @@ -3666,25 +3882,33 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( key = ToRegister(instr->key()); } int shift_size = ElementsKindToShiftSize(elements_kind); + int additional_offset = instr->additional_index() << shift_size; if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { CpuFeatures::Scope scope(VFP3); DwVfpRegister value(ToDoubleRegister(instr->value())); - Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size)) + Operand operand(key_is_constant ? Operand(constant_key << shift_size) : Operand(key, LSL, shift_size)); __ add(scratch0(), external_pointer, operand); if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { __ vcvt_f32_f64(double_scratch0().low(), value); - __ vstr(double_scratch0().low(), scratch0(), 0); + __ vstr(double_scratch0().low(), scratch0(), additional_offset); } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS - __ vstr(value, scratch0(), 0); + __ vstr(value, scratch0(), additional_offset); } } else { Register value(ToRegister(instr->value())); + if (instr->additional_index() != 0 && !key_is_constant) { + __ add(scratch0(), key, Operand(instr->additional_index())); + } MemOperand mem_operand(key_is_constant - ? MemOperand(external_pointer, constant_key * (1 << shift_size)) - : MemOperand(external_pointer, key, LSL, shift_size)); + ? 
MemOperand(external_pointer, + ((constant_key + instr->additional_index()) + << shift_size)) + : (instr->additional_index() == 0 + ? MemOperand(external_pointer, key, LSL, shift_size) + : MemOperand(external_pointer, scratch0(), LSL, shift_size))); switch (elements_kind) { case EXTERNAL_PIXEL_ELEMENTS: case EXTERNAL_BYTE_ELEMENTS: @@ -3703,7 +3927,10 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( case EXTERNAL_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3740,20 +3967,22 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { __ cmp(scratch, Operand(from_map)); __ b(ne, ¬_applicable); __ mov(new_map_reg, Operand(to_map)); - if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) { + + if (IsSimpleMapChangeTransition(from_kind, to_kind)) { __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); // Write barrier. __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg, scratch, kLRHasBeenSaved, kDontSaveFPRegs); - } else if (from_kind == FAST_SMI_ONLY_ELEMENTS && - to_kind == FAST_DOUBLE_ELEMENTS) { + } else if (IsFastSmiElementsKind(from_kind) && + IsFastDoubleElementsKind(to_kind)) { Register fixed_object_reg = ToRegister(instr->temp_reg()); ASSERT(fixed_object_reg.is(r2)); ASSERT(new_map_reg.is(r3)); __ mov(fixed_object_reg, object_reg); CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(), RelocInfo::CODE_TARGET, instr); - } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) { + } else if (IsFastDoubleElementsKind(from_kind) && + IsFastObjectElementsKind(to_kind)) { Register fixed_object_reg = ToRegister(instr->temp_reg()); ASSERT(fixed_object_reg.is(r2)); ASSERT(new_map_reg.is(r3)); @@ -3787,7 +4016,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { }; DeferredStringCharCodeAt* deferred = - new DeferredStringCharCodeAt(this, instr); + new(zone()) DeferredStringCharCodeAt(this, instr); StringCharLoadGenerator::Generate(masm(), ToRegister(instr->string()), @@ -3842,7 +4071,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { }; DeferredStringCharFromCode* deferred = - new DeferredStringCharFromCode(this, instr); + new(zone()) DeferredStringCharFromCode(this, instr); ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); Register char_code = ToRegister(instr->char_code()); @@ -3916,7 +4145,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) { Register src = ToRegister(instr->InputAt(0)); Register dst = ToRegister(instr->result()); - DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr); + DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr); __ SmiTag(dst, src, SetCC); __ b(vs, deferred->entry()); __ bind(deferred->exit()); @@ -3987,7 +4216,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { Register temp1 = ToRegister(instr->TempAt(0)); Register temp2 = ToRegister(instr->TempAt(1)); - DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr); + DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); if (FLAG_inline_new) { __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry()); @@ -4189,7 +4418,7 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) { Register 
input_reg = ToRegister(input); - DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr); + DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); // Optimistically untag the input. // If the input is a HeapObject, SmiUntag will set the carry flag. @@ -4338,14 +4567,22 @@ void LCodeGen::DoCheckMapCommon(Register reg, } -void LCodeGen::DoCheckMap(LCheckMap* instr) { +void LCodeGen::DoCheckMaps(LCheckMaps* instr) { Register scratch = scratch0(); LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister()); Register reg = ToRegister(input); - Handle<Map> map = instr->hydrogen()->map(); - DoCheckMapCommon(reg, scratch, map, instr->hydrogen()->mode(), - instr->environment()); + + Label success; + SmallMapList* map_set = instr->hydrogen()->map_set(); + for (int i = 0; i < map_set->length() - 1; i++) { + Handle<Map> map = map_set->at(i); + __ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP); + __ b(eq, &success); + } + Handle<Map> map = map_set->last(); + DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment()); + __ bind(&success); } @@ -4441,7 +4678,8 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) { LAllocateObject* instr_; }; - DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr); + DeferredAllocateObject* deferred = + new(zone()) DeferredAllocateObject(this, instr); Register result = ToRegister(instr->result()); Register scratch = ToRegister(instr->TempAt(0)); @@ -4464,6 +4702,14 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) { deferred->entry(), TAG_OBJECT); + __ bind(deferred->exit()); + if (FLAG_debug_code) { + Label is_in_new_space; + __ JumpIfInNewSpace(result, scratch, &is_in_new_space); + __ Abort("Allocated object is not in new-space"); + __ bind(&is_in_new_space); + } + // Load the initial map. Register map = scratch; __ LoadHeapObject(map, constructor); @@ -4482,14 +4728,14 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) { __ str(scratch, FieldMemOperand(result, property_offset)); } } - - __ bind(deferred->exit()); } void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) { Register result = ToRegister(instr->result()); Handle<JSFunction> constructor = instr->hydrogen()->constructor(); + Handle<Map> initial_map(constructor->initial_map()); + int instance_size = initial_map->instance_size(); // TODO(3095996): Get rid of this. For now, we need to make the // result register contain a valid pointer because it is already @@ -4497,9 +4743,9 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) { __ mov(result, Operand(0)); PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); - __ LoadHeapObject(r0, constructor); + __ mov(r0, Operand(Smi::FromInt(instance_size))); __ push(r0); - CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr); + CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr); __ StoreToSafepointRegisterSlot(r0, result); } @@ -4511,8 +4757,9 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { // Deopt if the array literal boilerplate ElementsKind is of a type different // than the expected one. The check isn't necessary if the boilerplate has - // already been converted to FAST_ELEMENTS. - if (boilerplate_elements_kind != FAST_ELEMENTS) { + // already been converted to TERMINAL_FAST_ELEMENTS_KIND. + if (CanTransitionToMoreGeneralFastElementsKind( + boilerplate_elements_kind, true)) { __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object()); // Load map into r2.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); @@ -4633,9 +4880,10 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, __ str(r2, FieldMemOperand(result, total_offset + 4)); } } else if (elements->IsFixedArray()) { + Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements); for (int i = 0; i < elements_length; i++) { int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i); - Handle<Object> value = JSObject::GetElement(object, i); + Handle<Object> value(fast_elements->get(i)); if (value->IsJSObject()) { Handle<JSObject> value_object = Handle<JSObject>::cast(value); __ add(r2, result, Operand(*offset)); @@ -4659,6 +4907,24 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, void LCodeGen::DoFastLiteral(LFastLiteral* instr) { int size = instr->hydrogen()->total_size(); + ElementsKind boilerplate_elements_kind = + instr->hydrogen()->boilerplate()->GetElementsKind(); + + // Deopt if the array literal boilerplate ElementsKind is of a type different + // than the expected one. The check isn't necessary if the boilerplate has + // already been converted to TERMINAL_FAST_ELEMENTS_KIND. + if (CanTransitionToMoreGeneralFastElementsKind( + boilerplate_elements_kind, true)) { + __ LoadHeapObject(r1, instr->hydrogen()->boilerplate()); + // Load map into r2. + __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); + // Load the map's "bit field 2". + __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset)); + // Retrieve elements_kind from bit field 2. + __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount); + __ cmp(r2, Operand(boilerplate_elements_kind)); + DeoptimizeIf(ne, instr->environment()); + } // Allocate all objects that are part of the literal in one big // allocation. This avoids multiple limit checks. @@ -4923,6 +5189,8 @@ void LCodeGen::EnsureSpaceForLazyDeopt() { int current_pc = masm()->pc_offset(); int patch_size = Deoptimizer::patch_size(); if (current_pc < last_lazy_deopt_pc_ + patch_size) { + // Block literal pool emission for duration of padding. + Assembler::BlockConstPoolScope block_const_pool(masm()); int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc; ASSERT_EQ(0, padding_size % Assembler::kInstrSize); while (padding_size > 0) { @@ -4954,7 +5222,7 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) { Register strict = scratch0(); __ mov(strict, Operand(Smi::FromInt(strict_mode_flag()))); __ Push(object, key, strict); - ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); + ASSERT(instr->HasPointerMap()); LPointerMap* pointers = instr->pointer_map(); RecordPosition(pointers->position()); SafepointGenerator safepoint_generator( @@ -4967,7 +5235,7 @@ void LCodeGen::DoIn(LIn* instr) { Register obj = ToRegister(instr->object()); Register key = ToRegister(instr->key()); __ Push(key, obj); - ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); + ASSERT(instr->HasPointerMap()); LPointerMap* pointers = instr->pointer_map(); RecordPosition(pointers->position()); SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt); @@ -5017,7 +5285,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { ASSERT(instr->hydrogen()->is_backwards_branch()); // Perform stack overflow check if this goto needs it before jumping.
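The allocation on the next line shows a pattern repeated throughout this file: deferred-code objects move from plain `new` to `new(zone())`, placement new into the per-compilation Zone, so allocation is a pointer bump and the whole arena is released at once when compilation finishes (which is also why the LCodeGen constructor below now takes a Zone*). A minimal sketch of the idiom; these class shapes are illustrative, not V8's actual definitions:

    #include <cstddef>

    // Bump-pointer arena: no per-object free; everything dies with the zone.
    class Zone {
     public:
      explicit Zone(size_t capacity) : buf_(new char[capacity]), pos_(0) {}
      ~Zone() { delete[] buf_; }
      void* New(size_t size) {
        void* p = buf_ + pos_;
        pos_ += (size + 7) & ~size_t(7);  // keep 8-byte alignment
        return p;
      }
     private:
      char* buf_;
      size_t pos_;
    };

    // Zone-allocated classes opt in through an operator new overload, which
    // is what "new(zone()) DeferredStackCheck(this, instr)" resolves to.
    struct ZoneObject {
      void* operator new(size_t size, Zone* zone) { return zone->New(size); }
      void operator delete(void*, Zone*) {}  // matching placement delete
    };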
DeferredStackCheck* deferred_stack_check = - new DeferredStackCheck(this, instr); + new(zone()) DeferredStackCheck(this, instr); __ LoadRoot(ip, Heap::kStackLimitRootIndex); __ cmp(sp, Operand(ip)); __ b(lo, deferred_stack_check->entry()); diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index adb6e1bb73..f35c69b8a3 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -43,22 +43,26 @@ class SafepointGenerator; class LCodeGen BASE_EMBEDDED { public: - LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) + LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info, + Zone* zone) : chunk_(chunk), masm_(assembler), info_(info), current_block_(-1), current_instruction_(-1), instructions_(chunk->instructions()), - deoptimizations_(4), - deopt_jump_table_(4), - deoptimization_literals_(8), + deoptimizations_(4, zone), + deopt_jump_table_(4, zone), + deoptimization_literals_(8, zone), inlined_function_count_(0), scope_(info->scope()), status_(UNUSED), - deferred_(8), + translations_(zone), + deferred_(8, zone), osr_pc_offset_(-1), last_lazy_deopt_pc_(0), + safepoints_(zone), + zone_(zone), resolver_(this), expected_safepoint_kind_(Safepoint::kSimple) { PopulateDeoptimizationLiteralsWithInlinedFunctions(); @@ -71,6 +75,7 @@ class LCodeGen BASE_EMBEDDED { Isolate* isolate() const { return info_->isolate(); } Factory* factory() const { return isolate()->factory(); } Heap* heap() const { return isolate()->heap(); } + Zone* zone() const { return zone_; } // Support for converting LOperands to assembler types. // LOperand must be a register. @@ -176,7 +181,7 @@ class LCodeGen BASE_EMBEDDED { void Abort(const char* format, ...); void Comment(const char* format, ...); - void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); } + void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } // Code generation passes. Returns true if code generation should // continue. @@ -215,12 +220,18 @@ class LCodeGen BASE_EMBEDDED { int argc, LInstruction* instr); + enum R1State { + R1_UNINITIALIZED, + R1_CONTAINS_TARGET + }; + // Generate a direct call to a known function. Expects the function // to be in r1. void CallKnownFunction(Handle<JSFunction> function, int arity, LInstruction* instr, - CallKind call_kind); + CallKind call_kind, + R1State r1_state); void LoadHeapObject(Register result, Handle<HeapObject> object); @@ -308,7 +319,8 @@ class LCodeGen BASE_EMBEDDED { void EmitLoadFieldOrConstantFunction(Register result, Register object, Handle<Map> type, - Handle<String> name); + Handle<String> name, + LEnvironment* env); // Emits optimized code to deep-copy the contents of statically known // object graphs (e.g. object literal boilerplate). @@ -317,6 +329,17 @@ class LCodeGen BASE_EMBEDDED { Register source, int* offset); + // Emit optimized code for integer division. + // Inputs are signed. + // All registers are clobbered. + // If 'remainder' is no_reg, it is not computed. + void EmitSignedIntegerDivisionByConstant(Register result, + Register dividend, + int32_t divisor, + Register remainder, + Register scratch, + LEnvironment* environment); + struct JumpTableEntry { explicit inline JumpTableEntry(Address entry) : label(), @@ -349,6 +372,8 @@ class LCodeGen BASE_EMBEDDED { // itself is emitted at the end of the generated code. SafepointTableBuilder safepoints_; + Zone* zone_; + // Compiles a set of parallel moves into a sequential list of moves.
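The member documented by that comment, the LGapResolver, turns a set of parallel moves (semantically, every source is read before any destination is written) into an emit-one-at-a-time sequence, spilling into a saved register to break cycles such as swapping r0 and r1. A simplified, hedged sketch of that scheduling problem; V8's resolver works on LOperands and also handles stack and double operands:

    #include <vector>

    struct Move { int src, dst; };  // abstract register numbers

    // Orders parallel moves so every destination is written only after all
    // pending reads of it have happened; 'scratch' breaks pure cycles.
    std::vector<Move> Sequentialize(std::vector<Move> pending, int scratch) {
      std::vector<Move> out;
      while (!pending.empty()) {
        bool progress = false;
        for (size_t i = 0; i < pending.size(); ++i) {
          bool blocked = false;  // is pending[i].dst still read by others?
          for (size_t j = 0; j < pending.size(); ++j) {
            if (j != i && pending[j].src == pending[i].dst) blocked = true;
          }
          if (!blocked) {
            out.push_back(pending[i]);
            pending.erase(pending.begin() + i);
            progress = true;
            break;
          }
        }
        if (!progress) {  // every move blocked: a cycle; park one source
          Move m = pending.back();
          out.push_back(Move{m.src, scratch});
          for (size_t j = 0; j < pending.size(); ++j) {
            if (pending[j].src == m.src) pending[j].src = scratch;
          }
        }
      }
      return out;
    }

For {r0<-r1, r1<-r0} this emits r1->scratch, r0->r1, scratch->r0, which is exactly the shape the ARM resolver produces with kSavedValueRegister.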
LGapResolver resolver_; diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc index cefca476ad..c100720d89 100644 --- a/deps/v8/src/arm/lithium-gap-resolver-arm.cc +++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc @@ -36,7 +36,7 @@ namespace internal { static const Register kSavedValueRegister = { 9 }; LGapResolver::LGapResolver(LCodeGen* owner) - : cgen_(owner), moves_(32), root_index_(0), in_cycle_(false), + : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false), saved_destination_(NULL) { } @@ -79,7 +79,7 @@ void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) { const ZoneList* moves = parallel_move->move_operands(); for (int i = 0; i < moves->length(); ++i) { LMoveOperands move = moves->at(i); - if (!move.IsRedundant()) moves_.Add(move); + if (!move.IsRedundant()) moves_.Add(move, cgen_->zone()); } Verify(); } diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 857c2bf770..7c49e9e58a 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -1868,10 +1868,12 @@ void MacroAssembler::CompareRoot(Register obj, void MacroAssembler::CheckFastElements(Register map, Register scratch, Label* fail) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); - STATIC_ASSERT(FAST_ELEMENTS == 1); + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + STATIC_ASSERT(FAST_ELEMENTS == 2); + STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); - cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue)); + cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue)); b(hi, fail); } @@ -1879,22 +1881,25 @@ void MacroAssembler::CheckFastElements(Register map, void MacroAssembler::CheckFastObjectElements(Register map, Register scratch, Label* fail) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); - STATIC_ASSERT(FAST_ELEMENTS == 1); + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + STATIC_ASSERT(FAST_ELEMENTS == 2); + STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); - cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue)); + cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); b(ls, fail); - cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue)); + cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue)); b(hi, fail); } -void MacroAssembler::CheckFastSmiOnlyElements(Register map, - Register scratch, - Label* fail) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); +void MacroAssembler::CheckFastSmiElements(Register map, + Register scratch, + Label* fail) { + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); - cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue)); + cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); b(hi, fail); } @@ -1995,24 +2000,27 @@ void MacroAssembler::CompareMap(Register obj, Label* early_success, CompareMapMode mode) { ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); - cmp(scratch, Operand(map)); - if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) { - Map* transitioned_fast_element_map( - map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL)); - ASSERT(transitioned_fast_element_map == NULL || - map->elements_kind() != FAST_ELEMENTS); - if 
(transitioned_fast_element_map != NULL) { - b(eq, early_success); - cmp(scratch, Operand(Handle(transitioned_fast_element_map))); - } + CompareMap(scratch, map, early_success, mode); +} + - Map* transitioned_double_map( - map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL)); - ASSERT(transitioned_double_map == NULL || - map->elements_kind() == FAST_SMI_ONLY_ELEMENTS); - if (transitioned_double_map != NULL) { - b(eq, early_success); - cmp(scratch, Operand(Handle(transitioned_double_map))); +void MacroAssembler::CompareMap(Register obj_map, + Handle map, + Label* early_success, + CompareMapMode mode) { + cmp(obj_map, Operand(map)); + if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) { + ElementsKind kind = map->elements_kind(); + if (IsFastElementsKind(kind)) { + bool packed = IsFastPackedElementsKind(kind); + Map* current_map = *map; + while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) { + kind = GetNextMoreGeneralFastElementsKind(kind, packed); + current_map = current_map->LookupElementsTransitionMap(kind); + if (!current_map) break; + b(eq, early_success); + cmp(obj_map, Operand(Handle(current_map))); + } } } } @@ -2865,28 +2873,38 @@ void MacroAssembler::LoadTransitionedArrayMapConditional( ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset)); // Check that the function's map is the same as the expected cached map. - int expected_index = - Context::GetContextMapIndexFromElementsKind(expected_kind); - ldr(ip, MemOperand(scratch, Context::SlotOffset(expected_index))); - cmp(map_in_out, ip); + ldr(scratch, + MemOperand(scratch, + Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX))); + size_t offset = expected_kind * kPointerSize + + FixedArrayBase::kHeaderSize; + cmp(map_in_out, scratch); b(ne, no_map_match); // Use the transitioned cached map. - int trans_index = - Context::GetContextMapIndexFromElementsKind(transitioned_kind); - ldr(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index))); + offset = transitioned_kind * kPointerSize + + FixedArrayBase::kHeaderSize; + ldr(map_in_out, FieldMemOperand(scratch, offset)); } void MacroAssembler::LoadInitialArrayMap( - Register function_in, Register scratch, Register map_out) { + Register function_in, Register scratch, + Register map_out, bool can_have_holes) { ASSERT(!function_in.is(map_out)); Label done; ldr(map_out, FieldMemOperand(function_in, JSFunction::kPrototypeOrInitialMapOffset)); if (!FLAG_smi_only_arrays) { - LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, - FAST_ELEMENTS, + ElementsKind kind = can_have_holes ? 
FAST_HOLEY_ELEMENTS : FAST_ELEMENTS; + LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, + kind, + map_out, + scratch, + &done); + } else if (can_have_holes) { + LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, + FAST_HOLEY_SMI_ELEMENTS, map_out, scratch, &done); @@ -3710,22 +3728,35 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) { } -bool AreAliased(Register r1, Register r2, Register r3, Register r4) { - if (r1.is(r2)) return true; - if (r1.is(r3)) return true; - if (r1.is(r4)) return true; - if (r2.is(r3)) return true; - if (r2.is(r4)) return true; - if (r3.is(r4)) return true; - return false; +#ifdef DEBUG +bool AreAliased(Register reg1, + Register reg2, + Register reg3, + Register reg4, + Register reg5, + Register reg6) { + int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + + reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid(); + + RegList regs = 0; + if (reg1.is_valid()) regs |= reg1.bit(); + if (reg2.is_valid()) regs |= reg2.bit(); + if (reg3.is_valid()) regs |= reg3.bit(); + if (reg4.is_valid()) regs |= reg4.bit(); + if (reg5.is_valid()) regs |= reg5.bit(); + if (reg6.is_valid()) regs |= reg6.bit(); + int n_of_non_aliasing_regs = NumRegs(regs); + + return n_of_valid_regs != n_of_non_aliasing_regs; } +#endif CodePatcher::CodePatcher(byte* address, int instructions) : address_(address), instructions_(instructions), size_(instructions * Assembler::kInstrSize), - masm_(Isolate::Current(), address, size_ + Assembler::kGap) { + masm_(NULL, address, size_ + Assembler::kGap) { // Create a new macro assembler pointing to the address of the code to patch. // The size is adjusted with kGap on order for the assembler to generate size // bytes of instructions without failing with buffer size constraints. diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 47afa93a6e..6b7d116357 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -85,7 +85,14 @@ enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved }; -bool AreAliased(Register r1, Register r2, Register r3, Register r4); +#ifdef DEBUG +bool AreAliased(Register reg1, + Register reg2, + Register reg3 = no_reg, + Register reg4 = no_reg, + Register reg5 = no_reg, + Register reg6 = no_reg); +#endif // MacroAssembler implements a collection of frequently used macros. @@ -505,7 +512,8 @@ class MacroAssembler: public Assembler { // Load the initial map for new Arrays from a JSFunction. void LoadInitialArrayMap(Register function_in, Register scratch, - Register map_out); + Register map_out, + bool can_have_holes); void LoadGlobalFunction(int index, Register function); @@ -795,9 +803,9 @@ class MacroAssembler: public Assembler { // Check if a map for a JSObject indicates that the object has fast smi only // elements. Jump to the specified label if it does not. - void CheckFastSmiOnlyElements(Register map, - Register scratch, - Label* fail); + void CheckFastSmiElements(Register map, + Register scratch, + Label* fail); // Check to see if maybe_number can be stored as a double in // FastDoubleElements. If it can, store it at the index specified by key in @@ -823,6 +831,13 @@ class MacroAssembler: public Assembler { Label* early_success, CompareMapMode mode = REQUIRE_EXACT_MAP); + // As above, but the map of the object is already loaded into the register + // which is preserved by the code generated. 
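The overload declared just below feeds the transition walk implemented above: under ALLOW_ELEMENT_TRANSITION_MAPS a map check no longer accepts just two hard-coded transitioned maps, but every strictly more general fast elements kind in the new packed/holey lattice. A hedged model of that lattice; the first four enum values match the STATIC_ASSERTs above, the rest and the helpers follow elements-kind.h conventions:

    enum ElementsKind {
      FAST_SMI_ELEMENTS = 0,
      FAST_HOLEY_SMI_ELEMENTS = 1,
      FAST_ELEMENTS = 2,
      FAST_HOLEY_ELEMENTS = 3,   // == TERMINAL_FAST_ELEMENTS_KIND
      FAST_DOUBLE_ELEMENTS,
      FAST_HOLEY_DOUBLE_ELEMENTS
    };

    static int TypeRank(ElementsKind k) {  // smi < double < tagged object
      if (k == FAST_SMI_ELEMENTS || k == FAST_HOLEY_SMI_ELEMENTS) return 0;
      if (k == FAST_DOUBLE_ELEMENTS || k == FAST_HOLEY_DOUBLE_ELEMENTS) return 1;
      return 2;
    }

    static bool IsHoley(ElementsKind k) {
      return k == FAST_HOLEY_SMI_ELEMENTS || k == FAST_HOLEY_DOUBLE_ELEMENTS ||
             k == FAST_HOLEY_ELEMENTS;
    }

    // 'to' is a legal generalization of 'from' iff it is at least as general
    // in both dimensions: the element type may widen and holes may appear,
    // never the reverse. This is the set the CompareMap loop walks.
    bool IsMoreGeneralThan(ElementsKind to, ElementsKind from) {
      return to != from && TypeRank(to) >= TypeRank(from) &&
             (IsHoley(to) || !IsHoley(from));
    }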
+ void CompareMap(Register obj_map, + Handle map, + Label* early_success, + CompareMapMode mode = REQUIRE_EXACT_MAP); + // Check if the map of an object is equal to a specified map and branch to // label if not. Skip the smi check if not required (object is known to be a // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match @@ -1321,7 +1336,6 @@ class MacroAssembler: public Assembler { }; -#ifdef ENABLE_DEBUGGER_SUPPORT // The code patcher is used to patch (typically) small parts of code e.g. for // debugging and other types of instrumentation. When using the code patcher // the exact number of bytes specified must be emitted. It is not legal to emit @@ -1351,7 +1365,6 @@ class CodePatcher { int size_; // Number of bytes of the expected patch size. MacroAssembler masm_; // Macro assembler used to generate the code. }; -#endif // ENABLE_DEBUGGER_SUPPORT // ----------------------------------------------------------------------------- diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index 10ff2dd96c..66cdd8435e 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2009 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -43,45 +43,49 @@ namespace internal { #ifndef V8_INTERPRETED_REGEXP /* * This assembler uses the following register assignment convention + * - r4 : Temporarily stores the index of capture start after a matching pass + * for a global regexp. * - r5 : Pointer to current code object (Code*) including heap object tag. * - r6 : Current position in input, as negative offset from end of string. * Please notice that this is the byte offset, not the character offset! * - r7 : Currently loaded character. Must be loaded using * LoadCurrentCharacter before using any of the dispatch methods. - * - r8 : points to tip of backtrack stack + * - r8 : Points to tip of backtrack stack * - r9 : Unused, might be used by C code and expected unchanged. * - r10 : End of input (points to byte after last character in input). * - r11 : Frame pointer. Used to access arguments, local variables and * RegExp registers. * - r12 : IP register, used by assembler. Very volatile. - * - r13/sp : points to tip of C stack. + * - r13/sp : Points to tip of C stack. * * The remaining registers are free for computations. * Each call to a public method should retain this convention. * * The stack will have the following structure: - * - fp[52] Isolate* isolate (Address of the current isolate) - * - fp[48] direct_call (if 1, direct call from JavaScript code, - * if 0, call through the runtime system). - * - fp[44] stack_area_base (High end of the memory area to use as - * backtracking stack). + * - fp[56] Isolate* isolate (address of the current isolate) + * - fp[52] direct_call (if 1, direct call from JavaScript code, + * if 0, call through the runtime system). + * - fp[48] stack_area_base (high end of the memory area to use as + * backtracking stack). + * - fp[44] capture array size (may fit multiple sets of matches) * - fp[40] int* capture_array (int[num_saved_registers_], for output). * - fp[36] secondary link/return address used by native call. * --- sp when called --- - * - fp[32] return address (lr). - * - fp[28] old frame pointer (r11). 
+ * - fp[32] return address (lr). + * - fp[28] old frame pointer (r11). * - fp[0..24] backup of registers r4..r10. * --- frame pointer ---- - * - fp[-4] end of input (Address of end of string). - * - fp[-8] start of input (Address of first character in string). + * - fp[-4] end of input (address of end of string). + * - fp[-8] start of input (address of first character in string). * - fp[-12] start index (character index of start). * - fp[-16] void* input_string (location of a handle containing the string). - * - fp[-20] Offset of location before start of input (effectively character + * - fp[-20] success counter (only for global regexps to count matches). + * - fp[-24] Offset of location before start of input (effectively character * position -1). Used to initialize capture registers to a * non-position. - * - fp[-24] At start (if 1, we are starting at the start of the + * - fp[-28] At start (if 1, we are starting at the start of the * string, otherwise 0) - * - fp[-28] register 0 (Only positions must be stored in the first + * - fp[-32] register 0 (Only positions must be stored in the first * - register 1 num_saved_registers_ registers) * - ... * - register num_registers-1 @@ -115,8 +119,10 @@ namespace internal { RegExpMacroAssemblerARM::RegExpMacroAssemblerARM( Mode mode, - int registers_to_save) - : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)), + int registers_to_save, + Zone* zone) + : NativeRegExpMacroAssembler(zone), + masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)), mode_(mode), num_registers_(registers_to_save), num_saved_registers_(registers_to_save), @@ -197,9 +203,9 @@ void RegExpMacroAssemblerARM::CheckCharacterGT(uc16 limit, Label* on_greater) { void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) { Label not_at_start; // Did we start the match at the start of the string at all? - __ ldr(r0, MemOperand(frame_pointer(), kAtStart)); + __ ldr(r0, MemOperand(frame_pointer(), kStartIndex)); __ cmp(r0, Operand(0, RelocInfo::NONE)); - BranchOrBacktrack(eq, &not_at_start); + BranchOrBacktrack(ne, &not_at_start); // If we did, are we still at the start of the input? __ ldr(r1, MemOperand(frame_pointer(), kInputStart)); @@ -212,9 +218,9 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) { void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) { // Did we start the match at the start of the string at all? - __ ldr(r0, MemOperand(frame_pointer(), kAtStart)); + __ ldr(r0, MemOperand(frame_pointer(), kStartIndex)); __ cmp(r0, Operand(0, RelocInfo::NONE)); - BranchOrBacktrack(eq, on_not_at_start); + BranchOrBacktrack(ne, on_not_at_start); // If we did, are we still at the start of the input?
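The kAtStart frame slot disappears in the two checks above: whether the match began at the very start of the string is now derived from the start index itself (it can only be "at start" when start_index == 0), which also explains the eq -> ne flips, since the loaded value changed meaning from a boolean flag to an index. The new CheckNotAtStart test, as a hedged C++ sketch:

    // True when the current point cannot be the very start of the string:
    // either the match began mid-string, or we have moved off the start.
    bool NotAtStart(int start_index, const char* current_pos,
                    const char* input_start) {
      if (start_index != 0) return true;   // started mid-string
      return current_pos != input_start;   // moved away from the start?
    }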
__ ldr(r1, MemOperand(frame_pointer(), kInputStart)); __ add(r0, end_of_input_address(), Operand(current_input_offset())); @@ -432,16 +438,6 @@ void RegExpMacroAssemblerARM::CheckNotBackReference( } -void RegExpMacroAssemblerARM::CheckNotRegistersEqual(int reg1, - int reg2, - Label* on_not_equal) { - __ ldr(r0, register_location(reg1)); - __ ldr(r1, register_location(reg2)); - __ cmp(r0, r1); - BranchOrBacktrack(ne, on_not_equal); -} - - void RegExpMacroAssemblerARM::CheckNotCharacter(unsigned c, Label* on_not_equal) { __ cmp(current_character(), Operand(c)); @@ -452,8 +448,12 @@ void RegExpMacroAssemblerARM::CheckNotCharacter(unsigned c, void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c, uint32_t mask, Label* on_equal) { - __ and_(r0, current_character(), Operand(mask)); - __ cmp(r0, Operand(c)); + if (c == 0) { + __ tst(current_character(), Operand(mask)); + } else { + __ and_(r0, current_character(), Operand(mask)); + __ cmp(r0, Operand(c)); + } BranchOrBacktrack(eq, on_equal); } @@ -461,8 +461,12 @@ void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c, void RegExpMacroAssemblerARM::CheckNotCharacterAfterAnd(unsigned c, unsigned mask, Label* on_not_equal) { - __ and_(r0, current_character(), Operand(mask)); - __ cmp(r0, Operand(c)); + if (c == 0) { + __ tst(current_character(), Operand(mask)); + } else { + __ and_(r0, current_character(), Operand(mask)); + __ cmp(r0, Operand(c)); + } BranchOrBacktrack(ne, on_not_equal); } @@ -480,6 +484,44 @@ void RegExpMacroAssemblerARM::CheckNotCharacterAfterMinusAnd( } +void RegExpMacroAssemblerARM::CheckCharacterInRange( + uc16 from, + uc16 to, + Label* on_in_range) { + __ sub(r0, current_character(), Operand(from)); + __ cmp(r0, Operand(to - from)); + BranchOrBacktrack(ls, on_in_range); // Unsigned lower-or-same condition. +} + + +void RegExpMacroAssemblerARM::CheckCharacterNotInRange( + uc16 from, + uc16 to, + Label* on_not_in_range) { + __ sub(r0, current_character(), Operand(from)); + __ cmp(r0, Operand(to - from)); + BranchOrBacktrack(hi, on_not_in_range); // Unsigned higher condition. +} + + +void RegExpMacroAssemblerARM::CheckBitInTable( + Handle table, + Label* on_bit_set) { + __ mov(r0, Operand(table)); + if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) { + __ and_(r1, current_character(), Operand(kTableSize - 1)); + __ add(r1, r1, Operand(ByteArray::kHeaderSize - kHeapObjectTag)); + } else { + __ add(r1, + current_character(), + Operand(ByteArray::kHeaderSize - kHeapObjectTag)); + } + __ ldrb(r0, MemOperand(r0, r1)); + __ cmp(r0, Operand(0)); + BranchOrBacktrack(ne, on_bit_set); +} + + bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type, Label* on_no_match) { // Range checks (c in min..max) are generally implemented by an unsigned @@ -609,6 +651,7 @@ void RegExpMacroAssemblerARM::Fail() { Handle RegExpMacroAssemblerARM::GetCode(Handle source) { + Label return_r0; // Finalize code - write the entry point code now we know how many // registers we need. @@ -632,8 +675,9 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { // Set frame pointer in space for it if this is not a direct call // from generated code. __ add(frame_pointer(), sp, Operand(4 * kPointerSize)); + __ mov(r0, Operand(0, RelocInfo::NONE)); + __ push(r0); // Make room for success counter and initialize it to 0. __ push(r0); // Make room for "position - 1" constant (value is irrelevant). - __ push(r0); // Make room for "at start" constant (value is irrelevant). // Check if we have space on the stack for registers. 
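Two classic peepholes appear in the character tests rewritten above: when the expected value under a mask is zero, tst (an AND that only sets flags) replaces the and_/cmp pair and frees r0; and CheckCharacterInRange compiles "from <= c && c <= to" into one subtraction plus a single unsigned comparison, because values below 'from' wrap around to large unsigned numbers. Equivalent C++, as a hedged sketch:

    #include <cstdint>

    // tst fold: (c & mask) == 0 needs no materialized AND result.
    bool MaskedIsZero(uint32_t c, uint32_t mask) { return (c & mask) == 0; }

    // One-compare inclusive range check, as in CheckCharacterInRange:
    // c in [from, to]  <=>  (uint32_t)(c - from) <= (uint32_t)(to - from).
    bool InRange(uint16_t c, uint16_t from, uint16_t to) {
      return static_cast<uint32_t>(c - from) <=
             static_cast<uint32_t>(to - from);
    }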
Label stack_limit_hit; Label stack_ok; @@ -652,13 +696,13 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { // Exit with OutOfMemory exception. There is not enough space on the stack // for our working registers. __ mov(r0, Operand(EXCEPTION)); - __ jmp(&exit_label_); + __ jmp(&return_r0); __ bind(&stack_limit_hit); CallCheckStackGuardState(r0); __ cmp(r0, Operand(0, RelocInfo::NONE)); // If returned value is non-zero, we exit with the returned value as result. - __ b(ne, &exit_label_); + __ b(ne, &return_r0); __ bind(&stack_ok); @@ -679,41 +723,45 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { // position registers. __ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne)); - // Determine whether the start index is zero, that is at the start of the - // string, and store that value in a local variable. - __ cmp(r1, Operand(0)); - __ mov(r1, Operand(1), LeaveCC, eq); - __ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne); - __ str(r1, MemOperand(frame_pointer(), kAtStart)); + // Initialize code pointer register + __ mov(code_pointer(), Operand(masm_->CodeObject())); + Label load_char_start_regexp, start_regexp; + // Load newline if index is at start, previous character otherwise. + __ cmp(r1, Operand(0, RelocInfo::NONE)); + __ b(ne, &load_char_start_regexp); + __ mov(current_character(), Operand('\n'), LeaveCC, eq); + __ jmp(&start_regexp); + + // Global regexp restarts matching here. + __ bind(&load_char_start_regexp); + // Load previous char as initial value of current character register. + LoadCurrentCharacterUnchecked(-1, 1); + __ bind(&start_regexp); + + // Initialize on-stack registers. if (num_saved_registers_ > 0) { // Always is, if generated from a regexp. // Fill saved registers with initial value = start offset - 1 - - // Address of register 0. - __ add(r1, frame_pointer(), Operand(kRegisterZero)); - __ mov(r2, Operand(num_saved_registers_)); - Label init_loop; - __ bind(&init_loop); - __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex)); - __ sub(r2, r2, Operand(1), SetCC); - __ b(ne, &init_loop); + if (num_saved_registers_ > 8) { + // Address of register 0. + __ add(r1, frame_pointer(), Operand(kRegisterZero)); + __ mov(r2, Operand(num_saved_registers_)); + Label init_loop; + __ bind(&init_loop); + __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex)); + __ sub(r2, r2, Operand(1), SetCC); + __ b(ne, &init_loop); + } else { + for (int i = 0; i < num_saved_registers_; i++) { + __ str(r0, register_location(i)); + } + } } // Initialize backtrack stack pointer. __ ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd)); - // Initialize code pointer register - __ mov(code_pointer(), Operand(masm_->CodeObject())); - // Load previous char as initial value of current character register. - Label at_start; - __ ldr(r0, MemOperand(frame_pointer(), kAtStart)); - __ cmp(r0, Operand(0, RelocInfo::NONE)); - __ b(ne, &at_start); - LoadCurrentCharacterUnchecked(-1, 1); // Load previous char. - __ jmp(&start_label_); - __ bind(&at_start); - __ mov(current_character(), Operand('\n')); - __ jmp(&start_label_); + __ jmp(&start_label_); // Exit code: if (success_label_.is_linked()) { @@ -740,6 +788,10 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { for (int i = 0; i < num_saved_registers_; i += 2) { __ ldr(r2, register_location(i)); __ ldr(r3, register_location(i + 1)); + if (i == 0 && global_with_zero_length_check()) { + // Keep capture start in r4 for the zero-length check later. 
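r4 matters because a global regexp now loops inside the generated code: after storing a successful match's captures it jumps back to load_char_start_regexp for the next attempt, and a zero-length match must first advance the position (or stop at the end of input) so the engine cannot match at the same spot forever. The driver logic the following code implements, modeled as a hedged C++ sketch (real failures backtrack rather than advancing linearly as done here):

    #include <cstddef>
    #include <functional>
    #include <vector>

    // match_at(pos) returns the match length at 'pos', or -1 for no match.
    std::vector<size_t> FindAllMatches(
        const std::function<int(size_t)>& match_at, size_t length) {
      std::vector<size_t> starts;
      size_t pos = 0;
      while (pos <= length) {
        int len = match_at(pos);
        if (len < 0) { ++pos; continue; }  // simplification of backtracking
        starts.push_back(pos);
        if (len == 0) {                    // zero-length match
          if (pos == length) break;        // already at end of input: done
          ++pos;                           // bump past it, like the ARM code
        } else {
          pos += len;
        }
      }
      return starts;
    }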
+ __ mov(r4, r2); + } if (mode_ == UC16) { __ add(r2, r1, Operand(r2, ASR, 1)); __ add(r3, r1, Operand(r3, ASR, 1)); @@ -751,10 +803,58 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { __ str(r3, MemOperand(r0, kPointerSize, PostIndex)); } } - __ mov(r0, Operand(SUCCESS)); + + if (global()) { + // Restart matching if the regular expression is flagged as global. + __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures)); + __ ldr(r1, MemOperand(frame_pointer(), kNumOutputRegisters)); + __ ldr(r2, MemOperand(frame_pointer(), kRegisterOutput)); + // Increment success counter. + __ add(r0, r0, Operand(1)); + __ str(r0, MemOperand(frame_pointer(), kSuccessfulCaptures)); + // Capture results have been stored, so the number of remaining global + // output registers is reduced by the number of stored captures. + __ sub(r1, r1, Operand(num_saved_registers_)); + // Check whether we have enough room for another set of capture results. + __ cmp(r1, Operand(num_saved_registers_)); + __ b(lt, &return_r0); + + __ str(r1, MemOperand(frame_pointer(), kNumOutputRegisters)); + // Advance the location for output. + __ add(r2, r2, Operand(num_saved_registers_ * kPointerSize)); + __ str(r2, MemOperand(frame_pointer(), kRegisterOutput)); + + // Prepare r0 to initialize registers with its value in the next run. + __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne)); + + if (global_with_zero_length_check()) { + // Special case for zero-length matches. + // r4: capture start index + __ cmp(current_input_offset(), r4); + // Not a zero-length match, restart. + __ b(ne, &load_char_start_regexp); + // Offset from the end is zero if we already reached the end. + __ cmp(current_input_offset(), Operand(0)); + __ b(eq, &exit_label_); + // Advance current position after a zero-length match. + __ add(current_input_offset(), + current_input_offset(), + Operand((mode_ == UC16) ? 2 : 1)); + } + + __ b(&load_char_start_regexp); + } else { + __ mov(r0, Operand(SUCCESS)); + } } + // Exit and return r0 __ bind(&exit_label_); + if (global()) { + __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures)); + } + + __ bind(&return_r0); // Skip sp past regexp registers and local variables.. __ mov(sp, frame_pointer()); // Restore registers r4..r11 and return (restoring lr to pc). @@ -776,7 +876,7 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { __ cmp(r0, Operand(0, RelocInfo::NONE)); // If returning non-zero, we should end execution with the given // result as return value. - __ b(ne, &exit_label_); + __ b(ne, &return_r0); // String might have moved: Reload end of string from frame. __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); @@ -813,7 +913,7 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { __ bind(&exit_with_exception); // Exit with Result EXCEPTION(-1) to signal thrown exception. __ mov(r0, Operand(EXCEPTION)); - __ jmp(&exit_label_); + __ jmp(&return_r0); } CodeDesc code_desc; @@ -968,8 +1068,9 @@ void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) { } -void RegExpMacroAssemblerARM::Succeed() { +bool RegExpMacroAssemblerARM::Succeed() { __ jmp(&success_label_); + return global(); } @@ -1261,8 +1362,9 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset, int characters) { Register offset = current_input_offset(); if (cp_offset != 0) { - __ add(r0, current_input_offset(), Operand(cp_offset * char_size())); - offset = r0; + // r4 is not being used to store the capture start index at this point. 
+ __ add(r4, current_input_offset(), Operand(cp_offset * char_size())); + offset = r4; } // The ldr, str, ldrh, strh instructions can do unaligned accesses, if the CPU // and the operating system running on the target allow it. diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h index 5c8ed0693f..9bebb4d406 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.h +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -45,7 +45,7 @@ class RegExpMacroAssemblerARM: public RegExpMacroAssembler { #else // V8_INTERPRETED_REGEXP class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { public: - RegExpMacroAssemblerARM(Mode mode, int registers_to_save); + RegExpMacroAssemblerARM(Mode mode, int registers_to_save, Zone* zone); virtual ~RegExpMacroAssemblerARM(); virtual int stack_limit_slack(); virtual void AdvanceCurrentPosition(int by); @@ -70,7 +70,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { virtual void CheckNotBackReference(int start_reg, Label* on_no_match); virtual void CheckNotBackReferenceIgnoreCase(int start_reg, Label* on_no_match); - virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal); virtual void CheckNotCharacter(unsigned c, Label* on_not_equal); virtual void CheckNotCharacterAfterAnd(unsigned c, unsigned mask, @@ -79,6 +78,14 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { uc16 minus, uc16 mask, Label* on_not_equal); + virtual void CheckCharacterInRange(uc16 from, + uc16 to, + Label* on_in_range); + virtual void CheckCharacterNotInRange(uc16 from, + uc16 to, + Label* on_not_in_range); + virtual void CheckBitInTable(Handle table, Label* on_bit_set); + // Checks whether the given offset from the current position is before // the end of the string. virtual void CheckPosition(int cp_offset, Label* on_outside_input); @@ -105,7 +112,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { virtual void ReadStackPointerFromRegister(int reg); virtual void SetCurrentPositionFromEnd(int by); virtual void SetRegister(int register_index, int to); - virtual void Succeed(); + virtual bool Succeed(); virtual void WriteCurrentPositionToRegister(int reg, int cp_offset); virtual void ClearRegisters(int reg_from, int reg_to); virtual void WriteStackPointerToRegister(int reg); @@ -129,7 +136,8 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize; // Stack parameters placed by caller. static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize; - static const int kStackHighEnd = kRegisterOutput + kPointerSize; + static const int kNumOutputRegisters = kRegisterOutput + kPointerSize; + static const int kStackHighEnd = kNumOutputRegisters + kPointerSize; static const int kDirectCall = kStackHighEnd + kPointerSize; static const int kIsolate = kDirectCall + kPointerSize; @@ -141,10 +149,10 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { static const int kInputString = kStartIndex - kPointerSize; // When adding local variables remember to push space for them in // the frame in GetCode. 
- static const int kInputStartMinusOne = kInputString - kPointerSize; - static const int kAtStart = kInputStartMinusOne - kPointerSize; + static const int kSuccessfulCaptures = kInputString - kPointerSize; + static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize; // First register address. Following registers are below it on the stack. - static const int kRegisterZero = kAtStart - kPointerSize; + static const int kRegisterZero = kInputStartMinusOne - kPointerSize; // Initial size of code buffer. static const size_t kRegExpCodeSize = 1024; diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h index 585f1e0176..d1cad15bd0 100644 --- a/deps/v8/src/arm/simulator-arm.h +++ b/deps/v8/src/arm/simulator-arm.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -49,16 +49,16 @@ namespace internal { (entry(p0, p1, p2, p3, p4)) typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*, - void*, int*, Address, int, Isolate*); + void*, int*, int, Address, int, Isolate*); // Call the generated regexp code directly. The code at the entry address // should act as a function matching the type arm_regexp_matcher. // The fifth argument is a dummy that reserves the space used for // the return address added by the ExitFrame in native calls. -#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \ +#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \ (FUNCTION_CAST(entry)( \ - p0, p1, p2, p3, NULL, p4, p5, p6, p7)) + p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)) #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ reinterpret_cast(try_catch_address) @@ -401,9 +401,9 @@ class Simulator { reinterpret_cast(Simulator::current(Isolate::Current())->Call( \ FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4)) -#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \ +#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \ Simulator::current(Isolate::Current())->Call( \ - entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7) + entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8) #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ try_catch_address == NULL ? \ diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index d514b607ae..dd9de23fa4 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -435,22 +435,59 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, Handle object, int index, Handle transition, + Handle name, Register receiver_reg, Register name_reg, - Register scratch, + Register scratch1, + Register scratch2, Label* miss_label) { // r0 : value Label exit; + LookupResult lookup(masm->isolate()); + object->Lookup(*name, &lookup); + if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) { + // In sloppy mode, we could just return the value and be done. However, we + // might be in strict mode, where we have to throw. Since we cannot tell, + // go into slow case unconditionally. + __ jmp(miss_label); + return; + } + // Check that the map of the object hasn't changed. CompareMapMode mode = transition.is_null() ? 
ALLOW_ELEMENT_TRANSITION_MAPS : REQUIRE_EXACT_MAP; - __ CheckMap(receiver_reg, scratch, Handle(object->map()), miss_label, + __ CheckMap(receiver_reg, scratch1, Handle(object->map()), miss_label, DO_SMI_CHECK, mode); // Perform global security token check if needed. if (object->IsJSGlobalProxy()) { - __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label); + __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label); + } + + // Check that we are allowed to write this. + if (!transition.is_null() && object->GetPrototype()->IsJSObject()) { + JSObject* holder; + if (lookup.IsFound()) { + holder = lookup.holder(); + } else { + // Find the top object. + holder = *object; + do { + holder = JSObject::cast(holder->GetPrototype()); + } while (holder->GetPrototype()->IsJSObject()); + } + // We need an extra register, push + __ push(name_reg); + Label miss_pop, done_check; + CheckPrototypes(object, receiver_reg, Handle(holder), name_reg, + scratch1, scratch2, name, &miss_pop); + __ jmp(&done_check); + __ bind(&miss_pop); + __ pop(name_reg); + __ jmp(miss_label); + __ bind(&done_check); + __ pop(name_reg); } // Stub never generated for non-global objects that require access @@ -473,10 +510,20 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, } if (!transition.is_null()) { - // Update the map of the object; no write barrier updating is - // needed because the map is never in new space. - __ mov(ip, Operand(transition)); - __ str(ip, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); + // Update the map of the object. + __ mov(scratch1, Operand(transition)); + __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); + + // Update the write barrier for the map field and pass the now unused + // name_reg as scratch register. + __ RecordWriteField(receiver_reg, + HeapObject::kMapOffset, + scratch1, + name_reg, + kLRHasNotBeenSaved, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); } // Adjust for the number of properties stored in the object. Even in the @@ -498,15 +545,16 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, __ RecordWriteField(receiver_reg, offset, name_reg, - scratch, + scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs); } else { // Write to the properties array. int offset = index * kPointerSize + FixedArray::kHeaderSize; // Get the properties array - __ ldr(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); - __ str(r0, FieldMemOperand(scratch, offset)); + __ ldr(scratch1, + FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); + __ str(r0, FieldMemOperand(scratch1, offset)); // Skip updating write barrier if storing a smi. __ JumpIfSmi(r0, &exit); @@ -514,7 +562,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Ok to clobber receiver_reg and name_reg, since we return. 
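GenerateStoreField used to skip the write barrier when storing a transition map, on the grounds that maps never live in new space; with incremental marking that is no longer sufficient, because a black (already scanned) object that gains any new pointer must report the edge or the marker can miss it — hence the RecordWriteField after the map store above, with OMIT_REMEMBERED_SET and OMIT_SMI_CHECK since only the marking side is needed. The shape of such a barrier, as a hedged sketch:

    // Stand-in for the collector's slot-recording hook (hypothetical).
    static void RecordSlot(void* host, void** slot, void* value) {
      (void)host; (void)slot; (void)value;
    }

    // Pointer store plus incremental-marking barrier; the generated code
    // additionally filters by object page flags before calling out.
    static void StoreWithBarrier(void* host, void** slot, void* value,
                                 bool marking_active) {
      *slot = value;
      if (marking_active) RecordSlot(host, slot, value);
    }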
__ mov(name_reg, r0); - __ RecordWriteField(scratch, + __ RecordWriteField(scratch1, offset, name_reg, receiver_reg, @@ -582,6 +630,8 @@ static void PushInterceptorArguments(MacroAssembler* masm, __ push(holder); __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset)); __ push(scratch); + __ mov(scratch, Operand(ExternalReference::isolate_address())); + __ push(scratch); } @@ -596,7 +646,7 @@ static void CompileCallLoadPropertyWithInterceptor( ExternalReference ref = ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly), masm->isolate()); - __ mov(r0, Operand(5)); + __ mov(r0, Operand(6)); __ mov(r1, Operand(ref)); CEntryStub stub(1); @@ -604,9 +654,9 @@ static void CompileCallLoadPropertyWithInterceptor( } -static const int kFastApiCallArguments = 3; +static const int kFastApiCallArguments = 4; -// Reserves space for the extra arguments to FastHandleApiCall in the +// Reserves space for the extra arguments to API function in the // caller's frame. // // These arguments are set by CheckPrototypes and GenerateFastApiDirectCall. @@ -632,7 +682,8 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, // -- sp[0] : holder (set by CheckPrototypes) // -- sp[4] : callee JS function // -- sp[8] : call data - // -- sp[12] : last JS argument + // -- sp[12] : isolate + // -- sp[16] : last JS argument // -- ... // -- sp[(argc + 3) * 4] : first JS argument // -- sp[(argc + 4) * 4] : receiver @@ -642,7 +693,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, __ LoadHeapObject(r5, function); __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset)); - // Pass the additional arguments FastHandleApiCall expects. + // Pass the additional arguments. Handle api_call_info = optimization.api_call_info(); Handle call_data(api_call_info->data()); if (masm->isolate()->heap()->InNewSpace(*call_data)) { @@ -651,13 +702,15 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, } else { __ Move(r6, call_data); } - // Store JS function and call data. - __ stm(ib, sp, r5.bit() | r6.bit()); + __ mov(r7, Operand(ExternalReference::isolate_address())); + // Store JS function, call data and isolate. + __ stm(ib, sp, r5.bit() | r6.bit() | r7.bit()); - // r2 points to call data as expected by Arguments - // (refer to layout above). - __ add(r2, sp, Operand(2 * kPointerSize)); + // Prepare arguments. + __ add(r2, sp, Operand(3 * kPointerSize)); + // Allocate the v8::Arguments structure in the arguments' space since + // it's not controlled by GC. const int kApiStackSpace = 4; FrameScope frame_scope(masm, StackFrame::MANUAL); @@ -666,9 +719,9 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, // r0 = v8::Arguments& // Arguments is after the return address. __ add(r0, sp, Operand(1 * kPointerSize)); - // v8::Arguments::implicit_args = data + // v8::Arguments::implicit_args_ __ str(r2, MemOperand(r0, 0 * kPointerSize)); - // v8::Arguments::values = last argument + // v8::Arguments::values_ __ add(ip, r2, Operand(argc * kPointerSize)); __ str(ip, MemOperand(r0, 1 * kPointerSize)); // v8::Arguments::length_ = argc @@ -845,7 +898,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { __ CallExternalReference( ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall), masm->isolate()), - 5); + 6); // Restore the name_ register. __ pop(name_); // Leave the internal frame. 
@@ -1204,7 +1257,9 @@ void StubCompiler::GenerateLoadCallback(Handle object, } else { __ Move(scratch3, Handle(callback->data())); } - __ Push(reg, scratch3, name_reg); + __ Push(reg, scratch3); + __ mov(scratch3, Operand(ExternalReference::isolate_address())); + __ Push(scratch3, name_reg); __ mov(r0, sp); // r0 = Handle const int kApiStackSpace = 1; @@ -1216,7 +1271,7 @@ void StubCompiler::GenerateLoadCallback(Handle object, __ str(scratch2, MemOperand(sp, 1 * kPointerSize)); __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo& - const int kStackUnwindSpace = 4; + const int kStackUnwindSpace = 5; Address getter_address = v8::ToCData
<Address>(callback->getter()); ApiFunction fun(getter_address); ExternalReference ref = @@ -1252,8 +1307,9 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, compile_followup_inline = true; } else if (lookup->type() == CALLBACKS && lookup->GetCallbackObject()->IsAccessorInfo()) { - compile_followup_inline = - AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL; + AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject()); + compile_followup_inline = callback->getter() != NULL && + callback->IsCompatibleReceiver(*object); } } @@ -1344,20 +1400,19 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, if (!receiver.is(holder_reg)) { ASSERT(scratch1.is(holder_reg)); __ Push(receiver, holder_reg); - __ ldr(scratch3, - FieldMemOperand(scratch2, AccessorInfo::kDataOffset)); - __ Push(scratch3, scratch2, name_reg); } else { __ push(receiver); - __ ldr(scratch3, - FieldMemOperand(scratch2, AccessorInfo::kDataOffset)); - __ Push(holder_reg, scratch3, scratch2, name_reg); + __ push(holder_reg); } + __ ldr(scratch3, + FieldMemOperand(scratch2, AccessorInfo::kDataOffset)); + __ mov(scratch1, Operand(ExternalReference::isolate_address())); + __ Push(scratch3, scratch1, scratch2, name_reg); ExternalReference ref = ExternalReference(IC_Utility(IC::kLoadCallbackProperty), masm()->isolate()); - __ TailCallExternalReference(ref, 5, 1); + __ TailCallExternalReference(ref, 6, 1); } } else { // !compile_followup_inline // Call the runtime system to load the interceptor. @@ -1371,7 +1426,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, ExternalReference ref = ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), masm()->isolate()); - __ TailCallExternalReference(ref, 5, 1); + __ TailCallExternalReference(ref, 6, 1); } } @@ -1575,16 +1630,29 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( __ jmp(&fast_object); // In case of fast smi-only, convert to fast object, otherwise bail out. __ bind(&not_fast_object); - __ CheckFastSmiOnlyElements(r3, r7, &call_builtin); + __ CheckFastSmiElements(r3, r7, &call_builtin); // r1: receiver // r3: map - __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + Label try_holey_map; + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, r3, r7, + &try_holey_map); + __ mov(r2, receiver); + ElementsTransitionGenerator:: + GenerateMapChangeElementsTransition(masm()); + __ jmp(&fast_object); + + __ bind(&try_holey_map); + __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS, + FAST_HOLEY_ELEMENTS, + r3, + r7, &call_builtin); __ mov(r2, receiver); - ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm()); + ElementsTransitionGenerator:: + GenerateMapChangeElementsTransition(masm()); __ bind(&fast_object); } else { __ CheckFastObjectElements(r3, r3, &call_builtin); @@ -1739,7 +1807,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall( // We can't address the last element in one operation. Compute the more // expensive shift first, and use an offset later on. __ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ ldr(r0, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag)); + __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize)); __ cmp(r0, r6); __ b(eq, &call_builtin); @@ -1747,7 +1815,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall( __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); // Fill with the hole.
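The loads and stores rewritten just below swap manual tag arithmetic for FieldMemOperand. Tagged heap pointers carry a set low bit (kHeapObjectTag), so the field at byte offset N of an object lives at base + N - 1; FieldMemOperand folds that -1 into the addressing mode. Sketch of the equivalence, hedged, with the constant as on 32-bit ARM:

    #include <cstdint>

    const int kHeapObjectTag = 1;  // low tag bit set on heap object pointers

    // Untagged address of the field at 'offset' inside tagged object 'obj';
    // so FieldMemOperand(obj, off) == MemOperand(obj, off - kHeapObjectTag).
    inline uintptr_t FieldAddress(uintptr_t tagged_obj, int offset) {
      return tagged_obj + offset - kHeapObjectTag;
    }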
- __ str(r6, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag)); + __ str(r6, FieldMemOperand(elements, FixedArray::kHeaderSize)); __ Drop(argc + 1); __ Ret(); @@ -2539,7 +2607,13 @@ Handle StoreStubCompiler::CompileStoreField(Handle object, // ----------------------------------- Label miss; - GenerateStoreField(masm(), object, index, transition, r1, r2, r3, &miss); + GenerateStoreField(masm(), + object, + index, + transition, + name, + r1, r2, r3, r4, + &miss); __ bind(&miss); Handle ic = masm()->isolate()->builtins()->StoreIC_Miss(); __ Jump(ic, RelocInfo::CODE_TARGET); @@ -2594,6 +2668,51 @@ Handle StoreStubCompiler::CompileStoreCallback( } +Handle StoreStubCompiler::CompileStoreViaSetter( + Handle receiver, + Handle setter, + Handle name) { + // ----------- S t a t e ------------- + // -- r0 : value + // -- r1 : receiver + // -- r2 : name + // -- lr : return address + // ----------------------------------- + Label miss; + + // Check that the map of the object hasn't changed. + __ CheckMap(r1, r3, Handle(receiver->map()), &miss, DO_SMI_CHECK, + ALLOW_ELEMENT_TRANSITION_MAPS); + + { + FrameScope scope(masm(), StackFrame::INTERNAL); + + // Save value register, so we can restore it later. + __ push(r0); + + // Call the JavaScript getter with the receiver and the value on the stack. + __ Push(r1, r0); + ParameterCount actual(1); + __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(), + CALL_AS_METHOD); + + // We have to return the passed value, not the return value of the setter. + __ pop(r0); + + // Restore context register. + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + } + __ Ret(); + + __ bind(&miss); + Handle ic = masm()->isolate()->builtins()->StoreIC_Miss(); + __ Jump(ic, RelocInfo::CODE_TARGET); + + // Return the generated code. + return GetCode(CALLBACKS, name); +} + + Handle StoreStubCompiler::CompileStoreInterceptor( Handle receiver, Handle name) { @@ -2761,6 +2880,44 @@ Handle LoadStubCompiler::CompileLoadCallback( } +Handle LoadStubCompiler::CompileLoadViaGetter( + Handle name, + Handle receiver, + Handle holder, + Handle getter) { + // ----------- S t a t e ------------- + // -- r0 : receiver + // -- r2 : name + // -- lr : return address + // ----------------------------------- + Label miss; + + // Check that the maps haven't changed. + __ JumpIfSmi(r0, &miss); + CheckPrototypes(receiver, r0, holder, r3, r4, r1, name, &miss); + + { + FrameScope scope(masm(), StackFrame::INTERNAL); + + // Call the JavaScript getter with the receiver on the stack. + __ push(r0); + ParameterCount actual(0); + __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(), + CALL_AS_METHOD); + + // Restore context register. + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + } + __ Ret(); + + __ bind(&miss); + GenerateLoadMiss(masm(), Code::LOAD_IC); + + // Return the generated code. + return GetCode(CALLBACKS, name); +} + + Handle LoadStubCompiler::CompileLoadConstant(Handle object, Handle holder, Handle value, @@ -3085,7 +3242,13 @@ Handle KeyedStoreStubCompiler::CompileStoreField(Handle object, // r3 is used as scratch register. r1 and r2 keep their values if a jump to // the miss label is generated. 
- GenerateStoreField(masm(), object, index, transition, r2, r1, r3, &miss); + GenerateStoreField(masm(), + object, + index, + transition, + name, + r2, r1, r3, r4, + &miss); __ bind(&miss); __ DecrementCounter(counters->keyed_store_field(), 1, r3, r4); @@ -3366,8 +3529,11 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) { case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3377,6 +3543,44 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) { } +static void GenerateSmiKeyCheck(MacroAssembler* masm, + Register key, + Register scratch0, + Register scratch1, + DwVfpRegister double_scratch0, + Label* fail) { + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + Label key_ok; + // Check for smi or a smi inside a heap number. We convert the heap + // number and check if the conversion is exact and fits into the smi + // range. + __ JumpIfSmi(key, &key_ok); + __ CheckMap(key, + scratch0, + Heap::kHeapNumberMapRootIndex, + fail, + DONT_DO_SMI_CHECK); + __ sub(ip, key, Operand(kHeapObjectTag)); + __ vldr(double_scratch0, ip, HeapNumber::kValueOffset); + __ EmitVFPTruncate(kRoundToZero, + double_scratch0.low(), + double_scratch0, + scratch0, + scratch1, + kCheckForInexactConversion); + __ b(ne, fail); + __ vmov(scratch0, double_scratch0.low()); + __ TrySmiTag(scratch0, fail, scratch1); + __ mov(key, scratch0); + __ bind(&key_ok); + } else { + // Check that the key is a smi. + __ JumpIfNotSmi(key, fail); + } +} + + void KeyedLoadStubCompiler::GenerateLoadExternalArray( MacroAssembler* masm, ElementsKind elements_kind) { @@ -3393,8 +3597,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. - // Check that the key is a smi. - __ JumpIfNotSmi(key, &miss_force_generic); + // Check that the key is a smi or a heap number convertible to a smi. + GenerateSmiKeyCheck(masm, key, r4, r5, d1, &miss_force_generic); __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); // r3: elements array @@ -3453,8 +3657,11 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( } break; case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3724,8 +3931,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. - // Check that the key is a smi. - __ JumpIfNotSmi(key, &miss_force_generic); + // Check that the key is a smi or a heap number convertible to a smi. 
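GenerateSmiKeyCheck, defined above, is why these stubs now tolerate heap-number keys: a double key is accepted iff truncation is exact and the result fits in a smi; otherwise the stub falls through to the generic path. Its VFP path corresponds to this hedged C++ sketch (the real code reads the VFP inexact flag instead of re-comparing):

    #include <cstdint>

    // Accepts 'key' iff it denotes an integer in smi range (31-bit payload
    // on 32-bit ARM); on success *out holds the smi-ready value.
    bool KeyToSmi(double key, int32_t* out) {
      const double kSmiMax = 1073741823.0;    // 2^30 - 1
      const double kSmiMin = -1073741824.0;   // -2^30
      if (!(key >= kSmiMin && key <= kSmiMax)) return false;  // also NaN
      int32_t i = static_cast<int32_t>(key);  // truncates toward zero
      if (static_cast<double>(i) != key) return false;       // inexact
      *out = i;
      return true;
    }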
+ GenerateSmiKeyCheck(masm, key, r4, r5, d1, &miss_force_generic); __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); @@ -3794,8 +4001,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( } break; case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3858,8 +4068,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3998,8 +4211,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -4050,8 +4266,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) { // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. - // Check that the key is a smi. - __ JumpIfNotSmi(r0, &miss_force_generic); + // Check that the key is a smi or a heap number convertible to a smi. + GenerateSmiKeyCheck(masm, r0, r4, r5, d1, &miss_force_generic); // Get the elements array. __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset)); @@ -4102,8 +4318,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. - // Check that the key is a smi. - __ JumpIfNotSmi(key_reg, &miss_force_generic); + // Check that the key is a smi or a heap number convertible to a smi. + GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic); // Get the elements array. __ ldr(elements_reg, @@ -4178,10 +4394,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. - // Check that the key is a smi. - __ JumpIfNotSmi(key_reg, &miss_force_generic); + // Check that the key is a smi or a heap number convertible to a smi. 
+ GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic); - if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + if (IsFastSmiElementsKind(elements_kind)) { __ JumpIfNotSmi(value_reg, &transition_elements_kind); } @@ -4209,7 +4425,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( DONT_DO_SMI_CHECK); __ bind(&finish_store); - if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + if (IsFastSmiElementsKind(elements_kind)) { __ add(scratch, elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); @@ -4219,7 +4435,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); __ str(value_reg, MemOperand(scratch)); } else { - ASSERT(elements_kind == FAST_ELEMENTS); + ASSERT(IsFastObjectElementsKind(elements_kind)); __ add(scratch, elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); @@ -4345,7 +4561,9 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. - __ JumpIfNotSmi(key_reg, &miss_force_generic); + + // Check that the key is a smi or a heap number convertible to a smi. + GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic); __ ldr(elements_reg, FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js index 00a4fee5cd..a1cc5b6a7d 100644 --- a/deps/v8/src/array.js +++ b/deps/v8/src/array.js @@ -1,4 +1,4 @@ -// Copyright 2010 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -465,15 +465,19 @@ function ArrayPush() { } +// Returns an array containing the array elements of the object followed +// by the array elements of each argument in order. See ECMA-262, +// section 15.4.4.7. function ArrayConcat(arg1) { // length == 1 if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) { throw MakeTypeError("called_on_null_or_undefined", ["Array.prototype.concat"]); } + var array = ToObject(this); var arg_count = %_ArgumentsLength(); var arrays = new InternalArray(1 + arg_count); - arrays[0] = this; + arrays[0] = array; for (var i = 0; i < arg_count; i++) { arrays[i + 1] = %_Arguments(i); } @@ -1027,13 +1031,28 @@ function ArrayFilter(f, receiver) { var result = new $Array(); var accumulator = new InternalArray(); var accumulator_length = 0; - for (var i = 0; i < length; i++) { - if (i in array) { - var element = array[i]; - if (%_CallFunction(receiver, element, i, array, f)) { - accumulator[accumulator_length++] = element; + if (%DebugCallbackSupportsStepping(f)) { + for (var i = 0; i < length; i++) { + if (i in array) { + var element = array[i]; + // Prepare break slots for debugger step in. + %DebugPrepareStepInIfStepping(f); + if (%_CallFunction(receiver, element, i, array, f)) { + accumulator[accumulator_length++] = element; + } + } + } + } else { + // This is a duplicate of the previous loop sans debug stepping. + for (var i = 0; i < length; i++) { + if (i in array) { + var element = array[i]; + if (%_CallFunction(receiver, element, i, array, f)) { + accumulator[accumulator_length++] = element; + } } } + // End of duplicate. 
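+  // (The duplication trades code size for speed: %DebugCallbackSupportsStepping
+  // is queried once per call, and the common path runs without the
+  // per-iteration %DebugPrepareStepInIfStepping runtime call.)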
} %MoveArrayContents(accumulator, result); return result; @@ -1059,12 +1078,24 @@ function ArrayForEach(f, receiver) { } else if (!IS_SPEC_OBJECT(receiver)) { receiver = ToObject(receiver); } - - for (var i = 0; i < length; i++) { - if (i in array) { - var element = array[i]; - %_CallFunction(receiver, element, i, array, f); + if (%DebugCallbackSupportsStepping(f)) { + for (var i = 0; i < length; i++) { + if (i in array) { + var element = array[i]; + // Prepare break slots for debugger step in. + %DebugPrepareStepInIfStepping(f); + %_CallFunction(receiver, element, i, array, f); + } } + } else { + // This is a duplicate of the previous loop sans debug stepping. + for (var i = 0; i < length; i++) { + if (i in array) { + var element = array[i]; + %_CallFunction(receiver, element, i, array, f); + } + } + // End of duplicate. } } @@ -1091,11 +1122,24 @@ function ArraySome(f, receiver) { receiver = ToObject(receiver); } - for (var i = 0; i < length; i++) { - if (i in array) { - var element = array[i]; - if (%_CallFunction(receiver, element, i, array, f)) return true; + if (%DebugCallbackSupportsStepping(f)) { + for (var i = 0; i < length; i++) { + if (i in array) { + var element = array[i]; + // Prepare break slots for debugger step in. + %DebugPrepareStepInIfStepping(f); + if (%_CallFunction(receiver, element, i, array, f)) return true; + } + } + } else { + // This is a duplicate of the previous loop sans debug stepping. + for (var i = 0; i < length; i++) { + if (i in array) { + var element = array[i]; + if (%_CallFunction(receiver, element, i, array, f)) return true; + } } + // End of duplicate. } return false; } @@ -1121,11 +1165,24 @@ function ArrayEvery(f, receiver) { receiver = ToObject(receiver); } - for (var i = 0; i < length; i++) { - if (i in array) { - var element = array[i]; - if (!%_CallFunction(receiver, element, i, array, f)) return false; + if (%DebugCallbackSupportsStepping(f)) { + for (var i = 0; i < length; i++) { + if (i in array) { + var element = array[i]; + // Prepare break slots for debugger step in. + %DebugPrepareStepInIfStepping(f); + if (!%_CallFunction(receiver, element, i, array, f)) return false; + } + } + } else { + // This is a duplicate of the previous loop sans debug stepping. + for (var i = 0; i < length; i++) { + if (i in array) { + var element = array[i]; + if (!%_CallFunction(receiver, element, i, array, f)) return false; + } } + // End of duplicate. } return true; } @@ -1152,11 +1209,24 @@ function ArrayMap(f, receiver) { var result = new $Array(); var accumulator = new InternalArray(length); - for (var i = 0; i < length; i++) { - if (i in array) { - var element = array[i]; - accumulator[i] = %_CallFunction(receiver, element, i, array, f); + if (%DebugCallbackSupportsStepping(f)) { + for (var i = 0; i < length; i++) { + if (i in array) { + var element = array[i]; + // Prepare break slots for debugger step in. + %DebugPrepareStepInIfStepping(f); + accumulator[i] = %_CallFunction(receiver, element, i, array, f); + } } + } else { + // This is a duplicate of the previous loop sans debug stepping. + for (var i = 0; i < length; i++) { + if (i in array) { + var element = array[i]; + accumulator[i] = %_CallFunction(receiver, element, i, array, f); + } + } + // End of duplicate. 
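+  // (The accumulator is an InternalArray, so the element writes cannot hit
+  // accessors installed on Array.prototype; %MoveArrayContents then
+  // transplants its backing store into the user-visible result array.)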
} %MoveArrayContents(accumulator, result); return result; @@ -1311,11 +1381,27 @@ function ArrayReduce(callback, current) { } var receiver = %GetDefaultReceiver(callback); - for (; i < length; i++) { - if (i in array) { - var element = array[i]; - current = %_CallFunction(receiver, current, element, i, array, callback); + + if (%DebugCallbackSupportsStepping(callback)) { + for (; i < length; i++) { + if (i in array) { + var element = array[i]; + // Prepare break slots for debugger step in. + %DebugPrepareStepInIfStepping(callback); + current = + %_CallFunction(receiver, current, element, i, array, callback); + } + } + } else { + // This is a duplicate of the previous loop sans debug stepping. + for (; i < length; i++) { + if (i in array) { + var element = array[i]; + current = + %_CallFunction(receiver, current, element, i, array, callback); + } } + // End of duplicate. } return current; } @@ -1348,11 +1434,27 @@ function ArrayReduceRight(callback, current) { } var receiver = %GetDefaultReceiver(callback); - for (; i >= 0; i--) { - if (i in array) { - var element = array[i]; - current = %_CallFunction(receiver, current, element, i, array, callback); + + if (%DebugCallbackSupportsStepping(callback)) { + for (; i >= 0; i--) { + if (i in array) { + var element = array[i]; + // Prepare break slots for debugger step in. + %DebugPrepareStepInIfStepping(callback); + current = + %_CallFunction(receiver, current, element, i, array, callback); + } + } + } else { + // This is a duplicate of the previous loop sans debug stepping. + for (; i >= 0; i--) { + if (i in array) { + var element = array[i]; + current = + %_CallFunction(receiver, current, element, i, array, callback); + } } + // End of duplicate. } return current; } diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index 4944202f07..d4c49ddd45 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -99,21 +99,7 @@ struct DoubleConstant BASE_EMBEDDED { double the_hole_nan; }; -struct InitializeDoubleConstants { - static void Construct(DoubleConstant* double_constants) { - double_constants->min_int = kMinInt; - double_constants->one_half = 0.5; - double_constants->minus_zero = -0.0; - double_constants->uint8_max_value = 255; - double_constants->zero = 0.0; - double_constants->canonical_non_hole_nan = OS::nan_value(); - double_constants->the_hole_nan = BitCast(kHoleNanInt64); - double_constants->negative_infinity = -V8_INFINITY; - } -}; - -static LazyInstance::type - double_constants = LAZY_INSTANCE_INITIALIZER; +static DoubleConstant double_constants; const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING"; @@ -726,6 +712,18 @@ void RelocInfo::Verify() { // ----------------------------------------------------------------------------- // Implementation of ExternalReference +void ExternalReference::SetUp() { + double_constants.min_int = kMinInt; + double_constants.one_half = 0.5; + double_constants.minus_zero = -0.0; + double_constants.uint8_max_value = 255; + double_constants.zero = 0.0; + double_constants.canonical_non_hole_nan = OS::nan_value(); + double_constants.the_hole_nan = BitCast(kHoleNanInt64); + double_constants.negative_infinity = -V8_INFINITY; +} + + ExternalReference::ExternalReference(Builtins::CFunctionId id, Isolate* isolate) : address_(Redirect(isolate, Builtins::c_function_address(id))) {} @@ -957,51 +955,66 @@ ExternalReference ExternalReference::scheduled_exception_address( } +ExternalReference ExternalReference::address_of_pending_message_obj( + Isolate* isolate) { + return 
ExternalReference(isolate->pending_message_obj_address()); +} + + +ExternalReference ExternalReference::address_of_has_pending_message( + Isolate* isolate) { + return ExternalReference(isolate->has_pending_message_address()); +} + + +ExternalReference ExternalReference::address_of_pending_message_script( + Isolate* isolate) { + return ExternalReference(isolate->pending_message_script_address()); +} + + ExternalReference ExternalReference::address_of_min_int() { - return ExternalReference(reinterpret_cast( - &double_constants.Pointer()->min_int)); + return ExternalReference(reinterpret_cast(&double_constants.min_int)); } ExternalReference ExternalReference::address_of_one_half() { - return ExternalReference(reinterpret_cast( - &double_constants.Pointer()->one_half)); + return ExternalReference(reinterpret_cast(&double_constants.one_half)); } ExternalReference ExternalReference::address_of_minus_zero() { - return ExternalReference(reinterpret_cast( - &double_constants.Pointer()->minus_zero)); + return ExternalReference( + reinterpret_cast(&double_constants.minus_zero)); } ExternalReference ExternalReference::address_of_zero() { - return ExternalReference(reinterpret_cast( - &double_constants.Pointer()->zero)); + return ExternalReference(reinterpret_cast(&double_constants.zero)); } ExternalReference ExternalReference::address_of_uint8_max_value() { - return ExternalReference(reinterpret_cast( - &double_constants.Pointer()->uint8_max_value)); + return ExternalReference( + reinterpret_cast(&double_constants.uint8_max_value)); } ExternalReference ExternalReference::address_of_negative_infinity() { - return ExternalReference(reinterpret_cast( - &double_constants.Pointer()->negative_infinity)); + return ExternalReference( + reinterpret_cast(&double_constants.negative_infinity)); } ExternalReference ExternalReference::address_of_canonical_non_hole_nan() { - return ExternalReference(reinterpret_cast( - &double_constants.Pointer()->canonical_non_hole_nan)); + return ExternalReference( + reinterpret_cast(&double_constants.canonical_non_hole_nan)); } ExternalReference ExternalReference::address_of_the_hole_nan() { - return ExternalReference(reinterpret_cast( - &double_constants.Pointer()->the_hole_nan)); + return ExternalReference( + reinterpret_cast(&double_constants.the_hole_nan)); } @@ -1138,6 +1151,12 @@ ExternalReference ExternalReference::math_log_double_function( } +ExternalReference ExternalReference::page_flags(Page* page) { + return ExternalReference(reinterpret_cast
(page) + + MemoryChunk::kFlagsOffset); +} + + // Helper function to compute x^y, where y is known to be an // integer. Uses binary decomposition to limit the number of // multiplications; see the discussion in "Hacker's Delight" by Henry @@ -1158,6 +1177,20 @@ double power_double_int(double x, int y) { double power_double_double(double x, double y) { +#ifdef __MINGW64_VERSION_MAJOR + // MinGW64 has a custom implementation for pow. This handles certain + // special cases that are different. + if ((x == 0.0 || isinf(x)) && isfinite(y)) { + double f; + if (modf(y, &f) != 0.0) return ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0; + } + + if (x == 2.0) { + int y_int = static_cast(y); + if (y == y_int) return ldexp(1.0, y_int); + } +#endif + // The checks for special cases can be dropped in ia32 because it has already // been done in generated code before bailing out here. if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) return OS::nan_value(); diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index f960b58691..619c69c4b2 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -539,6 +539,8 @@ class ExternalReference BASE_EMBEDDED { DIRECT_GETTER_CALL }; + static void SetUp(); + typedef void* ExternalReferenceRedirector(void* original, Type type); ExternalReference(Builtins::CFunctionId id, Isolate* isolate); @@ -638,6 +640,9 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference handle_scope_level_address(); static ExternalReference scheduled_exception_address(Isolate* isolate); + static ExternalReference address_of_pending_message_obj(Isolate* isolate); + static ExternalReference address_of_has_pending_message(Isolate* isolate); + static ExternalReference address_of_pending_message_script(Isolate* isolate); // Static variables containing common double constants. static ExternalReference address_of_min_int(); @@ -654,6 +659,8 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference math_tan_double_function(Isolate* isolate); static ExternalReference math_log_double_function(Isolate* isolate); + static ExternalReference page_flags(Page* page); + Address address() const {return reinterpret_cast
(address_);} #ifdef ENABLE_DEBUGGER_SUPPORT diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index 4b6ae680a4..0970253c29 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -242,8 +242,11 @@ bool IsEqualNumber(void* first, void* second) { } -void ObjectLiteral::CalculateEmitStore() { - ZoneHashMap table(Literal::Match); +void ObjectLiteral::CalculateEmitStore(Zone* zone) { + ZoneAllocationPolicy allocator(zone); + + ZoneHashMap table(Literal::Match, ZoneHashMap::kDefaultHashMapCapacity, + allocator); for (int i = properties()->length() - 1; i >= 0; i--) { ObjectLiteral::Property* property = properties()->at(i); Literal* literal = property->key(); @@ -252,23 +255,23 @@ void ObjectLiteral::CalculateEmitStore() { // If the key of a computed property is in the table, do not emit // a store for the property later. if (property->kind() == ObjectLiteral::Property::COMPUTED && - table.Lookup(literal, hash, false) != NULL) { + table.Lookup(literal, hash, false, allocator) != NULL) { property->set_emit_store(false); } else { // Add key to the table. - table.Lookup(literal, hash, true); + table.Lookup(literal, hash, true, allocator); } } } -void TargetCollector::AddTarget(Label* target) { +void TargetCollector::AddTarget(Label* target, Zone* zone) { // Add the label to the collector, but discard duplicates. int length = targets_.length(); for (int i = 0; i < length; i++) { if (targets_[i] == target) return; } - targets_.Add(target); + targets_.Add(target, zone); } @@ -397,7 +400,8 @@ bool FunctionDeclaration::IsInlineable() const { // ---------------------------------------------------------------------------- // Recording of type feedback -void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) { +void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle, + Zone* zone) { // Record type feedback from the oracle in the AST. is_uninitialized_ = oracle->LoadIsUninitialized(this); if (is_uninitialized_) return; @@ -421,15 +425,17 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) { } else if (oracle->LoadIsBuiltin(this, Builtins::kKeyedLoadIC_String)) { is_string_access_ = true; } else if (is_monomorphic_) { - receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this)); + receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this), + zone); } else if (oracle->LoadIsMegamorphicWithTypeInfo(this)) { - receiver_types_.Reserve(kMaxKeyedPolymorphism); + receiver_types_.Reserve(kMaxKeyedPolymorphism, zone); oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_); } } -void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) { +void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle, + Zone* zone) { Property* prop = target()->AsProperty(); ASSERT(prop != NULL); is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this); @@ -441,22 +447,23 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) { oracle->StoreReceiverTypes(this, name, &receiver_types_); } else if (is_monomorphic_) { // Record receiver type for monomorphic keyed stores. 
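   // (Allocating through an explicit zone is the recurring change in this
   // file: the receiver-type lists now grow in the active compilation zone
   // rather than through the previously implicit allocator.)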
- receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this)); + receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this), zone); } else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) { - receiver_types_.Reserve(kMaxKeyedPolymorphism); + receiver_types_.Reserve(kMaxKeyedPolymorphism, zone); oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_); } } -void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) { +void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle, + Zone* zone) { is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this); receiver_types_.Clear(); if (is_monomorphic_) { // Record receiver type for monomorphic keyed stores. - receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this)); + receiver_types_.Add(oracle->StoreMonomorphicReceiverType(this), zone); } else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) { - receiver_types_.Reserve(kMaxKeyedPolymorphism); + receiver_types_.Reserve(kMaxKeyedPolymorphism, zone); oracle->CollectKeyedReceiverTypes(this->id(), &receiver_types_); } } @@ -507,7 +514,6 @@ bool Call::ComputeTarget(Handle type, Handle name) { // We don't know the target. return false; case MAP_TRANSITION: - case ELEMENTS_TRANSITION: case CONSTANT_TRANSITION: case NULL_DESCRIPTOR: // Perhaps something interesting is up in the prototype chain... @@ -784,7 +790,7 @@ bool RegExpCapture::IsAnchoredAtEnd() { // output formats are alike. class RegExpUnparser: public RegExpVisitor { public: - RegExpUnparser(); + explicit RegExpUnparser(Zone* zone); void VisitCharacterRange(CharacterRange that); SmartArrayPointer ToString() { return stream_.ToCString(); } #define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, void* data); @@ -794,10 +800,11 @@ class RegExpUnparser: public RegExpVisitor { StringStream* stream() { return &stream_; } HeapStringAllocator alloc_; StringStream stream_; + Zone* zone_; }; -RegExpUnparser::RegExpUnparser() : stream_(&alloc_) { +RegExpUnparser::RegExpUnparser(Zone* zone) : stream_(&alloc_), zone_(zone) { } @@ -837,9 +844,9 @@ void* RegExpUnparser::VisitCharacterClass(RegExpCharacterClass* that, if (that->is_negated()) stream()->Add("^"); stream()->Add("["); - for (int i = 0; i < that->ranges()->length(); i++) { + for (int i = 0; i < that->ranges(zone_)->length(); i++) { if (i > 0) stream()->Add(" "); - VisitCharacterRange(that->ranges()->at(i)); + VisitCharacterRange(that->ranges(zone_)->at(i)); } stream()->Add("]"); return NULL; @@ -941,8 +948,8 @@ void* RegExpUnparser::VisitEmpty(RegExpEmpty* that, void* data) { } -SmartArrayPointer RegExpTree::ToString() { - RegExpUnparser unparser; +SmartArrayPointer RegExpTree::ToString(Zone* zone) { + RegExpUnparser unparser(zone); Accept(&unparser, NULL); return unparser.ToString(); } @@ -962,6 +969,14 @@ RegExpDisjunction::RegExpDisjunction(ZoneList* alternatives) } +static int IncreaseBy(int previous, int increase) { + if (RegExpTree::kInfinity - previous < increase) { + return RegExpTree::kInfinity; + } else { + return previous + increase; + } +} + RegExpAlternative::RegExpAlternative(ZoneList* nodes) : nodes_(nodes) { ASSERT(nodes->length() > 1); @@ -969,13 +984,10 @@ RegExpAlternative::RegExpAlternative(ZoneList* nodes) max_match_ = 0; for (int i = 0; i < nodes->length(); i++) { RegExpTree* node = nodes->at(i); - min_match_ += node->min_match(); + int node_min_match = node->min_match(); + min_match_ = IncreaseBy(min_match_, node_min_match); int node_max_match = node->max_match(); - if (kInfinity - max_match_ < node_max_match) { - max_match_ = 
kInfinity; - } else { - max_match_ += node->max_match(); - } + max_match_ = IncreaseBy(max_match_, node_max_match); } } @@ -993,138 +1005,78 @@ CaseClause::CaseClause(Isolate* isolate, } -#define INCREASE_NODE_COUNT(NodeType) \ +#define REGULAR_NODE(NodeType) \ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \ increase_node_count(); \ } +#define DONT_OPTIMIZE_NODE(NodeType) \ + void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \ + increase_node_count(); \ + add_flag(kDontOptimize); \ + add_flag(kDontInline); \ + add_flag(kDontSelfOptimize); \ + } +#define DONT_INLINE_NODE(NodeType) \ + void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \ + increase_node_count(); \ + add_flag(kDontInline); \ + } +#define DONT_SELFOPTIMIZE_NODE(NodeType) \ + void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \ + increase_node_count(); \ + add_flag(kDontSelfOptimize); \ + } -INCREASE_NODE_COUNT(VariableDeclaration) -INCREASE_NODE_COUNT(FunctionDeclaration) -INCREASE_NODE_COUNT(ModuleDeclaration) -INCREASE_NODE_COUNT(ImportDeclaration) -INCREASE_NODE_COUNT(ExportDeclaration) -INCREASE_NODE_COUNT(ModuleLiteral) -INCREASE_NODE_COUNT(ModuleVariable) -INCREASE_NODE_COUNT(ModulePath) -INCREASE_NODE_COUNT(ModuleUrl) -INCREASE_NODE_COUNT(Block) -INCREASE_NODE_COUNT(ExpressionStatement) -INCREASE_NODE_COUNT(EmptyStatement) -INCREASE_NODE_COUNT(IfStatement) -INCREASE_NODE_COUNT(ContinueStatement) -INCREASE_NODE_COUNT(BreakStatement) -INCREASE_NODE_COUNT(ReturnStatement) -INCREASE_NODE_COUNT(Conditional) -INCREASE_NODE_COUNT(Literal) -INCREASE_NODE_COUNT(ObjectLiteral) -INCREASE_NODE_COUNT(Assignment) -INCREASE_NODE_COUNT(Throw) -INCREASE_NODE_COUNT(Property) -INCREASE_NODE_COUNT(UnaryOperation) -INCREASE_NODE_COUNT(CountOperation) -INCREASE_NODE_COUNT(BinaryOperation) -INCREASE_NODE_COUNT(CompareOperation) -INCREASE_NODE_COUNT(ThisFunction) -INCREASE_NODE_COUNT(Call) -INCREASE_NODE_COUNT(CallNew) - -#undef INCREASE_NODE_COUNT - - -void AstConstructionVisitor::VisitWithStatement(WithStatement* node) { - increase_node_count(); - add_flag(kDontOptimize); - add_flag(kDontInline); -} - - -void AstConstructionVisitor::VisitSwitchStatement(SwitchStatement* node) { - increase_node_count(); - add_flag(kDontInline); -} - - -void AstConstructionVisitor::VisitDoWhileStatement(DoWhileStatement* node) { - increase_node_count(); - add_flag(kDontSelfOptimize); -} - - -void AstConstructionVisitor::VisitWhileStatement(WhileStatement* node) { - increase_node_count(); - add_flag(kDontSelfOptimize); -} - - -void AstConstructionVisitor::VisitForStatement(ForStatement* node) { - increase_node_count(); - add_flag(kDontSelfOptimize); -} - - -void AstConstructionVisitor::VisitForInStatement(ForInStatement* node) { - increase_node_count(); - add_flag(kDontSelfOptimize); -} - - -void AstConstructionVisitor::VisitTryCatchStatement(TryCatchStatement* node) { - increase_node_count(); - add_flag(kDontOptimize); - add_flag(kDontInline); -} - - -void AstConstructionVisitor::VisitTryFinallyStatement( - TryFinallyStatement* node) { - increase_node_count(); - add_flag(kDontOptimize); - add_flag(kDontInline); -} - - -void AstConstructionVisitor::VisitDebuggerStatement(DebuggerStatement* node) { - increase_node_count(); - add_flag(kDontOptimize); - add_flag(kDontInline); -} - - -void AstConstructionVisitor::VisitFunctionLiteral(FunctionLiteral* node) { - increase_node_count(); - add_flag(kDontInline); -} - - -void AstConstructionVisitor::VisitSharedFunctionInfoLiteral( - SharedFunctionInfoLiteral* node) { 
- increase_node_count(); - add_flag(kDontOptimize); - add_flag(kDontInline); -} - - -void AstConstructionVisitor::VisitVariableProxy(VariableProxy* node) { - increase_node_count(); - // In theory, we'd have to add: - // if(node->var()->IsLookupSlot()) { add_flag(kDontInline); } - // However, node->var() is usually not bound yet at VariableProxy creation - // time, and LOOKUP variables only result from constructs that cannot - // be inlined anyway. -} - - -void AstConstructionVisitor::VisitRegExpLiteral(RegExpLiteral* node) { - increase_node_count(); - add_flag(kDontInline); // TODO(1322): Allow materialized literals. -} - - -void AstConstructionVisitor::VisitArrayLiteral(ArrayLiteral* node) { - increase_node_count(); - add_flag(kDontInline); // TODO(1322): Allow materialized literals. -} - +REGULAR_NODE(VariableDeclaration) +REGULAR_NODE(FunctionDeclaration) +REGULAR_NODE(Block) +REGULAR_NODE(ExpressionStatement) +REGULAR_NODE(EmptyStatement) +REGULAR_NODE(IfStatement) +REGULAR_NODE(ContinueStatement) +REGULAR_NODE(BreakStatement) +REGULAR_NODE(ReturnStatement) +REGULAR_NODE(SwitchStatement) +REGULAR_NODE(Conditional) +REGULAR_NODE(Literal) +REGULAR_NODE(ObjectLiteral) +REGULAR_NODE(Assignment) +REGULAR_NODE(Throw) +REGULAR_NODE(Property) +REGULAR_NODE(UnaryOperation) +REGULAR_NODE(CountOperation) +REGULAR_NODE(BinaryOperation) +REGULAR_NODE(CompareOperation) +REGULAR_NODE(ThisFunction) +REGULAR_NODE(Call) +REGULAR_NODE(CallNew) +// In theory, for VariableProxy we'd have to add: +// if (node->var()->IsLookupSlot()) add_flag(kDontInline); +// But node->var() is usually not bound yet at VariableProxy creation time, and +// LOOKUP variables only result from constructs that cannot be inlined anyway. +REGULAR_NODE(VariableProxy) + +DONT_OPTIMIZE_NODE(ModuleDeclaration) +DONT_OPTIMIZE_NODE(ImportDeclaration) +DONT_OPTIMIZE_NODE(ExportDeclaration) +DONT_OPTIMIZE_NODE(ModuleLiteral) +DONT_OPTIMIZE_NODE(ModuleVariable) +DONT_OPTIMIZE_NODE(ModulePath) +DONT_OPTIMIZE_NODE(ModuleUrl) +DONT_OPTIMIZE_NODE(WithStatement) +DONT_OPTIMIZE_NODE(TryCatchStatement) +DONT_OPTIMIZE_NODE(TryFinallyStatement) +DONT_OPTIMIZE_NODE(DebuggerStatement) +DONT_OPTIMIZE_NODE(SharedFunctionInfoLiteral) + +DONT_INLINE_NODE(FunctionLiteral) +DONT_INLINE_NODE(RegExpLiteral) // TODO(1322): Allow materialized literals. +DONT_INLINE_NODE(ArrayLiteral) // TODO(1322): Allow materialized literals. 
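+// (Each family expands into a Visit##NodeType method: REGULAR_NODE merely
+// counts the node, DONT_OPTIMIZE_NODE additionally sets all three kDont*
+// flags, DONT_INLINE_NODE sets kDontInline, and DONT_SELFOPTIMIZE_NODE sets
+// kDontSelfOptimize, replacing the hand-written visitor bodies removed
+// above.)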
+ +DONT_SELFOPTIMIZE_NODE(DoWhileStatement) +DONT_SELFOPTIMIZE_NODE(WhileStatement) +DONT_SELFOPTIMIZE_NODE(ForStatement) +DONT_SELFOPTIMIZE_NODE(ForInStatement) void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) { increase_node_count(); @@ -1142,6 +1094,11 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) { } } +#undef REGULAR_NODE +#undef DONT_OPTIMIZE_NODE +#undef DONT_INLINE_NODE +#undef DONT_SELFOPTIMIZE_NODE + Handle Literal::ToString() { if (handle_->IsString()) return Handle::cast(handle_); diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index b827302ebd..02ece7fe61 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -266,16 +266,17 @@ class Statement: public AstNode { class SmallMapList { public: SmallMapList() {} - explicit SmallMapList(int capacity) : list_(capacity) {} + SmallMapList(int capacity, Zone* zone) : list_(capacity, zone) {} - void Reserve(int capacity) { list_.Reserve(capacity); } + void Reserve(int capacity, Zone* zone) { list_.Reserve(capacity, zone); } void Clear() { list_.Clear(); } + void Sort() { list_.Sort(); } bool is_empty() const { return list_.is_empty(); } int length() const { return list_.length(); } - void Add(Handle handle) { - list_.Add(handle.location()); + void Add(Handle handle, Zone* zone) { + list_.Add(handle.location(), zone); } Handle at(int i) const { @@ -415,13 +416,15 @@ class Block: public BreakableStatement { public: DECLARE_NODE_TYPE(Block) - void AddStatement(Statement* statement) { statements_.Add(statement); } + void AddStatement(Statement* statement, Zone* zone) { + statements_.Add(statement, zone); + } ZoneList* statements() { return &statements_; } bool is_initializer_block() const { return is_initializer_block_; } - Scope* block_scope() const { return block_scope_; } - void set_block_scope(Scope* block_scope) { block_scope_ = block_scope; } + Scope* scope() const { return scope_; } + void set_scope(Scope* scope) { scope_ = scope; } protected: template friend class AstNodeFactory; @@ -429,17 +432,18 @@ class Block: public BreakableStatement { Block(Isolate* isolate, ZoneStringList* labels, int capacity, - bool is_initializer_block) + bool is_initializer_block, + Zone* zone) : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY), - statements_(capacity), + statements_(capacity, zone), is_initializer_block_(is_initializer_block), - block_scope_(NULL) { + scope_(NULL) { } private: ZoneList statements_; bool is_initializer_block_; - Scope* block_scope_; + Scope* scope_; }; @@ -594,7 +598,7 @@ class Module: public AstNode { Interface* interface() const { return interface_; } protected: - Module() : interface_(Interface::NewModule()) {} + explicit Module(Zone* zone) : interface_(Interface::NewModule(zone)) {} explicit Module(Interface* interface) : interface_(interface) {} private: @@ -607,6 +611,7 @@ class ModuleLiteral: public Module { DECLARE_NODE_TYPE(ModuleLiteral) Block* body() const { return body_; } + Handle context() const { return context_; } protected: template friend class AstNodeFactory; @@ -618,6 +623,7 @@ class ModuleLiteral: public Module { private: Block* body_; + Handle context_; }; @@ -647,8 +653,9 @@ class ModulePath: public Module { protected: template friend class AstNodeFactory; - ModulePath(Module* module, Handle name) - : module_(module), + ModulePath(Module* module, Handle name, Zone* zone) + : Module(zone), + module_(module), name_(name) { } @@ -667,7 +674,8 @@ class ModuleUrl: public Module { protected: template friend class AstNodeFactory; - explicit 
ModuleUrl(Handle<String> url) : url_(url) {
+  ModuleUrl(Handle<String> url, Zone* zone)
+      : Module(zone), url_(url) {
   }

  private:
@@ -1095,12 +1103,12 @@ class IfStatement: public Statement {
 // stack in the compiler; this should probably be reworked.
 class TargetCollector: public AstNode {
  public:
-  TargetCollector() : targets_(0) { }
+  explicit TargetCollector(Zone* zone) : targets_(0, zone) { }

   // Adds a jump target to the collector. The collector stores a pointer not
   // a copy of the target to make binding work, so make sure not to pass in
   // references to something on the stack.
-  void AddTarget(Label* target);
+  void AddTarget(Label* target, Zone* zone);

   // Virtual behaviour. TargetCollectors are never part of the AST.
   virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
@@ -1358,7 +1366,7 @@ class ObjectLiteral: public MaterializedLiteral {
   // Mark all computed expressions that are bound to a key that
   // is shadowed by a later occurrence of the same key. For the
   // marked expressions, no store code is emitted.
-  void CalculateEmitStore();
+  void CalculateEmitStore(Zone* zone);

   enum Flags {
     kNoFlags = 0,
@@ -1523,7 +1531,7 @@ class Property: public Expression {
   bool IsFunctionPrototype() const { return is_function_prototype_; }

   // Type feedback information.
-  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
   virtual bool IsMonomorphic() { return is_monomorphic_; }
   virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
   bool IsArrayLength() { return is_array_length_; }
@@ -1796,7 +1804,7 @@ class CountOperation: public Expression {

   virtual void MarkAsStatement() { is_prefix_ = true; }

-  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
   virtual bool IsMonomorphic() { return is_monomorphic_; }
   virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }

@@ -1949,7 +1957,7 @@ class Assignment: public Expression {
   void mark_block_end() { block_end_ = true; }

   // Type feedback information.
-  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone);
   virtual bool IsMonomorphic() { return is_monomorphic_; }
   virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }

@@ -2208,8 +2216,8 @@ class RegExpTree: public ZoneObject {
   // Returns the interval of registers used for captures within this
   // expression.
virtual Interval CaptureRegisters() { return Interval::Empty(); } - virtual void AppendToText(RegExpText* text); - SmartArrayPointer ToString(); + virtual void AppendToText(RegExpText* text, Zone* zone); + SmartArrayPointer ToString(Zone* zone); #define MAKE_ASTYPE(Name) \ virtual RegExp##Name* As##Name(); \ virtual bool Is##Name(); @@ -2294,7 +2302,7 @@ class CharacterSet BASE_EMBEDDED { explicit CharacterSet(ZoneList* ranges) : ranges_(ranges), standard_set_type_(0) {} - ZoneList* ranges(); + ZoneList* ranges(Zone* zone); uc16 standard_set_type() { return standard_set_type_; } void set_standard_set_type(uc16 special_set_type) { standard_set_type_ = special_set_type; @@ -2325,11 +2333,11 @@ class RegExpCharacterClass: public RegExpTree { virtual bool IsTextElement() { return true; } virtual int min_match() { return 1; } virtual int max_match() { return 1; } - virtual void AppendToText(RegExpText* text); + virtual void AppendToText(RegExpText* text, Zone* zone); CharacterSet character_set() { return set_; } // TODO(lrn): Remove need for complex version if is_standard that // recognizes a mangled standard set and just do { return set_.is_special(); } - bool is_standard(); + bool is_standard(Zone* zone); // Returns a value representing the standard character set if is_standard() // returns true. // Currently used values are: @@ -2342,7 +2350,7 @@ class RegExpCharacterClass: public RegExpTree { // . : non-unicode non-newline // * : All characters uc16 standard_type() { return set_.standard_set_type(); } - ZoneList* ranges() { return set_.ranges(); } + ZoneList* ranges(Zone* zone) { return set_.ranges(zone); } bool is_negated() { return is_negated_; } private: @@ -2362,7 +2370,7 @@ class RegExpAtom: public RegExpTree { virtual bool IsTextElement() { return true; } virtual int min_match() { return data_.length(); } virtual int max_match() { return data_.length(); } - virtual void AppendToText(RegExpText* text); + virtual void AppendToText(RegExpText* text, Zone* zone); Vector data() { return data_; } int length() { return data_.length(); } private: @@ -2372,7 +2380,7 @@ class RegExpAtom: public RegExpTree { class RegExpText: public RegExpTree { public: - RegExpText() : elements_(2), length_(0) {} + explicit RegExpText(Zone* zone) : elements_(2, zone), length_(0) {} virtual void* Accept(RegExpVisitor* visitor, void* data); virtual RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success); @@ -2381,9 +2389,9 @@ class RegExpText: public RegExpTree { virtual bool IsTextElement() { return true; } virtual int min_match() { return length_; } virtual int max_match() { return length_; } - virtual void AppendToText(RegExpText* text); - void AddElement(TextElement elm) { - elements_.Add(elm); + virtual void AppendToText(RegExpText* text, Zone* zone); + void AddElement(TextElement elm, Zone* zone) { + elements_.Add(elm, zone); length_ += elm.length(); } ZoneList* elements() { return &elements_; } @@ -2691,20 +2699,21 @@ class AstNodeFactory BASE_EMBEDDED { } ModulePath* NewModulePath(Module* origin, Handle name) { - ModulePath* module = new(zone_) ModulePath(origin, name); + ModulePath* module = new(zone_) ModulePath(origin, name, zone_); VISIT_AND_RETURN(ModulePath, module) } ModuleUrl* NewModuleUrl(Handle url) { - ModuleUrl* module = new(zone_) ModuleUrl(url); + ModuleUrl* module = new(zone_) ModuleUrl(url, zone_); VISIT_AND_RETURN(ModuleUrl, module) } Block* NewBlock(ZoneStringList* labels, int capacity, - bool is_initializer_block) { + bool is_initializer_block, + Zone* zone) { Block* block 
= new(zone_) Block( - isolate_, labels, capacity, is_initializer_block); + isolate_, labels, capacity, is_initializer_block, zone); VISIT_AND_RETURN(Block, block) } diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index 0e95b4b839..33cbb8149f 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -484,8 +484,8 @@ Handle Genesis::CreateEmptyFunction(Isolate* isolate) { global_context()->set_initial_object_prototype(*prototype); SetPrototype(object_fun, prototype); - object_function_map-> - set_instance_descriptors(heap->empty_descriptor_array()); + object_function_map->set_instance_descriptors( + heap->empty_descriptor_array()); } // Allocate the empty function as the prototype for function ECMAScript @@ -516,12 +516,10 @@ Handle Genesis::CreateEmptyFunction(Isolate* isolate) { function_instance_map_writable_prototype_->set_prototype(*empty_function); // Allocate the function map first and then patch the prototype later - Handle empty_fm = factory->CopyMapDropDescriptors( - function_without_prototype_map); - empty_fm->set_instance_descriptors( - function_without_prototype_map->instance_descriptors()); - empty_fm->set_prototype(global_context()->object_function()->prototype()); - empty_function->set_map(*empty_fm); + Handle empty_function_map = CreateFunctionMap(DONT_ADD_PROTOTYPE); + empty_function_map->set_prototype( + global_context()->object_function()->prototype()); + empty_function->set_map(*empty_function_map); return empty_function; } @@ -1011,7 +1009,7 @@ bool Genesis::InitializeGlobal(Handle inner_global, proto_map->set_prototype(global_context()->initial_object_prototype()); Handle proto = factory->NewJSObjectFromMap(proto_map); proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, - heap->empty_string()); + heap->query_colon_symbol()); proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex, heap->false_value()); proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex, @@ -1094,7 +1092,7 @@ bool Genesis::InitializeGlobal(Handle inner_global, // Check the state of the object. ASSERT(result->HasFastProperties()); - ASSERT(result->HasFastElements()); + ASSERT(result->HasFastObjectElements()); #endif } @@ -1187,7 +1185,7 @@ bool Genesis::InitializeGlobal(Handle inner_global, // Check the state of the object. ASSERT(result->HasFastProperties()); - ASSERT(result->HasFastElements()); + ASSERT(result->HasFastObjectElements()); #endif } @@ -1634,10 +1632,11 @@ bool Genesis::InstallNatives() { // through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT // transition easy to trap. Moreover, they rarely are smi-only. MaybeObject* maybe_map = - array_function->initial_map()->CopyDropTransitions(); + array_function->initial_map()->CopyDropTransitions( + DescriptorArray::MAY_BE_SHARED); Map* new_map; - if (!maybe_map->To(&new_map)) return false; - new_map->set_elements_kind(FAST_ELEMENTS); + if (!maybe_map->To(&new_map)) return false; + new_map->set_elements_kind(FAST_HOLEY_ELEMENTS); array_function->set_initial_map(new_map); // Make "length" magic on instances. @@ -2094,14 +2093,10 @@ bool Genesis::InstallJSBuiltins(Handle builtins) { Handle function = Handle(JSFunction::cast(function_object)); builtins->set_javascript_builtin(id, *function); - Handle shared - = Handle(function->shared()); - if (!SharedFunctionInfo::EnsureCompiled(shared, CLEAR_EXCEPTION)) { + if (!JSFunction::CompileLazy(function, CLEAR_EXCEPTION)) { return false; } - // Set the code object on the function object. 
- function->ReplaceCode(function->shared()->code()); - builtins->set_javascript_builtin_code(id, shared->code()); + builtins->set_javascript_builtin_code(id, function->shared()->code()); } return true; } @@ -2159,7 +2154,7 @@ void Genesis::TransferNamedProperties(Handle from, Handle descs = Handle(from->map()->instance_descriptors()); for (int i = 0; i < descs->number_of_descriptors(); i++) { - PropertyDetails details = PropertyDetails(descs->GetDetails(i)); + PropertyDetails details = descs->GetDetails(i); switch (details.type()) { case FIELD: { HandleScope inner; @@ -2197,7 +2192,6 @@ void Genesis::TransferNamedProperties(Handle from, break; } case MAP_TRANSITION: - case ELEMENTS_TRANSITION: case CONSTANT_TRANSITION: case NULL_DESCRIPTOR: // Ignore non-properties. diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index 01e88f5593..64ec3d9fcc 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -200,9 +200,12 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args, array->set_elements(heap->empty_fixed_array()); if (!FLAG_smi_only_arrays) { Context* global_context = isolate->context()->global_context(); - if (array->GetElementsKind() == FAST_SMI_ONLY_ELEMENTS && - !global_context->object_js_array_map()->IsUndefined()) { - array->set_map(Map::cast(global_context->object_js_array_map())); + if (array->GetElementsKind() == GetInitialFastElementsKind() && + !global_context->js_array_maps()->IsUndefined()) { + FixedArray* map_array = + FixedArray::cast(global_context->js_array_maps()); + array->set_map(Map::cast(map_array-> + get(TERMINAL_FAST_ELEMENTS_KIND))); } } } else { @@ -222,6 +225,13 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args, { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len); if (!maybe_obj->ToObject(&fixed_array)) return maybe_obj; } + ElementsKind elements_kind = array->GetElementsKind(); + if (!IsFastHoleyElementsKind(elements_kind)) { + elements_kind = GetHoleyElementsKind(elements_kind); + MaybeObject* maybe_array = + array->TransitionElementsKind(elements_kind); + if (maybe_array->IsFailure()) return maybe_array; + } // We do not use SetContent to skip the unnecessary elements type check. array->set_elements(FixedArray::cast(fixed_array)); array->set_length(Smi::cast(obj)); @@ -250,7 +260,7 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args, // Allocate an appropriately typed elements array. 
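 // (Only the double kinds get a FixedDoubleArray backing store; the smi and
 // object kinds share a plain FixedArray. Holey versus packed is recorded in
 // the map's ElementsKind, not in the backing store itself.)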
MaybeObject* maybe_elms; ElementsKind elements_kind = array->GetElementsKind(); - if (elements_kind == FAST_DOUBLE_ELEMENTS) { + if (IsFastDoubleElementsKind(elements_kind)) { maybe_elms = heap->AllocateUninitializedFixedDoubleArray( number_of_elements); } else { @@ -261,13 +271,15 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args, // Fill in the content switch (array->GetElementsKind()) { - case FAST_SMI_ONLY_ELEMENTS: { + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_SMI_ELEMENTS: { FixedArray* smi_elms = FixedArray::cast(elms); for (int index = 0; index < number_of_elements; index++) { smi_elms->set(index, (*args)[index+1], SKIP_WRITE_BARRIER); } break; } + case FAST_HOLEY_ELEMENTS: case FAST_ELEMENTS: { AssertNoAllocation no_gc; WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); @@ -277,6 +289,7 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args, } break; } + case FAST_HOLEY_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: { FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms); for (int index = 0; index < number_of_elements; index++) { @@ -412,7 +425,7 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements( HeapObject* elms = array->elements(); Map* map = elms->map(); if (map == heap->fixed_array_map()) { - if (args == NULL || array->HasFastElements()) return elms; + if (args == NULL || array->HasFastObjectElements()) return elms; if (array->HasFastDoubleElements()) { ASSERT(elms == heap->empty_fixed_array()); MaybeObject* maybe_transition = @@ -422,7 +435,7 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements( } } else if (map == heap->fixed_cow_array_map()) { MaybeObject* maybe_writable_result = array->EnsureWritableFastElements(); - if (args == NULL || array->HasFastElements() || + if (args == NULL || array->HasFastObjectElements() || maybe_writable_result->IsFailure()) { return maybe_writable_result; } @@ -516,8 +529,8 @@ BUILTIN(ArrayPush) { } FixedArray* new_elms = FixedArray::cast(obj); - CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0, - new_elms, FAST_ELEMENTS, 0, len); + ElementsKind kind = array->GetElementsKind(); + CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, 0, len); FillWithHoles(heap, new_elms, new_length, capacity); elms = new_elms; @@ -588,7 +601,7 @@ BUILTIN(ArrayShift) { } FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ASSERT(array->HasFastTypeElements()); + ASSERT(array->HasFastSmiOrObjectElements()); int len = Smi::cast(array->length())->value(); if (len == 0) return heap->undefined_value(); @@ -630,7 +643,7 @@ BUILTIN(ArrayUnshift) { } FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ASSERT(array->HasFastTypeElements()); + ASSERT(array->HasFastSmiOrObjectElements()); int len = Smi::cast(array->length())->value(); int to_add = args.length() - 1; @@ -652,8 +665,8 @@ BUILTIN(ArrayUnshift) { if (!maybe_obj->ToObject(&obj)) return maybe_obj; } FixedArray* new_elms = FixedArray::cast(obj); - CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0, - new_elms, FAST_ELEMENTS, to_add, len); + ElementsKind kind = array->GetElementsKind(); + CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, to_add, len); FillWithHoles(heap, new_elms, new_length, capacity); elms = new_elms; array->set_elements(elms); @@ -682,7 +695,7 @@ BUILTIN(ArraySlice) { int len = -1; if (receiver->IsJSArray()) { JSArray* array = JSArray::cast(receiver); - if (!array->HasFastTypeElements() || + if (!array->HasFastSmiOrObjectElements() || 
!IsJSArrayFastElementMovingAllowed(heap, array)) { return CallJsBuiltin(isolate, "ArraySlice", args); } @@ -698,7 +711,7 @@ BUILTIN(ArraySlice) { bool is_arguments_object_with_fast_elements = receiver->IsJSObject() && JSObject::cast(receiver)->map() == arguments_map - && JSObject::cast(receiver)->HasFastTypeElements(); + && JSObject::cast(receiver)->HasFastSmiOrObjectElements(); if (!is_arguments_object_with_fast_elements) { return CallJsBuiltin(isolate, "ArraySlice", args); } @@ -763,9 +776,9 @@ BUILTIN(ArraySlice) { JSArray* result_array; if (!maybe_array->To(&result_array)) return maybe_array; - CopyObjectToObjectElements(elms, FAST_ELEMENTS, k, + CopyObjectToObjectElements(elms, elements_kind, k, FixedArray::cast(result_array->elements()), - FAST_ELEMENTS, 0, result_len); + elements_kind, 0, result_len); return result_array; } @@ -786,7 +799,7 @@ BUILTIN(ArraySplice) { } FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ASSERT(array->HasFastTypeElements()); + ASSERT(array->HasFastSmiOrObjectElements()); int len = Smi::cast(array->length())->value(); @@ -837,9 +850,9 @@ BUILTIN(ArraySplice) { { // Fill newly created array. - CopyObjectToObjectElements(elms, FAST_ELEMENTS, actual_start, + CopyObjectToObjectElements(elms, elements_kind, actual_start, FixedArray::cast(result_array->elements()), - FAST_ELEMENTS, 0, actual_delete_count); + elements_kind, 0, actual_delete_count); } int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0; @@ -888,12 +901,13 @@ BUILTIN(ArraySplice) { { // Copy the part before actual_start as is. - CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0, - new_elms, FAST_ELEMENTS, 0, actual_start); + ElementsKind kind = array->GetElementsKind(); + CopyObjectToObjectElements(elms, kind, 0, + new_elms, kind, 0, actual_start); const int to_copy = len - actual_delete_count - actual_start; - CopyObjectToObjectElements(elms, FAST_ELEMENTS, + CopyObjectToObjectElements(elms, kind, actual_start + actual_delete_count, - new_elms, FAST_ELEMENTS, + new_elms, kind, actual_start + item_count, to_copy); } @@ -940,11 +954,12 @@ BUILTIN(ArrayConcat) { // and calculating total length. 
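 // (The same pass computes the most general ElementsKind for the result:
 // any non-smi input widens the accumulated kind to objects, and any holey
 // input forces the holey variant of whatever kind has been accumulated.)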
int n_arguments = args.length(); int result_len = 0; - ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS; + ElementsKind elements_kind = GetInitialFastElementsKind(); for (int i = 0; i < n_arguments; i++) { Object* arg = args[i]; - if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements() - || JSArray::cast(arg)->GetPrototype() != array_proto) { + if (!arg->IsJSArray() || + !JSArray::cast(arg)->HasFastSmiOrObjectElements() || + JSArray::cast(arg)->GetPrototype() != array_proto) { return CallJsBuiltin(isolate, "ArrayConcat", args); } @@ -961,8 +976,18 @@ BUILTIN(ArrayConcat) { return CallJsBuiltin(isolate, "ArrayConcat", args); } - if (!JSArray::cast(arg)->HasFastSmiOnlyElements()) { - elements_kind = FAST_ELEMENTS; + if (!JSArray::cast(arg)->HasFastSmiElements()) { + if (IsFastSmiElementsKind(elements_kind)) { + if (IsFastHoleyElementsKind(elements_kind)) { + elements_kind = FAST_HOLEY_ELEMENTS; + } else { + elements_kind = FAST_ELEMENTS; + } + } + } + + if (JSArray::cast(arg)->HasFastHoleyElements()) { + elements_kind = GetHoleyElementsKind(elements_kind); } } @@ -982,8 +1007,8 @@ BUILTIN(ArrayConcat) { JSArray* array = JSArray::cast(args[i]); int len = Smi::cast(array->length())->value(); FixedArray* elms = FixedArray::cast(array->elements()); - CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0, - result_elms, FAST_ELEMENTS, + CopyObjectToObjectElements(elms, elements_kind, 0, + result_elms, elements_kind, start_pos, len); start_pos += len; } @@ -1103,7 +1128,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper( CustomArguments custom(isolate); v8::ImplementationUtilities::PrepareArgumentsData(custom.end(), - data_obj, *function, raw_holder); + isolate, data_obj, *function, raw_holder); v8::Arguments new_args = v8::ImplementationUtilities::NewArguments( custom.end(), @@ -1143,68 +1168,6 @@ BUILTIN(HandleApiCallConstruct) { } -#ifdef DEBUG - -static void VerifyTypeCheck(Handle object, - Handle function) { - ASSERT(function->shared()->IsApiFunction()); - FunctionTemplateInfo* info = function->shared()->get_api_func_data(); - if (info->signature()->IsUndefined()) return; - SignatureInfo* signature = SignatureInfo::cast(info->signature()); - Object* receiver_type = signature->receiver(); - if (receiver_type->IsUndefined()) return; - FunctionTemplateInfo* type = FunctionTemplateInfo::cast(receiver_type); - ASSERT(object->IsInstanceOf(type)); -} - -#endif - - -BUILTIN(FastHandleApiCall) { - ASSERT(!CalledAsConstructor(isolate)); - Heap* heap = isolate->heap(); - const bool is_construct = false; - - // We expect four more arguments: callback, function, call data, and holder. - const int args_length = args.length() - 4; - ASSERT(args_length >= 0); - - Object* callback_obj = args[args_length]; - - v8::Arguments new_args = v8::ImplementationUtilities::NewArguments( - &args[args_length + 1], - &args[0] - 1, - args_length - 1, - is_construct); - -#ifdef DEBUG - VerifyTypeCheck(Utils::OpenHandle(*new_args.Holder()), - Utils::OpenHandle(*new_args.Callee())); -#endif - HandleScope scope(isolate); - Object* result; - v8::Handle value; - { - // Leaving JavaScript. - VMState state(isolate, EXTERNAL); - ExternalCallbackScope call_scope(isolate, - v8::ToCData
(callback_obj)); - v8::InvocationCallback callback = - v8::ToCData(callback_obj); - - value = callback(new_args); - } - if (value.IsEmpty()) { - result = heap->undefined_value(); - } else { - result = *reinterpret_cast(*value); - } - - RETURN_IF_SCHEDULED_EXCEPTION(isolate); - return result; -} - - // Helper function to handle calls to non-function objects created through the // API. The object can be called as either a constructor (using new) or just as // a function (without new). @@ -1243,7 +1206,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor( CustomArguments custom(isolate); v8::ImplementationUtilities::PrepareArgumentsData(custom.end(), - call_data->data(), constructor, obj); + isolate, call_data->data(), constructor, obj); v8::Arguments new_args = v8::ImplementationUtilities::NewArguments( custom.end(), &args[0] - 1, diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h index f079139d45..3ea33938eb 100644 --- a/deps/v8/src/builtins.h +++ b/deps/v8/src/builtins.h @@ -56,7 +56,6 @@ enum BuiltinExtraArguments { V(ArrayConcat, NO_EXTRA_ARGUMENTS) \ \ V(HandleApiCall, NEEDS_CALLED_FUNCTION) \ - V(FastHandleApiCall, NO_EXTRA_ARGUMENTS) \ V(HandleApiCallConstruct, NEEDS_CALLED_FUNCTION) \ V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS) \ V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS) \ diff --git a/deps/v8/src/bytecodes-irregexp.h b/deps/v8/src/bytecodes-irregexp.h index b13efb36f8..c7cc66e527 100644 --- a/deps/v8/src/bytecodes-irregexp.h +++ b/deps/v8/src/bytecodes-irregexp.h @@ -72,24 +72,23 @@ V(AND_CHECK_4_CHARS, 27, 16) /* bc8 pad24 uint32 uint32 addr32 */ \ V(AND_CHECK_CHAR, 28, 12) /* bc8 pad8 uint16 uint32 addr32 */ \ V(AND_CHECK_NOT_4_CHARS, 29, 16) /* bc8 pad24 uint32 uint32 addr32 */ \ V(AND_CHECK_NOT_CHAR, 30, 12) /* bc8 pad8 uint16 uint32 addr32 */ \ -V(MINUS_AND_CHECK_NOT_CHAR, 31, 12) /* bc8 pad8 uc16 uc16 addr32 */ \ -V(CHECK_LT, 32, 8) /* bc8 pad8 uc16 addr32 */ \ -V(CHECK_GT, 33, 8) /* bc8 pad8 uc16 addr32 */ \ -V(CHECK_NOT_BACK_REF, 34, 8) /* bc8 reg_idx24 addr32 */ \ -V(CHECK_NOT_BACK_REF_NO_CASE, 35, 8) /* bc8 reg_idx24 addr32 */ \ -V(CHECK_NOT_REGS_EQUAL, 36, 12) /* bc8 regidx24 reg_idx32 addr32 */ \ -V(LOOKUP_MAP1, 37, 12) /* bc8 pad8 start16 bit_map_addr32 addr32 */ \ -V(LOOKUP_MAP2, 38, 96) /* bc8 pad8 start16 half_nibble_map_addr32* */ \ -V(LOOKUP_MAP8, 39, 96) /* bc8 pad8 start16 byte_map addr32* */ \ -V(LOOKUP_HI_MAP8, 40, 96) /* bc8 start24 byte_map_addr32 addr32* */ \ -V(CHECK_REGISTER_LT, 41, 12) /* bc8 reg_idx24 value32 addr32 */ \ -V(CHECK_REGISTER_GE, 42, 12) /* bc8 reg_idx24 value32 addr32 */ \ -V(CHECK_REGISTER_EQ_POS, 43, 8) /* bc8 reg_idx24 addr32 */ \ -V(CHECK_AT_START, 44, 8) /* bc8 pad24 addr32 */ \ -V(CHECK_NOT_AT_START, 45, 8) /* bc8 pad24 addr32 */ \ -V(CHECK_GREEDY, 46, 8) /* bc8 pad24 addr32 */ \ -V(ADVANCE_CP_AND_GOTO, 47, 8) /* bc8 offset24 addr32 */ \ -V(SET_CURRENT_POSITION_FROM_END, 48, 4) /* bc8 idx24 */ +V(MINUS_AND_CHECK_NOT_CHAR, 31, 12) /* bc8 pad8 uc16 uc16 uc16 addr32 */ \ +V(CHECK_CHAR_IN_RANGE, 32, 12) /* bc8 pad24 uc16 uc16 addr32 */ \ +V(CHECK_CHAR_NOT_IN_RANGE, 33, 12) /* bc8 pad24 uc16 uc16 addr32 */ \ +V(CHECK_BIT_IN_TABLE, 34, 24) /* bc8 pad24 addr32 bits128 */ \ +V(CHECK_LT, 35, 8) /* bc8 pad8 uc16 addr32 */ \ +V(CHECK_GT, 36, 8) /* bc8 pad8 uc16 addr32 */ \ +V(CHECK_NOT_BACK_REF, 37, 8) /* bc8 reg_idx24 addr32 */ \ +V(CHECK_NOT_BACK_REF_NO_CASE, 38, 8) /* bc8 reg_idx24 addr32 */ \ +V(CHECK_NOT_REGS_EQUAL, 39, 12) /* bc8 regidx24 reg_idx32 addr32 */ \ +V(CHECK_REGISTER_LT, 40, 12) /* 
bc8 reg_idx24 value32 addr32 */ \ +V(CHECK_REGISTER_GE, 41, 12) /* bc8 reg_idx24 value32 addr32 */ \ +V(CHECK_REGISTER_EQ_POS, 42, 8) /* bc8 reg_idx24 addr32 */ \ +V(CHECK_AT_START, 43, 8) /* bc8 pad24 addr32 */ \ +V(CHECK_NOT_AT_START, 44, 8) /* bc8 pad24 addr32 */ \ +V(CHECK_GREEDY, 45, 8) /* bc8 pad24 addr32 */ \ +V(ADVANCE_CP_AND_GOTO, 46, 8) /* bc8 offset24 addr32 */ \ +V(SET_CURRENT_POSITION_FROM_END, 47, 4) /* bc8 idx24 */ #define DECLARE_BYTECODES(name, code, length) \ static const int BC_##name = code; diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index 11016c8238..8f316606c2 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -73,21 +73,12 @@ SmartArrayPointer CodeStub::GetName() { void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) { - code->set_major_key(MajorKey()); - Isolate* isolate = masm->isolate(); SmartArrayPointer name = GetName(); PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name)); GDBJIT(AddCode(GDBJITInterface::STUB, *name, code)); Counters* counters = isolate->counters(); counters->total_stubs_code_size()->Increment(code->instruction_size()); - -#ifdef ENABLE_DISASSEMBLER - if (FLAG_print_code_stubs) { - code->Disassemble(*name); - PrintF("\n"); - } -#endif } @@ -125,8 +116,16 @@ Handle CodeStub::GetCode() { GetICState()); Handle new_object = factory->NewCode( desc, flags, masm.CodeObject(), NeedsImmovableCode()); - RecordCodeGeneration(*new_object, &masm); + new_object->set_major_key(MajorKey()); FinishCode(new_object); + RecordCodeGeneration(*new_object, &masm); + +#ifdef ENABLE_DISASSEMBLER + if (FLAG_print_code_stubs) { + new_object->Disassemble(*GetName()); + PrintF("\n"); + } +#endif if (UseSpecialCache()) { AddToSpecialCache(new_object); @@ -263,10 +262,13 @@ void JSEntryStub::FinishCode(Handle code) { void KeyedLoadElementStub::Generate(MacroAssembler* masm) { switch (elements_kind_) { case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: KeyedLoadStubCompiler::GenerateLoadFastElement(masm); break; case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm); break; case EXTERNAL_BYTE_ELEMENTS: @@ -293,7 +295,9 @@ void KeyedLoadElementStub::Generate(MacroAssembler* masm) { void KeyedStoreElementStub::Generate(MacroAssembler* masm) { switch (elements_kind_) { case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: { + case FAST_HOLEY_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: { KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_, elements_kind_, @@ -301,6 +305,7 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) { } break; case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm, is_js_array_, grow_mode_); @@ -431,24 +436,32 @@ bool ToBooleanStub::Types::CanBeUndetectable() const { void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) { Label fail; + ASSERT(!IsFastHoleyElementsKind(from_) || IsFastHoleyElementsKind(to_)); if (!FLAG_trace_elements_transitions) { - if (to_ == FAST_ELEMENTS) { - if (from_ == FAST_SMI_ONLY_ELEMENTS) { - ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm); - } else if (from_ == FAST_DOUBLE_ELEMENTS) { + if (IsFastSmiOrObjectElementsKind(to_)) { + if (IsFastSmiOrObjectElementsKind(from_)) { + ElementsTransitionGenerator:: + GenerateMapChangeElementsTransition(masm); + } else if 
+      } else if (IsFastDoubleElementsKind(from_)) {
+        ASSERT(!IsFastSmiElementsKind(to_));
         ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
       } else {
         UNREACHABLE();
       }
       KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
                                                        is_jsarray_,
-                                                       FAST_ELEMENTS,
+                                                       to_,
                                                        grow_mode_);
-    } else if (from_ == FAST_SMI_ONLY_ELEMENTS && to_ == FAST_DOUBLE_ELEMENTS) {
-      ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
+    } else if (IsFastSmiElementsKind(from_) &&
+               IsFastDoubleElementsKind(to_)) {
+      ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
       KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
                                                              is_jsarray_,
                                                              grow_mode_);
+    } else if (IsFastDoubleElementsKind(from_)) {
+      ASSERT(to_ == FAST_HOLEY_DOUBLE_ELEMENTS);
+      ElementsTransitionGenerator::
+          GenerateMapChangeElementsTransition(masm);
     } else {
       UNREACHABLE();
     }
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index b67e961ac7..5c8717838f 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -498,6 +498,7 @@ class ICCompareStub: public CodeStub {
 
   virtual void FinishCode(Handle<Code> code) {
     code->set_compare_state(state_);
+    code->set_compare_operation(op_);
   }
 
   virtual CodeStub::Major MajorKey() { return CompareIC; }
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 50d70f265d..08a777f2ad 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -95,8 +95,8 @@ UnaryMathFunction CreateSqrtFunction();
 
 class ElementsTransitionGenerator : public AllStatic {
  public:
-  static void GenerateSmiOnlyToObject(MacroAssembler* masm);
-  static void GenerateSmiOnlyToDouble(MacroAssembler* masm, Label* fail);
+  static void GenerateMapChangeElementsTransition(MacroAssembler* masm);
+  static void GenerateSmiToDouble(MacroAssembler* masm, Label* fail);
   static void GenerateDoubleToObject(MacroAssembler* masm, Label* fail);
 
  private:
diff --git a/deps/v8/src/compiler-intrinsics.h b/deps/v8/src/compiler-intrinsics.h
index 3b9c59ea53..b73e8ac750 100644
--- a/deps/v8/src/compiler-intrinsics.h
+++ b/deps/v8/src/compiler-intrinsics.h
@@ -40,6 +40,9 @@ class CompilerIntrinsics {
   // Returns number of zero bits following most significant 1 bit.
   // Undefined for zero value.
   INLINE(static int CountLeadingZeros(uint32_t value));
+
+  // Returns the number of bits set.
+  INLINE(static int CountSetBits(uint32_t value));
 };
 
 #ifdef __GNUC__
@@ -51,6 +54,10 @@ int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
   return __builtin_clz(value);
 }
 
+int CompilerIntrinsics::CountSetBits(uint32_t value) {
+  return __builtin_popcount(value);
+}
+
#elif defined(_MSC_VER)
 
 #pragma intrinsic(_BitScanForward)
@@ -68,6 +75,16 @@ int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
   return 31 - static_cast<int>(result);
 }
 
+int CompilerIntrinsics::CountSetBits(uint32_t value) {
+  // Manually count set bits.
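+  // (This is the classic parallel "SWAR" population count: each step adds
+  // adjacent groups of bits in place -- 1-bit pairs, then 2-, 4-, 8- and
+  // 16-bit groups -- so the total bit count accumulates in the low bits.)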
+  value = ((value >> 1) & 0x55555555) + (value & 0x55555555);
+  value = ((value >> 2) & 0x33333333) + (value & 0x33333333);
+  value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
+  value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
+  value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
+  return value;
+}
+
 #else
 #error Unsupported compiler
 #endif
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index ecac5cba69..d44718bc0f 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -294,8 +294,9 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
   }
 
   Handle<Context> global_context(info->closure()->context()->global_context());
-  TypeFeedbackOracle oracle(code, global_context, info->isolate());
-  HGraphBuilder builder(info, &oracle);
+  TypeFeedbackOracle oracle(code, global_context, info->isolate(),
+                            info->isolate()->zone());
+  HGraphBuilder builder(info, &oracle, info->isolate()->zone());
   HPhase phase(HPhase::kTotal);
   HGraph* graph = builder.CreateGraph();
   if (info->isolate()->has_pending_exception()) {
@@ -304,7 +305,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
   }
 
   if (graph != NULL) {
-    Handle<Code> optimized_code = graph->Compile(info);
+    Handle<Code> optimized_code = graph->Compile(info, graph->zone());
     if (!optimized_code.is_null()) {
       info->SetCode(optimized_code);
       FinishOptimization(info->closure(), start);
@@ -346,7 +347,8 @@ bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) {
   // the compilation info is set if compilation succeeded.
   bool succeeded = MakeCode(info);
   if (!info->shared_info().is_null()) {
-    Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
+    Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope(),
+                                                     info->isolate()->zone());
     info->shared_info()->set_scope_info(*scope_info);
   }
   return succeeded;
@@ -420,7 +422,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
           lit->name(),
           lit->materialized_literal_count(),
           info->code(),
-          ScopeInfo::Create(info->scope()));
+          ScopeInfo::Create(info->scope(), info->isolate()->zone()));
 
   ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
   Compiler::SetFunctionInfo(result, lit, true, script);
@@ -462,7 +464,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
                            script,
                            Debugger::NO_AFTER_COMPILE_FLAGS);
 #endif
-  live_edit_tracker.RecordFunctionInfo(result, lit);
+  live_edit_tracker.RecordFunctionInfo(result, lit, isolate->zone());
 
   return result;
 }
@@ -651,7 +653,8 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
       // info initialization is important since set_scope_info might
       // trigger a GC, causing the ASSERT below to be invalid if the code
       // was flushed. By setting the code object last we avoid this.
-      Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
+      Handle<ScopeInfo> scope_info =
+          ScopeInfo::Create(info->scope(), info->isolate()->zone());
       shared->set_scope_info(*scope_info);
       shared->set_code(*code);
       if (!function.is_null()) {
@@ -728,7 +731,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
   } else if ((V8::UseCrankshaft() && MakeCrankshaftCode(&info)) ||
              (!V8::UseCrankshaft() && FullCodeGenerator::MakeCode(&info))) {
     ASSERT(!info.code().is_null());
-    scope_info = ScopeInfo::Create(info.scope());
+    scope_info = ScopeInfo::Create(info.scope(), info.isolate()->zone());
   } else {
     return Handle<SharedFunctionInfo>::null();
   }
@@ -747,7 +750,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
   // the resulting function.
SetExpectedNofPropertiesFromEstimate(result, literal->expected_property_count()); - live_edit_tracker.RecordFunctionInfo(result, literal); + live_edit_tracker.RecordFunctionInfo(result, literal, info.isolate()->zone()); return result; } diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h index af5cb036c6..d154b82ca0 100644 --- a/deps/v8/src/contexts.h +++ b/deps/v8/src/contexts.h @@ -106,9 +106,7 @@ enum BindingFlags { V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \ V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \ V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \ - V(SMI_JS_ARRAY_MAP_INDEX, Object, smi_js_array_map) \ - V(DOUBLE_JS_ARRAY_MAP_INDEX, Object, double_js_array_map) \ - V(OBJECT_JS_ARRAY_MAP_INDEX, Object, object_js_array_map) \ + V(JS_ARRAY_MAPS_INDEX, Object, js_array_maps) \ V(DATE_FUNCTION_INDEX, JSFunction, date_function) \ V(JSON_OBJECT_INDEX, JSObject, json_object) \ V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \ @@ -248,9 +246,7 @@ class Context: public FixedArray { OBJECT_FUNCTION_INDEX, INTERNAL_ARRAY_FUNCTION_INDEX, ARRAY_FUNCTION_INDEX, - SMI_JS_ARRAY_MAP_INDEX, - DOUBLE_JS_ARRAY_MAP_INDEX, - OBJECT_JS_ARRAY_MAP_INDEX, + JS_ARRAY_MAPS_INDEX, DATE_FUNCTION_INDEX, JSON_OBJECT_INDEX, REGEXP_FUNCTION_INDEX, @@ -373,18 +369,6 @@ class Context: public FixedArray { Object* OptimizedFunctionsListHead(); void ClearOptimizedFunctions(); - static int GetContextMapIndexFromElementsKind( - ElementsKind elements_kind) { - if (elements_kind == FAST_DOUBLE_ELEMENTS) { - return Context::DOUBLE_JS_ARRAY_MAP_INDEX; - } else if (elements_kind == FAST_ELEMENTS) { - return Context::OBJECT_JS_ARRAY_MAP_INDEX; - } else { - ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS); - return Context::SMI_JS_ARRAY_MAP_INDEX; - } - } - #define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \ void set_##name(type* value) { \ ASSERT(IsGlobalContext()); \ @@ -397,7 +381,7 @@ class Context: public FixedArray { GLOBAL_CONTEXT_FIELDS(GLOBAL_CONTEXT_FIELD_ACCESSORS) #undef GLOBAL_CONTEXT_FIELD_ACCESSORS - // Lookup the the slot called name, starting with the current context. + // Lookup the slot called name, starting with the current context. // There are three possibilities: // // 1) result->IsContext(): diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h index b098a1c29c..77b260f036 100644 --- a/deps/v8/src/conversions-inl.h +++ b/deps/v8/src/conversions-inl.h @@ -228,9 +228,7 @@ double InternalStringToIntDouble(UnicodeCache* unicode_cache, } ASSERT(number != 0); - // The double could be constructed faster from number (mantissa), exponent - // and sign. Assuming it's a rare case more simple code is used. - return static_cast(negative ? -number : number) * pow(2.0, exponent); + return ldexp(static_cast(negative ? -number : number), exponent); } diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc index 45781cf0d4..7a01d55148 100644 --- a/deps/v8/src/d8.cc +++ b/deps/v8/src/d8.cc @@ -26,7 +26,8 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#ifdef USING_V8_SHARED // Defined when linking against shared lib on Windows. +// Defined when linking against shared lib on Windows. 
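+// (The !defined(V8_SHARED) guard below avoids redefining the macro when the
+// build configuration already defines V8_SHARED itself.)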
+#if defined(USING_V8_SHARED) && !defined(V8_SHARED) #define V8_SHARED #endif @@ -315,151 +316,143 @@ static size_t convertToUint(Local value_in, TryCatch* try_catch) { } -const char kArrayBufferReferencePropName[] = "_is_array_buffer_"; -const char kArrayBufferMarkerPropName[] = "_array_buffer_ref_"; +const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_"; -Handle Shell::CreateExternalArray(const Arguments& args, - ExternalArrayType type, - size_t element_size) { - TryCatch try_catch; - bool is_array_buffer_construct = element_size == 0; - if (is_array_buffer_construct) { - type = v8::kExternalByteArray; - element_size = 1; +Handle Shell::CreateExternalArrayBuffer(int32_t length) { + static const int32_t kMaxSize = 0x7fffffff; + // Make sure the total size fits into a (signed) int. + if (length < 0 || length > kMaxSize) { + return ThrowException(String::New("ArrayBuffer exceeds maximum size (2G)")); + } + uint8_t* data = new uint8_t[length]; + if (data == NULL) { + return ThrowException(String::New("Memory allocation failed.")); } - ASSERT(element_size == 1 || element_size == 2 || element_size == 4 || - element_size == 8); + memset(data, 0, length); + + Handle buffer = Object::New(); + buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True()); + Persistent persistent_array = Persistent::New(buffer); + persistent_array.MakeWeak(data, ExternalArrayWeakCallback); + persistent_array.MarkIndependent(); + V8::AdjustAmountOfExternalAllocatedMemory(length); + + buffer->SetIndexedPropertiesToExternalArrayData( + data, v8::kExternalByteArray, length); + buffer->Set(String::New("byteLength"), Int32::New(length), ReadOnly); + + return buffer; +} + + +Handle Shell::CreateExternalArrayBuffer(const Arguments& args) { if (args.Length() == 0) { return ThrowException( - String::New("Array constructor must have at least one " - "parameter.")); + String::New("ArrayBuffer constructor must have one parameter.")); } - bool first_arg_is_array_buffer = - args[0]->IsObject() && - args[0]->ToObject()->Get( - String::New(kArrayBufferMarkerPropName))->IsTrue(); + TryCatch try_catch; + int32_t length = convertToUint(args[0], &try_catch); + if (try_catch.HasCaught()) return try_catch.Exception(); + + return CreateExternalArrayBuffer(length); +} + + +Handle Shell::CreateExternalArray(const Arguments& args, + ExternalArrayType type, + int32_t element_size) { + TryCatch try_catch; + ASSERT(element_size == 1 || element_size == 2 || + element_size == 4 || element_size == 8); + // Currently, only the following constructors are supported: // TypedArray(unsigned long length) // TypedArray(ArrayBuffer buffer, // optional unsigned long byteOffset, // optional unsigned long length) - if (args.Length() > 3) { + Handle buffer; + int32_t length; + int32_t byteLength; + int32_t byteOffset; + if (args.Length() == 0) { return ThrowException( - String::New("Array constructor from ArrayBuffer must " - "have 1-3 parameters.")); + String::New("Array constructor must have at least one parameter.")); } - - Local length_value = (args.Length() < 3) - ? (first_arg_is_array_buffer - ? 
args[0]->ToObject()->Get(String::New("length")) - : args[0]) - : args[2]; - size_t length = convertToUint(length_value, &try_catch); - if (try_catch.HasCaught()) return try_catch.Exception(); - - void* data = NULL; - size_t offset = 0; - - Handle array = Object::New(); - if (first_arg_is_array_buffer) { - Handle derived_from = args[0]->ToObject(); - data = derived_from->GetIndexedPropertiesExternalArrayData(); - - size_t array_buffer_length = convertToUint( - derived_from->Get(String::New("length")), - &try_catch); + if (args[0]->IsObject() && + !args[0]->ToObject()->GetHiddenValue( + String::New(kArrayBufferMarkerPropName)).IsEmpty()) { + buffer = args[0]->ToObject(); + int32_t bufferLength = + convertToUint(buffer->Get(String::New("byteLength")), &try_catch); if (try_catch.HasCaught()) return try_catch.Exception(); - if (data == NULL && array_buffer_length != 0) { - return ThrowException( - String::New("ArrayBuffer doesn't have data")); - } - - if (args.Length() > 1) { - offset = convertToUint(args[1], &try_catch); + if (args.Length() < 2 || args[1]->IsUndefined()) { + byteOffset = 0; + } else { + byteOffset = convertToUint(args[1], &try_catch); if (try_catch.HasCaught()) return try_catch.Exception(); - - // The given byteOffset must be a multiple of the element size of the - // specific type, otherwise an exception is raised. - if (offset % element_size != 0) { + if (byteOffset > bufferLength) { + return ThrowException(String::New("byteOffset out of bounds")); + } + if (byteOffset % element_size != 0) { return ThrowException( - String::New("offset must be multiple of element_size")); + String::New("byteOffset must be multiple of element_size")); } } - if (offset > array_buffer_length) { - return ThrowException( - String::New("byteOffset must be less than ArrayBuffer length.")); - } - - if (args.Length() == 2) { - // If length is not explicitly specified, the length of the ArrayBuffer - // minus the byteOffset must be a multiple of the element size of the - // specific type, or an exception is raised. - length = array_buffer_length - offset; - } - - if (args.Length() != 3) { - if (length % element_size != 0) { + if (args.Length() < 3 || args[2]->IsUndefined()) { + byteLength = bufferLength - byteOffset; + length = byteLength / element_size; + if (byteLength % element_size != 0) { return ThrowException( - String::New("ArrayBuffer length minus the byteOffset must be a " - "multiple of the element size")); + String::New("buffer size must be multiple of element_size")); + } + } else { + length = convertToUint(args[2], &try_catch); + if (try_catch.HasCaught()) return try_catch.Exception(); + byteLength = length * element_size; + if (byteOffset + byteLength > bufferLength) { + return ThrowException(String::New("length out of bounds")); } - length /= element_size; - } - - // If a given byteOffset and length references an area beyond the end of - // the ArrayBuffer an exception is raised. - if (offset + (length * element_size) > array_buffer_length) { - return ThrowException( - String::New("length references an area beyond the end of the " - "ArrayBuffer")); } - - // Hold a reference to the ArrayBuffer so its buffer doesn't get collected. 
- array->Set(String::New(kArrayBufferReferencePropName), args[0], ReadOnly); - } - - if (is_array_buffer_construct) { - array->Set(String::New(kArrayBufferMarkerPropName), True(), ReadOnly); + } else { + length = convertToUint(args[0], &try_catch); + byteLength = length * element_size; + byteOffset = 0; + Handle result = CreateExternalArrayBuffer(byteLength); + if (!result->IsObject()) return result; + buffer = result->ToObject(); } - Persistent persistent_array = Persistent::New(array); - persistent_array.MakeWeak(data, ExternalArrayWeakCallback); - persistent_array.MarkIndependent(); - if (data == NULL && length != 0) { - data = calloc(length, element_size); - if (data == NULL) { - return ThrowException(String::New("Memory allocation failed.")); - } - } + void* data = buffer->GetIndexedPropertiesExternalArrayData(); + ASSERT(data != NULL); + Handle array = Object::New(); array->SetIndexedPropertiesToExternalArrayData( - reinterpret_cast(data) + offset, type, - static_cast(length)); - array->Set(String::New("length"), - Int32::New(static_cast(length)), ReadOnly); - array->Set(String::New("BYTES_PER_ELEMENT"), - Int32::New(static_cast(element_size))); + static_cast(data) + byteOffset, type, length); + array->Set(String::New("byteLength"), Int32::New(byteLength), ReadOnly); + array->Set(String::New("byteOffset"), Int32::New(byteOffset), ReadOnly); + array->Set(String::New("length"), Int32::New(length), ReadOnly); + array->Set(String::New("BYTES_PER_ELEMENT"), Int32::New(element_size)); + array->Set(String::New("buffer"), buffer, ReadOnly); + return array; } void Shell::ExternalArrayWeakCallback(Persistent object, void* data) { HandleScope scope; - Handle prop_name = String::New(kArrayBufferReferencePropName); - Handle converted_object = object->ToObject(); - Local prop_value = converted_object->Get(prop_name); - if (data != NULL && !prop_value->IsObject()) { - free(data); - } + int32_t length = + object->ToObject()->Get(String::New("byteLength"))->Uint32Value(); + V8::AdjustAmountOfExternalAllocatedMemory(-length); + delete[] static_cast(data); object.Dispose(); } Handle Shell::ArrayBuffer(const Arguments& args) { - return CreateExternalArray(args, v8::kExternalByteArray, 0); + return CreateExternalArrayBuffer(args); } @@ -806,8 +799,8 @@ Handle Shell::CreateGlobalTemplate() { global_template->Set(String::New("print"), FunctionTemplate::New(Print)); global_template->Set(String::New("write"), FunctionTemplate::New(Write)); global_template->Set(String::New("read"), FunctionTemplate::New(Read)); - global_template->Set(String::New("readbinary"), - FunctionTemplate::New(ReadBinary)); + global_template->Set(String::New("readbuffer"), + FunctionTemplate::New(ReadBuffer)); global_template->Set(String::New("readline"), FunctionTemplate::New(ReadLine)); global_template->Set(String::New("load"), FunctionTemplate::New(Load)); @@ -977,8 +970,8 @@ void Shell::OnExit() { printf("+--------------------------------------------+-------------+\n"); delete [] counters; } - if (counters_file_ != NULL) - delete counters_file_; + delete counters_file_; + delete counter_map_; } #endif // V8_SHARED @@ -1026,20 +1019,30 @@ static char* ReadChars(const char* name, int* size_out) { } -Handle Shell::ReadBinary(const Arguments& args) { +Handle Shell::ReadBuffer(const Arguments& args) { + ASSERT(sizeof(char) == sizeof(uint8_t)); // NOLINT String::Utf8Value filename(args[0]); - int size; + int length; if (*filename == NULL) { return ThrowException(String::New("Error loading file")); } - char* chars = ReadChars(*filename, 
&size); - if (chars == NULL) { + + uint8_t* data = reinterpret_cast(ReadChars(*filename, &length)); + if (data == NULL) { return ThrowException(String::New("Error reading file")); } - // We skip checking the string for UTF8 characters and use it raw as - // backing store for the external string with 8-bit characters. - BinaryResource* resource = new BinaryResource(chars, size); - return String::NewExternal(resource); + Handle buffer = Object::New(); + buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True()); + Persistent persistent_buffer = Persistent::New(buffer); + persistent_buffer.MakeWeak(data, ExternalArrayWeakCallback); + persistent_buffer.MarkIndependent(); + V8::AdjustAmountOfExternalAllocatedMemory(length); + + buffer->SetIndexedPropertiesToExternalArrayData( + data, kExternalUnsignedByteArray, length); + buffer->Set(String::New("byteLength"), + Int32::New(static_cast(length)), ReadOnly); + return buffer; } @@ -1203,7 +1206,7 @@ void SourceGroup::Execute() { Handle SourceGroup::ReadFile(const char* name) { int size; - const char* chars = ReadChars(name, &size); + char* chars = ReadChars(name, &size); if (chars == NULL) return Handle(); Handle result = String::New(chars, size); delete[] chars; diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h index c872f90958..2789c6db3e 100644 --- a/deps/v8/src/d8.h +++ b/deps/v8/src/d8.h @@ -307,7 +307,7 @@ class Shell : public i::AllStatic { static Handle EnableProfiler(const Arguments& args); static Handle DisableProfiler(const Arguments& args); static Handle Read(const Arguments& args); - static Handle ReadBinary(const Arguments& args); + static Handle ReadBuffer(const Arguments& args); static Handle ReadFromStdin(); static Handle ReadLine(const Arguments& args) { return ReadFromStdin(); @@ -383,9 +383,11 @@ class Shell : public i::AllStatic { static void RunShell(); static bool SetOptions(int argc, char* argv[]); static Handle CreateGlobalTemplate(); + static Handle CreateExternalArrayBuffer(int32_t size); + static Handle CreateExternalArrayBuffer(const Arguments& args); static Handle CreateExternalArray(const Arguments& args, ExternalArrayType type, - size_t element_size); + int32_t element_size); static void ExternalArrayWeakCallback(Persistent object, void* data); }; diff --git a/deps/v8/src/d8.js b/deps/v8/src/d8.js index bf269230b8..819135add4 100644 --- a/deps/v8/src/d8.js +++ b/deps/v8/src/d8.js @@ -2174,7 +2174,7 @@ function DebugResponseDetails(response) { } var current_line = from_line + num; - spacer = maxdigits - (1 + Math.floor(log10(current_line))); + var spacer = maxdigits - (1 + Math.floor(log10(current_line))); if (current_line == Debug.State.currentSourceLine + 1) { for (var i = 0; i < maxdigits; i++) { result += '>'; diff --git a/deps/v8/src/dateparser-inl.h b/deps/v8/src/dateparser-inl.h index 32f0f9ea8f..a5c7143bdd 100644 --- a/deps/v8/src/dateparser-inl.h +++ b/deps/v8/src/dateparser-inl.h @@ -148,6 +148,9 @@ bool DateParser::Parse(Vector str, } else { // Garbage words are illegal if a number has been read. if (has_read_number) return false; + // The first number has to be separated from garbage words by + // whitespace or other separators. + if (scanner.Peek().IsNumber()) return false; } } else if (token.IsAsciiSign() && (tz.IsUTC() || !time.IsEmpty())) { // Parse UTC offset (only after UTC or time). 
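Illustration (not part of the patch): the d8 rework above drops the old
_is_array_buffer_/_array_buffer_ref_ properties in favor of a hidden marker
plus explicit byteLength/byteOffset/buffer bookkeeping. A minimal usage sketch
of the resulting shell semantics, assuming d8 still wires the usual typed-array
constructors (Int32Array and friends) through CreateExternalArray:

  // ArrayBuffer(length) zero-fills its backing store and exposes byteLength.
  var buf = new ArrayBuffer(16);
  print(buf.byteLength);        // 16

  // Views take (buffer, byteOffset, length); byteOffset must be in bounds
  // and a multiple of the element size, otherwise an exception is thrown.
  var words = new Int32Array(buf, 4, 3);
  print(words.length);          // 3
  print(words.byteOffset);      // 4
  print(words.buffer === buf);  // true

  // readbuffer(file), which replaces readbinary(), returns such a buffer
  // object backed by the file's bytes instead of an external string.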
diff --git a/deps/v8/src/debug-agent.cc b/deps/v8/src/debug-agent.cc index bdc7a578ac..e856222775 100644 --- a/deps/v8/src/debug-agent.cc +++ b/deps/v8/src/debug-agent.cc @@ -1,4 +1,4 @@ -// Copyright 2009 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -157,7 +157,9 @@ void DebuggerAgent::OnSessionClosed(DebuggerAgentSession* session) { ScopedLock with(session_access_); ASSERT(session == session_); if (session == session_) { - CloseSession(); + session_->Shutdown(); + delete session_; + session_ = NULL; } } @@ -247,7 +249,7 @@ SmartArrayPointer DebuggerAgentUtil::ReceiveMessage(const Socket* conn) { while (!(c == '\n' && prev_c == '\r')) { prev_c = c; received = conn->Receive(&c, 1); - if (received <= 0) { + if (received == 0) { PrintF("Error %d\n", Socket::LastError()); return SmartArrayPointer(); } @@ -323,41 +325,41 @@ bool DebuggerAgentUtil::SendConnectMessage(const Socket* conn, const char* embedding_host) { static const int kBufferSize = 80; char buffer[kBufferSize]; // Sending buffer. + bool ok; int len; - int r; // Send the header. len = OS::SNPrintF(Vector(buffer, kBufferSize), "Type: connect\r\n"); - r = conn->Send(buffer, len); - if (r != len) return false; + ok = conn->Send(buffer, len); + if (!ok) return false; len = OS::SNPrintF(Vector(buffer, kBufferSize), "V8-Version: %s\r\n", v8::V8::GetVersion()); - r = conn->Send(buffer, len); - if (r != len) return false; + ok = conn->Send(buffer, len); + if (!ok) return false; len = OS::SNPrintF(Vector(buffer, kBufferSize), "Protocol-Version: 1\r\n"); - r = conn->Send(buffer, len); - if (r != len) return false; + ok = conn->Send(buffer, len); + if (!ok) return false; if (embedding_host != NULL) { len = OS::SNPrintF(Vector(buffer, kBufferSize), "Embedding-Host: %s\r\n", embedding_host); - r = conn->Send(buffer, len); - if (r != len) return false; + ok = conn->Send(buffer, len); + if (!ok) return false; } len = OS::SNPrintF(Vector(buffer, kBufferSize), "%s: 0\r\n", kContentLength); - r = conn->Send(buffer, len); - if (r != len) return false; + ok = conn->Send(buffer, len); + if (!ok) return false; // Terminate header with empty line. len = OS::SNPrintF(Vector(buffer, kBufferSize), "\r\n"); - r = conn->Send(buffer, len); - if (r != len) return false; + ok = conn->Send(buffer, len); + if (!ok) return false; // No body for connect message. @@ -397,7 +399,7 @@ bool DebuggerAgentUtil::SendMessage(const Socket* conn, uint16_t character = message[i]; buffer_position += unibrow::Utf8::Encode(buffer + buffer_position, character, previous); - ASSERT(buffer_position < kBufferSize); + ASSERT(buffer_position <= kBufferSize); // Send buffer if full or last character is encoded. 
if (kBufferSize - buffer_position < @@ -454,7 +456,7 @@ int DebuggerAgentUtil::ReceiveAll(const Socket* conn, char* data, int len) { int total_received = 0; while (total_received < len) { int received = conn->Receive(data + total_received, len - total_received); - if (received <= 0) { + if (received == 0) { return total_received; } total_received += received; diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js index 802f6224c4..91838e8ad0 100644 --- a/deps/v8/src/debug-debugger.js +++ b/deps/v8/src/debug-debugger.js @@ -1957,7 +1957,7 @@ DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) { if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) { frame_index = request.arguments.frameNumber; if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) { - return response.failed('Invalid frame number'); + throw new Error('Invalid frame number'); } return this.exec_state_.frame(frame_index); } else { @@ -1966,20 +1966,44 @@ DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) { }; -DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) { - // No frames no scopes. - if (this.exec_state_.frameCount() == 0) { - return response.failed('No scopes'); +// Gets scope host object from request. It is either a function +// ('functionHandle' argument must be specified) or a stack frame +// ('frameNumber' may be specified and the current frame is taken by default). +DebugCommandProcessor.prototype.scopeHolderForScopeRequest_ = + function(request) { + if (request.arguments && "functionHandle" in request.arguments) { + if (!IS_NUMBER(request.arguments.functionHandle)) { + throw new Error('Function handle must be a number'); + } + var function_mirror = LookupMirror(request.arguments.functionHandle); + if (!function_mirror) { + throw new Error('Failed to find function object by handle'); + } + if (!function_mirror.isFunction()) { + throw new Error('Value of non-function type is found by handle'); + } + return function_mirror; + } else { + // No frames no scopes. + if (this.exec_state_.frameCount() == 0) { + throw new Error('No scopes'); + } + + // Get the frame for which the scopes are requested. + var frame = this.frameForScopeRequest_(request); + return frame; } +} - // Get the frame for which the scopes are requested. - var frame = this.frameForScopeRequest_(request); - // Fill all scopes for this frame. - var total_scopes = frame.scopeCount(); +DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) { + var scope_holder = this.scopeHolderForScopeRequest_(request); + + // Fill all scopes for this frame or function. + var total_scopes = scope_holder.scopeCount(); var scopes = []; for (var i = 0; i < total_scopes; i++) { - scopes.push(frame.scope(i)); + scopes.push(scope_holder.scope(i)); } response.body = { fromScope: 0, @@ -1991,24 +2015,19 @@ DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) { DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) { - // No frames no scopes. - if (this.exec_state_.frameCount() == 0) { - return response.failed('No scopes'); - } - - // Get the frame for which the scope is requested. - var frame = this.frameForScopeRequest_(request); + // Get the frame or function for which the scope is requested. + var scope_holder = this.scopeHolderForScopeRequest_(request); // With no scope argument just return top scope. 
var scope_index = 0; if (request.arguments && !IS_UNDEFINED(request.arguments.number)) { scope_index = %ToNumber(request.arguments.number); - if (scope_index < 0 || frame.scopeCount() <= scope_index) { + if (scope_index < 0 || scope_holder.scopeCount() <= scope_index) { return response.failed('Invalid scope number'); } } - response.body = frame.scope(scope_index); + response.body = scope_holder.scope(scope_index); }; diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index f8a1ecf4f9..543ce9f24d 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -892,6 +892,16 @@ void Debug::Iterate(ObjectVisitor* v) { } +void Debug::PutValuesOnStackAndDie(int start, + Address c_entry_fp, + Address last_fp, + Address larger_fp, + int count, + int end) { + OS::Abort(); +} + + Object* Debug::Break(Arguments args) { Heap* heap = isolate_->heap(); HandleScope scope(isolate_); @@ -984,11 +994,34 @@ Object* Debug::Break(Arguments args) { // Count frames until target frame int count = 0; JavaScriptFrameIterator it(isolate_); - while (!it.done() && it.frame()->fp() != thread_local_.last_fp_) { + while (!it.done() && it.frame()->fp() < thread_local_.last_fp_) { count++; it.Advance(); } + // Catch the cases that would lead to crashes and capture + // - C entry FP at which to start stack crawl. + // - FP of the frame at which we plan to stop stepping out (last FP). + // - current FP that's larger than last FP. + // - Counter for the number of steps to step out. + if (it.done()) { + // We crawled the entire stack, never reaching last_fp_. + PutValuesOnStackAndDie(0xBEEEEEEE, + frame->fp(), + thread_local_.last_fp_, + NULL, + count, + 0xFEEEEEEE); + } else if (it.frame()->fp() != thread_local_.last_fp_) { + // We crawled over last_fp_, without getting a match. + PutValuesOnStackAndDie(0xBEEEEEEE, + frame->fp(), + thread_local_.last_fp_, + it.frame()->fp(), + count, + 0xFEEEEEEE); + } + // If we found original frame if (it.frame()->fp() == thread_local_.last_fp_) { if (step_count > 1) { @@ -1418,7 +1451,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) { // Remember source position and frame to handle step next. thread_local_.last_statement_position_ = debug_info->code()->SourceStatementPosition(frame->pc()); - thread_local_.last_fp_ = frame->fp(); + thread_local_.last_fp_ = frame->UnpaddedFP(); } else { // If there's restarter frame on top of the stack, just get the pointer // to function which is going to be restarted. @@ -1487,7 +1520,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) { // propagated on the next Debug::Break. thread_local_.last_statement_position_ = debug_info->code()->SourceStatementPosition(frame->pc()); - thread_local_.last_fp_ = frame->fp(); + thread_local_.last_fp_ = frame->UnpaddedFP(); } // Step in or Step in min @@ -1522,7 +1555,7 @@ bool Debug::StepNextContinue(BreakLocationIterator* break_location_iterator, // Continue if we are still on the same frame and in the same statement. 
int current_statement_position = break_location_iterator->code()->SourceStatementPosition(frame->pc()); - return thread_local_.last_fp_ == frame->fp() && + return thread_local_.last_fp_ == frame->UnpaddedFP() && thread_local_.last_statement_position_ == current_statement_position; } @@ -1723,7 +1756,7 @@ void Debug::ClearOneShot() { void Debug::ActivateStepIn(StackFrame* frame) { ASSERT(!StepOutActive()); - thread_local_.step_into_fp_ = frame->fp(); + thread_local_.step_into_fp_ = frame->UnpaddedFP(); } @@ -1734,7 +1767,7 @@ void Debug::ClearStepIn() { void Debug::ActivateStepOut(StackFrame* frame) { ASSERT(!StepInActive()); - thread_local_.step_out_fp_ = frame->fp(); + thread_local_.step_out_fp_ = frame->UnpaddedFP(); } @@ -1751,20 +1784,19 @@ void Debug::ClearStepNext() { // Helper function to compile full code for debugging. This code will -// have debug break slots and deoptimization -// information. Deoptimization information is required in case that an -// optimized version of this function is still activated on the -// stack. It will also make sure that the full code is compiled with -// the same flags as the previous version - that is flags which can -// change the code generated. The current method of mapping from -// already compiled full code without debug break slots to full code -// with debug break slots depends on the generated code is otherwise -// exactly the same. -static bool CompileFullCodeForDebugging(Handle shared, +// have debug break slots and deoptimization information. Deoptimization +// information is required in case that an optimized version of this +// function is still activated on the stack. It will also make sure that +// the full code is compiled with the same flags as the previous version, +// that is flags which can change the code generated. The current method +// of mapping from already compiled full code without debug break slots +// to full code with debug break slots depends on the generated code is +// otherwise exactly the same. +static bool CompileFullCodeForDebugging(Handle function, Handle current_code) { ASSERT(!current_code->has_debug_break_slots()); - CompilationInfo info(shared); + CompilationInfo info(function); info.MarkCompilingForDebugging(current_code); ASSERT(!info.shared_info()->is_compiled()); ASSERT(!info.isolate()->has_pending_exception()); @@ -1776,7 +1808,7 @@ static bool CompileFullCodeForDebugging(Handle shared, info.isolate()->clear_pending_exception(); #if DEBUG if (result) { - Handle new_code(shared->code()); + Handle new_code(function->shared()->code()); ASSERT(new_code->has_debug_break_slots()); ASSERT(current_code->is_compiled_optimizable() == new_code->is_compiled_optimizable()); @@ -1857,13 +1889,6 @@ static void RedirectActivationsToRecompiledCodeOnThread( // break slots. debug_break_slot_count++; } - if (frame_code->has_self_optimization_header() && - !new_code->has_self_optimization_header()) { - delta -= FullCodeGenerator::self_optimization_header_size(); - } else { - ASSERT(frame_code->has_self_optimization_header() == - new_code->has_self_optimization_header()); - } int debug_break_slot_bytes = debug_break_slot_count * Assembler::kDebugBreakSlotLength; if (FLAG_trace_deopt) { @@ -1987,6 +2012,7 @@ void Debug::PrepareForBreakPoints() { // patch the return address to run in the new compiled code. 
for (int i = 0; i < active_functions.length(); i++) { Handle function = active_functions[i]; + Handle shared(function->shared()); if (function->code()->kind() == Code::FUNCTION && function->code()->has_debug_break_slots()) { @@ -1994,7 +2020,6 @@ void Debug::PrepareForBreakPoints() { continue; } - Handle shared(function->shared()); // If recompilation is not possible just skip it. if (shared->is_toplevel() || !shared->allows_lazy_compilation() || @@ -2014,7 +2039,7 @@ void Debug::PrepareForBreakPoints() { isolate_->debugger()->force_debugger_active(); isolate_->debugger()->set_force_debugger_active(true); ASSERT(current_code->kind() == Code::FUNCTION); - CompileFullCodeForDebugging(shared, current_code); + CompileFullCodeForDebugging(function, current_code); isolate_->debugger()->set_force_debugger_active( prev_force_debugger_active); if (!shared->is_compiled()) { @@ -2234,6 +2259,13 @@ void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id, } +const int Debug::FramePaddingLayout::kInitialSize = 1; + + +// Any even value bigger than kInitialSize as needed for stack scanning. +const int Debug::FramePaddingLayout::kPaddingValue = kInitialSize + 1; + + bool Debug::IsDebugGlobal(GlobalObject* global) { return IsLoaded() && global == debug_context()->global(); } diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h index 474b90bd21..d9c966c37f 100644 --- a/deps/v8/src/debug.h +++ b/deps/v8/src/debug.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -232,6 +232,12 @@ class Debug { void PreemptionWhileInDebugger(); void Iterate(ObjectVisitor* v); + NO_INLINE(void PutValuesOnStackAndDie(int start, + Address c_entry_fp, + Address last_fp, + Address larger_fp, + int count, + int end)); Object* Break(Arguments args); void SetBreakPoint(Handle shared, Handle break_point_object, @@ -245,6 +251,8 @@ class Debug { bool IsBreakOnException(ExceptionBreakType type); void PrepareStep(StepAction step_action, int step_count); void ClearStepping(); + void ClearStepOut(); + bool IsStepping() { return thread_local_.step_count_ > 0; } bool StepNextContinue(BreakLocationIterator* break_location_iterator, JavaScriptFrame* frame); static Handle GetDebugInfo(Handle shared); @@ -455,6 +463,50 @@ class Debug { // Architecture-specific constant. static const bool kFrameDropperSupported; + /** + * Defines layout of a stack frame that supports padding. This is a regular + * internal frame that has a flexible stack structure. LiveEdit can shift + * its lower part up the stack, taking up the 'padding' space when additional + * stack memory is required. + * Such frame is expected immediately above the topmost JavaScript frame. + * + * Stack Layout: + * --- Top + * LiveEdit routine frames + * --- + * C frames of debug handler + * --- + * ... + * --- + * An internal frame that has n padding words: + * - any number of words as needed by code -- upper part of frame + * - padding size: a Smi storing n -- current size of padding + * - padding: n words filled with kPaddingValue in form of Smi + * - 3 context/type words of a regular InternalFrame + * - fp + * --- + * Topmost JavaScript frame + * --- + * ... + * --- Bottom + */ + class FramePaddingLayout : public AllStatic { + public: + // Architecture-specific constant. 
+ static const bool kIsSupported; + + // A size of frame base including fp. Padding words starts right above + // the base. + static const int kFrameBaseSize = 4; + + // A number of words that should be reserved on stack for the LiveEdit use. + // Normally equals 1. Stored on stack in form of Smi. + static const int kInitialSize; + // A value that padding words are filled with (in form of Smi). Going + // bottom-top, the first word not having this value is a counter word. + static const int kPaddingValue; + }; + private: explicit Debug(Isolate* isolate); ~Debug(); @@ -464,7 +516,6 @@ class Debug { void ActivateStepIn(StackFrame* frame); void ClearStepIn(); void ActivateStepOut(StackFrame* frame); - void ClearStepOut(); void ClearStepNext(); // Returns whether the compile succeeded. void RemoveDebugInfo(Handle debug_info); diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc index 2a30ddd3da..3debf55cd6 100644 --- a/deps/v8/src/deoptimizer.cc +++ b/deps/v8/src/deoptimizer.cc @@ -354,6 +354,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, bailout_type_(type), from_(from), fp_to_sp_delta_(fp_to_sp_delta), + has_alignment_padding_(0), input_(NULL), output_count_(0), jsframe_count_(0), @@ -378,6 +379,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, reinterpret_cast(from), fp_to_sp_delta - (2 * kPointerSize)); } + function->shared()->increment_deopt_count(); // Find the optimized code. if (type == EAGER) { ASSERT(from == NULL); @@ -593,12 +595,14 @@ void Deoptimizer::DoComputeOutputFrames() { PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ", reinterpret_cast(function)); function->PrintName(); - PrintF(" => node=%u, pc=0x%08" V8PRIxPTR ", state=%s, took %0.3f ms]\n", + PrintF(" => node=%u, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s," + " took %0.3f ms]\n", node_id, output_[index]->GetPc(), FullCodeGenerator::State2String( static_cast( output_[index]->GetState()->value())), + has_alignment_padding_ ? "with padding" : "no padding", ms); } } @@ -769,7 +773,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, if (FLAG_trace_deopt) { PrintF(" 0x%08" V8PRIxPTR ": ", output_[frame_index]->GetTop() + output_offset); - PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] ", + PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ", output_offset, input_value, input_offset); @@ -789,7 +793,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, if (FLAG_trace_deopt) { PrintF(" 0x%08" V8PRIxPTR ": ", output_[frame_index]->GetTop() + output_offset); - PrintF("[top + %d] <- %" V8PRIdPTR " ; [esp + %d] (%s)\n", + PrintF("[top + %d] <- %" V8PRIdPTR " ; [sp + %d] (%s)\n", output_offset, value, input_offset, @@ -815,7 +819,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, input_->GetOffsetFromSlotIndex(input_slot_index); double value = input_->GetDoubleFrameSlot(input_offset); if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [esp + %d]\n", + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [sp + %d]\n", output_[frame_index]->GetTop() + output_offset, output_offset, value, @@ -1290,7 +1294,7 @@ Object* FrameDescription::GetExpression(int index) { } -void TranslationBuffer::Add(int32_t value) { +void TranslationBuffer::Add(int32_t value, Zone* zone) { // Encode the sign bit in the least significant bit. bool is_negative = (value < 0); uint32_t bits = ((is_negative ? -value : value) << 1) | @@ -1299,7 +1303,7 @@ void TranslationBuffer::Add(int32_t value) { // each byte to indicate whether or not more bytes follow. 
do { uint32_t next = bits >> 7; - contents_.Add(((bits << 1) & 0xFF) | (next != 0)); + contents_.Add(((bits << 1) & 0xFF) | (next != 0), zone); bits = next; } while (bits != 0); } @@ -1332,76 +1336,76 @@ Handle TranslationBuffer::CreateByteArray() { void Translation::BeginConstructStubFrame(int literal_id, unsigned height) { - buffer_->Add(CONSTRUCT_STUB_FRAME); - buffer_->Add(literal_id); - buffer_->Add(height); + buffer_->Add(CONSTRUCT_STUB_FRAME, zone()); + buffer_->Add(literal_id, zone()); + buffer_->Add(height, zone()); } void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) { - buffer_->Add(ARGUMENTS_ADAPTOR_FRAME); - buffer_->Add(literal_id); - buffer_->Add(height); + buffer_->Add(ARGUMENTS_ADAPTOR_FRAME, zone()); + buffer_->Add(literal_id, zone()); + buffer_->Add(height, zone()); } void Translation::BeginJSFrame(int node_id, int literal_id, unsigned height) { - buffer_->Add(JS_FRAME); - buffer_->Add(node_id); - buffer_->Add(literal_id); - buffer_->Add(height); + buffer_->Add(JS_FRAME, zone()); + buffer_->Add(node_id, zone()); + buffer_->Add(literal_id, zone()); + buffer_->Add(height, zone()); } void Translation::StoreRegister(Register reg) { - buffer_->Add(REGISTER); - buffer_->Add(reg.code()); + buffer_->Add(REGISTER, zone()); + buffer_->Add(reg.code(), zone()); } void Translation::StoreInt32Register(Register reg) { - buffer_->Add(INT32_REGISTER); - buffer_->Add(reg.code()); + buffer_->Add(INT32_REGISTER, zone()); + buffer_->Add(reg.code(), zone()); } void Translation::StoreDoubleRegister(DoubleRegister reg) { - buffer_->Add(DOUBLE_REGISTER); - buffer_->Add(DoubleRegister::ToAllocationIndex(reg)); + buffer_->Add(DOUBLE_REGISTER, zone()); + buffer_->Add(DoubleRegister::ToAllocationIndex(reg), zone()); } void Translation::StoreStackSlot(int index) { - buffer_->Add(STACK_SLOT); - buffer_->Add(index); + buffer_->Add(STACK_SLOT, zone()); + buffer_->Add(index, zone()); } void Translation::StoreInt32StackSlot(int index) { - buffer_->Add(INT32_STACK_SLOT); - buffer_->Add(index); + buffer_->Add(INT32_STACK_SLOT, zone()); + buffer_->Add(index, zone()); } void Translation::StoreDoubleStackSlot(int index) { - buffer_->Add(DOUBLE_STACK_SLOT); - buffer_->Add(index); + buffer_->Add(DOUBLE_STACK_SLOT, zone()); + buffer_->Add(index, zone()); } void Translation::StoreLiteral(int literal_id) { - buffer_->Add(LITERAL); - buffer_->Add(literal_id); + buffer_->Add(LITERAL, zone()); + buffer_->Add(literal_id, zone()); } void Translation::StoreArgumentsObject() { - buffer_->Add(ARGUMENTS_OBJECT); + buffer_->Add(ARGUMENTS_OBJECT, zone()); } void Translation::MarkDuplicate() { - buffer_->Add(DUPLICATE); + buffer_->Add(DUPLICATE, zone()); } diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h index 6bc4a51036..9e8a5491a2 100644 --- a/deps/v8/src/deoptimizer.h +++ b/deps/v8/src/deoptimizer.h @@ -221,6 +221,10 @@ class Deoptimizer : public Malloced { } static int output_offset() { return OFFSET_OF(Deoptimizer, output_); } + static int has_alignment_padding_offset() { + return OFFSET_OF(Deoptimizer, has_alignment_padding_); + } + static int GetDeoptimizedCodeCount(Isolate* isolate); static const int kNotDeoptimizationEntry = -1; @@ -322,6 +326,7 @@ class Deoptimizer : public Malloced { BailoutType bailout_type_; Address from_; int fp_to_sp_delta_; + int has_alignment_padding_; // Input frame description. 
FrameDescription* input_; @@ -515,10 +520,10 @@ class FrameDescription { class TranslationBuffer BASE_EMBEDDED { public: - TranslationBuffer() : contents_(256) { } + explicit TranslationBuffer(Zone* zone) : contents_(256, zone) { } int CurrentIndex() const { return contents_.length(); } - void Add(int32_t value); + void Add(int32_t value, Zone* zone); Handle CreateByteArray(); @@ -569,12 +574,14 @@ class Translation BASE_EMBEDDED { DUPLICATE }; - Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count) + Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count, + Zone* zone) : buffer_(buffer), - index_(buffer->CurrentIndex()) { - buffer_->Add(BEGIN); - buffer_->Add(frame_count); - buffer_->Add(jsframe_count); + index_(buffer->CurrentIndex()), + zone_(zone) { + buffer_->Add(BEGIN, zone); + buffer_->Add(frame_count, zone); + buffer_->Add(jsframe_count, zone); } int index() const { return index_; } @@ -593,6 +600,8 @@ class Translation BASE_EMBEDDED { void StoreArgumentsObject(); void MarkDuplicate(); + Zone* zone() const { return zone_; } + static int NumberOfOperandsFor(Opcode opcode); #if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER) @@ -602,6 +611,7 @@ class Translation BASE_EMBEDDED { private: TranslationBuffer* buffer_; int index_; + Zone* zone_; }; diff --git a/deps/v8/src/double.h b/deps/v8/src/double.h index 16a3245e9a..fcf6906af7 100644 --- a/deps/v8/src/double.h +++ b/deps/v8/src/double.h @@ -130,12 +130,6 @@ class Double { return (d64 & kExponentMask) == kExponentMask; } - bool IsNan() const { - uint64_t d64 = AsUint64(); - return ((d64 & kExponentMask) == kExponentMask) && - ((d64 & kSignificandMask) != 0); - } - bool IsInfinite() const { uint64_t d64 = AsUint64(); return ((d64 & kExponentMask) == kExponentMask) && diff --git a/deps/v8/src/elements-kind.cc b/deps/v8/src/elements-kind.cc new file mode 100644 index 0000000000..655a23bf1e --- /dev/null +++ b/deps/v8/src/elements-kind.cc @@ -0,0 +1,134 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "elements-kind.h"
+
+#include "api.h"
+#include "elements.h"
+#include "objects.h"
+
+namespace v8 {
+namespace internal {
+
+
+void PrintElementsKind(FILE* out, ElementsKind kind) {
+  ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
+  PrintF(out, "%s", accessor->name());
+}
+
+
+ElementsKind GetInitialFastElementsKind() {
+  if (FLAG_packed_arrays) {
+    return FAST_SMI_ELEMENTS;
+  } else {
+    return FAST_HOLEY_SMI_ELEMENTS;
+  }
+}
+
+
+struct InitializeFastElementsKindSequence {
+  static void Construct(
+      ElementsKind** fast_elements_kind_sequence_ptr) {
+    ElementsKind* fast_elements_kind_sequence =
+        new ElementsKind[kFastElementsKindCount];
+    *fast_elements_kind_sequence_ptr = fast_elements_kind_sequence;
+    STATIC_ASSERT(FAST_SMI_ELEMENTS == FIRST_FAST_ELEMENTS_KIND);
+    fast_elements_kind_sequence[0] = FAST_SMI_ELEMENTS;
+    fast_elements_kind_sequence[1] = FAST_HOLEY_SMI_ELEMENTS;
+    fast_elements_kind_sequence[2] = FAST_DOUBLE_ELEMENTS;
+    fast_elements_kind_sequence[3] = FAST_HOLEY_DOUBLE_ELEMENTS;
+    fast_elements_kind_sequence[4] = FAST_ELEMENTS;
+    fast_elements_kind_sequence[5] = FAST_HOLEY_ELEMENTS;
+  }
+};
+
+
+static LazyInstance<ElementsKind*,
+                    InitializeFastElementsKindSequence>::type
+    fast_elements_kind_sequence = LAZY_INSTANCE_INITIALIZER;
+
+
+ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number) {
+  ASSERT(sequence_number >= 0 &&
+         sequence_number < kFastElementsKindCount);
+  return fast_elements_kind_sequence.Get()[sequence_number];
+}
+
+int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) {
+  for (int i = 0; i < kFastElementsKindCount; ++i) {
+    if (fast_elements_kind_sequence.Get()[i] == elements_kind) {
+      return i;
+    }
+  }
+  UNREACHABLE();
+  return 0;
+}
+
+
+ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
+                                                bool allow_only_packed) {
+  ASSERT(IsFastElementsKind(elements_kind));
+  ASSERT(elements_kind != TERMINAL_FAST_ELEMENTS_KIND);
+  while (true) {
+    int index =
+        GetSequenceIndexFromFastElementsKind(elements_kind) + 1;
+    elements_kind = GetFastElementsKindFromSequenceIndex(index);
+    if (!IsFastHoleyElementsKind(elements_kind) || !allow_only_packed) {
+      return elements_kind;
+    }
+  }
+  UNREACHABLE();
+  return TERMINAL_FAST_ELEMENTS_KIND;
+}
+
+
+bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
+                                         ElementsKind to_kind) {
+  switch (from_kind) {
+    case FAST_SMI_ELEMENTS:
+      return to_kind != FAST_SMI_ELEMENTS;
+    case FAST_HOLEY_SMI_ELEMENTS:
+      return to_kind != FAST_SMI_ELEMENTS &&
+             to_kind != FAST_HOLEY_SMI_ELEMENTS;
+    case FAST_DOUBLE_ELEMENTS:
+      return to_kind != FAST_SMI_ELEMENTS &&
+             to_kind != FAST_HOLEY_SMI_ELEMENTS &&
+             to_kind != FAST_DOUBLE_ELEMENTS;
+    case FAST_HOLEY_DOUBLE_ELEMENTS:
+      return to_kind == FAST_ELEMENTS ||
+             to_kind == FAST_HOLEY_ELEMENTS;
+    case FAST_ELEMENTS:
+      return to_kind == FAST_HOLEY_ELEMENTS;
+    case FAST_HOLEY_ELEMENTS:
+      return false;
+    default:
+      return false;
+  }
+}
+
+
+} }  // namespace v8::internal
diff --git a/deps/v8/src/elements-kind.h
b/deps/v8/src/elements-kind.h new file mode 100644 index 0000000000..3be7711a35 --- /dev/null +++ b/deps/v8/src/elements-kind.h @@ -0,0 +1,221 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_ELEMENTS_KIND_H_ +#define V8_ELEMENTS_KIND_H_ + +#include "v8checks.h" + +namespace v8 { +namespace internal { + +enum ElementsKind { + // The "fast" kind for elements that only contain SMI values. Must be first + // to make it possible to efficiently check maps for this kind. + FAST_SMI_ELEMENTS, + FAST_HOLEY_SMI_ELEMENTS, + + // The "fast" kind for tagged values. Must be second to make it possible to + // efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind + // together at once. + FAST_ELEMENTS, + FAST_HOLEY_ELEMENTS, + + // The "fast" kind for unwrapped, non-tagged double values. + FAST_DOUBLE_ELEMENTS, + FAST_HOLEY_DOUBLE_ELEMENTS, + + // The "slow" kind. 
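+  // (Dictionary elements live in a SeededNumberDictionary keyed by index;
+  // non-strict arguments objects additionally alias mapped parameters
+  // through context slots.)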
+  DICTIONARY_ELEMENTS,
+  NON_STRICT_ARGUMENTS_ELEMENTS,
+  // The "fast" kind for external arrays
+  EXTERNAL_BYTE_ELEMENTS,
+  EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
+  EXTERNAL_SHORT_ELEMENTS,
+  EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
+  EXTERNAL_INT_ELEMENTS,
+  EXTERNAL_UNSIGNED_INT_ELEMENTS,
+  EXTERNAL_FLOAT_ELEMENTS,
+  EXTERNAL_DOUBLE_ELEMENTS,
+  EXTERNAL_PIXEL_ELEMENTS,
+
+  // Derived constants from ElementsKind
+  FIRST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
+  LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
+  FIRST_FAST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
+  LAST_FAST_ELEMENTS_KIND = FAST_HOLEY_DOUBLE_ELEMENTS,
+  FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
+  LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
+  TERMINAL_FAST_ELEMENTS_KIND = FAST_HOLEY_ELEMENTS
+};
+
+const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
+const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND -
+    FIRST_FAST_ELEMENTS_KIND + 1;
+
+void PrintElementsKind(FILE* out, ElementsKind kind);
+
+ElementsKind GetInitialFastElementsKind();
+
+ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_index);
+
+int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind);
+
+
+inline bool IsDictionaryElementsKind(ElementsKind kind) {
+  return kind == DICTIONARY_ELEMENTS;
+}
+
+
+inline bool IsExternalArrayElementsKind(ElementsKind kind) {
+  return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
+      kind <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND;
+}
+
+
+inline bool IsFastElementsKind(ElementsKind kind) {
+  ASSERT(FIRST_FAST_ELEMENTS_KIND == 0);
+  return kind <= FAST_HOLEY_DOUBLE_ELEMENTS;
+}
+
+
+inline bool IsFastDoubleElementsKind(ElementsKind kind) {
+  return kind == FAST_DOUBLE_ELEMENTS ||
+      kind == FAST_HOLEY_DOUBLE_ELEMENTS;
+}
+
+
+inline bool IsFastSmiOrObjectElementsKind(ElementsKind kind) {
+  return kind == FAST_SMI_ELEMENTS ||
+      kind == FAST_HOLEY_SMI_ELEMENTS ||
+      kind == FAST_ELEMENTS ||
+      kind == FAST_HOLEY_ELEMENTS;
+}
+
+
+inline bool IsFastSmiElementsKind(ElementsKind kind) {
+  return kind == FAST_SMI_ELEMENTS ||
+      kind == FAST_HOLEY_SMI_ELEMENTS;
+}
+
+
+inline bool IsFastObjectElementsKind(ElementsKind kind) {
+  return kind == FAST_ELEMENTS ||
+      kind == FAST_HOLEY_ELEMENTS;
+}
+
+
+inline bool IsFastHoleyElementsKind(ElementsKind kind) {
+  return kind == FAST_HOLEY_SMI_ELEMENTS ||
+      kind == FAST_HOLEY_DOUBLE_ELEMENTS ||
+      kind == FAST_HOLEY_ELEMENTS;
+}
+
+
+inline bool IsHoleyElementsKind(ElementsKind kind) {
+  return IsFastHoleyElementsKind(kind) ||
+      kind == DICTIONARY_ELEMENTS;
+}
+
+
+inline bool IsFastPackedElementsKind(ElementsKind kind) {
+  return kind == FAST_SMI_ELEMENTS ||
+      kind == FAST_DOUBLE_ELEMENTS ||
+      kind == FAST_ELEMENTS;
+}
+
+
+inline ElementsKind GetPackedElementsKind(ElementsKind holey_kind) {
+  if (holey_kind == FAST_HOLEY_SMI_ELEMENTS) {
+    return FAST_SMI_ELEMENTS;
+  }
+  if (holey_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
+    return FAST_DOUBLE_ELEMENTS;
+  }
+  if (holey_kind == FAST_HOLEY_ELEMENTS) {
+    return FAST_ELEMENTS;
+  }
+  return holey_kind;
+}
+
+
+inline ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) {
+  if (packed_kind == FAST_SMI_ELEMENTS) {
+    return FAST_HOLEY_SMI_ELEMENTS;
+  }
+  if (packed_kind == FAST_DOUBLE_ELEMENTS) {
+    return FAST_HOLEY_DOUBLE_ELEMENTS;
+  }
+  if (packed_kind == FAST_ELEMENTS) {
+    return FAST_HOLEY_ELEMENTS;
+  }
+  return packed_kind;
+}
+
+
+inline ElementsKind FastSmiToObjectElementsKind(ElementsKind from_kind) {
+  ASSERT(IsFastSmiElementsKind(from_kind));
+  return (from_kind == FAST_SMI_ELEMENTS)
+      ? FAST_ELEMENTS
+      : FAST_HOLEY_ELEMENTS;
+}
+
+
+inline bool IsSimpleMapChangeTransition(ElementsKind from_kind,
+                                        ElementsKind to_kind) {
+  return (GetHoleyElementsKind(from_kind) == to_kind) ||
+      (IsFastSmiElementsKind(from_kind) &&
+       IsFastObjectElementsKind(to_kind));
+}
+
+
+bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
+                                         ElementsKind to_kind);
+
+
+inline bool IsTransitionableFastElementsKind(ElementsKind from_kind) {
+  return IsFastElementsKind(from_kind) &&
+      from_kind != TERMINAL_FAST_ELEMENTS_KIND;
+}
+
+
+ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
+                                                bool allow_only_packed);
+
+
+inline bool CanTransitionToMoreGeneralFastElementsKind(
+    ElementsKind elements_kind,
+    bool allow_only_packed) {
+  return IsFastElementsKind(elements_kind) &&
+      (elements_kind != TERMINAL_FAST_ELEMENTS_KIND &&
+       (!allow_only_packed || elements_kind != FAST_ELEMENTS));
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_ELEMENTS_KIND_H_
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index aa51ea9b78..f0e1414de4 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -39,8 +39,14 @@
 // Inheritance hierarchy:
 // - ElementsAccessorBase                (abstract)
 //   - FastElementsAccessor             (abstract)
-//     - FastObjectElementsAccessor
+//     - FastSmiOrObjectElementsAccessor
+//       - FastPackedSmiElementsAccessor
+//       - FastHoleySmiElementsAccessor
+//       - FastPackedObjectElementsAccessor
+//       - FastHoleyObjectElementsAccessor
 //     - FastDoubleElementsAccessor
+//       - FastPackedDoubleElementsAccessor
+//       - FastHoleyDoubleElementsAccessor
 //   - ExternalElementsAccessor          (abstract)
 //     - ExternalByteElementsAccessor
 //     - ExternalUnsignedByteElementsAccessor
@@ -59,15 +65,24 @@
 namespace v8 {
 namespace internal {
 
 
+static const int kPackedSizeNotKnown = -1;
+
+
 // First argument in list is the accessor class, the second argument is the
 // accessor ElementsKind, and the third is the backing store class.  Use the
 // fast element handler for smi-only arrays.  The implementation is currently
 // identical.  Note that the order must match that of the ElementsKind enum for
 // the |accessor_array[]| below to work.
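As an aside to the elements-kind.h helpers above: a self-contained sketch of the packed-to-holey half of the kind lattice they encode. This is not part of the patch; the enum mirrors the V8 names in the same order but with illustrative values, and no V8 headers are used:

    #include <cassert>
    #include <cstdio>

    // Mirrors the fast subset of V8's ElementsKind, in the same order.
    enum ElementsKind {
      FAST_SMI_ELEMENTS,
      FAST_HOLEY_SMI_ELEMENTS,
      FAST_ELEMENTS,
      FAST_HOLEY_ELEMENTS,        // TERMINAL_FAST_ELEMENTS_KIND
      FAST_DOUBLE_ELEMENTS,
      FAST_HOLEY_DOUBLE_ELEMENTS
    };

    // Same mapping as the patch's GetHoleyElementsKind: each packed kind maps
    // to its holey counterpart; anything else is already as holey as it gets.
    ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) {
      switch (packed_kind) {
        case FAST_SMI_ELEMENTS:    return FAST_HOLEY_SMI_ELEMENTS;
        case FAST_DOUBLE_ELEMENTS: return FAST_HOLEY_DOUBLE_ELEMENTS;
        case FAST_ELEMENTS:        return FAST_HOLEY_ELEMENTS;
        default:                   return packed_kind;
      }
    }

    int main() {
      // Storing a hole in a packed array forces the holey variant of the kind.
      assert(GetHoleyElementsKind(FAST_SMI_ELEMENTS) == FAST_HOLEY_SMI_ELEMENTS);
      // Holey kinds are fixed points of the mapping.
      assert(GetHoleyElementsKind(FAST_HOLEY_ELEMENTS) == FAST_HOLEY_ELEMENTS);
      std::printf("lattice sketch ok\n");
      return 0;
    }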
#define ELEMENTS_LIST(V) \ - V(FastObjectElementsAccessor, FAST_SMI_ONLY_ELEMENTS, FixedArray) \ - V(FastObjectElementsAccessor, FAST_ELEMENTS, FixedArray) \ - V(FastDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, FixedDoubleArray) \ + V(FastPackedSmiElementsAccessor, FAST_SMI_ELEMENTS, FixedArray) \ + V(FastHoleySmiElementsAccessor, FAST_HOLEY_SMI_ELEMENTS, \ + FixedArray) \ + V(FastPackedObjectElementsAccessor, FAST_ELEMENTS, FixedArray) \ + V(FastHoleyObjectElementsAccessor, FAST_HOLEY_ELEMENTS, FixedArray) \ + V(FastPackedDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, \ + FixedDoubleArray) \ + V(FastHoleyDoubleElementsAccessor, FAST_HOLEY_DOUBLE_ELEMENTS, \ + FixedDoubleArray) \ V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, \ SeededNumberDictionary) \ V(NonStrictArgumentsElementsAccessor, NON_STRICT_ARGUMENTS_ELEMENTS, \ @@ -139,8 +154,6 @@ void CopyObjectToObjectElements(FixedArray* from, uint32_t to_start, int raw_copy_size) { ASSERT(to->map() != HEAP->fixed_cow_array_map()); - ASSERT(from_kind == FAST_ELEMENTS || from_kind == FAST_SMI_ONLY_ELEMENTS); - ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS); int copy_size = raw_copy_size; if (raw_copy_size < 0) { ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || @@ -148,7 +161,7 @@ void CopyObjectToObjectElements(FixedArray* from, copy_size = Min(from->length() - from_start, to->length() - to_start); #ifdef DEBUG - // FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already + // FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already // marked with the hole. if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { for (int i = to_start + copy_size; i < to->length(); ++i) { @@ -160,12 +173,15 @@ void CopyObjectToObjectElements(FixedArray* from, ASSERT((copy_size + static_cast(to_start)) <= to->length() && (copy_size + static_cast(from_start)) <= from->length()); if (copy_size == 0) return; + ASSERT(IsFastSmiOrObjectElementsKind(from_kind)); + ASSERT(IsFastSmiOrObjectElementsKind(to_kind)); Address to_address = to->address() + FixedArray::kHeaderSize; Address from_address = from->address() + FixedArray::kHeaderSize; CopyWords(reinterpret_cast(to_address) + to_start, reinterpret_cast(from_address) + from_start, copy_size); - if (from_kind == FAST_ELEMENTS && to_kind == FAST_ELEMENTS) { + if (IsFastObjectElementsKind(from_kind) && + IsFastObjectElementsKind(to_kind)) { Heap* heap = from->GetHeap(); if (!heap->InNewSpace(to)) { heap->RecordWrites(to->address(), @@ -190,7 +206,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from, raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); copy_size = from->max_number_key() + 1 - from_start; #ifdef DEBUG - // FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already + // Fast object arrays cannot be uninitialized. Ensure they are already // marked with the hole. 
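The raw_copy_size decoding in CopyObjectToObjectElements above follows a sentinel convention worth spelling out. A standalone sketch of the same convention, with plain ints standing in for tagged values and names mirroring ElementsAccessor's constants:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Negative copy sizes are encoded requests, as in ElementsAccessor.
    const int kCopyToEnd = -1;
    const int kCopyToEndAndInitializeToHole = -2;
    const int kTheHole = -999;  // stand-in for the hole sentinel

    void Copy(const std::vector<int>& from, int from_start,
              std::vector<int>& to, int to_start, int raw_copy_size) {
      int copy_size = raw_copy_size;
      if (raw_copy_size < 0) {
        assert(raw_copy_size == kCopyToEnd ||
               raw_copy_size == kCopyToEndAndInitializeToHole);
        copy_size = std::min<int>(from.size() - from_start,
                                  to.size() - to_start);
        // Pad the destination tail with holes; this is the state the
        // patch's DEBUG check then verifies.
        if (raw_copy_size == kCopyToEndAndInitializeToHole) {
          for (size_t i = to_start + copy_size; i < to.size(); ++i)
            to[i] = kTheHole;
        }
      }
      for (int i = 0; i < copy_size; ++i)
        to[to_start + i] = from[from_start + i];
    }

    int main() {
      std::vector<int> from(3, 7), to(5, 0);
      Copy(from, 0, to, 0, kCopyToEndAndInitializeToHole);
      assert(to[2] == 7 && to[3] == kTheHole && to[4] == kTheHole);
      return 0;
    }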
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { for (int i = to_start + copy_size; i < to->length(); ++i) { @@ -200,7 +216,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from, #endif } ASSERT(to != from); - ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS); + ASSERT(IsFastSmiOrObjectElementsKind(to_kind)); if (copy_size == 0) return; uint32_t to_length = to->length(); if (to_start + copy_size > to_length) { @@ -216,7 +232,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from, to->set_the_hole(i + to_start); } } - if (to_kind == FAST_ELEMENTS) { + if (IsFastObjectElementsKind(to_kind)) { if (!heap->InNewSpace(to)) { heap->RecordWrites(to->address(), to->OffsetOfElementAt(to_start), @@ -234,7 +250,7 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements( ElementsKind to_kind, uint32_t to_start, int raw_copy_size) { - ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS); + ASSERT(IsFastSmiOrObjectElementsKind(to_kind)); int copy_size = raw_copy_size; if (raw_copy_size < 0) { ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || @@ -242,7 +258,7 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements( copy_size = Min(from->length() - from_start, to->length() - to_start); #ifdef DEBUG - // FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already + // FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already // marked with the hole. if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { for (int i = to_start + copy_size; i < to->length(); ++i) { @@ -255,14 +271,14 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements( (copy_size + static_cast(from_start)) <= from->length()); if (copy_size == 0) return from; for (int i = 0; i < copy_size; ++i) { - if (to_kind == FAST_SMI_ONLY_ELEMENTS) { + if (IsFastSmiElementsKind(to_kind)) { UNIMPLEMENTED(); return Failure::Exception(); } else { MaybeObject* maybe_value = from->get(i + from_start); Object* value; - ASSERT(to_kind == FAST_ELEMENTS); - // Because FAST_DOUBLE_ELEMENTS -> FAST_ELEMENT allocate HeapObjects + ASSERT(IsFastObjectElementsKind(to_kind)); + // Because Double -> Object elements transitions allocate HeapObjects // iteratively, the allocate must succeed within a single GC cycle, // otherwise the retry after the GC will also fail. 
In order to ensure // that no GC is triggered, allocate HeapNumbers from old space if they @@ -313,6 +329,76 @@ static void CopyDoubleToDoubleElements(FixedDoubleArray* from, } +static void CopySmiToDoubleElements(FixedArray* from, + uint32_t from_start, + FixedDoubleArray* to, + uint32_t to_start, + int raw_copy_size) { + int copy_size = raw_copy_size; + if (raw_copy_size < 0) { + ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || + raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); + copy_size = from->length() - from_start; + if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { + for (int i = to_start + copy_size; i < to->length(); ++i) { + to->set_the_hole(i); + } + } + } + ASSERT((copy_size + static_cast(to_start)) <= to->length() && + (copy_size + static_cast(from_start)) <= from->length()); + if (copy_size == 0) return; + Object* the_hole = from->GetHeap()->the_hole_value(); + for (uint32_t from_end = from_start + static_cast(copy_size); + from_start < from_end; from_start++, to_start++) { + Object* hole_or_smi = from->get(from_start); + if (hole_or_smi == the_hole) { + to->set_the_hole(to_start); + } else { + to->set(to_start, Smi::cast(hole_or_smi)->value()); + } + } +} + + +static void CopyPackedSmiToDoubleElements(FixedArray* from, + uint32_t from_start, + FixedDoubleArray* to, + uint32_t to_start, + int packed_size, + int raw_copy_size) { + int copy_size = raw_copy_size; + uint32_t to_end; + if (raw_copy_size < 0) { + ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || + raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); + copy_size = from->length() - from_start; + if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { + to_end = to->length(); + } else { + to_end = to_start + static_cast(copy_size); + } + } else { + to_end = to_start + static_cast(copy_size); + } + ASSERT(static_cast(to_end) <= to->length()); + ASSERT(packed_size >= 0 && packed_size <= copy_size); + ASSERT((copy_size + static_cast(to_start)) <= to->length() && + (copy_size + static_cast(from_start)) <= from->length()); + if (copy_size == 0) return; + for (uint32_t from_end = from_start + static_cast(packed_size); + from_start < from_end; from_start++, to_start++) { + Object* smi = from->get(from_start); + ASSERT(!smi->IsTheHole()); + to->set(to_start, Smi::cast(smi)->value()); + } + + while (to_start < to_end) { + to->set_the_hole(to_start++); + } +} + + static void CopyObjectToDoubleElements(FixedArray* from, uint32_t from_start, FixedDoubleArray* to, @@ -332,12 +418,14 @@ static void CopyObjectToDoubleElements(FixedArray* from, ASSERT((copy_size + static_cast(to_start)) <= to->length() && (copy_size + static_cast(from_start)) <= from->length()); if (copy_size == 0) return; - for (int i = 0; i < copy_size; i++) { - Object* hole_or_object = from->get(i + from_start); - if (hole_or_object->IsTheHole()) { - to->set_the_hole(i + to_start); + Object* the_hole = from->GetHeap()->the_hole_value(); + for (uint32_t from_end = from_start + copy_size; + from_start < from_end; from_start++, to_start++) { + Object* hole_or_object = from->get(from_start); + if (hole_or_object == the_hole) { + to->set_the_hole(to_start); } else { - to->set(i + to_start, hole_or_object->Number()); + to->set(to_start, hole_or_object->Number()); } } } @@ -404,6 +492,38 @@ class ElementsAccessorBase : public ElementsAccessor { virtual ElementsKind kind() const { return ElementsTraits::Kind; } + static void ValidateContents(JSObject* holder, int length) { + } + + static 
void ValidateImpl(JSObject* holder) { + FixedArrayBase* fixed_array_base = holder->elements(); + // When objects are first allocated, its elements are Failures. + if (fixed_array_base->IsFailure()) return; + if (!fixed_array_base->IsHeapObject()) return; + Map* map = fixed_array_base->map(); + // Arrays that have been shifted in place can't be verified. + Heap* heap = holder->GetHeap(); + if (map == heap->raw_unchecked_one_pointer_filler_map() || + map == heap->raw_unchecked_two_pointer_filler_map() || + map == heap->free_space_map()) { + return; + } + int length = 0; + if (holder->IsJSArray()) { + Object* length_obj = JSArray::cast(holder)->length(); + if (length_obj->IsSmi()) { + length = Smi::cast(length_obj)->value(); + } + } else { + length = fixed_array_base->length(); + } + ElementsAccessorSubclass::ValidateContents(holder, length); + } + + virtual void Validate(JSObject* holder) { + ElementsAccessorSubclass::ValidateImpl(holder); + } + static bool HasElementImpl(Object* receiver, JSObject* holder, uint32_t key, @@ -424,10 +544,10 @@ class ElementsAccessorBase : public ElementsAccessor { receiver, holder, key, BackingStore::cast(backing_store)); } - virtual MaybeObject* Get(Object* receiver, - JSObject* holder, - uint32_t key, - FixedArrayBase* backing_store) { + MUST_USE_RESULT virtual MaybeObject* Get(Object* receiver, + JSObject* holder, + uint32_t key, + FixedArrayBase* backing_store) { if (backing_store == NULL) { backing_store = holder->elements(); } @@ -435,76 +555,94 @@ class ElementsAccessorBase : public ElementsAccessor { receiver, holder, key, BackingStore::cast(backing_store)); } - static MaybeObject* GetImpl(Object* receiver, - JSObject* obj, - uint32_t key, - BackingStore* backing_store) { + MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver, + JSObject* obj, + uint32_t key, + BackingStore* backing_store) { return (key < ElementsAccessorSubclass::GetCapacityImpl(backing_store)) ? 
backing_store->get(key) : backing_store->GetHeap()->the_hole_value(); } - virtual MaybeObject* SetLength(JSArray* array, - Object* length) { + MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* array, + Object* length) { return ElementsAccessorSubclass::SetLengthImpl( array, length, BackingStore::cast(array->elements())); } - static MaybeObject* SetLengthImpl(JSObject* obj, - Object* length, - BackingStore* backing_store); + MUST_USE_RESULT static MaybeObject* SetLengthImpl( + JSObject* obj, + Object* length, + BackingStore* backing_store); - virtual MaybeObject* SetCapacityAndLength(JSArray* array, - int capacity, - int length) { + MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength( + JSArray* array, + int capacity, + int length) { return ElementsAccessorSubclass::SetFastElementsCapacityAndLength( array, capacity, length); } - static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj, - int capacity, - int length) { + MUST_USE_RESULT static MaybeObject* SetFastElementsCapacityAndLength( + JSObject* obj, + int capacity, + int length) { UNIMPLEMENTED(); return obj; } - virtual MaybeObject* Delete(JSObject* obj, - uint32_t key, - JSReceiver::DeleteMode mode) = 0; - - static MaybeObject* CopyElementsImpl(FixedArrayBase* from, - uint32_t from_start, - FixedArrayBase* to, - ElementsKind to_kind, - uint32_t to_start, - int copy_size) { + MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj, + uint32_t key, + JSReceiver::DeleteMode mode) = 0; + + MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from, + uint32_t from_start, + FixedArrayBase* to, + ElementsKind to_kind, + uint32_t to_start, + int packed_size, + int copy_size) { UNREACHABLE(); return NULL; } - virtual MaybeObject* CopyElements(JSObject* from_holder, - uint32_t from_start, - FixedArrayBase* to, - ElementsKind to_kind, - uint32_t to_start, - int copy_size, - FixedArrayBase* from) { + MUST_USE_RESULT virtual MaybeObject* CopyElements(JSObject* from_holder, + uint32_t from_start, + FixedArrayBase* to, + ElementsKind to_kind, + uint32_t to_start, + int copy_size, + FixedArrayBase* from) { + int packed_size = kPackedSizeNotKnown; if (from == NULL) { from = from_holder->elements(); } + + if (from_holder) { + ElementsKind elements_kind = from_holder->GetElementsKind(); + bool is_packed = IsFastPackedElementsKind(elements_kind) && + from_holder->IsJSArray(); + if (is_packed) { + packed_size = Smi::cast(JSArray::cast(from_holder)->length())->value(); + if (copy_size >= 0 && packed_size > copy_size) { + packed_size = copy_size; + } + } + } if (from->length() == 0) { return from; } return ElementsAccessorSubclass::CopyElementsImpl( - from, from_start, to, to_kind, to_start, copy_size); + from, from_start, to, to_kind, to_start, packed_size, copy_size); } - virtual MaybeObject* AddElementsToFixedArray(Object* receiver, - JSObject* holder, - FixedArray* to, - FixedArrayBase* from) { + MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray( + Object* receiver, + JSObject* holder, + FixedArray* to, + FixedArrayBase* from) { int len0 = to->length(); #ifdef DEBUG if (FLAG_enable_slow_asserts) { @@ -620,6 +758,7 @@ class FastElementsAccessor KindTraits>(name) {} protected: friend class ElementsAccessorBase; + friend class NonStrictArgumentsElementsAccessor; typedef typename KindTraits::BackingStore BackingStore; @@ -630,10 +769,21 @@ class FastElementsAccessor Object* length_object, uint32_t length) { uint32_t old_capacity = backing_store->length(); + Object* old_length = array->length(); + bool 
same_size = old_length->IsSmi() && + static_cast(Smi::cast(old_length)->value()) == length; + ElementsKind kind = array->GetElementsKind(); + + if (!same_size && IsFastElementsKind(kind) && + !IsFastHoleyElementsKind(kind)) { + kind = GetHoleyElementsKind(kind); + MaybeObject* maybe_obj = array->TransitionElementsKind(kind); + if (maybe_obj->IsFailure()) return maybe_obj; + } // Check whether the backing store should be shrunk. if (length <= old_capacity) { - if (array->HasFastTypeElements()) { + if (array->HasFastSmiOrObjectElements()) { MaybeObject* maybe_obj = array->EnsureWritableFastElements(); if (!maybe_obj->To(&backing_store)) return maybe_obj; } @@ -665,39 +815,40 @@ class FastElementsAccessor MaybeObject* result = FastElementsAccessorSubclass:: SetFastElementsCapacityAndLength(array, new_capacity, length); if (result->IsFailure()) return result; + array->ValidateElements(); return length_object; } // Request conversion to slow elements. return array->GetHeap()->undefined_value(); } -}; - - -class FastObjectElementsAccessor - : public FastElementsAccessor, - kPointerSize> { - public: - explicit FastObjectElementsAccessor(const char* name) - : FastElementsAccessor, - kPointerSize>(name) {} static MaybeObject* DeleteCommon(JSObject* obj, - uint32_t key) { - ASSERT(obj->HasFastElements() || - obj->HasFastSmiOnlyElements() || + uint32_t key, + JSReceiver::DeleteMode mode) { + ASSERT(obj->HasFastSmiOrObjectElements() || + obj->HasFastDoubleElements() || obj->HasFastArgumentsElements()); + typename KindTraits::BackingStore* backing_store = + KindTraits::BackingStore::cast(obj->elements()); Heap* heap = obj->GetHeap(); - FixedArray* backing_store = FixedArray::cast(obj->elements()); if (backing_store->map() == heap->non_strict_arguments_elements_map()) { - backing_store = FixedArray::cast(backing_store->get(1)); + backing_store = + KindTraits::BackingStore::cast( + FixedArray::cast(backing_store)->get(1)); } else { - Object* writable; - MaybeObject* maybe = obj->EnsureWritableFastElements(); - if (!maybe->ToObject(&writable)) return maybe; - backing_store = FixedArray::cast(writable); + ElementsKind kind = KindTraits::Kind; + if (IsFastPackedElementsKind(kind)) { + MaybeObject* transitioned = + obj->TransitionElementsKind(GetHoleyElementsKind(kind)); + if (transitioned->IsFailure()) return transitioned; + } + if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) { + Object* writable; + MaybeObject* maybe = obj->EnsureWritableFastElements(); + if (!maybe->ToObject(&writable)) return maybe; + backing_store = KindTraits::BackingStore::cast(writable); + } } uint32_t length = static_cast( obj->IsJSArray() @@ -709,15 +860,14 @@ class FastObjectElementsAccessor // has too few used values, normalize it. // To avoid doing the check on every delete we require at least // one adjacent hole to the value being deleted. - Object* hole = heap->the_hole_value(); const int kMinLengthForSparsenessCheck = 64; if (backing_store->length() >= kMinLengthForSparsenessCheck && !heap->InNewSpace(backing_store) && - ((key > 0 && backing_store->get(key - 1) == hole) || - (key + 1 < length && backing_store->get(key + 1) == hole))) { + ((key > 0 && backing_store->is_the_hole(key - 1)) || + (key + 1 < length && backing_store->is_the_hole(key + 1)))) { int num_used = 0; for (int i = 0; i < backing_store->length(); ++i) { - if (backing_store->get(i) != hole) ++num_used; + if (!backing_store->is_the_hole(i)) ++num_used; // Bail out early if more than 1/4 is used. 
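DeleteCommon's normalization check here is a two-gate heuristic. Restated standalone (a bool vector stands in for the backing store, and the new-space test from the real code is dropped):

    #include <cstdio>
    #include <vector>

    const int kMinLengthForSparsenessCheck = 64;

    // Gate 1: only large stores where the deleted slot has an adjacent hole
    // are scanned at all (the patch additionally requires old space).
    // Gate 2: the scan bails out as soon as more than 1/4 of slots are used.
    bool ShouldNormalize(const std::vector<bool>& is_hole, int key) {
      int length = static_cast<int>(is_hole.size());
      if (length < kMinLengthForSparsenessCheck) return false;
      bool adjacent_hole = (key > 0 && is_hole[key - 1]) ||
                           (key + 1 < length && is_hole[key + 1]);
      if (!adjacent_hole) return false;
      int num_used = 0;
      for (int i = 0; i < length; ++i) {
        if (!is_hole[i]) ++num_used;
        if (4 * num_used > length) return false;  // dense enough: stay fast
      }
      return true;  // sparse: worth switching to dictionary elements
    }

    int main() {
      std::vector<bool> holes(128, true);
      holes[10] = false;  // a single live element in 128 slots
      std::printf("normalize? %d\n",
                  static_cast<int>(ShouldNormalize(holes, 11)));  // prints 1
      return 0;
    }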
if (4 * num_used > backing_store->length()) break; } @@ -730,27 +880,90 @@ class FastObjectElementsAccessor return heap->true_value(); } + virtual MaybeObject* Delete(JSObject* obj, + uint32_t key, + JSReceiver::DeleteMode mode) { + return DeleteCommon(obj, key, mode); + } + + static bool HasElementImpl( + Object* receiver, + JSObject* holder, + uint32_t key, + typename KindTraits::BackingStore* backing_store) { + if (key >= static_cast(backing_store->length())) { + return false; + } + return !backing_store->is_the_hole(key); + } + + static void ValidateContents(JSObject* holder, int length) { +#if DEBUG + FixedArrayBase* elements = holder->elements(); + Heap* heap = elements->GetHeap(); + Map* map = elements->map(); + ASSERT((IsFastSmiOrObjectElementsKind(KindTraits::Kind) && + (map == heap->fixed_array_map() || + map == heap->fixed_cow_array_map())) || + (IsFastDoubleElementsKind(KindTraits::Kind) == + ((map == heap->fixed_array_map() && length == 0) || + map == heap->fixed_double_array_map()))); + for (int i = 0; i < length; i++) { + typename KindTraits::BackingStore* backing_store = + KindTraits::BackingStore::cast(elements); + ASSERT((!IsFastSmiElementsKind(KindTraits::Kind) || + static_cast(backing_store->get(i))->IsSmi()) || + (IsFastHoleyElementsKind(KindTraits::Kind) == + backing_store->is_the_hole(i))); + } +#endif + } +}; + + +template +class FastSmiOrObjectElementsAccessor + : public FastElementsAccessor { + public: + explicit FastSmiOrObjectElementsAccessor(const char* name) + : FastElementsAccessor(name) {} + static MaybeObject* CopyElementsImpl(FixedArrayBase* from, uint32_t from_start, FixedArrayBase* to, ElementsKind to_kind, uint32_t to_start, + int packed_size, int copy_size) { - switch (to_kind) { - case FAST_SMI_ONLY_ELEMENTS: - case FAST_ELEMENTS: { - CopyObjectToObjectElements( - FixedArray::cast(from), ElementsTraits::Kind, from_start, - FixedArray::cast(to), to_kind, to_start, copy_size); - return from; - } - case FAST_DOUBLE_ELEMENTS: + if (IsFastSmiOrObjectElementsKind(to_kind)) { + CopyObjectToObjectElements( + FixedArray::cast(from), KindTraits::Kind, from_start, + FixedArray::cast(to), to_kind, to_start, copy_size); + } else if (IsFastDoubleElementsKind(to_kind)) { + if (IsFastSmiElementsKind(KindTraits::Kind)) { + if (IsFastPackedElementsKind(KindTraits::Kind) && + packed_size != kPackedSizeNotKnown) { + CopyPackedSmiToDoubleElements( + FixedArray::cast(from), from_start, + FixedDoubleArray::cast(to), to_start, + packed_size, copy_size); + } else { + CopySmiToDoubleElements( + FixedArray::cast(from), from_start, + FixedDoubleArray::cast(to), to_start, copy_size); + } + } else { CopyObjectToDoubleElements( FixedArray::cast(from), from_start, FixedDoubleArray::cast(to), to_start, copy_size); - return from; - default: - UNREACHABLE(); + } + } else { + UNREACHABLE(); } return to->GetHeap()->undefined_value(); } @@ -759,64 +972,102 @@ class FastObjectElementsAccessor static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj, uint32_t capacity, uint32_t length) { - JSObject::SetFastElementsCapacityMode set_capacity_mode = - obj->HasFastSmiOnlyElements() - ? JSObject::kAllowSmiOnlyElements - : JSObject::kDontAllowSmiOnlyElements; + JSObject::SetFastElementsCapacitySmiMode set_capacity_mode = + obj->HasFastSmiElements() + ? 
JSObject::kAllowSmiElements + : JSObject::kDontAllowSmiElements; return obj->SetFastElementsCapacityAndLength(capacity, length, set_capacity_mode); } +}; - protected: - friend class FastElementsAccessor, - kPointerSize>; - virtual MaybeObject* Delete(JSObject* obj, - uint32_t key, - JSReceiver::DeleteMode mode) { - return DeleteCommon(obj, key); - } +class FastPackedSmiElementsAccessor + : public FastSmiOrObjectElementsAccessor< + FastPackedSmiElementsAccessor, + ElementsKindTraits > { + public: + explicit FastPackedSmiElementsAccessor(const char* name) + : FastSmiOrObjectElementsAccessor< + FastPackedSmiElementsAccessor, + ElementsKindTraits >(name) {} +}; + + +class FastHoleySmiElementsAccessor + : public FastSmiOrObjectElementsAccessor< + FastHoleySmiElementsAccessor, + ElementsKindTraits > { + public: + explicit FastHoleySmiElementsAccessor(const char* name) + : FastSmiOrObjectElementsAccessor< + FastHoleySmiElementsAccessor, + ElementsKindTraits >(name) {} +}; + + +class FastPackedObjectElementsAccessor + : public FastSmiOrObjectElementsAccessor< + FastPackedObjectElementsAccessor, + ElementsKindTraits > { + public: + explicit FastPackedObjectElementsAccessor(const char* name) + : FastSmiOrObjectElementsAccessor< + FastPackedObjectElementsAccessor, + ElementsKindTraits >(name) {} +}; + + +class FastHoleyObjectElementsAccessor + : public FastSmiOrObjectElementsAccessor< + FastHoleyObjectElementsAccessor, + ElementsKindTraits > { + public: + explicit FastHoleyObjectElementsAccessor(const char* name) + : FastSmiOrObjectElementsAccessor< + FastHoleyObjectElementsAccessor, + ElementsKindTraits >(name) {} }; +template class FastDoubleElementsAccessor - : public FastElementsAccessor, + : public FastElementsAccessor { public: explicit FastDoubleElementsAccessor(const char* name) - : FastElementsAccessor, + : FastElementsAccessor(name) {} static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj, uint32_t capacity, uint32_t length) { - return obj->SetFastDoubleElementsCapacityAndLength(capacity, length); + return obj->SetFastDoubleElementsCapacityAndLength(capacity, + length); } protected: - friend class ElementsAccessorBase >; - friend class FastElementsAccessor, - kDoubleSize>; - static MaybeObject* CopyElementsImpl(FixedArrayBase* from, uint32_t from_start, FixedArrayBase* to, ElementsKind to_kind, uint32_t to_start, + int packed_size, int copy_size) { switch (to_kind) { - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: return CopyDoubleToObjectElements( FixedDoubleArray::cast(from), from_start, FixedArray::cast(to), to_kind, to_start, copy_size); case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: CopyDoubleToDoubleElements(FixedDoubleArray::cast(from), from_start, FixedDoubleArray::cast(to), to_start, copy_size); @@ -826,26 +1077,35 @@ class FastDoubleElementsAccessor } return to->GetHeap()->undefined_value(); } +}; - virtual MaybeObject* Delete(JSObject* obj, - uint32_t key, - JSReceiver::DeleteMode mode) { - int length = obj->IsJSArray() - ? 
Smi::cast(JSArray::cast(obj)->length())->value() - : FixedDoubleArray::cast(obj->elements())->length(); - if (key < static_cast(length)) { - FixedDoubleArray::cast(obj->elements())->set_the_hole(key); - } - return obj->GetHeap()->true_value(); - } - static bool HasElementImpl(Object* receiver, - JSObject* holder, - uint32_t key, - FixedDoubleArray* backing_store) { - return key < static_cast(backing_store->length()) && - !backing_store->is_the_hole(key); - } +class FastPackedDoubleElementsAccessor + : public FastDoubleElementsAccessor< + FastPackedDoubleElementsAccessor, + ElementsKindTraits > { + public: + friend class ElementsAccessorBase >; + explicit FastPackedDoubleElementsAccessor(const char* name) + : FastDoubleElementsAccessor< + FastPackedDoubleElementsAccessor, + ElementsKindTraits >(name) {} +}; + + +class FastHoleyDoubleElementsAccessor + : public FastDoubleElementsAccessor< + FastHoleyDoubleElementsAccessor, + ElementsKindTraits > { + public: + friend class ElementsAccessorBase< + FastHoleyDoubleElementsAccessor, + ElementsKindTraits >; + explicit FastHoleyDoubleElementsAccessor(const char* name) + : FastDoubleElementsAccessor< + FastHoleyDoubleElementsAccessor, + ElementsKindTraits >(name) {} }; @@ -866,27 +1126,28 @@ class ExternalElementsAccessor friend class ElementsAccessorBase >; - static MaybeObject* GetImpl(Object* receiver, - JSObject* obj, - uint32_t key, - BackingStore* backing_store) { + MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver, + JSObject* obj, + uint32_t key, + BackingStore* backing_store) { return key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store) ? backing_store->get(key) : backing_store->GetHeap()->undefined_value(); } - static MaybeObject* SetLengthImpl(JSObject* obj, - Object* length, - BackingStore* backing_store) { + MUST_USE_RESULT static MaybeObject* SetLengthImpl( + JSObject* obj, + Object* length, + BackingStore* backing_store) { // External arrays do not support changing their length. UNREACHABLE(); return obj; } - virtual MaybeObject* Delete(JSObject* obj, - uint32_t key, - JSReceiver::DeleteMode mode) { + MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj, + uint32_t key, + JSReceiver::DeleteMode mode) { // External arrays always ignore deletes. return obj->GetHeap()->true_value(); } @@ -1002,10 +1263,11 @@ class DictionaryElementsAccessor // Adjusts the length of the dictionary backing store and returns the new // length according to ES5 section 15.4.5.2 behavior. - static MaybeObject* SetLengthWithoutNormalize(SeededNumberDictionary* dict, - JSArray* array, - Object* length_object, - uint32_t length) { + MUST_USE_RESULT static MaybeObject* SetLengthWithoutNormalize( + SeededNumberDictionary* dict, + JSArray* array, + Object* length_object, + uint32_t length) { if (length == 0) { // If the length of a slow array is reset to zero, we clear // the array and flush backing storage. 
This has the added @@ -1057,9 +1319,10 @@ class DictionaryElementsAccessor return length_object; } - static MaybeObject* DeleteCommon(JSObject* obj, - uint32_t key, - JSReceiver::DeleteMode mode) { + MUST_USE_RESULT static MaybeObject* DeleteCommon( + JSObject* obj, + uint32_t key, + JSReceiver::DeleteMode mode) { Isolate* isolate = obj->GetIsolate(); Heap* heap = isolate->heap(); FixedArray* backing_store = FixedArray::cast(obj->elements()); @@ -1102,20 +1365,24 @@ class DictionaryElementsAccessor return heap->true_value(); } - static MaybeObject* CopyElementsImpl(FixedArrayBase* from, - uint32_t from_start, - FixedArrayBase* to, - ElementsKind to_kind, - uint32_t to_start, - int copy_size) { + MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from, + uint32_t from_start, + FixedArrayBase* to, + ElementsKind to_kind, + uint32_t to_start, + int packed_size, + int copy_size) { switch (to_kind) { - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: CopyDictionaryToObjectElements( SeededNumberDictionary::cast(from), from_start, FixedArray::cast(to), to_kind, to_start, copy_size); return from; case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: CopyDictionaryToDoubleElements( SeededNumberDictionary::cast(from), from_start, FixedDoubleArray::cast(to), to_start, copy_size); @@ -1131,16 +1398,17 @@ class DictionaryElementsAccessor friend class ElementsAccessorBase >; - virtual MaybeObject* Delete(JSObject* obj, - uint32_t key, - JSReceiver::DeleteMode mode) { + MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj, + uint32_t key, + JSReceiver::DeleteMode mode) { return DeleteCommon(obj, key, mode); } - static MaybeObject* GetImpl(Object* receiver, - JSObject* obj, - uint32_t key, - SeededNumberDictionary* backing_store) { + MUST_USE_RESULT static MaybeObject* GetImpl( + Object* receiver, + JSObject* obj, + uint32_t key, + SeededNumberDictionary* backing_store) { int entry = backing_store->FindEntry(key); if (entry != SeededNumberDictionary::kNotFound) { Object* element = backing_store->ValueAt(entry); @@ -1186,10 +1454,10 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase< NonStrictArgumentsElementsAccessor, ElementsKindTraits >; - static MaybeObject* GetImpl(Object* receiver, - JSObject* obj, - uint32_t key, - FixedArray* parameter_map) { + MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver, + JSObject* obj, + uint32_t key, + FixedArray* parameter_map) { Object* probe = GetParameterMapArg(obj, parameter_map, key); if (!probe->IsTheHole()) { Context* context = Context::cast(parameter_map->get(0)); @@ -1216,18 +1484,19 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase< } } - static MaybeObject* SetLengthImpl(JSObject* obj, - Object* length, - FixedArray* parameter_map) { + MUST_USE_RESULT static MaybeObject* SetLengthImpl( + JSObject* obj, + Object* length, + FixedArray* parameter_map) { // TODO(mstarzinger): This was never implemented but will be used once we // correctly implement [[DefineOwnProperty]] on arrays. 
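These hunks annotate every allocating entry point with MUST_USE_RESULT. A compiler-level sketch of what such a macro buys, assuming a GCC-compatible compiler (V8's own macro is defined elsewhere in this tree, not here):

    #include <cstdio>

    #if defined(__GNUC__)
    #define MUST_USE_RESULT __attribute__((warn_unused_result))
    #else
    #define MUST_USE_RESULT
    #endif

    // Minimal stand-in for MaybeObject: a result that may be a failure.
    struct MaybeResult {
      bool is_failure;
      int value;
    };

    MUST_USE_RESULT MaybeResult TryAllocate(int size) {
      MaybeResult r = { size > 1024, size };  // pretend large requests fail
      return r;
    }

    int main() {
      // TryAllocate(2048);  // would now draw an unused-result warning
      MaybeResult r = TryAllocate(16);
      if (!r.is_failure) std::printf("allocated %d\n", r.value);
      return 0;
    }

Dropping a MaybeObject-style failure on the floor thus becomes a compile-time diagnostic instead of a silent GC-safety bug.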
UNIMPLEMENTED(); return obj; } - virtual MaybeObject* Delete(JSObject* obj, - uint32_t key, - JSReceiver::DeleteMode mode) { + MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj, + uint32_t key, + JSReceiver::DeleteMode mode) { FixedArray* parameter_map = FixedArray::cast(obj->elements()); Object* probe = GetParameterMapArg(obj, parameter_map, key); if (!probe->IsTheHole()) { @@ -1240,18 +1509,22 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase< if (arguments->IsDictionary()) { return DictionaryElementsAccessor::DeleteCommon(obj, key, mode); } else { - return FastObjectElementsAccessor::DeleteCommon(obj, key); + // It's difficult to access the version of DeleteCommon that is declared + // in the templatized super class, call the concrete implementation in + // the class for the most generalized ElementsKind subclass. + return FastHoleyObjectElementsAccessor::DeleteCommon(obj, key, mode); } } return obj->GetHeap()->true_value(); } - static MaybeObject* CopyElementsImpl(FixedArrayBase* from, - uint32_t from_start, - FixedArrayBase* to, - ElementsKind to_kind, - uint32_t to_start, - int copy_size) { + MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from, + uint32_t from_start, + FixedArrayBase* to, + ElementsKind to_kind, + uint32_t to_start, + int packed_size, + int copy_size) { FixedArray* parameter_map = FixedArray::cast(from); FixedArray* arguments = FixedArray::cast(parameter_map->get(1)); ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments); @@ -1304,7 +1577,7 @@ ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) { if (array->IsDictionary()) { return elements_accessors_[DICTIONARY_ELEMENTS]; } else { - return elements_accessors_[FAST_ELEMENTS]; + return elements_accessors_[FAST_HOLEY_ELEMENTS]; } case EXTERNAL_BYTE_ARRAY_TYPE: return elements_accessors_[EXTERNAL_BYTE_ELEMENTS]; @@ -1332,18 +1605,8 @@ ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) { void ElementsAccessor::InitializeOncePerProcess() { - static struct ConcreteElementsAccessors { -#define ACCESSOR_STRUCT(Class, Kind, Store) Class* Kind##_handler; - ELEMENTS_LIST(ACCESSOR_STRUCT) -#undef ACCESSOR_STRUCT - } element_accessors = { -#define ACCESSOR_INIT(Class, Kind, Store) new Class(#Kind), - ELEMENTS_LIST(ACCESSOR_INIT) -#undef ACCESSOR_INIT - }; - static ElementsAccessor* accessor_array[] = { -#define ACCESSOR_ARRAY(Class, Kind, Store) element_accessors.Kind##_handler, +#define ACCESSOR_ARRAY(Class, Kind, Store) new Class(#Kind), ELEMENTS_LIST(ACCESSOR_ARRAY) #undef ACCESSOR_ARRAY }; @@ -1355,9 +1618,17 @@ void ElementsAccessor::InitializeOncePerProcess() { } +void ElementsAccessor::TearDown() { +#define ACCESSOR_DELETE(Class, Kind, Store) delete elements_accessors_[Kind]; + ELEMENTS_LIST(ACCESSOR_DELETE) +#undef ACCESSOR_DELETE + elements_accessors_ = NULL; +} + + template -MaybeObject* ElementsAccessorBase:: +MUST_USE_RESULT MaybeObject* ElementsAccessorBase:: SetLengthImpl(JSObject* obj, Object* length, typename ElementsKindTraits::BackingStore* backing_store) { diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h index ff97c08324..822fca50ee 100644 --- a/deps/v8/src/elements.h +++ b/deps/v8/src/elements.h @@ -28,6 +28,7 @@ #ifndef V8_ELEMENTS_H_ #define V8_ELEMENTS_H_ +#include "elements-kind.h" #include "objects.h" #include "heap.h" #include "isolate.h" @@ -45,6 +46,10 @@ class ElementsAccessor { virtual ElementsKind kind() const = 0; const char* name() const { return name_; } + // Checks the elements of an 
object for consistency, asserting when a problem + // is found. + virtual void Validate(JSObject* obj) = 0; + // Returns true if a holder contains an element with the specified key // without iterating up the prototype chain. The caller can optionally pass // in the backing store to use for the check, which must be compatible with @@ -60,18 +65,19 @@ class ElementsAccessor { // can optionally pass in the backing store to use for the check, which must // be compatible with the ElementsKind of the ElementsAccessor. If // backing_store is NULL, the holder->elements() is used as the backing store. - virtual MaybeObject* Get(Object* receiver, - JSObject* holder, - uint32_t key, - FixedArrayBase* backing_store = NULL) = 0; + MUST_USE_RESULT virtual MaybeObject* Get( + Object* receiver, + JSObject* holder, + uint32_t key, + FixedArrayBase* backing_store = NULL) = 0; // Modifies the length data property as specified for JSArrays and resizes the // underlying backing store accordingly. The method honors the semantics of // changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. array that // have non-deletable elements can only be shrunk to the size of highest // element that is non-deletable. - virtual MaybeObject* SetLength(JSArray* holder, - Object* new_length) = 0; + MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* holder, + Object* new_length) = 0; // Modifies both the length and capacity of a JSArray, resizing the underlying // backing store as necessary. This method does NOT honor the semantics of @@ -79,14 +85,14 @@ class ElementsAccessor { // elements. This method should only be called for array expansion OR by // runtime JavaScript code that use InternalArrays and don't care about // EcmaScript 5.1 semantics. - virtual MaybeObject* SetCapacityAndLength(JSArray* array, - int capacity, - int length) = 0; + MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(JSArray* array, + int capacity, + int length) = 0; // Deletes an element in an object, returning a new elements backing store. - virtual MaybeObject* Delete(JSObject* holder, - uint32_t key, - JSReceiver::DeleteMode mode) = 0; + MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* holder, + uint32_t key, + JSReceiver::DeleteMode mode) = 0; // If kCopyToEnd is specified as the copy_size to CopyElements, it copies all // of elements from source after source_start to the destination array. @@ -101,26 +107,28 @@ class ElementsAccessor { // the source JSObject or JSArray in source_holder. If the holder's backing // store is available, it can be passed in source and source_holder is // ignored. 
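ELEMENTS_LIST drives both InitializeOncePerProcess (one `new Class(#Kind)` per entry) and the new TearDown (one delete per entry). A compressed standalone sketch of that X-macro technique, using a hypothetical two-kind list:

    #include <cstdio>

    // One entry per kind: V(accessor class, kind enum value, backing store).
    #define ELEMENTS_LIST(V)                            \
      V(PackedSmiAccessor, FAST_SMI_ELEMENTS, int)      \
      V(HoleySmiAccessor, FAST_HOLEY_SMI_ELEMENTS, int)

    enum Kind { FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS, kKindCount };

    // Expansion: a kind-indexed table, the same trick as accessor_array[].
    static const char* kAccessorNames[kKindCount] = {
    #define ACCESSOR_NAME(Class, KindEnum, Store) #Class,
      ELEMENTS_LIST(ACCESSOR_NAME)
    #undef ACCESSOR_NAME
    };

    int main() {
      // Works only because list order matches enum order, as the comment
      // in elements.cc warns.
      for (int k = 0; k < kKindCount; ++k)
        std::printf("%d -> %s\n", k, kAccessorNames[k]);
      return 0;
    }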
- virtual MaybeObject* CopyElements(JSObject* source_holder, - uint32_t source_start, - FixedArrayBase* destination, - ElementsKind destination_kind, - uint32_t destination_start, - int copy_size, - FixedArrayBase* source = NULL) = 0; - - MaybeObject* CopyElements(JSObject* from_holder, - FixedArrayBase* to, - ElementsKind to_kind, - FixedArrayBase* from = NULL) { + MUST_USE_RESULT virtual MaybeObject* CopyElements( + JSObject* source_holder, + uint32_t source_start, + FixedArrayBase* destination, + ElementsKind destination_kind, + uint32_t destination_start, + int copy_size, + FixedArrayBase* source = NULL) = 0; + + MUST_USE_RESULT MaybeObject* CopyElements(JSObject* from_holder, + FixedArrayBase* to, + ElementsKind to_kind, + FixedArrayBase* from = NULL) { return CopyElements(from_holder, 0, to, to_kind, 0, kCopyToEndAndInitializeToHole, from); } - virtual MaybeObject* AddElementsToFixedArray(Object* receiver, - JSObject* holder, - FixedArray* to, - FixedArrayBase* from = NULL) = 0; + MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray( + Object* receiver, + JSObject* holder, + FixedArray* to, + FixedArrayBase* from = NULL) = 0; // Returns a shared ElementsAccessor for the specified ElementsKind. static ElementsAccessor* ForKind(ElementsKind elements_kind) { @@ -131,6 +139,7 @@ class ElementsAccessor { static ElementsAccessor* ForArray(FixedArrayBase* array); static void InitializeOncePerProcess(); + static void TearDown(); protected: friend class NonStrictArgumentsElementsAccessor; diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc index 9fbf329818..50d876136f 100644 --- a/deps/v8/src/extensions/externalize-string-extension.cc +++ b/deps/v8/src/extensions/externalize-string-extension.cc @@ -133,11 +133,8 @@ v8::Handle ExternalizeStringExtension::IsAscii( void ExternalizeStringExtension::Register() { - static ExternalizeStringExtension* externalize_extension = NULL; - if (externalize_extension == NULL) - externalize_extension = new ExternalizeStringExtension; - static v8::DeclareExtension externalize_extension_declaration( - externalize_extension); + static ExternalizeStringExtension externalize_extension; + static v8::DeclareExtension declaration(&externalize_extension); } } } // namespace v8::internal diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc index 573797e174..f921552aaa 100644 --- a/deps/v8/src/extensions/gc-extension.cc +++ b/deps/v8/src/extensions/gc-extension.cc @@ -46,9 +46,8 @@ v8::Handle GCExtension::GC(const v8::Arguments& args) { void GCExtension::Register() { - static GCExtension* gc_extension = NULL; - if (gc_extension == NULL) gc_extension = new GCExtension(); - static v8::DeclareExtension gc_extension_declaration(gc_extension); + static GCExtension gc_extension; + static v8::DeclareExtension declaration(&gc_extension); } } } // namespace v8::internal diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc index e8a9f26a5c..28b318a8f4 100644 --- a/deps/v8/src/factory.cc +++ b/deps/v8/src/factory.cc @@ -34,6 +34,7 @@ #include "macro-assembler.h" #include "objects.h" #include "objects-visiting.h" +#include "platform.h" #include "scopeinfo.h" namespace v8 { @@ -114,7 +115,8 @@ Handle Factory::NewObjectHashTable(int at_least_space_for) { Handle Factory::NewDescriptorArray(int number_of_descriptors) { ASSERT(0 <= number_of_descriptors); CALL_HEAP_FUNCTION(isolate(), - DescriptorArray::Allocate(number_of_descriptors), + 
DescriptorArray::Allocate(number_of_descriptors, + DescriptorArray::MAY_BE_SHARED), DescriptorArray); } @@ -291,6 +293,15 @@ Handle Factory::NewGlobalContext() { } +Handle Factory::NewModuleContext(Handle previous, + Handle scope_info) { + CALL_HEAP_FUNCTION( + isolate(), + isolate()->heap()->AllocateModuleContext(*previous, *scope_info), + Context); +} + + Handle Factory::NewFunctionContext(int length, Handle function) { CALL_HEAP_FUNCTION( @@ -324,10 +335,9 @@ Handle Factory::NewWithContext(Handle function, } -Handle Factory::NewBlockContext( - Handle function, - Handle previous, - Handle scope_info) { +Handle Factory::NewBlockContext(Handle function, + Handle previous, + Handle scope_info) { CALL_HEAP_FUNCTION( isolate(), isolate()->heap()->AllocateBlockContext(*function, @@ -487,7 +497,9 @@ Handle Factory::CopyMap(Handle src, Handle Factory::CopyMapDropTransitions(Handle src) { - CALL_HEAP_FUNCTION(isolate(), src->CopyDropTransitions(), Map); + CALL_HEAP_FUNCTION(isolate(), + src->CopyDropTransitions(DescriptorArray::MAY_BE_SHARED), + Map); } @@ -667,6 +679,43 @@ Handle Factory::NewError(const char* type, } +Handle Factory::EmergencyNewError(const char* type, + Handle args) { + const int kBufferSize = 1000; + char buffer[kBufferSize]; + size_t space = kBufferSize; + char* p = &buffer[0]; + + Vector v(buffer, kBufferSize); + OS::StrNCpy(v, type, space); + space -= Min(space, strlen(type)); + p = &buffer[kBufferSize] - space; + + for (unsigned i = 0; i < ARRAY_SIZE(args); i++) { + if (space > 0) { + *p++ = ' '; + space--; + if (space > 0) { + MaybeObject* maybe_arg = args->GetElement(i); + Handle arg_str(reinterpret_cast(maybe_arg)); + const char* arg = *arg_str->ToCString(); + Vector v2(p, space); + OS::StrNCpy(v2, arg, space); + space -= Min(space, strlen(arg)); + p = &buffer[kBufferSize] - space; + } + } + } + if (space > 0) { + *p = '\0'; + } else { + buffer[kBufferSize - 1] = '\0'; + } + Handle error_string = NewStringFromUtf8(CStrVector(buffer), TENURED); + return error_string; +} + + Handle Factory::NewError(const char* maker, const char* type, Handle args) { @@ -675,8 +724,9 @@ Handle Factory::NewError(const char* maker, isolate()->js_builtins_object()->GetPropertyNoExceptionThrown(*make_str)); // If the builtins haven't been properly configured yet this error // constructor may not have been defined. Bail out. - if (!fun_obj->IsJSFunction()) - return undefined_value(); + if (!fun_obj->IsJSFunction()) { + return EmergencyNewError(type, args); + } Handle fun = Handle::cast(fun_obj); Handle type_obj = LookupAsciiSymbol(type); Handle argv[] = { type_obj, args }; @@ -767,7 +817,7 @@ Handle Factory::NewFunctionWithPrototype(Handle name, instance_size != JSObject::kHeaderSize) { Handle initial_map = NewMap(type, instance_size, - FAST_SMI_ONLY_ELEMENTS); + GetInitialFastElementsKind()); function->set_initial_map(*initial_map); initial_map->set_constructor(*function); } @@ -892,7 +942,7 @@ Handle Factory::CopyAppendCallbackDescriptors( Handle key = SymbolFromString(Handle(String::cast(entry->name()))); // Check if a descriptor with this name already exists before writing. 
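EmergencyNewError, in the factory.cc hunk above, builds its message into a fixed buffer purely by space accounting: each append is clamped to the bytes remaining, and the write pointer is recomputed from the remaining space so it can never run past the buffer. The same pattern in isolation, standard C++ only:

    #include <algorithm>
    #include <cstdio>
    #include <cstring>

    int main() {
      const int kBufferSize = 32;
      char buffer[kBufferSize];
      size_t space = kBufferSize;
      char* p = buffer;

      const char* parts[] = { "TypeError", "foo", "bar" };
      for (int i = 0; i < 3; ++i) {
        if (i > 0 && space > 0) { *p++ = ' '; space--; }
        // Clamp the copy to the space left, then re-derive the cursor.
        size_t n = std::min(space, std::strlen(parts[i]));
        std::memcpy(p, parts[i], n);
        space -= n;
        p = buffer + (kBufferSize - space);
      }
      // Terminate in place, or overwrite the last byte if the buffer filled.
      if (space > 0) *p = '\0'; else buffer[kBufferSize - 1] = '\0';
      std::printf("%s\n", buffer);  // "TypeError foo bar"
      return 0;
    }

No heap allocation is involved, which is the point: this path exists for when the error constructors themselves are not usable yet.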
- if (result->LinearSearch(*key, descriptor_count) == + if (result->LinearSearch(EXPECT_UNSORTED, *key, descriptor_count) == DescriptorArray::kNotFound) { CallbacksDescriptor desc(*key, *entry, entry->property_attributes()); result->Set(descriptor_count, &desc, witness); @@ -928,6 +978,13 @@ Handle Factory::NewJSObject(Handle constructor, } +Handle Factory::NewJSModule() { + CALL_HEAP_FUNCTION( + isolate(), + isolate()->heap()->AllocateJSModule(), JSModule); +} + + Handle Factory::NewGlobalObject( Handle constructor) { CALL_HEAP_FUNCTION(isolate(), @@ -998,10 +1055,11 @@ void Factory::EnsureCanContainHeapObjectElements(Handle array) { void Factory::EnsureCanContainElements(Handle array, Handle elements, + uint32_t length, EnsureElementsMode mode) { CALL_HEAP_FUNCTION_VOID( isolate(), - array->EnsureCanContainElements(*elements, mode)); + array->EnsureCanContainElements(*elements, length, mode)); } diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h index 786d4a983a..bb435456b0 100644 --- a/deps/v8/src/factory.h +++ b/deps/v8/src/factory.h @@ -162,9 +162,12 @@ class Factory { // Create a global (but otherwise uninitialized) context. Handle NewGlobalContext(); + // Create a module context. + Handle NewModuleContext(Handle previous, + Handle scope_info); + // Create a function context. - Handle NewFunctionContext(int length, - Handle function); + Handle NewFunctionContext(int length, Handle function); // Create a catch context. Handle NewCatchContext(Handle function, @@ -177,7 +180,7 @@ class Factory { Handle previous, Handle extension); - // Create a 'block' context. + // Create a block context. Handle NewBlockContext(Handle function, Handle previous, Handle scope_info); @@ -213,9 +216,10 @@ class Factory { Handle NewJSGlobalPropertyCell( Handle value); - Handle NewMap(InstanceType type, - int instance_size, - ElementsKind elements_kind = FAST_ELEMENTS); + Handle NewMap( + InstanceType type, + int instance_size, + ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND); Handle NewFunctionPrototype(Handle function); @@ -262,14 +266,18 @@ class Factory { // runtime. Handle NewJSObjectFromMap(Handle map); + // JS modules are pretenured. + Handle NewJSModule(); + // JS arrays are pretenured when allocated by the parser. 
-  Handle<JSArray> NewJSArray(int capacity,
-                             ElementsKind elements_kind = FAST_ELEMENTS,
-                             PretenureFlag pretenure = NOT_TENURED);
+  Handle<JSArray> NewJSArray(
+      int capacity,
+      ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+      PretenureFlag pretenure = NOT_TENURED);
 
   Handle<JSArray> NewJSArrayWithElements(
       Handle<FixedArrayBase> elements,
-      ElementsKind elements_kind = FAST_ELEMENTS,
+      ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
       PretenureFlag pretenure = NOT_TENURED);
 
   void SetElementsCapacityAndLength(Handle<JSArray> array,
@@ -281,6 +289,7 @@ class Factory {
   void EnsureCanContainHeapObjectElements(Handle<JSArray> array);
   void EnsureCanContainElements(Handle<JSArray> array,
                                 Handle<FixedArrayBase> elements,
+                                uint32_t length,
                                 EnsureElementsMode mode);
 
   Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
@@ -329,6 +338,7 @@ class Factory {
   Handle<Object> NewError(const char* maker, const char* type,
                           Handle<JSArray> args);
+  Handle<String> EmergencyNewError(const char* type, Handle<JSArray> args);
   Handle<Object> NewError(const char* maker, const char* type,
                           Vector< Handle<Object> > args);
   Handle<Object> NewError(const char* type,
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 75697a8906..2b4c53cd2d 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -132,6 +132,10 @@ public:
 // Flags for language modes and experimental language features.
 DEFINE_bool(use_strict, false, "enforce strict mode")
 
+DEFINE_bool(es5_readonly, false,
+            "activate correct semantics for inheriting readonliness")
+DEFINE_bool(es52_globals, false,
+            "activate new semantics for global var declarations")
 DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
 DEFINE_bool(harmony_scoping, false, "enable harmony block scoping")
@@ -148,6 +152,7 @@ DEFINE_implication(harmony, harmony_collections)
 DEFINE_implication(harmony_modules, harmony_scoping)
 
 // Flags for experimental implementation features.
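flag-definitions.h is itself an X-macro header: the file is re-included with the DEFINE_* macros redefined to emit declarations, definitions, or registration code from one list. A compressed single-file sketch of the mechanism, with hypothetical flags:

    #include <cstdio>

    #define FLAG_LIST(V)                                          \
      V(packed_arrays, false, "optimizes arrays that have no holes") \
      V(smi_only_arrays, true, "tracks arrays with only smi values")

    // Pass 1: emit storage for each flag.
    #define DEFINE_bool(name, default_value, comment) \
      bool FLAG_##name = default_value;
    FLAG_LIST(DEFINE_bool)
    #undef DEFINE_bool

    int main() {
      // Pass 2: re-expand the same list, this time printing name/value pairs.
    #define DEFINE_bool(name, default_value, comment) \
      std::printf("--%s=%d  (%s)\n", #name, FLAG_##name, comment);
      FLAG_LIST(DEFINE_bool)
    #undef DEFINE_bool
      return 0;
    }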
+DEFINE_bool(packed_arrays, false, "optimizes arrays that have no holes") DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values") DEFINE_bool(clever_optimizations, true, @@ -165,7 +170,12 @@ DEFINE_bool(eliminate_dead_phis, true, "eliminate dead phis") DEFINE_bool(use_gvn, true, "use hydrogen global value numbering") DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing") DEFINE_bool(use_inlining, true, "use function inlining") -DEFINE_bool(limit_inlining, true, "limit code size growth from inlining") +DEFINE_int(max_inlined_source_size, 600, + "maximum source size in bytes considered for a single inlining") +DEFINE_int(max_inlined_nodes, 196, + "maximum number of AST nodes considered for a single inlining") +DEFINE_int(max_inlined_nodes_cumulative, 196, + "maximum cumulative number of AST nodes considered for inlining") DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion") DEFINE_bool(collect_megamorphic_maps_from_stub_cache, true, @@ -188,6 +198,10 @@ DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing") DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases") DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining") DEFINE_bool(use_osr, true, "use on-stack replacement") +DEFINE_bool(array_bounds_checks_elimination, false, + "perform array bounds checks elimination") +DEFINE_bool(array_index_dehoisting, false, + "perform array index dehoisting") DEFINE_bool(trace_osr, false, "trace on-stack replacement") DEFINE_int(stress_runs, 0, "number of stress runs") diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc index 0571a813f5..b7e028634f 100644 --- a/deps/v8/src/frames.cc +++ b/deps/v8/src/frames.cc @@ -469,6 +469,20 @@ StackFrame::Type StackFrame::GetCallerState(State* state) const { } +Address StackFrame::UnpaddedFP() const { +#if defined(V8_TARGET_ARCH_IA32) + if (!is_optimized()) return fp(); + int32_t alignment_state = Memory::int32_at( + fp() + JavaScriptFrameConstants::kDynamicAlignmentStateOffset); + + return (alignment_state == kAlignmentPaddingPushed) ? 
+ (fp() + kPointerSize) : fp(); +#else + return fp(); +#endif +} + + Code* EntryFrame::unchecked_code() const { return HEAP->raw_unchecked_js_entry_code(); } @@ -1359,34 +1373,28 @@ InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* // ------------------------------------------------------------------------- int NumRegs(RegList reglist) { - int n = 0; - while (reglist != 0) { - n++; - reglist &= reglist - 1; // clear one bit - } - return n; + return CompilerIntrinsics::CountSetBits(reglist); } struct JSCallerSavedCodeData { - JSCallerSavedCodeData() { - int i = 0; - for (int r = 0; r < kNumRegs; r++) - if ((kJSCallerSaved & (1 << r)) != 0) - reg_code[i++] = r; - - ASSERT(i == kNumJSCallerSaved); - } int reg_code[kNumJSCallerSaved]; }; +JSCallerSavedCodeData caller_saved_code_data; -static LazyInstance::type caller_saved_code_data = - LAZY_INSTANCE_INITIALIZER; +void SetUpJSCallerSavedCodeData() { + int i = 0; + for (int r = 0; r < kNumRegs; r++) + if ((kJSCallerSaved & (1 << r)) != 0) + caller_saved_code_data.reg_code[i++] = r; + + ASSERT(i == kNumJSCallerSaved); +} int JSCallerSavedCode(int n) { ASSERT(0 <= n && n < kNumJSCallerSaved); - return caller_saved_code_data.Get().reg_code[n]; + return caller_saved_code_data.reg_code[n]; } @@ -1400,11 +1408,11 @@ class field##_Wrapper : public ZoneObject { \ STACK_FRAME_TYPE_LIST(DEFINE_WRAPPER) #undef DEFINE_WRAPPER -static StackFrame* AllocateFrameCopy(StackFrame* frame) { +static StackFrame* AllocateFrameCopy(StackFrame* frame, Zone* zone) { #define FRAME_TYPE_CASE(type, field) \ case StackFrame::type: { \ field##_Wrapper* wrapper = \ - new field##_Wrapper(*(reinterpret_cast(frame))); \ + new(zone) field##_Wrapper(*(reinterpret_cast(frame))); \ return &wrapper->frame_; \ } @@ -1416,11 +1424,11 @@ static StackFrame* AllocateFrameCopy(StackFrame* frame) { return NULL; } -Vector CreateStackMap() { - ZoneList list(10); +Vector CreateStackMap(Zone* zone) { + ZoneList list(10, zone); for (StackFrameIterator it; !it.done(); it.Advance()) { - StackFrame* frame = AllocateFrameCopy(it.frame()); - list.Add(frame); + StackFrame* frame = AllocateFrameCopy(it.frame(), zone); + list.Add(frame, zone); } return list.ToVector(); } diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h index 9071555197..2d45932d09 100644 --- a/deps/v8/src/frames.h +++ b/deps/v8/src/frames.h @@ -40,6 +40,8 @@ typedef uint32_t RegList; // Get the number of registers in a given register list. int NumRegs(RegList list); +void SetUpJSCallerSavedCodeData(); + // Return the code of the n-th saved register available to JavaScript. int JSCallerSavedCode(int n); @@ -204,11 +206,19 @@ class StackFrame BASE_EMBEDDED { Address fp() const { return state_.fp; } Address caller_sp() const { return GetCallerStackPointer(); } + // If this frame is optimized and was dynamically aligned return its old + // unaligned frame pointer. When the frame is deoptimized its FP will shift + // up one word and become unaligned. + Address UnpaddedFP() const; + Address pc() const { return *pc_address(); } void set_pc(Address pc) { *pc_address() = pc; } virtual void SetCallerFp(Address caller_fp) = 0; + // Manually changes value of fp in this object. + void UpdateFp(Address fp) { state_.fp = fp; } + Address* pc_address() const { return state_.pc_address; } // Get the id of this stack frame. @@ -883,7 +893,7 @@ class StackFrameLocator BASE_EMBEDDED { // Reads all frames on the current stack and copies them into the current // zone memory. 
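The NumRegs rewrite above delegates to CompilerIntrinsics::CountSetBits instead of the hand-rolled loop. Both compute a population count; a quick equivalence check, assuming a GCC-compatible compiler for the builtin:

    #include <cassert>
    #include <stdint.h>

    typedef uint32_t RegList;

    // The loop NumRegs used before this patch: Kernighan's trick clears the
    // lowest set bit on every iteration.
    int CountSetBitsLoop(RegList reglist) {
      int n = 0;
      while (reglist != 0) {
        n++;
        reglist &= reglist - 1;  // clear one bit
      }
      return n;
    }

    int main() {
      // An intrinsic like CountSetBits boils down to a popcount such as this.
      for (RegList r = 0; r < 1024; ++r) {
        assert(CountSetBitsLoop(r) == __builtin_popcount(r));
      }
      return 0;
    }

The loop is O(set bits); the intrinsic is typically a single instruction, which matters for a helper called on every frame walk.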
-Vector CreateStackMap(); +Vector CreateStackMap(Zone* zone); } } // namespace v8::internal diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc index 44fe011a4e..4da4e531ee 100644 --- a/deps/v8/src/full-codegen.cc +++ b/deps/v8/src/full-codegen.cc @@ -303,7 +303,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) { masm.positions_recorder()->StartGDBJITLineInfoRecording(); #endif - FullCodeGenerator cgen(&masm, info); + FullCodeGenerator cgen(&masm, info, isolate->zone()); cgen.Generate(); if (cgen.HasStackOverflow()) { ASSERT(!isolate->has_pending_exception()); @@ -316,7 +316,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) { code->set_optimizable(info->IsOptimizable() && !info->function()->flags()->Contains(kDontOptimize) && info->function()->scope()->AllowsLazyRecompilation()); - code->set_self_optimization_header(cgen.has_self_optimization_header_); cgen.PopulateDeoptimizationData(code); cgen.PopulateTypeFeedbackInfo(code); cgen.PopulateTypeFeedbackCells(code); @@ -332,9 +331,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) { code->set_stack_check_table_offset(table_offset); CodeGenerator::PrintCode(code, info); info->SetCode(code); // May be an empty handle. - if (!code.is_null()) { - isolate->runtime_profiler()->NotifyCodeGenerated(code->instruction_size()); - } #ifdef ENABLE_GDB_JIT_INTERFACE if (FLAG_gdbjit && !code.is_null()) { GDBJITLineInfo* lineinfo = @@ -444,14 +440,14 @@ void FullCodeGenerator::PrepareForBailoutForId(unsigned id, State state) { } } #endif // DEBUG - bailout_entries_.Add(entry); + bailout_entries_.Add(entry, zone()); } void FullCodeGenerator::RecordTypeFeedbackCell( unsigned id, Handle cell) { TypeFeedbackCellEntry entry = { id, cell }; - type_feedback_cells_.Add(entry); + type_feedback_cells_.Add(entry, zone()); } @@ -460,7 +456,7 @@ void FullCodeGenerator::RecordStackCheck(unsigned ast_id) { // state. ASSERT(masm_->pc_offset() > 0); BailoutEntry entry = { ast_id, static_cast(masm_->pc_offset()) }; - stack_checks_.Add(entry); + stack_checks_.Add(entry, zone()); } @@ -573,88 +569,91 @@ void FullCodeGenerator::DoTest(const TestContext* context) { void FullCodeGenerator::VisitDeclarations( ZoneList* declarations) { - int save_global_count = global_count_; - global_count_ = 0; + ZoneList >* saved_globals = globals_; + ZoneList > inner_globals(10, zone()); + globals_ = &inner_globals; AstVisitor::VisitDeclarations(declarations); - - // Batch declare global functions and variables. - if (global_count_ > 0) { - Handle array = - isolate()->factory()->NewFixedArray(2 * global_count_, TENURED); - int length = declarations->length(); - for (int j = 0, i = 0; i < length; i++) { - Declaration* decl = declarations->at(i); - Variable* var = decl->proxy()->var(); - - if (var->IsUnallocated()) { - array->set(j++, *(var->name())); - FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration(); - if (fun_decl == NULL) { - if (var->binding_needs_init()) { - // In case this binding needs initialization use the hole. - array->set_the_hole(j++); - } else { - array->set_undefined(j++); - } - } else { - Handle function = - Compiler::BuildFunctionInfo(fun_decl->fun(), script()); - // Check for stack-overflow exception. - if (function.is_null()) { - SetStackOverflow(); - return; - } - array->set(j++, *function); - } - } - } + if (!globals_->is_empty()) { // Invoke the platform-dependent code generator to do the actual // declaration the global functions and variables. 
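VisitDeclarations now accumulates pending globals into a zone list and flushes them with a single DeclareGlobals call instead of counting them up front. The shape of that batching, reduced to standard C++ with hypothetical names:

    #include <cstdio>
    #include <string>
    #include <vector>

    // Collect every pending declaration, then make one batched call,
    // mirroring the collect-then-DeclareGlobals(array) structure above.
    struct Declarations {
      std::vector<std::string> pending;

      void Declare(const std::string& name) { pending.push_back(name); }

      void Flush() {
        if (pending.empty()) return;
        // One "runtime call" covers the whole batch.
        std::printf("DeclareGlobals(%zu entries)\n", pending.size());
        pending.clear();
      }
    };

    int main() {
      Declarations d;
      d.Declare("x");
      d.Declare("f");
      d.Flush();  // a single batched call instead of two separate ones
      return 0;
    }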
@@ -573,88 +569,91 @@ void FullCodeGenerator::DoTest(const TestContext* context) {
 
 void FullCodeGenerator::VisitDeclarations(
     ZoneList<Declaration*>* declarations) {
-  int save_global_count = global_count_;
-  global_count_ = 0;
+  ZoneList<Handle<Object> >* saved_globals = globals_;
+  ZoneList<Handle<Object> > inner_globals(10, zone());
+  globals_ = &inner_globals;
 
   AstVisitor::VisitDeclarations(declarations);
-
-  // Batch declare global functions and variables.
-  if (global_count_ > 0) {
-    Handle<FixedArray> array =
-        isolate()->factory()->NewFixedArray(2 * global_count_, TENURED);
-    int length = declarations->length();
-    for (int j = 0, i = 0; i < length; i++) {
-      Declaration* decl = declarations->at(i);
-      Variable* var = decl->proxy()->var();
-
-      if (var->IsUnallocated()) {
-        array->set(j++, *(var->name()));
-        FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration();
-        if (fun_decl == NULL) {
-          if (var->binding_needs_init()) {
-            // In case this binding needs initialization use the hole.
-            array->set_the_hole(j++);
-          } else {
-            array->set_undefined(j++);
-          }
-        } else {
-          Handle<SharedFunctionInfo> function =
-              Compiler::BuildFunctionInfo(fun_decl->fun(), script());
-          // Check for stack-overflow exception.
-          if (function.is_null()) {
-            SetStackOverflow();
-            return;
-          }
-          array->set(j++, *function);
-        }
-      }
-    }
+  if (!globals_->is_empty()) {
     // Invoke the platform-dependent code generator to do the actual
     // declaration of the global functions and variables.
+    Handle<FixedArray> array =
+        isolate()->factory()->NewFixedArray(globals_->length(), TENURED);
+    for (int i = 0; i < globals_->length(); ++i)
+      array->set(i, *globals_->at(i));
     DeclareGlobals(array);
   }
 
-  global_count_ = save_global_count;
-}
-
-
-void FullCodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
-  EmitDeclaration(decl->proxy(), decl->mode(), NULL);
+  globals_ = saved_globals;
 }
 
 
-void FullCodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
-  EmitDeclaration(decl->proxy(), decl->mode(), decl->fun());
-}
-
-
-void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* decl) {
-  EmitDeclaration(decl->proxy(), decl->mode(), NULL);
-}
-
-
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* decl) {
-  EmitDeclaration(decl->proxy(), decl->mode(), NULL);
-}
+void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
+  Handle<JSModule> instance = module->interface()->Instance();
+  ASSERT(!instance.is_null());
+  // Allocate a module context statically.
+  Block* block = module->body();
+  Scope* saved_scope = scope();
+  scope_ = block->scope();
+  Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
+
+  // Generate code for module creation and linking.
+  Comment cmnt(masm_, "[ ModuleLiteral");
+  SetStatementPosition(block);
+
+  if (scope_info->HasContext()) {
+    // Set up module context.
+    __ Push(scope_info);
+    __ Push(instance);
+    __ CallRuntime(Runtime::kPushModuleContext, 2);
+    StoreToFrameField(
+        StandardFrameConstants::kContextOffset, context_register());
+  }
 
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* decl) {
-  // TODO(rossberg)
-}
+  {
+    Comment cmnt(masm_, "[ Declarations");
+    VisitDeclarations(scope_->declarations());
+  }
 
+  scope_ = saved_scope;
+  if (scope_info->HasContext()) {
+    // Pop module context.
+    LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+    // Update local stack frame context field.
+    StoreToFrameField(
+        StandardFrameConstants::kContextOffset, context_register());
+  }
 
-void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
-  // TODO(rossberg)
+  // Populate module instance object.
+  const PropertyAttributes attr =
+      static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE | DONT_ENUM);
+  for (Interface::Iterator it = module->interface()->iterator();
+       !it.done(); it.Advance()) {
+    if (it.interface()->IsModule()) {
+      Handle<Object> value = it.interface()->Instance();
+      ASSERT(!value.is_null());
+      JSReceiver::SetProperty(instance, it.name(), value, attr, kStrictMode);
+    } else {
+      // TODO(rossberg): set proper getters instead of undefined...
+      // instance->DefineAccessor(*it.name(), ACCESSOR_GETTER, *getter, attr);
+      Handle<Object> value(isolate()->heap()->undefined_value());
+      JSReceiver::SetProperty(instance, it.name(), value, attr, kStrictMode);
+    }
+  }
+  USE(instance->PreventExtensions());
 }
 
 
 void FullCodeGenerator::VisitModuleVariable(ModuleVariable* module) {
-  // TODO(rossberg)
+  // Nothing to do.
+  // The instance object is resolved statically through the module's interface.
 }
 
 
 void FullCodeGenerator::VisitModulePath(ModulePath* module) {
-  // TODO(rossberg)
+  // Nothing to do.
+  // The instance object is resolved statically through the module's interface.
 }
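In the new VisitModuleLiteral above, every export is written with READ_ONLY | DONT_DELETE | DONT_ENUM and the instance is then sealed via PreventExtensions, so module instance objects end up effectively frozen. The static_cast in that hunk is needed because OR-ing two enumerators in C++ yields an int, not the enum type. Below is a compilable sketch of the mask; the enumerator values are assumptions chosen as power-of-two flags, and only the combination logic is the point.

    // Sketch: combining property-attribute flags as in the hunk above.
    enum PropertyAttributes {
      NONE        = 0,
      READ_ONLY   = 1 << 0,  // flag values assumed, not taken from V8 headers
      DONT_ENUM   = 1 << 1,
      DONT_DELETE = 1 << 2
    };

    int main() {
      // READ_ONLY | DONT_DELETE | DONT_ENUM has type int, so it must be cast
      // back to the enum type before being passed around as an attribute set.
      const PropertyAttributes attr = static_cast<PropertyAttributes>(
          READ_ONLY | DONT_DELETE | DONT_ENUM);
      // Equivalent JS shape: Object.defineProperty with writable, enumerable
      // and configurable all false.
      return (attr & READ_ONLY) && (attr & DONT_ENUM) && (attr & DONT_DELETE)
                 ? 0 : 1;
    }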
@@ -916,9 +915,9 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
   Scope* saved_scope = scope();
   // Push a block context when entering a block with block scoped variables.
-  if (stmt->block_scope() != NULL) {
+  if (stmt->scope() != NULL) {
     { Comment cmnt(masm_, "[ Extend block context");
-      scope_ = stmt->block_scope();
+      scope_ = stmt->scope();
       Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
       int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
       __ Push(scope_info);
@@ -945,7 +944,7 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
 
   // Pop block context if necessary.
-  if (stmt->block_scope() != NULL) {
+  if (stmt->scope() != NULL) {
     LoadContextField(context_register(), Context::PREVIOUS_INDEX);
     // Update local stack frame context field.
     StoreToFrameField(StandardFrameConstants::kContextOffset,
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 58d59862a5..928de47b31 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -77,28 +77,25 @@ class FullCodeGenerator: public AstVisitor {
     TOS_REG
   };
 
-  FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info)
+  FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info,
+                    Zone* zone)
      : masm_(masm),
        info_(info),
        scope_(info->scope()),
        nesting_stack_(NULL),
        loop_depth_(0),
-        global_count_(0),
+        globals_(NULL),
        context_(NULL),
        bailout_entries_(info->HasDeoptimizationSupport()
-                         ? info->function()->ast_node_count() : 0),
-        stack_checks_(2),  // There's always at least one.
+                         ? info->function()->ast_node_count() : 0, zone),
+        stack_checks_(2, zone),  // There's always at least one.
        type_feedback_cells_(info->HasDeoptimizationSupport()
-                             ? info->function()->ast_node_count() : 0),
+                             ? info->function()->ast_node_count() : 0, zone),
        ic_total_count_(0),
-        has_self_optimization_header_(false) { }
+        zone_(zone) { }
 
   static bool MakeCode(CompilationInfo* info);
 
-  // Returns the platform-specific size in bytes of the self-optimization
-  // header.
-  static int self_optimization_header_size();
-
   // Encode state and pc-offset as a BitField<type, start, size>.
   // Only use 30 bits because we encode the result as a smi.
   class StateField : public BitField<State, 0, 1> { };
@@ -113,6 +110,8 @@ class FullCodeGenerator: public AstVisitor {
     return NULL;
   }
 
+  Zone* zone() const { return zone_; }
+
  private:
   class Breakable;
   class Iteration;
@@ -207,7 +206,7 @@ class FullCodeGenerator: public AstVisitor {
     virtual ~NestedBlock() {}
 
     virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
-      if (statement()->AsBlock()->block_scope() != NULL) {
+      if (statement()->AsBlock()->scope() != NULL) {
        ++(*context_length);
      }
      return previous_;
@@ -241,7 +240,7 @@ class FullCodeGenerator: public AstVisitor {
   // The finally block of a try/finally statement.
   class Finally : public NestedStatement {
    public:
-    static const int kElementCount = 2;
+    static const int kElementCount = 5;
 
     explicit Finally(FullCodeGenerator* codegen) : NestedStatement(codegen) { }
     virtual ~Finally() {}
@@ -418,12 +417,9 @@ class FullCodeGenerator: public AstVisitor {
                          Label* if_true,
                          Label* if_false);
 
-  // Platform-specific code for a variable, constant, or function
-  // declaration.  Functions have an initial value.
-  // Increments global_count_ for unallocated variables.
-  void EmitDeclaration(VariableProxy* proxy,
-                       VariableMode mode,
-                       FunctionLiteral* function);
+  // If enabled, emit debug code for checking that the current context is
+  // neither a with nor a catch context.
+  void EmitDebugCheckDeclarationContext(Variable* variable);
 
   // Platform-specific code for checking the stack limit at the back edge of
   // a loop.
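The StateField context line in the hunk above relies on V8's BitField<type, start, size> helper: full-codegen packs the bailout state together with a pc offset into 30 bits so the pair still fits in a smi. Here is a minimal sketch of such a helper and the packing; the PcField layout shown is an assumption, since this hunk's context only shows StateField.

    // Sketch: a BitField<type, start, size> helper and smi-friendly packing.
    #include <cstdint>

    template <class T, int shift, int size>
    class BitField {
     public:
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        // Assumes value fits in `size` bits; V8 asserts this in debug builds.
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> shift);
      }
    };

    enum State { NO_REGISTERS, TOS_REG };
    typedef BitField<State, 0, 1> StateField;   // matches the hunk above
    typedef BitField<uint32_t, 1, 29> PcField;  // layout assumed: 30 bits total

    int main() {
      // 1 state bit + 29 pc-offset bits = 30 bits, small enough for a smi.
      uint32_t packed = StateField::encode(TOS_REG) | PcField::encode(0x12345u);
      return (StateField::decode(packed) == TOS_REG &&
              PcField::decode(packed) == 0x12345u) ? 0 : 1;
    }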
@@ -553,12 +549,8 @@ class FullCodeGenerator: public AstVisitor { Handle