
deps: update V8 to 5.7.492.69

PR-URL: https://github.com/nodejs/node/pull/11752
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Michaël Zasso
commit c459d8ea5d

Changed files (lines changed per file):

   1.     1  deps/v8/.clang-format
   2.    31  deps/v8/.gn
   3.     2  deps/v8/AUTHORS
   4.   197  deps/v8/BUILD.gn
   5.  2490  deps/v8/ChangeLog
   6.    22  deps/v8/DEPS
   7.     7  deps/v8/OWNERS
   8.    15  deps/v8/PRESUBMIT.py
   9.     6  deps/v8/build_overrides/build.gni
  10.    15  deps/v8/build_overrides/v8.gni
  11.     5  deps/v8/gni/isolate.gni
  12.    26  deps/v8/gni/v8.gni
  13.     6  deps/v8/gypfiles/all.gyp
  14.     1  deps/v8/gypfiles/get_landmines.py
  15.     2  deps/v8/gypfiles/toolchain.gypi
  16.    97  deps/v8/gypfiles/win/msvs_dependencies.isolate
  17.    11  deps/v8/include/libplatform/libplatform.h
  18.    80  deps/v8/include/v8-debug.h
  19.     6  deps/v8/include/v8-inspector.h
  20.    33  deps/v8/include/v8-version-string.h
  21.     6  deps/v8/include/v8-version.h
  22.   465  deps/v8/include/v8.h
  23.     1  deps/v8/infra/config/cq.cfg
  24.    15  deps/v8/infra/mb/mb_config.pyl
  25.     2  deps/v8/src/DEPS
  26.   101  deps/v8/src/accessors.cc
  27.     1  deps/v8/src/accessors.h
  28.    17  deps/v8/src/allocation.cc
  29.    19  deps/v8/src/allocation.h
  30.    20  deps/v8/src/api-arguments-inl.h
  31.    15  deps/v8/src/api-arguments.cc
  32.     2  deps/v8/src/api-arguments.h
  33.     3  deps/v8/src/api-experimental.cc
  34.    67  deps/v8/src/api-natives.cc
  35.   727  deps/v8/src/api.cc
  36.     3  deps/v8/src/api.h
  37.     3  deps/v8/src/arguments.h
  38.     2  deps/v8/src/arm/assembler-arm-inl.h
  39.   786  deps/v8/src/arm/assembler-arm.cc
  40.   131  deps/v8/src/arm/assembler-arm.h
  41.   486  deps/v8/src/arm/code-stubs-arm.cc
  42.    19  deps/v8/src/arm/code-stubs-arm.h
  43.   358  deps/v8/src/arm/codegen-arm.cc
  44.    25  deps/v8/src/arm/constants-arm.h
  45.   379  deps/v8/src/arm/disasm-arm.cc
  46.     8  deps/v8/src/arm/interface-descriptors-arm.cc
  47.   430  deps/v8/src/arm/macro-assembler-arm.cc
  48.   104  deps/v8/src/arm/macro-assembler-arm.h
  49.  1201  deps/v8/src/arm/simulator-arm.cc
  50.    11  deps/v8/src/arm/simulator-arm.h
  51.    15  deps/v8/src/arm64/assembler-arm64.cc
  52.     3  deps/v8/src/arm64/assembler-arm64.h
  53.   452  deps/v8/src/arm64/code-stubs-arm64.cc
  54.     8  deps/v8/src/arm64/code-stubs-arm64.h
  55.   290  deps/v8/src/arm64/codegen-arm64.cc
  56.    12  deps/v8/src/arm64/interface-descriptors-arm64.cc
  57.   316  deps/v8/src/arm64/macro-assembler-arm64.cc
  58.    94  deps/v8/src/arm64/macro-assembler-arm64.h
  59.     1  deps/v8/src/asmjs/OWNERS
  60.   129  deps/v8/src/asmjs/asm-js.cc
  61.     4  deps/v8/src/asmjs/asm-js.h
  62.   407  deps/v8/src/asmjs/asm-typer.cc
  63.    80  deps/v8/src/asmjs/asm-typer.h
  64.     1  deps/v8/src/asmjs/asm-types.cc
  65.   465  deps/v8/src/asmjs/asm-wasm-builder.cc
  66.    14  deps/v8/src/asmjs/asm-wasm-builder.h
  67.    32  deps/v8/src/assembler-inl.h
  68.    74  deps/v8/src/assembler.cc
  69.    47  deps/v8/src/assembler.h
  70.    10  deps/v8/src/assert-scope.cc
  71.    23  deps/v8/src/assert-scope.h
  72.     6  deps/v8/src/ast/ast-expression-rewriter.cc
  73.    29  deps/v8/src/ast/ast-function-literal-id-reindexer.cc
  74.    36  deps/v8/src/ast/ast-function-literal-id-reindexer.h
  75.     4  deps/v8/src/ast/ast-literal-reindexer.cc
  76.   119  deps/v8/src/ast/ast-numbering.cc
  77.    15  deps/v8/src/ast/ast-numbering.h
  78.     8  deps/v8/src/ast/ast-traversal-visitor.h
  79.     7  deps/v8/src/ast/ast-types.cc
  80.    45  deps/v8/src/ast/ast-value-factory.cc
  81.   129  deps/v8/src/ast/ast-value-factory.h
  82.   226  deps/v8/src/ast/ast.cc
  83.   353  deps/v8/src/ast/ast.h
  84.     4  deps/v8/src/ast/compile-time-value.cc
  85.     4  deps/v8/src/ast/compile-time-value.h
  86.     2  deps/v8/src/ast/modules.cc
  87.    53  deps/v8/src/ast/prettyprinter.cc
  88.     4  deps/v8/src/ast/prettyprinter.h
  89.   511  deps/v8/src/ast/scopes.cc
  90.   118  deps/v8/src/ast/scopes.h
  91.     1  deps/v8/src/ast/variables.cc
  92.     9  deps/v8/src/bailout-reason.h
  93.     3  deps/v8/src/base.isolate
  94.    10  deps/v8/src/base/cpu.cc
  95.     1  deps/v8/src/base/cpu.h
  96.    17  deps/v8/src/base/hashmap.h
  97.     2  deps/v8/src/base/iterator.h
  98.    15  deps/v8/src/base/logging.cc
  99.   105  deps/v8/src/base/logging.h
 100.    19  deps/v8/src/base/macros.h

deps/v8/.clang-format (1 line changed)

@@ -1,4 +1,5 @@
# Defines the Google C++ style for automatic reformatting.
# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
BasedOnStyle: Google
DerivePointerAlignment: false
MaxEmptyLinesToKeep: 1

deps/v8/.gn (31 lines changed)

@@ -2,6 +2,8 @@
# tree and to set startup options. For documentation on the values set in this
# file, run "gn help dotfile" at the command line.
import("//build/dotfile_settings.gni")
# The location of the build configuration file.
buildconfig = "//build/config/BUILDCONFIG.gn"
@@ -19,30 +21,5 @@ check_targets = []
# These are the list of GN files that run exec_script. This whitelist exists
# to force additional review for new uses of exec_script, which is strongly
# discouraged except for gypi_to_gn calls.
exec_script_whitelist = [
"//build/config/android/BUILD.gn",
"//build/config/android/config.gni",
"//build/config/android/internal_rules.gni",
"//build/config/android/rules.gni",
"//build/config/BUILD.gn",
"//build/config/compiler/BUILD.gn",
"//build/config/gcc/gcc_version.gni",
"//build/config/ios/ios_sdk.gni",
"//build/config/linux/atk/BUILD.gn",
"//build/config/linux/BUILD.gn",
"//build/config/linux/pkg_config.gni",
"//build/config/mac/mac_sdk.gni",
"//build/config/posix/BUILD.gn",
"//build/config/sysroot.gni",
"//build/config/win/BUILD.gn",
"//build/config/win/visual_studio_version.gni",
"//build/gn_helpers.py",
"//build/gypi_to_gn.py",
"//build/toolchain/concurrent_links.gni",
"//build/toolchain/gcc_toolchain.gni",
"//build/toolchain/mac/BUILD.gn",
"//build/toolchain/win/BUILD.gn",
"//build/util/branding.gni",
"//build/util/version.gni",
"//test/test262/BUILD.gn",
]
exec_script_whitelist =
build_dotfile_settings.exec_script_whitelist + [ "//test/test262/BUILD.gn" ]

deps/v8/AUTHORS (2 lines changed)

@@ -81,6 +81,7 @@ Julien Brianceau <jbriance@cisco.com>
JunHo Seo <sejunho@gmail.com>
Kang-Hao (Kenny) Lu <kennyluck@csail.mit.edu>
Karl Skomski <karl@skomski.com>
Kevin Gibbons <bakkot@gmail.com>
Luis Reis <luis.m.reis@gmail.com>
Luke Zarko <lukezarko@gmail.com>
Maciej Małecki <me@mmalecki.com>
@@ -104,6 +105,7 @@ Patrick Gansterer <paroga@paroga.com>
Peter Rybin <peter.rybin@gmail.com>
Peter Varga <pvarga@inf.u-szeged.hu>
Paul Lind <plind44@gmail.com>
Qiuyi Zhang <qiuyi.zqy@alibaba-inc.com>
Rafal Krypa <rafal@krypa.net>
Refael Ackermann <refack@gmail.com>
Rene Rebe <rene@exactcode.de>

deps/v8/BUILD.gn (197 lines changed)

@@ -14,8 +14,6 @@ if (is_android) {
import("gni/v8.gni")
import("gni/isolate.gni")
import("//build_overrides/v8.gni")
import("snapshot_toolchain.gni")
declare_args() {
@@ -23,7 +21,10 @@ declare_args() {
v8_android_log_stdout = false
# Sets -DVERIFY_HEAP.
v8_enable_verify_heap = false
v8_enable_verify_heap = ""
# Sets -DVERIFY_PREDICTABLE
v8_enable_verify_predictable = false
# Enable compiler warnings when using V8_DEPRECATED apis.
v8_deprecation_warnings = false
@@ -51,7 +52,13 @@ declare_args() {
v8_interpreted_regexp = false
# Sets -dOBJECT_PRINT.
v8_object_print = ""
v8_enable_object_print = ""
# Sets -dTRACE_MAPS.
v8_enable_trace_maps = ""
# Sets -dV8_ENABLE_CHECKS.
v8_enable_v8_checks = ""
# With post mortem support enabled, metadata is embedded into libv8 that
# describes various parameters of the VM for use by debuggers. See
@@ -89,11 +96,20 @@ if (v8_enable_gdbjit == "") {
}
# Derived defaults.
if (v8_object_print == "") {
v8_object_print = is_debug && !v8_optimized_debug
if (v8_enable_verify_heap == "") {
v8_enable_verify_heap = is_debug
}
if (v8_enable_object_print == "") {
v8_enable_object_print = is_debug
}
if (v8_enable_disassembler == "") {
v8_enable_disassembler = is_debug && !v8_optimized_debug
v8_enable_disassembler = is_debug
}
if (v8_enable_trace_maps == "") {
v8_enable_trace_maps = is_debug
}
if (v8_enable_v8_checks == "") {
v8_enable_v8_checks = is_debug
}
# Specifies if the target build is a simulator build. Comparing target cpu
@@ -155,7 +171,7 @@ config("external_config") {
defines = [ "USING_V8_SHARED" ]
}
include_dirs = [ "include" ]
if (v8_enable_inspector_override) {
if (v8_enable_inspector) {
include_dirs += [ "$target_gen_dir/include" ]
}
}
@@ -179,12 +195,21 @@ config("features") {
if (v8_enable_gdbjit) {
defines += [ "ENABLE_GDB_JIT_INTERFACE" ]
}
if (v8_object_print) {
if (v8_enable_object_print) {
defines += [ "OBJECT_PRINT" ]
}
if (v8_enable_verify_heap) {
defines += [ "VERIFY_HEAP" ]
}
if (v8_enable_verify_predictable) {
defines += [ "VERIFY_PREDICTABLE" ]
}
if (v8_enable_trace_maps) {
defines += [ "TRACE_MAPS" ]
}
if (v8_enable_v8_checks) {
defines += [ "V8_ENABLE_CHECKS" ]
}
if (v8_interpreted_regexp) {
defines += [ "V8_INTERPRETED_REGEXP" ]
}
@@ -348,15 +373,7 @@ config("toolchain") {
ldflags += [ "-rdynamic" ]
}
# TODO(jochen): Add support for different debug optimization levels.
defines += [
"ENABLE_DISASSEMBLER",
"V8_ENABLE_CHECKS",
"OBJECT_PRINT",
"VERIFY_HEAP",
"DEBUG",
"TRACE_MAPS",
]
defines += [ "DEBUG" ]
if (v8_enable_slow_dchecks) {
defines += [ "ENABLE_SLOW_DCHECKS" ]
}
@@ -408,7 +425,6 @@ action("js2c") {
"src/js/prologue.js",
"src/js/runtime.js",
"src/js/v8natives.js",
"src/js/symbol.js",
"src/js/array.js",
"src/js/string.js",
"src/js/arraybuffer.js",
@@ -422,6 +438,7 @@ action("js2c") {
"src/js/spread.js",
"src/js/proxy.js",
"src/js/async-await.js",
"src/js/harmony-string-padding.js",
"src/debug/mirrors.js",
"src/debug/debug.js",
"src/debug/liveedit.js",
@@ -466,7 +483,6 @@ action("js2c_experimental") {
"src/messages.h",
"src/js/harmony-atomics.js",
"src/js/harmony-simd.js",
"src/js/harmony-string-padding.js",
]
outputs = [
@@ -742,7 +758,7 @@ action("v8_dump_build_config") {
"is_tsan=$is_tsan",
"target_cpu=\"$target_cpu\"",
"v8_enable_i18n_support=$v8_enable_i18n_support",
"v8_enable_inspector=$v8_enable_inspector_override",
"v8_enable_inspector=$v8_enable_inspector",
"v8_target_cpu=\"$v8_target_cpu\"",
"v8_use_snapshot=$v8_use_snapshot",
]
@@ -848,6 +864,17 @@ if (v8_use_external_startup_data) {
}
}
# This is split out to be a non-code containing target that the Chromium browser
# DLL can depend upon to get only a version string.
v8_source_set("v8_version") {
configs = [ ":internal_config" ]
sources = [
"include/v8-version-string.h",
"include/v8-version.h",
]
}
v8_source_set("v8_base") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
@@ -861,7 +888,6 @@ v8_source_set("v8_base") {
"include/v8-profiler.h",
"include/v8-testing.h",
"include/v8-util.h",
"include/v8-version.h",
"include/v8.h",
"include/v8config.h",
"src/accessors.cc",
@@ -893,12 +919,15 @@ v8_source_set("v8_base") {
"src/asmjs/asm-wasm-builder.h",
"src/asmjs/switch-logic.cc",
"src/asmjs/switch-logic.h",
"src/assembler-inl.h",
"src/assembler.cc",
"src/assembler.h",
"src/assert-scope.cc",
"src/assert-scope.h",
"src/ast/ast-expression-rewriter.cc",
"src/ast/ast-expression-rewriter.h",
"src/ast/ast-function-literal-id-reindexer.cc",
"src/ast/ast-function-literal-id-reindexer.h",
"src/ast/ast-literal-reindexer.cc",
"src/ast/ast-literal-reindexer.h",
"src/ast/ast-numbering.cc",
@@ -919,7 +948,6 @@ v8_source_set("v8_base") {
"src/ast/modules.h",
"src/ast/prettyprinter.cc",
"src/ast/prettyprinter.h",
"src/ast/scopeinfo.cc",
"src/ast/scopes.cc",
"src/ast/scopes.h",
"src/ast/variables.cc",
@@ -944,6 +972,8 @@ v8_source_set("v8_base") {
"src/builtins/builtins-boolean.cc",
"src/builtins/builtins-call.cc",
"src/builtins/builtins-callsite.cc",
"src/builtins/builtins-constructor.cc",
"src/builtins/builtins-constructor.h",
"src/builtins/builtins-conversion.cc",
"src/builtins/builtins-dataview.cc",
"src/builtins/builtins-date.cc",
@@ -953,14 +983,15 @@ v8_source_set("v8_base") {
"src/builtins/builtins-generator.cc",
"src/builtins/builtins-global.cc",
"src/builtins/builtins-handler.cc",
"src/builtins/builtins-ic.cc",
"src/builtins/builtins-internal.cc",
"src/builtins/builtins-interpreter.cc",
"src/builtins/builtins-iterator.cc",
"src/builtins/builtins-json.cc",
"src/builtins/builtins-math.cc",
"src/builtins/builtins-number.cc",
"src/builtins/builtins-object.cc",
"src/builtins/builtins-promise.cc",
"src/builtins/builtins-promise.h",
"src/builtins/builtins-proxy.cc",
"src/builtins/builtins-reflect.cc",
"src/builtins/builtins-regexp.cc",
@@ -1002,6 +1033,8 @@ v8_source_set("v8_base") {
"src/compiler-dispatcher/compiler-dispatcher-job.h",
"src/compiler-dispatcher/compiler-dispatcher-tracer.cc",
"src/compiler-dispatcher/compiler-dispatcher-tracer.h",
"src/compiler-dispatcher/compiler-dispatcher.cc",
"src/compiler-dispatcher/compiler-dispatcher.h",
"src/compiler-dispatcher/optimizing-compile-dispatcher.cc",
"src/compiler-dispatcher/optimizing-compile-dispatcher.h",
"src/compiler.cc",
@@ -1020,12 +1053,12 @@ v8_source_set("v8_base") {
"src/compiler/basic-block-instrumentor.h",
"src/compiler/branch-elimination.cc",
"src/compiler/branch-elimination.h",
"src/compiler/bytecode-branch-analysis.cc",
"src/compiler/bytecode-branch-analysis.h",
"src/compiler/bytecode-analysis.cc",
"src/compiler/bytecode-analysis.h",
"src/compiler/bytecode-graph-builder.cc",
"src/compiler/bytecode-graph-builder.h",
"src/compiler/bytecode-loop-analysis.cc",
"src/compiler/bytecode-loop-analysis.h",
"src/compiler/bytecode-liveness-map.cc",
"src/compiler/bytecode-liveness-map.h",
"src/compiler/c-linkage.cc",
"src/compiler/checkpoint-elimination.cc",
"src/compiler/checkpoint-elimination.h",
@@ -1065,6 +1098,8 @@ v8_source_set("v8_base") {
"src/compiler/frame.h",
"src/compiler/gap-resolver.cc",
"src/compiler/gap-resolver.h",
"src/compiler/graph-assembler.cc",
"src/compiler/graph-assembler.h",
"src/compiler/graph-reducer.cc",
"src/compiler/graph-reducer.h",
"src/compiler/graph-replay.cc",
@@ -1196,8 +1231,6 @@ v8_source_set("v8_base") {
"src/compiler/tail-call-optimization.h",
"src/compiler/type-cache.cc",
"src/compiler/type-cache.h",
"src/compiler/type-hint-analyzer.cc",
"src/compiler/type-hint-analyzer.h",
"src/compiler/typed-optimization.cc",
"src/compiler/typed-optimization.h",
"src/compiler/typer.cc",
@@ -1300,6 +1333,7 @@ v8_source_set("v8_base") {
"src/debug/debug-scopes.h",
"src/debug/debug.cc",
"src/debug/debug.h",
"src/debug/interface-types.h",
"src/debug/liveedit.cc",
"src/debug/liveedit.h",
"src/deoptimize-reason.cc",
@@ -1373,6 +1407,8 @@ v8_source_set("v8_base") {
"src/heap/array-buffer-tracker.h",
"src/heap/code-stats.cc",
"src/heap/code-stats.h",
"src/heap/embedder-tracing.cc",
"src/heap/embedder-tracing.h",
"src/heap/gc-idle-time-handler.cc",
"src/heap/gc-idle-time-handler.h",
"src/heap/gc-tracer.cc",
@@ -1414,6 +1450,9 @@ v8_source_set("v8_base") {
"src/ic/access-compiler-data.h",
"src/ic/access-compiler.cc",
"src/ic/access-compiler.h",
"src/ic/accessor-assembler-impl.h",
"src/ic/accessor-assembler.cc",
"src/ic/accessor-assembler.h",
"src/ic/call-optimization.cc",
"src/ic/call-optimization.h",
"src/ic/handler-compiler.cc",
@@ -1425,6 +1464,8 @@ v8_source_set("v8_base") {
"src/ic/ic-inl.h",
"src/ic/ic-state.cc",
"src/ic/ic-state.h",
"src/ic/ic-stats.cc",
"src/ic/ic-stats.h",
"src/ic/ic.cc",
"src/ic/ic.h",
"src/ic/keyed-store-generic.cc",
@@ -1437,10 +1478,14 @@ v8_source_set("v8_base") {
"src/identity-map.h",
"src/interface-descriptors.cc",
"src/interface-descriptors.h",
"src/interpreter/bytecode-array-accessor.cc",
"src/interpreter/bytecode-array-accessor.h",
"src/interpreter/bytecode-array-builder.cc",
"src/interpreter/bytecode-array-builder.h",
"src/interpreter/bytecode-array-iterator.cc",
"src/interpreter/bytecode-array-iterator.h",
"src/interpreter/bytecode-array-random-iterator.cc",
"src/interpreter/bytecode-array-random-iterator.h",
"src/interpreter/bytecode-array-writer.cc",
"src/interpreter/bytecode-array-writer.h",
"src/interpreter/bytecode-dead-code-optimizer.cc",
@@ -1509,6 +1554,8 @@ v8_source_set("v8_base") {
"src/machine-type.cc",
"src/machine-type.h",
"src/macro-assembler.h",
"src/map-updater.cc",
"src/map-updater.h",
"src/messages.cc",
"src/messages.h",
"src/msan.h",
@@ -1519,6 +1566,11 @@ v8_source_set("v8_base") {
"src/objects-printer.cc",
"src/objects.cc",
"src/objects.h",
"src/objects/module-info.h",
"src/objects/object-macros-undef.h",
"src/objects/object-macros.h",
"src/objects/scope-info.cc",
"src/objects/scope-info.h",
"src/ostreams.cc",
"src/ostreams.h",
"src/parsing/duplicate-finder.cc",
@@ -1533,6 +1585,8 @@ v8_source_set("v8_base") {
"src/parsing/parser-base.h",
"src/parsing/parser.cc",
"src/parsing/parser.h",
"src/parsing/parsing.cc",
"src/parsing/parsing.h",
"src/parsing/pattern-rewriter.cc",
"src/parsing/preparse-data-format.h",
"src/parsing/preparse-data.cc",
@@ -1578,8 +1632,6 @@ v8_source_set("v8_base") {
"src/profiler/tracing-cpu-profiler.h",
"src/profiler/unbound-queue-inl.h",
"src/profiler/unbound-queue.h",
"src/promise-utils.cc",
"src/promise-utils.h",
"src/property-descriptor.cc",
"src/property-descriptor.h",
"src/property-details.h",
@@ -1679,6 +1731,8 @@ v8_source_set("v8_base") {
"src/startup-data-util.h",
"src/string-builder.cc",
"src/string-builder.h",
"src/string-case.cc",
"src/string-case.h",
"src/string-search.h",
"src/string-stream.cc",
"src/string-stream.h",
@@ -1693,6 +1747,7 @@ v8_source_set("v8_base") {
"src/transitions-inl.h",
"src/transitions.cc",
"src/transitions.h",
"src/trap-handler/trap-handler.h",
"src/type-feedback-vector-inl.h",
"src/type-feedback-vector.cc",
"src/type-feedback-vector.h",
@@ -1724,9 +1779,9 @@ v8_source_set("v8_base") {
"src/version.h",
"src/vm-state-inl.h",
"src/vm-state.h",
"src/wasm/ast-decoder.cc",
"src/wasm/ast-decoder.h",
"src/wasm/decoder.h",
"src/wasm/function-body-decoder.cc",
"src/wasm/function-body-decoder.h",
"src/wasm/leb-helper.h",
"src/wasm/managed.h",
"src/wasm/module-decoder.cc",
@@ -1740,6 +1795,7 @@ v8_source_set("v8_base") {
"src/wasm/wasm-interpreter.h",
"src/wasm/wasm-js.cc",
"src/wasm/wasm-js.h",
"src/wasm/wasm-limits.h",
"src/wasm/wasm-macro-gen.h",
"src/wasm/wasm-module-builder.cc",
"src/wasm/wasm-module-builder.h",
@@ -1751,12 +1807,15 @@ v8_source_set("v8_base") {
"src/wasm/wasm-opcodes.h",
"src/wasm/wasm-result.cc",
"src/wasm/wasm-result.h",
"src/wasm/wasm-text.cc",
"src/wasm/wasm-text.h",
"src/zone/accounting-allocator.cc",
"src/zone/accounting-allocator.h",
"src/zone/zone-allocator.h",
"src/zone/zone-allocator.h",
"src/zone/zone-chunk-list.h",
"src/zone/zone-containers.h",
"src/zone/zone-handle-set.h",
"src/zone/zone-segment.cc",
"src/zone/zone-segment.h",
"src/zone/zone.cc",
@@ -1797,9 +1856,7 @@ v8_source_set("v8_base") {
"src/ia32/simulator-ia32.h",
"src/ic/ia32/access-compiler-ia32.cc",
"src/ic/ia32/handler-compiler-ia32.cc",
"src/ic/ia32/ic-compiler-ia32.cc",
"src/ic/ia32/ic-ia32.cc",
"src/ic/ia32/stub-cache-ia32.cc",
"src/regexp/ia32/regexp-macro-assembler-ia32.cc",
"src/regexp/ia32/regexp-macro-assembler-ia32.h",
]
@@ -1822,9 +1879,7 @@ v8_source_set("v8_base") {
"src/full-codegen/x64/full-codegen-x64.cc",
"src/ic/x64/access-compiler-x64.cc",
"src/ic/x64/handler-compiler-x64.cc",
"src/ic/x64/ic-compiler-x64.cc",
"src/ic/x64/ic-x64.cc",
"src/ic/x64/stub-cache-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.h",
"src/third_party/valgrind/valgrind.h",
@@ -1889,8 +1944,6 @@ v8_source_set("v8_base") {
"src/ic/arm/access-compiler-arm.cc",
"src/ic/arm/handler-compiler-arm.cc",
"src/ic/arm/ic-arm.cc",
"src/ic/arm/ic-compiler-arm.cc",
"src/ic/arm/stub-cache-arm.cc",
"src/regexp/arm/regexp-macro-assembler-arm.cc",
"src/regexp/arm/regexp-macro-assembler-arm.h",
]
@@ -1948,8 +2001,6 @@ v8_source_set("v8_base") {
"src/ic/arm64/access-compiler-arm64.cc",
"src/ic/arm64/handler-compiler-arm64.cc",
"src/ic/arm64/ic-arm64.cc",
"src/ic/arm64/ic-compiler-arm64.cc",
"src/ic/arm64/stub-cache-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.h",
]
@@ -1970,9 +2021,7 @@ v8_source_set("v8_base") {
"src/full-codegen/mips/full-codegen-mips.cc",
"src/ic/mips/access-compiler-mips.cc",
"src/ic/mips/handler-compiler-mips.cc",
"src/ic/mips/ic-compiler-mips.cc",
"src/ic/mips/ic-mips.cc",
"src/ic/mips/stub-cache-mips.cc",
"src/mips/assembler-mips-inl.h",
"src/mips/assembler-mips.cc",
"src/mips/assembler-mips.h",
@@ -2012,9 +2061,7 @@ v8_source_set("v8_base") {
"src/full-codegen/mips64/full-codegen-mips64.cc",
"src/ic/mips64/access-compiler-mips64.cc",
"src/ic/mips64/handler-compiler-mips64.cc",
"src/ic/mips64/ic-compiler-mips64.cc",
"src/ic/mips64/ic-mips64.cc",
"src/ic/mips64/stub-cache-mips64.cc",
"src/mips64/assembler-mips64-inl.h",
"src/mips64/assembler-mips64.cc",
"src/mips64/assembler-mips64.h",
@@ -2054,9 +2101,7 @@ v8_source_set("v8_base") {
"src/full-codegen/ppc/full-codegen-ppc.cc",
"src/ic/ppc/access-compiler-ppc.cc",
"src/ic/ppc/handler-compiler-ppc.cc",
"src/ic/ppc/ic-compiler-ppc.cc",
"src/ic/ppc/ic-ppc.cc",
"src/ic/ppc/stub-cache-ppc.cc",
"src/ppc/assembler-ppc-inl.h",
"src/ppc/assembler-ppc.cc",
"src/ppc/assembler-ppc.h",
@@ -2096,9 +2141,7 @@ v8_source_set("v8_base") {
"src/full-codegen/s390/full-codegen-s390.cc",
"src/ic/s390/access-compiler-s390.cc",
"src/ic/s390/handler-compiler-s390.cc",
"src/ic/s390/ic-compiler-s390.cc",
"src/ic/s390/ic-s390.cc",
"src/ic/s390/stub-cache-s390.cc",
"src/regexp/s390/regexp-macro-assembler-s390.cc",
"src/regexp/s390/regexp-macro-assembler-s390.h",
"src/s390/assembler-s390-inl.h",
@@ -2138,9 +2181,7 @@ v8_source_set("v8_base") {
"src/full-codegen/x87/full-codegen-x87.cc",
"src/ic/x87/access-compiler-x87.cc",
"src/ic/x87/handler-compiler-x87.cc",
"src/ic/x87/ic-compiler-x87.cc",
"src/ic/x87/ic-x87.cc",
"src/ic/x87/stub-cache-x87.cc",
"src/regexp/x87/regexp-macro-assembler-x87.cc",
"src/regexp/x87/regexp-macro-assembler-x87.h",
"src/x87/assembler-x87-inl.h",
@@ -2169,6 +2210,7 @@ v8_source_set("v8_base") {
deps = [
":v8_libbase",
":v8_libsampler",
":v8_version",
]
sources += [ v8_generated_peephole_source ]
@@ -2196,7 +2238,7 @@ v8_source_set("v8_base") {
deps += [ ":postmortem-metadata" ]
}
if (v8_enable_inspector_override) {
if (v8_enable_inspector) {
deps += [ "src/inspector:inspector" ]
}
}
@@ -2399,14 +2441,10 @@ v8_source_set("fuzzer_support") {
":v8_libbase",
":v8_libplatform",
]
}
v8_source_set("simple_fuzzer") {
sources = [
"test/fuzzer/fuzzer.cc",
]
configs = [ ":internal_config_base" ]
if (v8_enable_i18n_support) {
deps += [ "//third_party/icu" ]
}
}
###############################################################################
@@ -2477,14 +2515,10 @@ group("gn_all") {
deps = [
":d8",
":v8_fuzzers",
":v8_hello_world",
":v8_parser_shell",
":v8_sample_process",
":v8_simple_json_fuzzer",
":v8_simple_parser_fuzzer",
":v8_simple_regexp_fuzzer",
":v8_simple_wasm_asmjs_fuzzer",
":v8_simple_wasm_fuzzer",
"test:gn_all",
"tools:gn_all",
]
@@ -2498,6 +2532,26 @@ group("gn_all") {
}
}
group("v8_fuzzers") {
testonly = true
deps = [
":v8_simple_json_fuzzer",
":v8_simple_parser_fuzzer",
":v8_simple_regexp_fuzzer",
":v8_simple_wasm_asmjs_fuzzer",
":v8_simple_wasm_call_fuzzer",
":v8_simple_wasm_code_fuzzer",
":v8_simple_wasm_data_section_fuzzer",
":v8_simple_wasm_function_sigs_section_fuzzer",
":v8_simple_wasm_fuzzer",
":v8_simple_wasm_globals_section_fuzzer",
":v8_simple_wasm_imports_section_fuzzer",
":v8_simple_wasm_memory_section_fuzzer",
":v8_simple_wasm_names_section_fuzzer",
":v8_simple_wasm_types_section_fuzzer",
]
}
if (is_component_build) {
v8_component("v8") {
sources = [
@@ -2527,6 +2581,7 @@ if (is_component_build) {
":v8_base",
":v8_maybe_snapshot",
]
public_configs = [ ":external_config" ]
}
}
@@ -2566,8 +2621,12 @@ v8_executable("d8") {
deps += [ "//third_party/icu" ]
}
if (v8_correctness_fuzzer) {
deps += [ "tools/foozzie:v8_correctness_fuzzer_resources" ]
}
defines = []
if (v8_enable_inspector_override) {
if (v8_enable_inspector) {
defines += [ "V8_INSPECTOR_ENABLED" ]
}
}
@@ -2687,10 +2746,14 @@ template("v8_fuzzer") {
v8_executable("v8_simple_" + name) {
deps = [
":" + name,
":simple_fuzzer",
"//build/config/sanitizers:deps",
"//build/win:default_exe_manifest",
]
sources = [
"test/fuzzer/fuzzer.cc",
]
configs = [ ":external_config" ]
}
}

deps/v8/ChangeLog (2490 lines changed)

File diff suppressed because it is too large

deps/v8/DEPS (22 lines changed)

@@ -8,15 +8,15 @@ vars = {
deps = {
"v8/build":
Var("chromium_url") + "/chromium/src/build.git" + "@" + "a3b623a6eff6dc9d58a03251ae22bccf92f67cb2",
Var("chromium_url") + "/chromium/src/build.git" + "@" + "f55127ddc3632dbd6fef285c71ab4cb103e08f0c",
"v8/tools/gyp":
Var("chromium_url") + "/external/gyp.git" + "@" + "e7079f0e0e14108ab0dba58728ff219637458563",
"v8/third_party/icu":
Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "c1a237113f525a1561d4b322d7653e1083f79aaa",
Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "9cd2828740572ba6f694b9365236a8356fd06147",
"v8/third_party/instrumented_libraries":
Var("chromium_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "45f5814b1543e41ea0be54c771e3840ea52cca4a",
Var("chromium_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "5b6f777da671be977f56f0e8fc3469a3ccbb4474",
"v8/buildtools":
Var("chromium_url") + "/chromium/buildtools.git" + "@" + "39b1db2ab4aa4b2ccaa263c29bdf63e7c1ee28aa",
Var("chromium_url") + "/chromium/buildtools.git" + "@" + "cb12d6e8641f0c9b0fbbfa4bf17c55c6c0d3c38f",
"v8/base/trace_event/common":
Var("chromium_url") + "/chromium/src/base/trace_event/common.git" + "@" + "06294c8a4a6f744ef284cd63cfe54dbf61eea290",
"v8/third_party/jinja2":
@@ -24,7 +24,7 @@ deps = {
"v8/third_party/markupsafe":
Var("chromium_url") + "/chromium/src/third_party/markupsafe.git" + "@" + "484a5661041cac13bfc688a26ec5434b05d18961",
"v8/tools/swarming_client":
Var('chromium_url') + '/external/swarming.client.git' + '@' + "380e32662312eb107f06fcba6409b0409f8fef72",
Var('chromium_url') + '/external/swarming.client.git' + '@' + "ebc8dab6f8b8d79ec221c94de39a921145abd404",
"v8/testing/gtest":
Var("chromium_url") + "/external/github.com/google/googletest.git" + "@" + "6f8a66431cb592dad629028a50b3dd418a408c87",
"v8/testing/gmock":
@@ -35,19 +35,19 @@ deps = {
Var("chromium_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be",
"v8/test/simdjs/data": Var("chromium_url") + "/external/github.com/tc39/ecmascript_simd.git" + "@" + "baf493985cb9ea7cdbd0d68704860a8156de9556",
"v8/test/test262/data":
Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "fb61ab44eb1bbc2699d714fc00e33af2a19411ce",
Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "6a0f1189eb00d38ef9760cb65cbc41c066876cde",
"v8/test/test262/harness":
Var("chromium_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "cbd968f54f7a95c6556d53ba852292a4c49d11d8",
Var("chromium_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "0f2acdd882c84cff43b9d60df7574a1901e2cdcd",
"v8/tools/clang":
Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "75350a858c51ad69e2aae051a8727534542da29f",
Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "f7ce1a5678e5addc015aed5f1e7734bbd2caac7c",
}
deps_os = {
"android": {
"v8/third_party/android_tools":
Var("chromium_url") + "/android_tools.git" + "@" + "25d57ead05d3dfef26e9c19b13ed10b0a69829cf",
Var("chromium_url") + "/android_tools.git" + "@" + "b43a6a289a7588b1769814f04dd6c7d7176974cc",
"v8/third_party/catapult":
Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "6962f5c0344a79b152bf84460a93e1b2e11ea0f4",
Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "143ba4ddeb05e6165fb8413c5f3f47d342922d24",
},
"win": {
"v8/third_party/cygwin":
@@ -263,7 +263,7 @@ hooks = [
# Update the Windows toolchain if necessary.
'name': 'win_toolchain',
'pattern': '.',
'action': ['python', 'v8/gypfiles/vs_toolchain.py', 'update'],
'action': ['python', 'v8/build/vs_toolchain.py', 'update'],
},
# Pull binutils for linux, enabled debug fission for faster linking /
# debugging when used with clang on Ubuntu Precise.

deps/v8/OWNERS (7 lines changed)

@@ -5,14 +5,19 @@ binji@chromium.org
bmeurer@chromium.org
bradnelson@chromium.org
cbruni@chromium.org
clemensh@chromium.org
danno@chromium.org
epertoso@chromium.org
franzih@chromium.org
gsathya@chromium.org
hablich@chromium.org
hpayer@chromium.org
ishell@chromium.org
jarin@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
jochen@chromium.org
leszeks@chromium.org
littledan@chromium.org
machenbach@chromium.org
marja@chromium.org
@@ -21,9 +26,11 @@ mstarzinger@chromium.org
mtrofin@chromium.org
mvstanton@chromium.org
mythria@chromium.org
petermarshall@chromium.org
neis@chromium.org
rmcilroy@chromium.org
rossberg@chromium.org
tebbi@chromium.org
titzer@chromium.org
ulan@chromium.org
verwaest@chromium.org

deps/v8/PRESUBMIT.py (15 lines changed)

@@ -67,19 +67,22 @@ def _V8PresubmitChecks(input_api, output_api):
input_api.PresubmitLocalPath(), 'tools'))
from presubmit import CppLintProcessor
from presubmit import SourceProcessor
from presubmit import CheckAuthorizedAuthor
from presubmit import CheckStatusFiles
from presubmit import StatusFilesProcessor
results = []
if not CppLintProcessor().Run(input_api.PresubmitLocalPath()):
if not CppLintProcessor().RunOnFiles(
input_api.AffectedFiles(include_deletes=False)):
results.append(output_api.PresubmitError("C++ lint check failed"))
if not SourceProcessor().Run(input_api.PresubmitLocalPath()):
if not SourceProcessor().RunOnFiles(
input_api.AffectedFiles(include_deletes=False)):
results.append(output_api.PresubmitError(
"Copyright header, trailing whitespaces and two empty lines " \
"between declarations check failed"))
if not CheckStatusFiles(input_api.PresubmitLocalPath()):
if not StatusFilesProcessor().RunOnFiles(
input_api.AffectedFiles(include_deletes=False)):
results.append(output_api.PresubmitError("Status file check failed"))
results.extend(CheckAuthorizedAuthor(input_api, output_api))
results.extend(input_api.canned_checks.CheckAuthorizedAuthor(
input_api, output_api))
return results

deps/v8/build_overrides/build.gni (6 lines changed)

@@ -24,3 +24,9 @@ linux_use_bundled_binutils_override = true
asan_suppressions_file = "//build/sanitizers/asan_suppressions.cc"
lsan_suppressions_file = "//build/sanitizers/lsan_suppressions.cc"
tsan_suppressions_file = "//build/sanitizers/tsan_suppressions.cc"
# Skip assertions about 4GiB file size limit.
ignore_elf32_limitations = true
# Use the system install of Xcode for tools like ibtool, libtool, etc.
use_system_xcode = true

deps/v8/build_overrides/v8.gni (15 lines changed)

@@ -2,14 +2,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//build/config/features.gni")
import("//build/config/ui.gni")
import("//build/config/v8_target_cpu.gni")
import("//gni/v8.gni")
if (is_android) {
import("//build/config/android/config.gni")
}
if (((v8_current_cpu == "x86" || v8_current_cpu == "x64" ||
v8_current_cpu == "x87") && (is_linux || is_mac)) ||
@@ -24,9 +17,9 @@ v8_extra_library_files = [ "//test/cctest/test-extra.js" ]
v8_experimental_extra_library_files =
[ "//test/cctest/test-experimental-extra.js" ]
v8_enable_inspector_override = true
declare_args() {
# Enable inspector. See include/v8-inspector.h.
v8_enable_inspector = true
# Use static libraries instead of source_sets.
v8_static_library = false
}
v8_enable_inspector_override = v8_enable_inspector

deps/v8/gni/isolate.gni (5 lines changed)

@@ -3,7 +3,6 @@
# found in the LICENSE file.
import("//build/config/sanitizers/sanitizers.gni")
import("//build_overrides/v8.gni")
import("//third_party/icu/config.gni")
import("v8.gni")
@@ -97,7 +96,7 @@ template("v8_isolate_run") {
} else {
icu_use_data_file_flag = "0"
}
if (v8_enable_inspector_override) {
if (v8_enable_inspector) {
enable_inspector = "1"
} else {
enable_inspector = "0"
@@ -181,7 +180,7 @@ template("v8_isolate_run") {
if (is_win) {
args += [
"--config-variable",
"msvs_version=2013",
"msvs_version=2015",
]
} else {
args += [

deps/v8/gni/v8.gni (26 lines changed)

@@ -4,8 +4,12 @@
import("//build/config/sanitizers/sanitizers.gni")
import("//build/config/v8_target_cpu.gni")
import("//build_overrides/v8.gni")
declare_args() {
# Includes files needed for correctness fuzzing.
v8_correctness_fuzzer = false
# Indicate if valgrind was fetched as a custom deps to make it available on
# swarming.
v8_has_valgrind = false
@@ -30,6 +34,9 @@ declare_args() {
# Enable ECMAScript Internationalization API. Enabling this feature will
# add a dependency on the ICU library.
v8_enable_i18n_support = true
# Enable inspector. See include/v8-inspector.h.
v8_enable_inspector = v8_enable_inspector_override
}
if (v8_use_external_startup_data == "") {
@@ -83,11 +90,20 @@ if (is_posix && v8_enable_backtrace) {
# All templates should be kept in sync.
template("v8_source_set") {
source_set(target_name) {
forward_variables_from(invoker, "*", [ "configs" ])
configs += invoker.configs
configs -= v8_remove_configs
configs += v8_add_configs
if (defined(v8_static_library) && v8_static_library) {
static_library(target_name) {
forward_variables_from(invoker, "*", [ "configs" ])
configs += invoker.configs
configs -= v8_remove_configs
configs += v8_add_configs
}
} else {
source_set(target_name) {
forward_variables_from(invoker, "*", [ "configs" ])
configs += invoker.configs
configs -= v8_remove_configs
configs += v8_add_configs
}
}
}

deps/v8/gypfiles/all.gyp (6 lines changed)

@@ -27,10 +27,14 @@
}],
['v8_enable_inspector==1', {
'dependencies': [
'../test/debugger/debugger.gyp:*',
'../test/inspector/inspector.gyp:*',
],
}],
['v8_enable_inspector==1 and test_isolation_mode != "noop"', {
'dependencies': [
'../test/debugger/debugger.gyp:*',
],
}],
['test_isolation_mode != "noop"', {
'dependencies': [
'../test/bot_default.gyp:*',

deps/v8/gypfiles/get_landmines.py (1 line changed)

@@ -31,6 +31,7 @@ def main():
print 'Clober to fix windows build problems.'
print 'Clober again to fix windows build problems.'
print 'Clobber to possibly resolve failure on win-32 bot.'
print 'Clobber for http://crbug.com/668958.'
return 0

deps/v8/gypfiles/toolchain.gypi (2 lines changed)

@@ -989,6 +989,8 @@
# present in VS 2003 and earlier.
'msvs_disabled_warnings': [4351],
'msvs_configuration_attributes': {
'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1',
},
}],

deps/v8/gypfiles/win/msvs_dependencies.isolate (97 lines changed)

@@ -0,0 +1,97 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO(machenbach): Remove this when crbug.com/669910 is resolved.
{
'conditions': [
# Copy the VS runtime DLLs into the isolate so that they
# don't have to be preinstalled on the target machine.
#
# VS2013 runtimes
['OS=="win" and msvs_version==2013 and component=="shared_library" and (CONFIGURATION_NAME=="Debug" or CONFIGURATION_NAME=="Debug_x64")', {
'variables': {
'files': [
'<(PRODUCT_DIR)/msvcp120d.dll',
'<(PRODUCT_DIR)/msvcr120d.dll',
],
},
}],
['OS=="win" and msvs_version==2013 and component=="shared_library" and (CONFIGURATION_NAME=="Release" or CONFIGURATION_NAME=="Release_x64")', {
'variables': {
'files': [
'<(PRODUCT_DIR)/msvcp120.dll',
'<(PRODUCT_DIR)/msvcr120.dll',
],
},
}],
# VS2015 runtimes
['OS=="win" and msvs_version==2015 and component=="shared_library" and (CONFIGURATION_NAME=="Debug" or CONFIGURATION_NAME=="Debug_x64")', {
'variables': {
'files': [
'<(PRODUCT_DIR)/msvcp140d.dll',
'<(PRODUCT_DIR)/vccorlib140d.dll',
'<(PRODUCT_DIR)/vcruntime140d.dll',
'<(PRODUCT_DIR)/ucrtbased.dll',
],
},
}],
['OS=="win" and msvs_version==2015 and component=="shared_library" and (CONFIGURATION_NAME=="Release" or CONFIGURATION_NAME=="Release_x64")', {
'variables': {
'files': [
'<(PRODUCT_DIR)/msvcp140.dll',
'<(PRODUCT_DIR)/vccorlib140.dll',
'<(PRODUCT_DIR)/vcruntime140.dll',
'<(PRODUCT_DIR)/ucrtbase.dll',
],
},
}],
['OS=="win" and msvs_version==2015 and component=="shared_library"', {
# Windows 10 Universal C Runtime binaries.
'variables': {
'files': [
'<(PRODUCT_DIR)/api-ms-win-core-console-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-datetime-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-debug-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-errorhandling-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-file-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-file-l1-2-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-file-l2-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-handle-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-heap-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-interlocked-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-libraryloader-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-localization-l1-2-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-memory-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-namedpipe-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-processenvironment-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-processthreads-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-processthreads-l1-1-1.dll',
'<(PRODUCT_DIR)/api-ms-win-core-profile-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-rtlsupport-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-string-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-synch-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-synch-l1-2-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-sysinfo-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-timezone-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-util-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-conio-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-convert-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-environment-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-filesystem-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-heap-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-locale-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-math-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-multibyte-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-private-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-process-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-runtime-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-stdio-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-string-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-time-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-utility-l1-1-0.dll',
],
},
}],
],
}

deps/v8/include/libplatform/libplatform.h (11 lines changed)

@@ -34,6 +34,17 @@ V8_PLATFORM_EXPORT v8::Platform* CreateDefaultPlatform(
V8_PLATFORM_EXPORT bool PumpMessageLoop(v8::Platform* platform,
v8::Isolate* isolate);
/**
* Runs pending idle tasks for at most |idle_time_in_seconds| seconds.
*
* The caller has to make sure that this is called from the right thread.
* This call does not block if no task is pending. The |platform| has to be
* created using |CreateDefaultPlatform|.
*/
V8_PLATFORM_EXPORT void RunIdleTasks(v8::Platform* platform,
v8::Isolate* isolate,
double idle_time_in_seconds);
/**
* Attempts to set the tracing controller for the given platform.
*
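
A minimal usage sketch for the new RunIdleTasks() entry point documented above, assuming the platform was created with CreateDefaultPlatform(); the DrainTasks helper name and the 10 ms budget are illustrative, not part of the patch:

```cpp
#include "include/libplatform/libplatform.h"
#include "include/v8.h"

// Illustrative helper: run all queued foreground tasks, then give V8
// up to 10 milliseconds of idle-task time on the current thread.
void DrainTasks(v8::Platform* platform, v8::Isolate* isolate) {
  // PumpMessageLoop() returns false once no foreground task is pending.
  while (v8::platform::PumpMessageLoop(platform, isolate)) {
    // Keep pumping until the task queue is drained.
  }
  v8::platform::RunIdleTasks(platform, isolate, 0.01 /* seconds */);
}
```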

deps/v8/include/v8-debug.h (80 lines changed)

@@ -16,11 +16,9 @@ namespace v8 {
enum DebugEvent {
Break = 1,
Exception = 2,
NewFunction = 3,
BeforeCompile = 4,
AfterCompile = 5,
CompileError = 6,
AsyncTaskEvent = 7,
AfterCompile = 3,
CompileError = 4,
AsyncTaskEvent = 5,
};
class V8_EXPORT Debug {
@@ -87,7 +85,6 @@ class V8_EXPORT Debug {
virtual ~Message() {}
};
/**
* An event details object passed to the debug event listener.
*/
@@ -145,7 +142,7 @@ class V8_EXPORT Debug {
*
* \param message the debug message handler message object
*
* A MessageHandler2 does not take possession of the message data,
* A MessageHandler does not take possession of the message data,
* and must not rely on the data persisting after the handler returns.
*/
typedef void (*MessageHandler)(const Message& message);
@@ -167,33 +164,37 @@ class V8_EXPORT Debug {
static void CancelDebugBreak(Isolate* isolate);
// Check if a debugger break is scheduled in the given isolate.
static bool CheckDebugBreak(Isolate* isolate);
V8_DEPRECATED("No longer supported",
static bool CheckDebugBreak(Isolate* isolate));
// Message based interface. The message protocol is JSON.
static void SetMessageHandler(Isolate* isolate, MessageHandler handler);
static void SendCommand(Isolate* isolate,
const uint16_t* command, int length,
ClientData* client_data = NULL);
/**
* Run a JavaScript function in the debugger.
* \param fun the function to call
* \param data passed as second argument to the function
* With this call the debugger is entered and the function specified is called
* with the execution state as the first argument. This makes it possible to
* get access to information otherwise not available during normal JavaScript
* execution e.g. details on stack frames. Receiver of the function call will
* be the debugger context global object, however this is a subject to change.
* The following example shows a JavaScript function which when passed to
* v8::Debug::Call will return the current line of JavaScript execution.
*
* \code
* function frame_source_line(exec_state) {
* return exec_state.frame(0).sourceLine();
* }
* \endcode
*/
V8_DEPRECATED("No longer supported",
static void SetMessageHandler(Isolate* isolate,
MessageHandler handler));
V8_DEPRECATED("No longer supported",
static void SendCommand(Isolate* isolate,
const uint16_t* command, int length,
ClientData* client_data = NULL));
/**
* Run a JavaScript function in the debugger.
* \param fun the function to call
* \param data passed as second argument to the function
* With this call the debugger is entered and the function specified is called
* with the execution state as the first argument. This makes it possible to
* get access to information otherwise not available during normal JavaScript
* execution e.g. details on stack frames. Receiver of the function call will
* be the debugger context global object, however this is a subject to change.
* The following example shows a JavaScript function which when passed to
* v8::Debug::Call will return the current line of JavaScript execution.
*
* \code
* function frame_source_line(exec_state) {
* return exec_state.frame(0).sourceLine();
* }
* \endcode
*/
// TODO(dcarney): data arg should be a MaybeLocal
static MaybeLocal<Value> Call(Local<Context> context,
v8::Local<v8::Function> fun,
@@ -202,8 +203,9 @@ class V8_EXPORT Debug {
/**
* Returns a mirror object for the given object.
*/
static MaybeLocal<Value> GetMirror(Local<Context> context,
v8::Local<v8::Value> obj);
V8_DEPRECATED("No longer supported",
static MaybeLocal<Value> GetMirror(Local<Context> context,
v8::Local<v8::Value> obj));
/**
* Makes V8 process all pending debug messages.
@@ -236,7 +238,8 @@ class V8_EXPORT Debug {
* "Evaluate" debug command behavior currently is not specified in scope
* of this method.
*/
static void ProcessDebugMessages(Isolate* isolate);
V8_DEPRECATED("No longer supported",
static void ProcessDebugMessages(Isolate* isolate));
/**
* Debugger is running in its own context which is entered while debugger
@@ -245,13 +248,16 @@ class V8_EXPORT Debug {
* to change. The Context exists only when the debugger is active, i.e. at
* least one DebugEventListener or MessageHandler is set.
*/
static Local<Context> GetDebugContext(Isolate* isolate);
V8_DEPRECATED("Use v8-inspector",
static Local<Context> GetDebugContext(Isolate* isolate));
/**
* While in the debug context, this method returns the top-most non-debug
* context, if it exists.
*/
static MaybeLocal<Context> GetDebuggedContext(Isolate* isolate);
V8_DEPRECATED(
"No longer supported",
static MaybeLocal<Context> GetDebuggedContext(Isolate* isolate));
/**
* Enable/disable LiveEdit functionality for the given Isolate

deps/v8/include/v8-inspector.h (6 lines changed)

@@ -248,9 +248,9 @@ class V8_EXPORT V8Inspector {
class V8_EXPORT Channel {
public:
virtual ~Channel() {}
virtual void sendProtocolResponse(int callId,
const StringView& message) = 0;
virtual void sendProtocolNotification(const StringView& message) = 0;
virtual void sendResponse(int callId,
std::unique_ptr<StringBuffer> message) = 0;
virtual void sendNotification(std::unique_ptr<StringBuffer> message) = 0;
virtual void flushProtocolNotifications() = 0;
};
virtual std::unique_ptr<V8InspectorSession> connect(
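
For context, a minimal embedder Channel written against the new StringBuffer-based signatures above might look like the following sketch; Forward() is an assumed transport hook, not part of V8's API:

```cpp
#include <memory>

#include "v8-inspector.h"

// Sketch of a channel implementing the revised interface: responses and
// notifications now arrive as owned StringBuffer objects rather than views.
class ChannelImpl : public v8_inspector::V8Inspector::Channel {
 public:
  void sendResponse(
      int callId,
      std::unique_ptr<v8_inspector::StringBuffer> message) override {
    Forward(message->string());  // |message| is owned until this returns
  }
  void sendNotification(
      std::unique_ptr<v8_inspector::StringBuffer> message) override {
    Forward(message->string());
  }
  void flushProtocolNotifications() override {}

 private:
  // Assumed placeholder: forward the string view to the debugging frontend.
  void Forward(const v8_inspector::StringView& view);
};
```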

deps/v8/include/v8-version-string.h (33 lines changed)

@@ -0,0 +1,33 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_VERSION_STRING_H_
#define V8_VERSION_STRING_H_
#include "v8-version.h" // NOLINT(build/include)
// This is here rather than v8-version.h to keep that file simple and
// machine-processable.
#if V8_IS_CANDIDATE_VERSION
#define V8_CANDIDATE_STRING " (candidate)"
#else
#define V8_CANDIDATE_STRING ""
#endif
#define V8_SX(x) #x
#define V8_S(x) V8_SX(x)
#if V8_PATCH_LEVEL > 0
#define V8_VERSION_STRING \
V8_S(V8_MAJOR_VERSION) \
"." V8_S(V8_MINOR_VERSION) "." V8_S(V8_BUILD_NUMBER) "." V8_S( \
V8_PATCH_LEVEL) V8_CANDIDATE_STRING
#else
#define V8_VERSION_STRING \
V8_S(V8_MAJOR_VERSION) \
"." V8_S(V8_MINOR_VERSION) "." V8_S(V8_BUILD_NUMBER) V8_CANDIDATE_STRING
#endif
#endif // V8_VERSION_STRING_H_

deps/v8/include/v8-version.h (6 lines changed)

@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 5
#define V8_MINOR_VERSION 6
#define V8_BUILD_NUMBER 326
#define V8_PATCH_LEVEL 57
#define V8_MINOR_VERSION 7
#define V8_BUILD_NUMBER 492
#define V8_PATCH_LEVEL 69
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
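
Taken together, the two headers above produce the embedded version string. A quick illustrative check, assuming an include path that exposes the header directly:

```cpp
#include <cstdio>

#include "v8-version-string.h"

int main() {
  // With V8_PATCH_LEVEL 69 (> 0) and V8_IS_CANDIDATE_VERSION 0, the macro
  // stringifies and joins each component: "5" "." "7" "." "492" "." "69" "".
  std::printf("%s\n", V8_VERSION_STRING);  // prints "5.7.492.69"
  return 0;
}
```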

deps/v8/include/v8.h (465 lines changed)

File diff suppressed because it is too large

deps/v8/infra/config/cq.cfg (1 line changed)

@@ -4,7 +4,6 @@
version: 1
cq_name: "v8"
cq_status_url: "https://chromium-cq-status.appspot.com"
hide_ref_in_committed_msg: true
commit_burst_delay: 60
max_commit_burst: 1
target_ref: "refs/pending/heads/master"

deps/v8/infra/mb/mb_config.pyl (15 lines changed)

@@ -66,6 +66,7 @@
'V8 Linux64 TSAN': 'gn_release_x64_tsan',
'V8 Linux - arm64 - sim - MSAN': 'gn_release_simulate_arm64_msan',
# Clusterfuzz.
'V8 Linux64 - release builder': 'gn_release_x64_correctness_fuzzer',
'V8 Linux64 ASAN no inline - release builder':
'gn_release_x64_asan_symbolized_edge_verify_heap',
'V8 Linux64 ASAN - debug builder': 'gn_debug_x64_asan_edge',
@@ -116,8 +117,7 @@
'V8 Linux - s390 - sim': 'gyp_release_simulate_s390',
'V8 Linux - s390x - sim': 'gyp_release_simulate_s390x',
# X87.
'V8 Linux - x87 - nosnap - debug builder':
'gyp_debug_simulate_x87_no_snap',
'V8 Linux - x87 - nosnap - debug builder': 'gyp_debug_simulate_x87',
},
'client.v8.branches': {
'V8 Linux - beta branch': 'gn_release_x86',
@@ -286,6 +286,8 @@
'v8_verify_heap'],
'gn_release_x64_clang': [
'gn', 'release_bot', 'x64', 'clang', 'swarming'],
'gn_release_x64_correctness_fuzzer' : [
'gn', 'release_bot', 'x64', 'v8_correctness_fuzzer'],
'gn_release_x64_internal': [
'gn', 'release_bot', 'x64', 'swarming', 'v8_snapshot_internal'],
'gn_release_x64_minimal_symbols': [
@@ -359,9 +361,8 @@
'gn', 'release_trybot', 'x86', 'swarming'],
# Gyp debug configs for simulators.
'gyp_debug_simulate_x87_no_snap': [
'gyp', 'debug_bot_static', 'simulate_x87', 'swarming',
'v8_snapshot_none'],
'gyp_debug_simulate_x87': [
'gyp', 'debug_bot_static', 'simulate_x87', 'swarming'],
# Gyp debug configs for x86.
'gyp_debug_x86': [
@@ -626,6 +627,10 @@
'gyp_defines': 'v8_enable_i18n_support=0 icu_use_data_file_flag=0',
},
'v8_correctness_fuzzer': {
'gn_args': 'v8_correctness_fuzzer=true',
},
'v8_disable_inspector': {
'gn_args': 'v8_enable_inspector=false',
'gyp_defines': 'v8_enable_inspector=0 ',

deps/v8/src/DEPS (2 lines changed)

@@ -10,7 +10,9 @@ include_rules = [
"+src/heap/heap-inl.h",
"-src/inspector",
"-src/interpreter",
"+src/interpreter/bytecode-array-accessor.h",
"+src/interpreter/bytecode-array-iterator.h",
"+src/interpreter/bytecode-array-random-iterator.h",
"+src/interpreter/bytecode-decoder.h",
"+src/interpreter/bytecode-flags.h",
"+src/interpreter/bytecode-register.h",

deps/v8/src/accessors.cc (101 lines changed)

@@ -167,16 +167,38 @@ void Accessors::ArrayLengthSetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
DCHECK(Utils::OpenHandle(*name)->SameValue(isolate->heap()->length_string()));
Handle<JSReceiver> object = Utils::OpenHandle(*info.Holder());
Handle<JSArray> array = Handle<JSArray>::cast(object);
Handle<Object> length_obj = Utils::OpenHandle(*val);
bool was_readonly = JSArray::HasReadOnlyLength(array);
uint32_t length = 0;
if (!JSArray::AnythingToArrayLength(isolate, length_obj, &length)) {
isolate->OptionalRescheduleException(false);
return;
}
if (!was_readonly && V8_UNLIKELY(JSArray::HasReadOnlyLength(array)) &&
length != array->length()->Number()) {
// AnythingToArrayLength() may have called setter re-entrantly and modified
// its property descriptor. Don't perform this check if "length" was
// previously readonly, as this may have been called during
// DefineOwnPropertyIgnoreAttributes().
if (info.ShouldThrowOnError()) {
Factory* factory = isolate->factory();
isolate->Throw(*factory->NewTypeError(
MessageTemplate::kStrictReadOnlyProperty, Utils::OpenHandle(*name),
i::Object::TypeOf(isolate, object), object));
isolate->OptionalRescheduleException(false);
} else {
info.GetReturnValue().Set(false);
}
return;
}
JSArray::SetLength(array, length);
uint32_t actual_new_len = 0;
@@ -517,34 +539,6 @@ Handle<AccessorInfo> Accessors::ScriptSourceMappingUrlInfo(
}
//
// Accessors::ScriptIsEmbedderDebugScript
//
void Accessors::ScriptIsEmbedderDebugScriptGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.Holder());
bool is_embedder_debug_script = Script::cast(JSValue::cast(object)->value())
->origin_options()
.IsEmbedderDebugScript();
Object* res = *isolate->factory()->ToBoolean(is_embedder_debug_script);
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
Handle<AccessorInfo> Accessors::ScriptIsEmbedderDebugScriptInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("is_debugger_script")));
return MakeAccessor(isolate, name, &ScriptIsEmbedderDebugScriptGetter,
nullptr, attributes);
}
//
// Accessors::ScriptGetContextData
//
@@ -829,8 +823,8 @@ static Handle<Object> ArgumentsForInlinedFunction(
Handle<FixedArray> array = factory->NewFixedArray(argument_count);
bool should_deoptimize = false;
for (int i = 0; i < argument_count; ++i) {
// If we materialize any object, we should deopt because we might alias
// an object that was eliminated by escape analysis.
// If we materialize any object, we should deoptimize the frame because we
// might alias an object that was eliminated by escape analysis.
should_deoptimize = should_deoptimize || iter->IsMaterializedObject();
Handle<Object> value = iter->GetValue();
array->set(i, *value);
@@ -839,7 +833,7 @@ static Handle<Object> ArgumentsForInlinedFunction(
arguments->set_elements(*array);
if (should_deoptimize) {
translated_values.StoreMaterializedValuesAndDeopt();
translated_values.StoreMaterializedValuesAndDeopt(frame);
}
// Return the freshly allocated arguments object.
@@ -850,10 +844,10 @@ static Handle<Object> ArgumentsForInlinedFunction(
static int FindFunctionInFrame(JavaScriptFrame* frame,
Handle<JSFunction> function) {
DisallowHeapAllocation no_allocation;
List<JSFunction*> functions(2);
frame->GetFunctions(&functions);
for (int i = functions.length() - 1; i >= 0; i--) {
if (functions[i] == *function) return i;
List<FrameSummary> frames(2);
frame->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0; i--) {
if (*frames[i].AsJavaScript().function() == *function) return i;
}
return -1;
}
@@ -957,19 +951,16 @@ static inline bool AllowAccessToFunction(Context* current_context,
class FrameFunctionIterator {
public:
FrameFunctionIterator(Isolate* isolate, const DisallowHeapAllocation& promise)
: isolate_(isolate),
frame_iterator_(isolate),
functions_(2),
index_(0) {
GetFunctions();
: isolate_(isolate), frame_iterator_(isolate), frames_(2), index_(0) {
GetFrames();
}
JSFunction* next() {
while (true) {
if (functions_.length() == 0) return NULL;
JSFunction* next_function = functions_[index_];
if (frames_.length() == 0) return NULL;
JSFunction* next_function = *frames_[index_].AsJavaScript().function();
index_--;
if (index_ < 0) {
GetFunctions();
GetFrames();
}
// Skip functions from other origins.
if (!AllowAccessToFunction(isolate_->context(), next_function)) continue;
@@ -990,18 +981,18 @@ class FrameFunctionIterator {
}
private:
void GetFunctions() {
functions_.Rewind(0);
void GetFrames() {
frames_.Rewind(0);
if (frame_iterator_.done()) return;
JavaScriptFrame* frame = frame_iterator_.frame();
frame->GetFunctions(&functions_);
DCHECK(functions_.length() > 0);
frame->Summarize(&frames_);
DCHECK(frames_.length() > 0);
frame_iterator_.Advance();
index_ = functions_.length() - 1;
index_ = frames_.length() - 1;
}
Isolate* isolate_;
JavaScriptFrameIterator frame_iterator_;
List<JSFunction*> functions_;
List<FrameSummary> frames_;
int index_;
};
@@ -1025,10 +1016,11 @@ MaybeHandle<JSFunction> FindCaller(Isolate* isolate,
if (caller == NULL) return MaybeHandle<JSFunction>();
} while (caller->shared()->is_toplevel());
// If caller is a built-in function and caller's caller is also built-in,
// If caller is not user code and caller's caller is also not user code,
// use that instead.
JSFunction* potential_caller = caller;
while (potential_caller != NULL && potential_caller->shared()->IsBuiltin()) {
while (potential_caller != NULL &&
!potential_caller->shared()->IsUserJavaScript()) {
caller = potential_caller;
potential_caller = it.next();
}
@@ -1210,7 +1202,8 @@ void Accessors::ErrorStackGetter(
// If stack is still an accessor (this could have changed in the meantime
// since FormatStackTrace can execute arbitrary JS), replace it with a data
// property.
Handle<Object> receiver = Utils::OpenHandle(*info.This());
Handle<Object> receiver =
Utils::OpenHandle(*v8::Local<v8::Value>(info.This()));
Handle<Name> name = Utils::OpenHandle(*key);
if (IsAccessor(receiver, name, holder)) {
result = ReplaceAccessorWithDataProperty(isolate, receiver, holder, name,
@@ -1236,8 +1229,8 @@ void Accessors::ErrorStackSetter(
const v8::PropertyCallbackInfo<v8::Boolean>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<JSObject> obj =
Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
Handle<JSObject> obj = Handle<JSObject>::cast(
Utils::OpenHandle(*v8::Local<v8::Value>(info.This())));
// Clear internal properties to avoid memory leaks.
Handle<Symbol> stack_trace_symbol = isolate->factory()->stack_trace_symbol();

deps/v8/src/accessors.h (1 line changed)

@@ -43,7 +43,6 @@ class AccessorInfo;
V(ScriptType) \
V(ScriptSourceUrl) \
V(ScriptSourceMappingUrl) \
V(ScriptIsEmbedderDebugScript) \
V(StringLength)
#define ACCESSOR_SETTER_LIST(V) \

deps/v8/src/allocation.cc (17 lines changed)

@@ -32,23 +32,6 @@ void Malloced::Delete(void* p) {
}
#ifdef DEBUG
static void* invalid = static_cast<void*>(NULL);
void* Embedded::operator new(size_t size) {
UNREACHABLE();
return invalid;
}
void Embedded::operator delete(void* p) {
UNREACHABLE();
}
#endif
char* StrDup(const char* str) {
int length = StrLength(str);
char* result = NewArray<char>(length + 1);

deps/v8/src/allocation.h (19 lines changed)

@@ -26,24 +26,9 @@ class V8_EXPORT_PRIVATE Malloced {
static void Delete(void* p);
};
// A macro is used for defining the base class used for embedded instances.
// The reason is some compilers allocate a minimum of one word for the
// superclass. The macro prevents the use of new & delete in debug mode.
// In release mode we are not willing to pay this overhead.
#ifdef DEBUG
// Superclass for classes with instances allocated inside stack
// activations or inside other objects.
class Embedded {
public:
void* operator new(size_t size);
void operator delete(void* p);
};
#define BASE_EMBEDDED : public NON_EXPORTED_BASE(Embedded)
#else
// DEPRECATED
// TODO(leszeks): Delete this during a quiet period
#define BASE_EMBEDDED
#endif
// Superclass for classes only using static method functions.

deps/v8/src/api-arguments-inl.h (20 lines changed)

@@ -10,6 +10,14 @@
namespace v8 {
namespace internal {
#define SIDE_EFFECT_CHECK(ISOLATE, F, RETURN_TYPE) \
do { \
if (ISOLATE->needs_side_effect_check() && \
!PerformSideEffectCheck(ISOLATE, FUNCTION_ADDR(F))) { \
return Handle<RETURN_TYPE>(); \
} \
} while (false)
#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(F) \
F(AccessorNameGetterCallback, "get", v8::Value, Object) \
F(GenericNamedPropertyQueryCallback, "has", v8::Integer, Object) \
@@ -19,6 +27,7 @@ namespace internal {
Handle<InternalReturn> PropertyCallbackArguments::Call(Function f, \
Handle<Name> name) { \
Isolate* isolate = this->isolate(); \
SIDE_EFFECT_CHECK(isolate, f, InternalReturn); \
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
@@ -43,6 +52,7 @@ FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(WRITE_CALL_1_NAME)
Handle<InternalReturn> PropertyCallbackArguments::Call(Function f, \
uint32_t index) { \
Isolate* isolate = this->isolate(); \
SIDE_EFFECT_CHECK(isolate, f, InternalReturn); \
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
@@ -62,6 +72,7 @@ Handle<Object> PropertyCallbackArguments::Call(
GenericNamedPropertySetterCallback f, Handle<Name> name,
Handle<Object> value) {
Isolate* isolate = this->isolate();
SIDE_EFFECT_CHECK(isolate, f, Object);
RuntimeCallTimerScope timer(
isolate, &RuntimeCallStats::GenericNamedPropertySetterCallback);
VMState<EXTERNAL> state(isolate);
@@ -77,6 +88,7 @@ Handle<Object> PropertyCallbackArguments::Call(
GenericNamedPropertyDefinerCallback f, Handle<Name> name,
const v8::PropertyDescriptor& desc) {
Isolate* isolate = this->isolate();
SIDE_EFFECT_CHECK(isolate, f, Object);
RuntimeCallTimerScope timer(
isolate, &RuntimeCallStats::GenericNamedPropertyDefinerCallback);
VMState<EXTERNAL> state(isolate);
@@ -92,6 +104,7 @@ Handle<Object> PropertyCallbackArguments::Call(IndexedPropertySetterCallback f,
uint32_t index,
Handle<Object> value) {
Isolate* isolate = this->isolate();
SIDE_EFFECT_CHECK(isolate, f, Object);
RuntimeCallTimerScope timer(isolate,
&RuntimeCallStats::IndexedPropertySetterCallback);
VMState<EXTERNAL> state(isolate);
@@ -107,6 +120,7 @@ Handle<Object> PropertyCallbackArguments::Call(
IndexedPropertyDefinerCallback f, uint32_t index,
const v8::PropertyDescriptor& desc) {
Isolate* isolate = this->isolate();
SIDE_EFFECT_CHECK(isolate, f, Object);
RuntimeCallTimerScope timer(
isolate, &RuntimeCallStats::IndexedPropertyDefinerCallback);
VMState<EXTERNAL> state(isolate);
@@ -121,6 +135,10 @@ Handle<Object> PropertyCallbackArguments::Call(
void PropertyCallbackArguments::Call(AccessorNameSetterCallback f,
Handle<Name> name, Handle<Object> value) {
Isolate* isolate = this->isolate();
if (isolate->needs_side_effect_check() &&
!PerformSideEffectCheck(isolate, FUNCTION_ADDR(f))) {
return;
}
RuntimeCallTimerScope timer(isolate,
&RuntimeCallStats::AccessorNameSetterCallback);
VMState<EXTERNAL> state(isolate);
@@ -131,5 +149,7 @@ void PropertyCallbackArguments::Call(AccessorNameSetterCallback f,
f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
}
#undef SIDE_EFFECT_CHECK
} // namespace internal
} // namespace v8
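All the checks added across api-arguments follow one shape: if the isolate demands side-effect-free execution (debug-evaluate), vet the callback's address with the debugger and suppress the call, signalling failure with an empty handle. A standalone sketch of that control flow, using simplified stand-ins for Isolate, Handle, and the debugger's whitelist:

#include <cstdint>
#include <cstdio>
#include <set>

using Address = std::uintptr_t;

struct Object {
  int payload;
};

template <class T>
struct Handle {
  T* location = nullptr;
  bool is_null() const { return location == nullptr; }
};

// Simplified stand-ins for the isolate flag and the debugger whitelist.
struct Isolate {
  bool needs_side_effect_check = false;
  std::set<Address> side_effect_free;
  bool PerformSideEffectCheckForCallback(Address f) {
    return side_effect_free.count(f) != 0;
  }
};

static Object the_object = {7};
Object* MyCallback() { return &the_object; }

Handle<Object> CallCallback(Isolate* isolate, Object* (*f)()) {
  // The gate added throughout api-arguments: bail out before invoking an
  // embedder callback that might have side effects.
  if (isolate->needs_side_effect_check &&
      !isolate->PerformSideEffectCheckForCallback(
          reinterpret_cast<Address>(f))) {
    return Handle<Object>();  // empty handle: the call was suppressed
  }
  Handle<Object> result;
  result.location = f();
  return result;
}

int main() {
  Isolate isolate;
  isolate.needs_side_effect_check = true;
  std::printf("suppressed: %d\n",
              CallCallback(&isolate, MyCallback).is_null());
  isolate.side_effect_free.insert(reinterpret_cast<Address>(MyCallback));
  std::printf("suppressed: %d\n",
              CallCallback(&isolate, MyCallback).is_null());
  return 0;
}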

15
deps/v8/src/api-arguments.cc

@@ -4,6 +4,8 @@
#include "src/api-arguments.h"
#include "src/debug/debug.h"
#include "src/objects-inl.h"
#include "src/tracing/trace-event.h"
#include "src/vm-state-inl.h"
@@ -12,6 +14,10 @@ namespace internal {
Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
Isolate* isolate = this->isolate();
if (isolate->needs_side_effect_check() &&
!isolate->debug()->PerformSideEffectCheckForCallback(FUNCTION_ADDR(f))) {
return Handle<Object>();
}
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::FunctionCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
@@ -23,6 +29,10 @@ Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
Handle<JSObject> PropertyCallbackArguments::Call(
IndexedPropertyEnumeratorCallback f) {
Isolate* isolate = this->isolate();
if (isolate->needs_side_effect_check() &&
!isolate->debug()->PerformSideEffectCheckForCallback(FUNCTION_ADDR(f))) {
return Handle<JSObject>();
}
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::PropertyCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
@@ -31,5 +41,10 @@ Handle<JSObject> PropertyCallbackArguments::Call(
return GetReturnValue<JSObject>(isolate);
}
bool PropertyCallbackArguments::PerformSideEffectCheck(Isolate* isolate,
Address function) {
return isolate->debug()->PerformSideEffectCheckForCallback(function);
}
} // namespace internal
} // namespace v8

2
deps/v8/src/api-arguments.h

@@ -136,6 +136,8 @@ class PropertyCallbackArguments
inline JSObject* holder() {
return JSObject::cast(this->begin()[T::kHolderIndex]);
}
bool PerformSideEffectCheck(Isolate* isolate, Address function);
};
class FunctionCallbackArguments

3
deps/v8/src/api-experimental.cc

@@ -8,10 +8,11 @@
#include "src/api-experimental.h"
#include "include/v8.h"
#include "include/v8-experimental.h"
#include "include/v8.h"
#include "src/api.h"
#include "src/fast-accessor-assembler.h"
#include "src/objects-inl.h"
namespace {

67
deps/v8/src/api-natives.cc

@@ -395,6 +395,28 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
return result;
}
namespace {
MaybeHandle<Object> GetInstancePrototype(Isolate* isolate,
Object* function_template) {
// Enter a new scope. Recursion could otherwise create a lot of handles.
HandleScope scope(isolate);
Handle<JSFunction> parent_instance;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, parent_instance,
InstantiateFunction(
isolate,
handle(FunctionTemplateInfo::cast(function_template), isolate)),
JSFunction);
Handle<Object> instance_prototype;
// TODO(cbruni): decide what to do here.
ASSIGN_RETURN_ON_EXCEPTION(
isolate, instance_prototype,
JSObject::GetProperty(parent_instance,
isolate->factory()->prototype_string()),
JSFunction);
return scope.CloseAndEscape(instance_prototype);
}
} // namespace
MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
Handle<FunctionTemplateInfo> data,
@@ -406,11 +428,18 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
return Handle<JSFunction>::cast(result);
}
}
Handle<JSObject> prototype;
Handle<Object> prototype;
if (!data->remove_prototype()) {
Object* prototype_templ = data->prototype_template();
if (prototype_templ->IsUndefined(isolate)) {
prototype = isolate->factory()->NewJSObject(isolate->object_function());
Object* protoype_provider_templ = data->prototype_provider_template();
if (protoype_provider_templ->IsUndefined(isolate)) {
prototype = isolate->factory()->NewJSObject(isolate->object_function());
} else {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, prototype,
GetInstancePrototype(isolate, protoype_provider_templ), JSFunction);
}
} else {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, prototype,
@@ -422,22 +451,12 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
}
Object* parent = data->parent_template();
if (!parent->IsUndefined(isolate)) {
// Enter a new scope. Recursion could otherwise create a lot of handles.
HandleScope scope(isolate);
Handle<JSFunction> parent_instance;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, parent_instance,
InstantiateFunction(
isolate, handle(FunctionTemplateInfo::cast(parent), isolate)),
JSFunction);
// TODO(dcarney): decide what to do here.
Handle<Object> parent_prototype;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, parent_prototype,
JSObject::GetProperty(parent_instance,
isolate->factory()->prototype_string()),
JSFunction);
JSObject::ForceSetPrototype(prototype, parent_prototype);
ASSIGN_RETURN_ON_EXCEPTION(isolate, parent_prototype,
GetInstancePrototype(isolate, parent),
JSFunction);
JSObject::ForceSetPrototype(Handle<JSObject>::cast(prototype),
parent_prototype);
}
}
Handle<JSFunction> function = ApiNatives::CreateApiFunction(
@@ -531,7 +550,7 @@ MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
Handle<Name> name, Handle<Object> value,
PropertyAttributes attributes) {
PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell);
auto details_handle = handle(details.AsSmi(), isolate);
Handle<Object> data[] = {name, details_handle, value};
AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@@ -543,7 +562,7 @@ void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
PropertyAttributes attributes) {
auto value = handle(Smi::FromInt(intrinsic), isolate);
auto intrinsic_marker = isolate->factory()->true_value();
PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell);
auto details_handle = handle(details.AsSmi(), isolate);
Handle<Object> data[] = {name, intrinsic_marker, details_handle, value};
AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@@ -556,7 +575,7 @@ void ApiNatives::AddAccessorProperty(Isolate* isolate,
Handle<FunctionTemplateInfo> getter,
Handle<FunctionTemplateInfo> setter,
PropertyAttributes attributes) {
PropertyDetails details(attributes, ACCESSOR, 0, PropertyCellType::kNoCell);
PropertyDetails details(kAccessor, attributes, 0, PropertyCellType::kNoCell);
auto details_handle = handle(details.AsSmi(), isolate);
Handle<Object> data[] = {name, details_handle, getter, setter};
AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@@ -606,7 +625,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
if (prototype->IsTheHole(isolate)) {
prototype = isolate->factory()->NewFunctionPrototype(result);
} else {
} else if (obj->prototype_provider_template()->IsUndefined(isolate)) {
JSObject::AddProperty(Handle<JSObject>::cast(prototype),
isolate->factory()->constructor_string(), result,
DONT_ENUM);
@@ -656,6 +675,12 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
// Mark as undetectable if needed.
if (obj->undetectable()) {
// We only allow callable undetectable receivers here, since this whole
// undetectable business is only to support document.all, which is both
// undetectable and callable. If we ever see the need to have an object
// that is undetectable but not callable, we need to update the types.h
// to allow encoding this.
CHECK(!obj->instance_call_handler()->IsUndefined(isolate));
map->set_is_undetectable();
}
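The PropertyDetails call sites in this file also change shape: the property kind, now the kData/kAccessor enum (formerly DATA/ACCESSOR), moves in front of the attributes. A toy sketch of how distinct enum parameter types keep such reorderings honest, with stand-in enums and a stripped-down constructor (the real PropertyDetails packs much more):

#include <cstdio>

enum PropertyKind { kData, kAccessor };
enum PropertyAttributes { NONE = 0, DONT_ENUM = 2 };

struct PropertyDetails {
  // Kind-first, as in the new call sites: the two enum types cannot be
  // swapped silently, because C++ forbids implicit conversion between
  // distinct enumeration types.
  PropertyDetails(PropertyKind kind, PropertyAttributes attributes)
      : kind_(kind), attributes_(attributes) {}
  PropertyKind kind_;
  PropertyAttributes attributes_;
};

int main() {
  PropertyDetails details(kData, DONT_ENUM);
  // PropertyDetails bad(DONT_ENUM, kData);  // fails to compile: the
  //                                         // arguments are in the wrong
  //                                         // order.
  std::printf("kind=%d attrs=%d\n", details.kind_, details.attributes_);
  return 0;
}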

727
deps/v8/src/api.cc

File diff suppressed because it is too large

3
deps/v8/src/api.h

@@ -11,7 +11,6 @@
#include "src/factory.h"
#include "src/isolate.h"
#include "src/list.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -110,7 +109,7 @@ class RegisteredExtension {
V(StackFrame, JSObject) \
V(Proxy, JSProxy) \
V(NativeWeakMap, JSWeakMap) \
V(DebugInterface::Script, Script)
V(debug::Script, Script)
class Utils {
public:

3
deps/v8/src/arguments.h

@@ -41,7 +41,8 @@ class Arguments BASE_EMBEDDED {
index * kPointerSize));
}
template <class S> Handle<S> at(int index) {
template <class S = Object>
Handle<S> at(int index) {
Object** value = &((*this)[index]);
// This cast checks that the object we're accessing does indeed have the
// expected type.
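The new default template argument lets call sites that only need a generic handle drop the explicit type. A standalone sketch of the ergonomics, with Handle, Object, and String as simplified stand-ins for the V8 types:

#include <cstdio>

struct Object {};
struct String : Object {};

template <class T>
struct Handle {
  T* location = nullptr;
};

struct Arguments {
  // Default template argument, as in the new Arguments::at: callers that
  // just want a generic Object handle no longer spell the type out.
  template <class S = Object>
  Handle<S> at(int /*index*/) {
    return Handle<S>();  // the real code casts the slot and checks its type
  }
};

int main() {
  Arguments args;
  Handle<Object> generic = args.at(0);        // uses the S = Object default
  Handle<String> typed = args.at<String>(1);  // explicit type still works
  (void)generic;
  (void)typed;
  std::printf("ok\n");
  return 0;
}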

2
deps/v8/src/arm/assembler-arm-inl.h

@@ -48,7 +48,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
bool CpuFeatures::SupportsSimd128() { return false; }
bool CpuFeatures::SupportsSimd128() { return true; }
int DoubleRegister::NumRegisters() {
return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;

786
deps/v8/src/arm/assembler-arm.cc

@@ -351,13 +351,18 @@ Address RelocInfo::wasm_global_reference() {
return Assembler::target_address_at(pc_, host_);
}
uint32_t RelocInfo::wasm_function_table_size_reference() {
DCHECK(IsWasmFunctionTableSizeReference(rmode_));
return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
}
void RelocInfo::unchecked_update_wasm_memory_reference(
Address address, ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
}
void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
ICacheFlushMode flush_mode) {
void RelocInfo::unchecked_update_wasm_size(uint32_t size,
ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_,
reinterpret_cast<Address>(size), flush_mode);
}
@@ -483,30 +488,6 @@ void NeonMemOperand::SetAlignment(int align) {
}
}
NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
base_ = base;
switch (registers_count) {
case 1:
type_ = nlt_1;
break;
case 2:
type_ = nlt_2;
break;
case 3:
type_ = nlt_3;
break;
case 4:
type_ = nlt_4;
break;
default:
UNREACHABLE();
type_ = nlt_1;
break;
}
}
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
@@ -2873,7 +2854,6 @@ void Assembler::vmov(const DwVfpRegister dst,
vm);
}
void Assembler::vmov(const DwVfpRegister dst,
const VmovIndex index,
const Register src,
@@ -2969,7 +2949,6 @@ void Assembler::vmov(const Register dst,
emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
}
// Type of data to read from or write to VFP register.
// Used as specifier in generic vcvt instruction.
enum VFPType { S32, U32, F32, F64 };
@@ -3903,28 +3882,743 @@ void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
(dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
}
void Assembler::vswp(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
DCHECK(VfpRegisterIsAvailable(srcdst0));
DCHECK(VfpRegisterIsAvailable(srcdst1));
DCHECK(!srcdst0.is(kScratchDoubleReg));
DCHECK(!srcdst1.is(kScratchDoubleReg));
if (srcdst0.is(srcdst1)) return; // Swapping aliased registers emits nothing.
if (CpuFeatures::IsSupported(NEON)) {
// Instruction details available in ARM DDI 0406C.b, A8.8.418.
// 1111(31-28) | 00111(27-23) | D(22) | 110010(21-16) |
// Vd(15-12) | 000000(11-6) | M(5) | 0(4) | Vm(3-0)
int vd, d;
srcdst0.split_code(&vd, &d);
int vm, m;
srcdst1.split_code(&vm, &m);
emit(0xFU * B28 | 7 * B23 | d * B22 | 0x32 * B16 | vd * B12 | m * B5 | vm);
} else {
vmov(kScratchDoubleReg, srcdst0);
vmov(srcdst0, srcdst1);
vmov(srcdst1, kScratchDoubleReg);
}
}
static int EncodeScalar(NeonDataType dt, int index) {
int opc1_opc2 = 0;
DCHECK_LE(0, index);
switch (dt) {
case NeonS8:
case NeonU8:
DCHECK_GT(8, index);
opc1_opc2 = 0x8 | index;
break;
case NeonS16:
case NeonU16:
DCHECK_GT(4, index);
opc1_opc2 = 0x1 | (index << 1);
break;
case NeonS32:
case NeonU32:
DCHECK_GT(2, index);
opc1_opc2 = index << 2;
break;
default:
UNREACHABLE();
break;
}
return (opc1_opc2 >> 2) * B21 | (opc1_opc2 & 0x3) * B5;
}
void Assembler::vmov(NeonDataType dt, DwVfpRegister dst, int index,
Register src) {
// Instruction details available in ARM DDI 0406C.b, A8.8.940.
// vmov ARM core register to scalar.
DCHECK(dt == NeonS32 || dt == NeonU32 || IsEnabled(NEON));
int vd, d;
dst.split_code(&vd, &d);
int opc1_opc2 = EncodeScalar(dt, index);
emit(0xEEu * B24 | vd * B16 | src.code() * B12 | 0xB * B8 | d * B7 | B4 |
opc1_opc2);
}
void Assembler::vmov(NeonDataType dt, Register dst, DwVfpRegister src,
int index) {
// Instruction details available in ARM DDI 0406C.b, A8.8.942.
// vmov Arm scalar to core register.
DCHECK(dt == NeonS32 || dt == NeonU32 || IsEnabled(NEON));
int vn, n;
src.split_code(&vn, &n);
int opc1_opc2 = EncodeScalar(dt, index);
int u = (dt & NeonDataTypeUMask) != 0 ? 1 : 0;
emit(0xEEu * B24 | u * B23 | B20 | vn * B16 | dst.code() * B12 | 0xB * B8 |
n * B7 | B4 | opc1_opc2);
}
void Assembler::vmov(const QwNeonRegister dst, const QwNeonRegister src) {
// Instruction details available in ARM DDI 0406C.b, A8-938.
// vmov is encoded as vorr.
vorr(dst, src, src);
}
void Assembler::vmvn(const QwNeonRegister dst, const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
// Instruction details available in ARM DDI 0406C.b, A8-966.
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(VfpRegisterIsAvailable(src));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(0x1E7U * B23 | d * B22 | 3 * B20 | vd * B12 | 0x17 * B6 | m * B5 | vm);
}
void Assembler::vswp(DwVfpRegister dst, DwVfpRegister src) {
// Instruction details available in ARM DDI 0406C.b, A8.8.418.
// 1111(31-28) | 00111(27-23) | D(22) | 110010(21-16) |
// Vd(15-12) | 000000(11-6) | M(5) | 0(4) | Vm(3-0)
DCHECK(IsEnabled(NEON));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(0xFU * B28 | 7 * B23 | d * B22 | 0x32 * B16 | vd * B12 | m * B5 | vm);
}
void Assembler::vswp(QwNeonRegister dst, QwNeonRegister src) {
// Instruction details available in ARM DDI 0406C.b, A8.8.418.
// 1111(31-28) | 00111(27-23) | D(22) | 110010(21-16) |
// Vd(15-12) | 000000(11-6) | M(5) | 0(4) | Vm(3-0)
DCHECK(IsEnabled(NEON));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(0xFU * B28 | 7 * B23 | d * B22 | 0x32 * B16 | vd * B12 | B6 | m * B5 |
vm);
}
void Assembler::vdup(NeonSize size, const QwNeonRegister dst,
const Register src) {
DCHECK(IsEnabled(NEON));
// Instruction details available in ARM DDI 0406C.b, A8-886.
int B = 0, E = 0;
switch (size) {
case Neon8:
B = 1;
break;
case Neon16:
E = 1;
break;
case Neon32:
break;
default:
UNREACHABLE();
break;
}
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D * B23 | B * B22 | B21 | vd * B16 | src.code() * B12 |
0xB * B8 | d * B7 | E * B5 | B4);
}
void Assembler::vdup(const QwNeonRegister dst, const SwVfpRegister src) {
DCHECK(IsEnabled(NEON));
// Instruction details available in ARM DDI 0406C.b, A8-884.
int index = src.code() & 1;
int d_reg = src.code() / 2;
int imm4 = 4 | index << 3; // esize = 32, index in bit 3.
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
DwVfpRegister::from_code(d_reg).split_code(&vm, &m);
emit(0x1E7U * B23 | d * B22 | 0x3 * B20 | imm4 * B16 | vd * B12 | 0x18 * B7 |
B6 | m * B5 | vm);
}
// Encode NEON vcvt.src_type.dst_type instruction.
static Instr EncodeNeonVCVT(const VFPType dst_type, const QwNeonRegister dst,
const VFPType src_type, const QwNeonRegister src) {
DCHECK(src_type != dst_type);
DCHECK(src_type == F32 || dst_type == F32);
// Instruction details available in ARM DDI 0406C.b, A8.8.868.
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
int op = 0;
if (src_type == F32) {
DCHECK(dst_type == S32 || dst_type == U32);
op = dst_type == U32 ? 3 : 2;
} else {
DCHECK(src_type == S32 || src_type == U32);
op = src_type == U32 ? 1 : 0;
}
return 0x1E7U * B23 | d * B22 | 0x3B * B16 | vd * B12 | 0x3 * B9 | op * B7 |
B6 | m * B5 | vm;
}
void Assembler::vcvt_f32_s32(const QwNeonRegister dst,
const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(VfpRegisterIsAvailable(src));
emit(EncodeNeonVCVT(F32, dst, S32, src));
}
void Assembler::vcvt_f32_u32(const QwNeonRegister dst,
const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(VfpRegisterIsAvailable(src));
emit(EncodeNeonVCVT(F32, dst, U32, src));
}
void Assembler::vcvt_s32_f32(const QwNeonRegister dst,
const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(VfpRegisterIsAvailable(src));
emit(EncodeNeonVCVT(S32, dst, F32, src));
}
void Assembler::vcvt_u32_f32(const QwNeonRegister dst,
const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(VfpRegisterIsAvailable(src));
emit(EncodeNeonVCVT(U32, dst, F32, src));
}
// op is instr->Bits(11, 7).
static Instr EncodeNeonUnaryOp(int op, bool is_float, NeonSize size,
const QwNeonRegister dst,
const QwNeonRegister src) {
DCHECK_IMPLIES(is_float, size == Neon32);
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
int F = is_float ? 1 : 0;
return 0x1E7U * B23 | d * B22 | 0x3 * B20 | size * B18 | B16 | vd * B12 |
F * B10 | B8 | op * B7 | B6 | m * B5 | vm;
}
void Assembler::vabs(const QwNeonRegister dst, const QwNeonRegister src) {
// Qd = vabs.f<size>(Qn, Qm) SIMD floating point absolute value.
// Instruction details available in ARM DDI 0406C.b, A8.8.824.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonUnaryOp(0x6, true, Neon32, dst, src));
}
void Assembler::vabs(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src) {
// Qd = vabs.s<size>(Qn, Qm) SIMD integer absolute value.
// Instruction details available in ARM DDI 0406C.b, A8.8.824.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonUnaryOp(0x6, false, size, dst, src));
}
void Assembler::vneg(const QwNeonRegister dst, const QwNeonRegister src) {
// Qd = vneg.f<size>(Qn, Qm) SIMD floating point negate.
// Instruction details available in ARM DDI 0406C.b, A8.8.968.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonUnaryOp(0x7, true, Neon32, dst, src));
}
void Assembler::vneg(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src) {
// Qd = vneg.s<size>(Qn, Qm) SIMD integer negate.
// Instruction details available in ARM DDI 0406C.b, A8.8.968.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonUnaryOp(0x7, false, size, dst, src));
}
void Assembler::veor(DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2) {
// Dd = veor(Dn, Dm) 64 bit integer exclusive OR.
// Instruction details available in ARM DDI 0406C.b, A8.8.888.
DCHECK(IsEnabled(NEON));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
emit(0x1E6U * B23 | d * B22 | vn * B16 | vd * B12 | B8 | n * B7 | m * B5 |
B4 | vm);
}
enum BinaryBitwiseOp { VAND, VBIC, VBIF, VBIT, VBSL, VEOR, VORR, VORN };
static Instr EncodeNeonBinaryBitwiseOp(BinaryBitwiseOp op,
const QwNeonRegister dst,
const QwNeonRegister src1,
const QwNeonRegister src2) {
int op_encoding = 0;
switch (op) {
case VBIC:
op_encoding = 0x1 * B20;
break;
case VBIF:
op_encoding = B24 | 0x3 * B20;
break;
case VBIT:
op_encoding = B24 | 0x2 * B20;
break;
case VBSL:
op_encoding = B24 | 0x1 * B20;
break;
case VEOR:
op_encoding = B24;
break;
case VORR:
op_encoding = 0x2 * B20;
break;
case VORN:
op_encoding = 0x3 * B20;
break;
case VAND:
// op_encoding is 0.
break;
default:
UNREACHABLE();
break;
}
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
return 0x1E4U * B23 | op_encoding | d * B22 | vn * B16 | vd * B12 | B8 |
n * B7 | B6 | m * B5 | B4 | vm;
}
void Assembler::vand(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
// Qd = vand(Qn, Qm) SIMD AND.
// Instruction details available in ARM DDI 0406C.b, A8.8.836.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonBinaryBitwiseOp(VAND, dst, src1, src2));
}
void Assembler::vbsl(QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vbsl(Qn, Qm) SIMD bitwise select.
// Instruction details available in ARM DDI 0406C.b, A8-844.
emit(EncodeNeonBinaryBitwiseOp(VBSL, dst, src1, src2));
}
void Assembler::veor(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
// Qd = veor(Qn, Qm) SIMD exclusive OR.
// Instruction details available in ARM DDI 0406C.b, A8.8.888.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonBinaryBitwiseOp(VEOR, dst, src1, src2));
}
void Assembler::vorr(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
// Qd = vorr(Qn, Qm) SIMD OR.
// Instruction details available in ARM DDI 0406C.b, A8.8.976.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonBinaryBitwiseOp(VORR, dst, src1, src2));
}
void Assembler::vadd(QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vadd(Qn, Qm) SIMD floating point addition.
// Instruction details available in ARM DDI 0406C.b, A8-830.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
emit(0x1E4U * B23 | d * B22 | vn * B16 | vd * B12 | 0xD * B8 | n * B7 | B6 |
m * B5 | vm);
}
void Assembler::vadd(NeonSize size, QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vadd(Qn, Qm) SIMD integer addition.
// Instruction details available in ARM DDI 0406C.b, A8-828.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int sz = static_cast<int>(size);
emit(0x1E4U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x8 * B8 |
n * B7 | B6 | m * B5 | vm);
}
void Assembler::vsub(QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vsub(Qn, Qm) SIMD floating point subtraction.
// Instruction details available in ARM DDI 0406C.b, A8-1086.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
emit(0x1E4U * B23 | d * B22 | B21 | vn * B16 | vd * B12 | 0xD * B8 | n * B7 |
B6 | m * B5 | vm);
}
void Assembler::vsub(NeonSize size, QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vsub(Qn, Qm) SIMD integer subtraction.
// Instruction details available in ARM DDI 0406C.b, A8-1084.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int sz = static_cast<int>(size);
emit(0x1E6U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x8 * B8 |
n * B7 | B6 | m * B5 | vm);
}
void Assembler::vmul(QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vmul(Qn, Qm) SIMD floating point multiply.
// Instruction details available in ARM DDI 0406C.b, A8-958.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
emit(0x1E6U * B23 | d * B22 | vn * B16 | vd * B12 | 0xD * B8 | n * B7 | B6 |
m * B5 | B4 | vm);
}
void Assembler::vmul(NeonSize size, QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vmul(Qn, Qm) SIMD integer multiply.
// Instruction details available in ARM DDI 0406C.b, A8-960.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int sz = static_cast<int>(size);
emit(0x1E4U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x9 * B8 |
n * B7 | B6 | m * B5 | B4 | vm);
}
static Instr EncodeNeonMinMax(bool is_min, QwNeonRegister dst,
QwNeonRegister src1, QwNeonRegister src2) {
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int min = is_min ? 1 : 0;
return 0x1E4U * B23 | d * B22 | min * B21 | vn * B16 | vd * B12 | 0xF * B8 |
n * B7 | B6 | m * B5 | vm;
}
static Instr EncodeNeonMinMax(bool is_min, NeonDataType dt, QwNeonRegister dst,
QwNeonRegister src1, QwNeonRegister src2) {
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int min = is_min ? 1 : 0;
int size = (dt & NeonDataTypeSizeMask) / 2;
int U = dt & NeonDataTypeUMask;
return 0x1E4U * B23 | U | d * B22 | size * B20 | vn * B16 | vd * B12 |
0x6 * B8 | B6 | m * B5 | min * B4 | vm;
}
void Assembler::vmin(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vmin(Qn, Qm) SIMD floating point MIN.
// Instruction details available in ARM DDI 0406C.b, A8-928.
emit(EncodeNeonMinMax(true, dst, src1, src2));
}
void Assembler::vmin(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vmin(Qn, Qm) SIMD integer MIN.
// Instruction details available in ARM DDI 0406C.b, A8-926.
emit(EncodeNeonMinMax(true, dt, dst, src1, src2));
}
void Assembler::vmax(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vmax(Qn, Qm) SIMD floating point MAX.
// Instruction details available in ARM DDI 0406C.b, A8-928.
emit(EncodeNeonMinMax(false, dst, src1, src2));
}
void Assembler::vmax(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vmax(Qn, Qm) SIMD integer MAX.
// Instruction details available in ARM DDI 0406C.b, A8-926.
emit(EncodeNeonMinMax(false, dt, dst, src1, src2));
}
static Instr EncodeNeonEstimateOp(bool is_rsqrt, QwNeonRegister dst,
QwNeonRegister src) {
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
int rsqrt = is_rsqrt ? 1 : 0;
return 0x1E7U * B23 | d * B22 | 0x3B * B16 | vd * B12 | 0x5 * B8 |
rsqrt * B7 | B6 | m * B5 | vm;
}
void Assembler::vrecpe(const QwNeonRegister dst, const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
// Qd = vrecpe(Qm) SIMD reciprocal estimate.
// Instruction details available in ARM DDI 0406C.b, A8-1024.
emit(EncodeNeonEstimateOp(false, dst, src));
}
void Assembler::vrsqrte(const QwNeonRegister dst, const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
// Qd = vrsqrte(Qm) SIMD reciprocal square root estimate.
// Instruction details available in ARM DDI 0406C.b, A8-1038.
emit(EncodeNeonEstimateOp(true, dst, src));
}
static Instr EncodeNeonRefinementOp(bool is_rsqrt, QwNeonRegister dst,
QwNeonRegister src1, QwNeonRegister src2) {
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int rsqrt = is_rsqrt ? 1 : 0;
return 0x1E4U * B23 | d * B22 | rsqrt * B21 | vn * B16 | vd * B12 | 0xF * B8 |
n * B7 | B6 | m * B5 | B4 | vm;
}
void Assembler::vrecps(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vrecps(Qn, Qm) SIMD reciprocal refinement step.
// Instruction details available in ARM DDI 0406C.b, A8-1026.
emit(EncodeNeonRefinementOp(false, dst, src1, src2));
}
void Assembler::vrsqrts(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vrsqrts(Qn, Qm) SIMD reciprocal square root refinement step.
// Instruction details available in ARM DDI 0406C.b, A8-1040.
emit(EncodeNeonRefinementOp(true, dst, src1, src2));
}
void Assembler::vtst(NeonSize size, QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vtst(Qn, Qm) SIMD test integer operands.
// Instruction details available in ARM DDI 0406C.b, A8-1098.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int sz = static_cast<int>(size);
emit(0x1E4U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x8 * B8 |
n * B7 | B6 | m * B5 | B4 | vm);
}
void Assembler::vceq(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vceq(Qn, Qm) SIMD floating point compare equal.
// Instruction details available in ARM DDI 0406C.b, A8-844.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
emit(0x1E4U * B23 | d * B22 | vn * B16 | vd * B12 | 0xe * B8 | n * B7 | B6 |
m * B5 | vm);
}
void Assembler::vceq(NeonSize size, QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vceq(Qn, Qm) SIMD integer compare equal.
// Instruction details available in ARM DDI 0406C.b, A8-844.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int sz = static_cast<int>(size);
emit(0x1E6U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x8 * B8 |
n * B7 | B6 | m * B5 | B4 | vm);
}
static Instr EncodeNeonCompareOp(const QwNeonRegister dst,
const QwNeonRegister src1,
const QwNeonRegister src2, Condition cond) {
DCHECK(cond == ge || cond == gt);
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int is_gt = (cond == gt) ? 1 : 0;
return 0x1E6U * B23 | d * B22 | is_gt * B21 | vn * B16 | vd * B12 | 0xe * B8 |
n * B7 | B6 | m * B5 | vm;
}
static Instr EncodeNeonCompareOp(NeonDataType dt, const QwNeonRegister dst,
const QwNeonRegister src1,
const QwNeonRegister src2, Condition cond) {
DCHECK(cond == ge || cond == gt);
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int size = (dt & NeonDataTypeSizeMask) / 2;
int U = dt & NeonDataTypeUMask;
int is_ge = (cond == ge) ? 1 : 0;
return 0x1E4U * B23 | U | d * B22 | size * B20 | vn * B16 | vd * B12 |
0x3 * B8 | n * B7 | B6 | m * B5 | is_ge * B4 | vm;
}
void Assembler::vcge(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vcge(Qn, Qm) SIMD floating point compare greater or equal.
// Instruction details available in ARM DDI 0406C.b, A8-848.
emit(EncodeNeonCompareOp(dst, src1, src2, ge));
}
void Assembler::vcge(NeonDataType dt, QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vcge(Qn, Qm) SIMD integer compare greater or equal.
// Instruction details available in ARM DDI 0406C.b, A8-848.
emit(EncodeNeonCompareOp(dt, dst, src1, src2, ge));
}
void Assembler::vcgt(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vcgt(Qn, Qm) SIMD floating point compare greater than.
// Instruction details available in ARM DDI 0406C.b, A8-852.
emit(EncodeNeonCompareOp(dst, src1, src2, gt));
}
void Assembler::vcgt(NeonDataType dt, QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vcgt(Qn, Qm) SIMD integer compare greater than.
// Instruction details available in ARM DDI 0406C.b, A8-852.
emit(EncodeNeonCompareOp(dt, dst, src1, src2, gt));
}
void Assembler::vext(QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2, int bytes) {
DCHECK(IsEnabled(NEON));
// Qd = vext(Qn, Qm) SIMD byte extract.
// Instruction details available in ARM DDI 0406C.b, A8-890.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
DCHECK_GT(16, bytes);
emit(0x1E5U * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 | bytes * B8 |
n * B7 | B6 | m * B5 | vm);
}
void Assembler::vzip(NeonSize size, QwNeonRegister dst,
const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
// Qd = vzip.<size>(Qn, Qm) SIMD zip (interleave).
// Instruction details available in ARM DDI 0406C.b, A8-1102.
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
int sz = static_cast<int>(size);
emit(0x1E7U * B23 | d * B22 | 0x3 * B20 | sz * B18 | 2 * B16 | vd * B12 |
0x3 * B7 | B6 | m * B5 | vm);
}
static Instr EncodeNeonVREV(NeonSize op_size, NeonSize size,
const QwNeonRegister dst,
const QwNeonRegister src) {
// Qd = vrev<op_size>.<size>(Qn, Qm) SIMD scalar reverse.
// Instruction details available in ARM DDI 0406C.b, A8-1028.
DCHECK_GT(op_size, static_cast<int>(size));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
int sz = static_cast<int>(size);
int op = static_cast<int>(Neon64) - static_cast<int>(op_size);
return 0x1E7U * B23 | d * B22 | 0x3 * B20 | sz * B18 | vd * B12 | op * B7 |
B6 | m * B5 | vm;
}
void Assembler::vrev16(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
emit(EncodeNeonVREV(Neon16, size, dst, src));
}
void Assembler::vrev32(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
emit(EncodeNeonVREV(Neon32, size, dst, src));
}
void Assembler::vrev64(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
emit(EncodeNeonVREV(Neon64, size, dst, src));
}
// Encode NEON vtbl / vtbx instruction.
static Instr EncodeNeonVTB(const DwVfpRegister dst, const NeonListOperand& list,
const DwVfpRegister index, bool vtbx) {
// Dd = vtbl(table, Dm) SIMD vector permute, zero at out of range indices.
// Instruction details available in ARM DDI 0406C.b, A8-1094.
// Dd = vtbx(table, Dm) SIMD vector permute, skip out of range indices.
// Instruction details available in ARM DDI 0406C.b, A8-1094.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
list.base().split_code(&vn, &n);
int vm, m;
index.split_code(&vm, &m);
int op = vtbx ? 1 : 0; // vtbl = 0, vtbx = 1.
return 0x1E7U * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 | 0x2 * B10 |
list.length() * B8 | n * B7 | op * B6 | m * B5 | vm;
}
void Assembler::vtbl(const DwVfpRegister dst, const NeonListOperand& list,
const DwVfpRegister index) {
DCHECK(IsEnabled(NEON));
emit(EncodeNeonVTB(dst, list, index, false));
}
void Assembler::vtbx(const DwVfpRegister dst, const NeonListOperand& list,
const DwVfpRegister index) {
DCHECK(IsEnabled(NEON));
emit(EncodeNeonVTB(dst, list, index, true));
}
// Pseudo instructions.
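Nearly every emitter above opens with split_code(): ARM encodes a 5-bit register number as a 4-bit field (Vd/Vn/Vm) plus a single bit (D/N/M) placed elsewhere in the instruction word. A standalone sketch of that split; SplitCode below is an illustration, not the V8 helper:

#include <cassert>
#include <cstdio>

// Split a 5-bit VFP/NEON register code into the 4-bit field that lives in
// the instruction word and the single high bit that is encoded apart.
void SplitCode(int code, int* vx, int* x) {
  assert(code >= 0 && code < 32);
  *vx = code & 0x0F;        // low four bits -> Vd/Vn/Vm field
  *x = (code & 0x10) >> 4;  // high bit -> d/n/m bit
}

int main() {
  int vd, d;
  SplitCode(19, &vd, &d);  // e.g. register d19
  std::printf("vd=%d d=%d\n", vd, d);
  assert(((d << 4) | vd) == 19);  // the two fields round-trip to the code
  return 0;
}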

131
deps/v8/src/arm/assembler-arm.h

@@ -302,6 +302,20 @@ struct QwNeonRegister {
*m = (encoded_code & 0x10) >> 4;
*vm = encoded_code & 0x0F;
}
DwVfpRegister low() const {
DwVfpRegister reg;
reg.reg_code = reg_code * 2;
DCHECK(reg.is_valid());
return reg;
}
DwVfpRegister high() const {
DwVfpRegister reg;
reg.reg_code = reg_code * 2 + 1;
DCHECK(reg.is_valid());
return reg;
}
int reg_code;
};
@@ -403,9 +417,11 @@ const QwNeonRegister q15 = { 15 };
// compilation unit that includes this header doesn't use the variables.
#define kFirstCalleeSavedDoubleReg d8
#define kLastCalleeSavedDoubleReg d15
// kDoubleRegZero and kScratchDoubleReg must pair to form kScratchQuadReg.
#define kDoubleRegZero d14
#define kScratchDoubleReg d15
// After using kScratchQuadReg, kDoubleRegZero must be reset to 0.
#define kScratchQuadReg q7
// Coprocessor register
struct CRegister {
@@ -624,12 +640,26 @@ class NeonMemOperand BASE_EMBEDDED {
// Class NeonListOperand represents a list of NEON registers
class NeonListOperand BASE_EMBEDDED {
public:
explicit NeonListOperand(DoubleRegister base, int registers_count = 1);
explicit NeonListOperand(DoubleRegister base, int register_count = 1)
: base_(base), register_count_(register_count) {}
explicit NeonListOperand(QwNeonRegister q_reg)
: base_(q_reg.low()), register_count_(2) {}
DoubleRegister base() const { return base_; }
NeonListType type() const { return type_; }
int register_count() { return register_count_; }
int length() const { return register_count_ - 1; }
NeonListType type() const {
switch (register_count_) {
default: UNREACHABLE();
// Fall through.
case 1: return nlt_1;
case 2: return nlt_2;
case 3: return nlt_3;
case 4: return nlt_4;
}
}
private:
DoubleRegister base_;
NeonListType type_;
int register_count_;
};
@@ -1133,6 +1163,8 @@ class Assembler : public AssemblerBase {
void vmov(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
// TODO(bbudge) Replace uses of these with the more general core register to
// scalar register vmov's.
void vmov(const DwVfpRegister dst,
const VmovIndex index,
const Register src,
@@ -1313,8 +1345,86 @@ class Assembler : public AssemblerBase {
const NeonMemOperand& dst);
void vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src);
// Currently, vswp supports only D0 to D31.
void vswp(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
// Only unconditional core <-> scalar moves are currently supported.
void vmov(NeonDataType dt, DwVfpRegister dst, int index, Register src);
void vmov(NeonDataType dt, Register dst, DwVfpRegister src, int index);
void vmov(const QwNeonRegister dst, const QwNeonRegister src);
void vmvn(const QwNeonRegister dst, const QwNeonRegister src);
void vswp(DwVfpRegister dst, DwVfpRegister src);
void vswp(QwNeonRegister dst, QwNeonRegister src);
// vdup conditional execution isn't supported.
void vdup(NeonSize size, const QwNeonRegister dst, const Register src);
void vdup(const QwNeonRegister dst, const SwVfpRegister src);
void vcvt_f32_s32(const QwNeonRegister dst, const QwNeonRegister src);
void vcvt_f32_u32(const QwNeonRegister dst, const QwNeonRegister src);
void vcvt_s32_f32(const QwNeonRegister dst, const QwNeonRegister src);
void vcvt_u32_f32(const QwNeonRegister dst, const QwNeonRegister src);
void vabs(const QwNeonRegister dst, const QwNeonRegister src);
void vabs(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src);
void vneg(const QwNeonRegister dst, const QwNeonRegister src);
void vneg(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src);
void veor(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2);
void vand(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vbsl(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void veor(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vorr(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vadd(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vadd(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vsub(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vsub(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vmul(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vmul(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vmin(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vmin(NeonDataType dt, const QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2);
void vmax(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vmax(NeonDataType dt, const QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2);
// vrecpe and vrsqrte only support floating point lanes.
void vrecpe(const QwNeonRegister dst, const QwNeonRegister src);
void vrsqrte(const QwNeonRegister dst, const QwNeonRegister src);
void vrecps(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vrsqrts(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vtst(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vceq(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vceq(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vcge(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vcge(NeonDataType dt, const QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2);
void vcgt(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vcgt(NeonDataType dt, const QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2);
void vext(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2, int bytes);
void vzip(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src);
void vrev16(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src);
void vrev32(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src);
void vrev64(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src);
void vtbl(const DwVfpRegister dst, const NeonListOperand& list,
const DwVfpRegister index);
void vtbx(const DwVfpRegister dst, const NeonListOperand& list,
const DwVfpRegister index);
// Pseudo instructions
@@ -1395,9 +1505,6 @@ class Assembler : public AssemblerBase {
// Debugging
// Mark generator continuation.
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode);
@@ -1611,6 +1718,12 @@ class Assembler : public AssemblerBase {
(reg.reg_code < LowDwVfpRegister::kMaxNumLowRegisters);
}
bool VfpRegisterIsAvailable(QwNeonRegister reg) {
DCHECK(reg.is_valid());
return IsEnabled(VFP32DREGS) ||
(reg.reg_code < LowDwVfpRegister::kMaxNumLowRegisters / 2);
}
private:
int next_buffer_check_; // pc offset of next buffer check
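NeonListOperand now carries register_count_ directly and derives the list type on demand, replacing the constructor switch deleted from assembler-arm.cc. A standalone sketch of the mapping, with stand-in enum values (the real nlt_* constants are instruction-encoding specific):

#include <cassert>
#include <cstdio>

enum NeonListType { nlt_1, nlt_2, nlt_3, nlt_4 };

struct NeonListOperand {
  explicit NeonListOperand(int register_count = 1)
      : register_count_(register_count) {}
  int register_count() const { return register_count_; }
  int length() const { return register_count_ - 1; }  // encoded as count-1
  NeonListType type() const {
    switch (register_count_) {
      case 1: return nlt_1;
      case 2: return nlt_2;
      case 3: return nlt_3;
      case 4: return nlt_4;
      default: assert(false); return nlt_1;
    }
  }
  int register_count_;
};

int main() {
  NeonListOperand list(2);  // e.g. a Q register viewed as two D registers
  std::printf("type=%d length=%d\n", list.type(), list.length());
  return 0;
}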

486
deps/v8/src/arm/code-stubs-arm.cc

@@ -33,17 +33,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
void FastFunctionBindStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
@@ -635,8 +624,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == eq) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(lhs, rhs);
__ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
__ Push(cp);
__ Call(strict() ? isolate()->builtins()->StrictEqual()
: isolate()->builtins()->Equal(),
RelocInfo::CODE_TARGET);
__ Pop(cp);
}
// Turn true into 0 and false into some non-zero value.
STATIC_ASSERT(EQUAL == 0);
@@ -805,7 +797,6 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
SaveFPRegsMode mode = kSaveFPRegs;
CEntryStub(isolate, 1, mode).GetCode();
StoreBufferOverflowStub(isolate, mode).GetCode();
isolate->set_fp_stubs_generated(true);
}
@@ -2075,46 +2066,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
String::Encoding encoding) {
if (FLAG_debug_code) {
// Check that destination is word aligned.
__ tst(dest, Operand(kPointerAlignmentMask));
__ Check(eq, kDestinationOfCopyNotAligned);
}
// Assumes word reads and writes are little endian.
// Nothing to do for zero characters.
Label done;
if (encoding == String::TWO_BYTE_ENCODING) {
__ add(count, count, Operand(count), SetCC);
}
Register limit = count; // Read until dest equals this.
__ add(limit, dest, Operand(count));
Label loop_entry, loop;
// Copy bytes from src to dest until dest hits limit.
__ b(&loop_entry);
__ bind(&loop);
__ ldrb(scratch, MemOperand(src, 1, PostIndex), lt);
__ strb(scratch, MemOperand(dest, 1, PostIndex));
__ bind(&loop_entry);
__ cmp(dest, Operand(limit));
__ b(lt, &loop);
__ bind(&done);
}
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
@@ -2690,84 +2641,6 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ b(ne, miss);
}
// Probe the name dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found. Jump to
// the |miss| label otherwise.
// If lookup was successful |scratch2| will be equal to elements + 4 * index.
void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register elements,
Register name,
Register scratch1,
Register scratch2) {
DCHECK(!elements.is(scratch1));
DCHECK(!elements.is(scratch2));
DCHECK(!name.is(scratch1));
DCHECK(!name.is(scratch2));
__ AssertName(name);
// Compute the capacity mask.
__ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
__ SmiUntag(scratch1);
__ sub(scratch1, scratch1, Operand(1));
// Generate an unrolled loop that performs a few probes before
// giving up. Measurements done on Gmail indicate that 2 probes
// cover ~93% of loads from dictionaries.
for (int i = 0; i < kInlinedProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
__ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
if (i > 0) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
DCHECK(NameDictionary::GetProbeOffset(i) <
1 << (32 - Name::kHashFieldOffset));
__ add(scratch2, scratch2, Operand(
NameDictionary::GetProbeOffset(i) << Name::kHashShift));
}
__ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
// Scale the index by multiplying by the entry size.
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
// scratch2 = scratch2 * 3.
__ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
// Check if the key is identical to the name.
__ add(scratch2, elements, Operand(scratch2, LSL, 2));
__ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
__ cmp(name, Operand(ip));
__ b(eq, done);
}
const int spill_mask =
(lr.bit() | r6.bit() | r5.bit() | r4.bit() |
r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
~(scratch1.bit() | scratch2.bit());
__ stm(db_w, sp, spill_mask);
if (name.is(r0)) {
DCHECK(!elements.is(r1));
__ Move(r1, name);
__ Move(r0, elements);
} else {
__ Move(r0, elements);
__ Move(r1, name);
}
NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
__ CallStub(&stub);
__ cmp(r0, Operand::Zero());
__ mov(scratch2, Operand(r2));
__ ldm(ia_w, sp, spill_mask);
__ b(ne, done);
__ b(eq, miss);
}
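The deleted positive-lookup stub unrolls the dictionary's probe sequence, computing, as its own comment says, the masked index (hash + i + i * i) & mask on each retry; per the same comment, two probes cover ~93% of dictionary loads. A standalone sketch of the slots such probes visit:

#include <cstdio>

int main() {
  const unsigned capacity = 32;        // dictionary capacity (power of two)
  const unsigned mask = capacity - 1;  // the capacity mask, as in the stub
  const unsigned hash = 0xBEEF;
  // The masked index per probe, exactly as the deleted stub computes it:
  for (unsigned i = 0; i < 4; i++) {
    unsigned slot = (hash + i + i * i) & mask;
    std::printf("probe %u -> slot %u\n", i, slot);
  }
  return 0;
}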
void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
@@ -3057,238 +2930,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
static void HandleArrayCases(MacroAssembler* masm, Register feedback,
Register receiver_map, Register scratch1,
Register scratch2, bool is_polymorphic,
Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label start_polymorphic;
Register cached_map = scratch1;
__ ldr(cached_map,
FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
__ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
__ cmp(receiver_map, cached_map);
__ b(ne, &start_polymorphic);
// found, now call handler.
Register handler = feedback;
__ ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
__ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
Register length = scratch2;
__ bind(&start_polymorphic);
__ ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
if (!is_polymorphic) {
// If the IC could be monomorphic we have to make sure we don't go past the
// end of the feedback array.
__ cmp(length, Operand(Smi::FromInt(2)));
__ b(eq, miss);
}
Register too_far = length;
Register pointer_reg = feedback;
// +-----+------+------+-----+-----+ ... ----+
// | map | len | wm0 | h0 | wm1 | hN |
// +-----+------+------+-----+-----+ ... ----+
// 0 1 2 len-1
// ^ ^
// | |
// pointer_reg too_far
// aka feedback scratch2
// also need receiver_map
// use cached_map (scratch1) to look in the weak map values.
__ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(length));
__ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(pointer_reg, feedback,
Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
__ bind(&next_loop);
__ ldr(cached_map, MemOperand(pointer_reg));
__ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
__ cmp(receiver_map, cached_map);
__ b(ne, &prepare_next);
__ ldr(handler, MemOperand(pointer_reg, kPointerSize));
__ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
__ bind(&prepare_next);
__ add(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
__ cmp(pointer_reg, too_far);
__ b(lt, &next_loop);
// We exhausted our array of map handler pairs.
__ jmp(miss);
}
static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
Register receiver_map, Register feedback,
Register vector, Register slot,
Register scratch, Label* compare_map,
Label* load_smi_map, Label* try_array) {
__ JumpIfSmi(receiver, load_smi_map);
__ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ bind(compare_map);
Register cached_map = scratch;
// Move the weak map into the weak_cell register.
__ ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
__ cmp(cached_map, receiver_map);
__ b(ne, try_array);
Register handler = feedback;
__ add(handler, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(handler,
FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
__ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
}
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
Register receiver_map, Register scratch1,
Register scratch2, Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label start_polymorphic;
Label transition_call;
Register cached_map = scratch1;
Register too_far = scratch2;
Register pointer_reg = feedback;
__ ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
// +-----+------+------+-----+-----+-----+ ... ----+
// | map | len | wm0 | wt0 | h0 | wm1 | hN |
// +-----+------+------+-----+-----+ ----+ ... ----+
// 0 1 2 len-1
// ^ ^
// | |
// pointer_reg too_far
// aka feedback scratch2
// also need receiver_map
// use cached_map (scratch1) to look in the weak map values.
__ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(too_far));
__ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(pointer_reg, feedback,
Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
__ bind(&next_loop);
__ ldr(cached_map, MemOperand(pointer_reg));
__ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
__ cmp(receiver_map, cached_map);
__ b(ne, &prepare_next);
// Is it a transitioning store?
__ ldr(too_far, MemOperand(pointer_reg, kPointerSize));
__ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
__ b(ne, &transition_call);
__ ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
__ add(pc, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
__ bind(&transition_call);
__ ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
__ JumpIfSmi(too_far, miss);
__ ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
// Load the map into the correct register.
DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
__ mov(feedback, too_far);
__ add(pc, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
__ bind(&prepare_next);
__ add(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
__ cmp(pointer_reg, too_far);
__ b(lt, &next_loop);
// We exhausted our array of map handler pairs.
__ jmp(miss);
}
void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // r1
Register key = StoreWithVectorDescriptor::NameRegister(); // r2
Register vector = StoreWithVectorDescriptor::VectorRegister(); // r3
Register slot = StoreWithVectorDescriptor::SlotRegister(); // r4
DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r0)); // r0
Register feedback = r5;
Register receiver_map = r6;
Register scratch1 = r9;
__ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak cell in feedback. We do know it's safe to look
// at WeakCell::kValueOffset.
Label try_array, load_smi_map, compare_map;
Label not_array, miss;
HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
scratch1, &compare_map, &load_smi_map, &try_array);
__ bind(&try_array);
// Is it a fixed array?
__ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
__ b(ne, &not_array);
// We have a polymorphic element handler.
Label polymorphic, try_poly_name;
__ bind(&polymorphic);
// We are using register r8, which is used for the embedded constant pool
// when FLAG_enable_embedded_constant_pool is true.
DCHECK(!FLAG_enable_embedded_constant_pool);
Register scratch2 = r8;
HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
&miss);
__ bind(&not_array);
// Is it generic?
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ b(ne, &try_poly_name);
Handle<Code> megamorphic_stub =
KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
// We might have a name in feedback, and a fixed array in the next slot.
__ cmp(key, feedback);
__ b(ne, &miss);
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
__ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
&miss);
__ bind(&miss);
KeyedStoreIC::GenerateMiss(masm);
__ bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&compare_map);
}
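The two Operand::PointerOffsetFromSmiKey(slot) uses above turn a smi-tagged slot index into a byte offset in one shift. A sketch of that arithmetic under 32-bit smi tagging (tag size 1, 4-byte pointers):

#include <cstdint>

constexpr uint32_t kSmiTagSize = 1;
constexpr uint32_t kPointerSizeLog2 = 2;  // 4-byte pointers on ARM32

// Untag, then scale by the pointer size; the two shifts fold into one.
constexpr uint32_t PointerOffsetFromSmiKey(uint32_t raw_smi) {
  return (raw_smi >> kSmiTagSize) << kPointerSizeLog2;
}

// Slot index 4 is stored as raw smi 8; the offset into the feedback
// vector is 16 bytes, i.e. four pointer-sized slots.
static_assert(PointerOffsetFromSmiKey(8) == 16, "raw smi 8 -> 16 bytes");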
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@ -3648,123 +3289,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
void FastNewObjectStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : target
// -- r3 : new target
// -- cp : context
// -- lr : return address
// -----------------------------------
__ AssertFunction(r1);
__ AssertReceiver(r3);
// Verify that the new target is a JSFunction.
Label new_object;
__ CompareObjectType(r3, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &new_object);
// Load the initial map and verify that it's in fact a map.
__ ldr(r2, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
__ JumpIfSmi(r2, &new_object);
__ CompareObjectType(r2, r0, r0, MAP_TYPE);
__ b(ne, &new_object);
// Fall back to runtime if the target differs from the new target's
// initial map constructor.
__ ldr(r0, FieldMemOperand(r2, Map::kConstructorOrBackPointerOffset));
__ cmp(r0, r1);
__ b(ne, &new_object);
// Allocate the JSObject on the heap.
Label allocate, done_allocate;
__ ldrb(r4, FieldMemOperand(r2, Map::kInstanceSizeOffset));
__ Allocate(r4, r0, r5, r6, &allocate, SIZE_IN_WORDS);
__ bind(&done_allocate);
// Initialize the JSObject fields.
__ str(r2, FieldMemOperand(r0, JSObject::kMapOffset));
__ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
__ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
__ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
__ add(r1, r0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
// ----------- S t a t e -------------
// -- r0 : result (tagged)
// -- r1 : result fields (untagged)
// -- r5 : result end (untagged)
// -- r2 : initial map
// -- cp : context
// -- lr : return address
// -----------------------------------
// Perform in-object slack tracking if requested.
Label slack_tracking;
STATIC_ASSERT(Map::kNoSlackTracking == 0);
__ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
__ ldr(r3, FieldMemOperand(r2, Map::kBitField3Offset));
__ tst(r3, Operand(Map::ConstructionCounter::kMask));
__ b(ne, &slack_tracking);
{
// Initialize all in-object fields with undefined.
__ InitializeFieldsWithFiller(r1, r5, r6);
__ Ret();
}
__ bind(&slack_tracking);
{
// Decrease generous allocation count.
STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
__ sub(r3, r3, Operand(1 << Map::ConstructionCounter::kShift));
__ str(r3, FieldMemOperand(r2, Map::kBitField3Offset));
// Initialize the in-object fields with undefined.
__ ldrb(r4, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
__ sub(r4, r5, Operand(r4, LSL, kPointerSizeLog2));
__ InitializeFieldsWithFiller(r1, r4, r6);
// Initialize the remaining (reserved) fields with one pointer filler map.
__ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
__ InitializeFieldsWithFiller(r1, r5, r6);
// Check if we can finalize the instance size.
STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
__ tst(r3, Operand(Map::ConstructionCounter::kMask));
__ Ret(ne);
// Finalize the instance size.
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r0, r2);
__ CallRuntime(Runtime::kFinalizeInstanceSize);
__ Pop(r0);
}
__ Ret();
}
// Fall back to %AllocateInNewSpace.
__ bind(&allocate);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
__ mov(r4, Operand(r4, LSL, kPointerSizeLog2 + 1));
__ Push(r2, r4);
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Pop(r2);
}
__ ldrb(r5, FieldMemOperand(r2, Map::kInstanceSizeOffset));
__ add(r5, r0, Operand(r5, LSL, kPointerSizeLog2));
STATIC_ASSERT(kHeapObjectTag == 1);
__ sub(r5, r5, Operand(kHeapObjectTag));
__ b(&done_allocate);
// Fall back to %NewObject.
__ bind(&new_object);
__ Push(r1, r3);
__ TailCallRuntime(Runtime::kNewObject);
}
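The allocate slow path above passes the instance size to Runtime::kAllocateInNewSpace already smi-tagged, folding the words-to-bytes scaling and the smi tag into the single LSL by kPointerSizeLog2 + 1. The arithmetic, under the same 32-bit assumptions:

#include <cstdint>

constexpr uint32_t kPointerSizeLog2 = 2;  // 4-byte pointers on ARM32
constexpr uint32_t kSmiTagSize = 1;

// size_in_words * kPointerSize, then smi-tag it: one combined shift.
constexpr uint32_t SmiTaggedByteSize(uint32_t size_in_words) {
  return size_in_words << (kPointerSizeLog2 + kSmiTagSize);
}

// A 5-word object is 20 bytes, and Smi(20) has raw value 40 == 5 << 3.
static_assert(SmiTaggedByteSize(5) == 40, "Smi(20)");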
void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : function

19
deps/v8/src/arm/code-stubs-arm.h

@ -16,17 +16,6 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StringHelper : public AllStatic {
public:
// Generate code for copying a large number of characters. This function
// is allowed to spend extra time setting up conditions to make copying
// faster. Copying of overlapping regions is not supported.
// Dest register ends at the position after the last character written.
static void GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
String::Encoding encoding);
// Compares two flat one-byte strings and returns result in r0.
static void GenerateCompareFlatOneByteStrings(
MacroAssembler* masm, Register left, Register right, Register scratch1,
@ -280,14 +269,6 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
Handle<Name> name,
Register scratch0);
static void GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register elements,
Register name,
Register r0,
Register r1);
bool SometimesSetsUpAFrame() override { return false; }
private:

358
deps/v8/src/arm/codegen-arm.cc

@ -317,337 +317,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ ACCESS_MASM(masm)
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* allocation_memento_found) {
Register scratch_elements = r4;
DCHECK(!AreAliased(receiver, key, value, target_map,
scratch_elements));
if (mode == TRACK_ALLOCATION_SITE) {
DCHECK(allocation_memento_found != NULL);
__ JumpIfJSArrayHasAllocationMemento(
receiver, scratch_elements, allocation_memento_found);
}
// Set transitioned map.
__ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
target_map,
r9,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
// Register lr contains the return address.
Label loop, entry, convert_hole, gc_required, only_change_map, done;
Register elements = r4;
Register length = r5;
Register array = r6;
Register array_end = array;
// target_map parameter can be clobbered.
Register scratch1 = target_map;
Register scratch2 = r9;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map,
elements, length, array, scratch2));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ b(eq, &only_change_map);
__ push(lr);
__ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
// length: number of elements (smi-tagged)
// Allocate new FixedDoubleArray.
// Use lr as a temporary register.
__ mov(lr, Operand(length, LSL, 2));
__ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
__ Allocate(lr, array, elements, scratch2, &gc_required, DOUBLE_ALIGNMENT);
__ sub(array, array, Operand(kHeapObjectTag));
// array: destination FixedDoubleArray, not tagged as heap object.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
// r4: source FixedArray.
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
__ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
// Update receiver's map.
__ str(scratch2, MemOperand(array, HeapObject::kMapOffset));
__ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
target_map,
scratch2,
kLRHasBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
__ add(scratch1, array, Operand(kHeapObjectTag));
__ str(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver,
JSObject::kElementsOffset,
scratch1,
scratch2,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Prepare for conversion loop.
__ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
__ add(array_end, scratch2, Operand(length, LSL, 2));
// Repurpose registers no longer in use.
Register hole_lower = elements;
Register hole_upper = length;
__ mov(hole_lower, Operand(kHoleNanLower32));
__ mov(hole_upper, Operand(kHoleNanUpper32));
// scratch1: begin of source FixedArray element fields, not tagged
// hole_lower: kHoleNanLower32
// hole_upper: kHoleNanUpper32
// array_end: end of destination FixedDoubleArray, not tagged
// scratch2: begin of FixedDoubleArray element fields, not tagged
__ b(&entry);
__ bind(&only_change_map);
__ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
target_map,
scratch2,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ b(&done);
// Call into runtime if GC is required.
__ bind(&gc_required);
__ pop(lr);
__ b(fail);
// Convert and copy elements.
__ bind(&loop);
__ ldr(lr, MemOperand(scratch1, 4, PostIndex));
// lr: current element
__ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);
// Normal smi, convert to double and store.
__ vmov(s0, lr);
__ vcvt_f64_s32(d0, s0);
__ vstr(d0, scratch2, 0);
__ add(scratch2, scratch2, Operand(8));
__ b(&entry);
// Hole found, store the-hole NaN.
__ bind(&convert_hole);
if (FLAG_debug_code) {
// Restore a "smi-untagged" heap object.
__ SmiTag(lr);
__ orr(lr, lr, Operand(1));
__ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
__ Assert(eq, kObjectFoundInSmiOnlyArray);
}
__ Strd(hole_lower, hole_upper, MemOperand(scratch2, 8, PostIndex));
__ bind(&entry);
__ cmp(scratch2, array_end);
__ b(lt, &loop);
__ pop(lr);
__ bind(&done);
}
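In plain C++ the conversion loop reduces to the sketch below, assuming 32-bit smi tagging where even tagged values are smis and the hole is the only other value present; the hole NaN constant here is a placeholder, not necessarily V8's exact bit pattern:

#include <cstdint>
#include <cstring>

// Placeholder for kHoleNanUpper32/kHoleNanLower32 glued together.
constexpr uint64_t kAssumedHoleNanBits = 0xFFF7FFFFFFF7FFFFull;

void ConvertSmiElementsToDoubles(const int32_t* src, double* dst, int len) {
  for (int i = 0; i < len; ++i) {
    if ((src[i] & 1) == 0) {
      dst[i] = static_cast<double>(src[i] >> 1);  // untag, then widen
    } else {
      // Hole: store the distinguished NaN bit pattern verbatim.
      std::memcpy(&dst[i], &kAssumedHoleNanBits, sizeof(double));
    }
  }
}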
void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
// Register lr contains the return address.
Label entry, loop, convert_hole, gc_required, only_change_map;
Register elements = r4;
Register array = r6;
Register length = r5;
Register scratch = r9;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map,
elements, array, length, scratch));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ b(eq, &only_change_map);
__ push(lr);
__ Push(target_map, receiver, key, value);
__ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
// elements: source FixedDoubleArray
// length: number of elements (smi-tagged)
// Allocate new FixedArray.
// Re-use value and target_map registers, as they have been saved on the
// stack.
Register array_size = value;
Register allocate_scratch = target_map;
__ mov(array_size, Operand(FixedDoubleArray::kHeaderSize));
__ add(array_size, array_size, Operand(length, LSL, 1));
__ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
NO_ALLOCATION_FLAGS);
// array: destination FixedArray, tagged as heap object
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
__ str(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
__ str(scratch, FieldMemOperand(array, HeapObject::kMapOffset));
__ sub(array, array, Operand(kHeapObjectTag));
// Prepare for conversion loop.
Register src_elements = elements;
Register dst_elements = target_map;
Register dst_end = length;
Register heap_number_map = scratch;
__ add(src_elements, elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
__ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
__ add(dst_end, dst_elements, Operand(length, LSL, 1));
// Allocating heap numbers in the loop below can fail and cause a jump to
// gc_required. We can't leave a partly initialized FixedArray behind,
// so pessimistically fill it with holes now.
Label initialization_loop, initialization_loop_entry;
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ b(&initialization_loop_entry);
__ bind(&initialization_loop);
__ str(scratch, MemOperand(dst_elements, kPointerSize, PostIndex));
__ bind(&initialization_loop_entry);
__ cmp(dst_elements, dst_end);
__ b(lt, &initialization_loop);
__ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
__ add(array, array, Operand(kHeapObjectTag));
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
// Using offset addresses in src_elements to take full advantage of
// post-indexing.
// dst_elements: begin of destination FixedArray element fields, not tagged
// src_elements: begin of source FixedDoubleArray element fields,
// not tagged, +4
// dst_end: end of destination FixedArray, not tagged
// array: destination FixedArray
// heap_number_map: heap number map
__ b(&entry);
// Call into runtime if GC is required.
__ bind(&gc_required);
__ Pop(target_map, receiver, key, value);
__ pop(lr);
__ b(fail);
__ bind(&loop);
Register upper_bits = key;
__ ldr(upper_bits, MemOperand(src_elements, 8, PostIndex));
// upper_bits: current element's upper 32 bit
// src_elements: address of next element's upper 32 bit
__ cmp(upper_bits, Operand(kHoleNanUpper32));
__ b(eq, &convert_hole);
// Non-hole double, copy value into a heap number.
Register heap_number = receiver;
Register scratch2 = value;
__ AllocateHeapNumber(heap_number, scratch2, lr, heap_number_map,
&gc_required);
// heap_number: new heap number
__ ldr(scratch2, MemOperand(src_elements, 12, NegOffset));
__ Strd(scratch2, upper_bits,
FieldMemOperand(heap_number, HeapNumber::kValueOffset));
__ mov(scratch2, dst_elements);
__ str(heap_number, MemOperand(dst_elements, 4, PostIndex));
__ RecordWrite(array,
scratch2,
heap_number,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ b(&entry);
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
__ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
__ str(scratch2, MemOperand(dst_elements, 4, PostIndex));
__ bind(&entry);
__ cmp(dst_elements, dst_end);
__ b(lt, &loop);
__ Pop(target_map, receiver, key, value);
// Replace receiver's backing store with newly created and filled FixedArray.
__ str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver,
JSObject::kElementsOffset,
array,
scratch,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ pop(lr);
__ bind(&only_change_map);
// Update receiver's map.
__ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
target_map,
scratch,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
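Going the other way, the loop above classifies each element by its upper 32 bits alone, since the hole NaN is distinguishable there. A sketch of that test, again with an assumed placeholder constant:

#include <cstdint>
#include <cstring>

constexpr uint32_t kAssumedHoleNanUpper32 = 0xFFF7FFFF;  // placeholder value

bool IsHoleDouble(double element) {
  uint64_t bits;
  std::memcpy(&bits, &element, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32) == kAssumedHoleNanUpper32;
}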
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register string,
Register index,
@ -771,31 +440,23 @@ bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
return result;
}
Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(isolate, sequence)) {
*age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
Address target_address = Memory::Address_at(
sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
Code* stub = GetCodeFromTargetAddress(target_address);
GetCodeAgeAndParity(stub, age, parity);
}
Address target_address = Memory::Address_at(
sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
Code* stub = GetCodeFromTargetAddress(target_address);
return GetAgeOfCodeAgeStub(stub);
}
void Code::PatchPlatformCodeAge(Isolate* isolate,
byte* sequence,
Code::Age age,
MarkingParity parity) {
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
Code::Age age) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
Code* stub = GetCodeAgeStub(isolate, age);
CodePatcher patcher(isolate, sequence,
young_length / Assembler::kInstrSize);
patcher.masm()->add(r0, pc, Operand(-8));
@ -804,7 +465,6 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
}
}
} // namespace internal
} // namespace v8

25
deps/v8/src/arm/constants-arm.h

@ -190,6 +190,7 @@ enum {
B7 = 1 << 7,
B8 = 1 << 8,
B9 = 1 << 9,
B10 = 1 << 10,
B12 = 1 << 12,
B16 = 1 << 16,
B17 = 1 << 17,
@ -218,7 +219,6 @@ enum {
kOff8Mask = (1 << 8) - 1
};
enum BarrierOption {
OSHLD = 0x1,
OSHST = 0x2,
@ -327,12 +327,12 @@ enum LFlag {
// NEON data type
enum NeonDataType {
NeonS8 = 0x1, // U = 0, imm3 = 0b001
NeonS16 = 0x2, // U = 0, imm3 = 0b010
NeonS32 = 0x4, // U = 0, imm3 = 0b100
NeonS8 = 0x1, // U = 0, imm3 = 0b001
NeonS16 = 0x2, // U = 0, imm3 = 0b010
NeonS32 = 0x4, // U = 0, imm3 = 0b100
NeonU8 = 1 << 24 | 0x1, // U = 1, imm3 = 0b001
NeonU16 = 1 << 24 | 0x2, // U = 1, imm3 = 0b010
NeonU32 = 1 << 24 | 0x4, // U = 1, imm3 = 0b100
NeonU32 = 1 << 24 | 0x4, // U = 1, imm3 = 0b100
NeonDataTypeSizeMask = 0x7,
NeonDataTypeUMask = 1 << 24
};
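The encoding is self-describing: the low three bits carry a one-hot lane width in bytes and bit 24 carries the unsigned flag, so the two masks in the enum recover both directly. A small check:

#include <cassert>

constexpr int kNeonDataTypeSizeMask = 0x7;
constexpr int kNeonDataTypeUMask = 1 << 24;

int BytesPerLane(int dt) { return dt & kNeonDataTypeSizeMask; }
bool IsUnsignedType(int dt) { return (dt & kNeonDataTypeUMask) != 0; }

int main() {
  assert(BytesPerLane(0x2) == 2);              // NeonS16
  assert(BytesPerLane((1 << 24) | 0x4) == 4);  // NeonU32
  assert(IsUnsignedType((1 << 24) | 0x1));     // NeonU8
  assert(!IsUnsignedType(0x4));                // NeonS32
  return 0;
}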
@ -374,10 +374,10 @@ const int32_t kDefaultStopCode = -1;
// Type of VFP register. Determines register encoding.
enum VFPRegPrecision {
kSinglePrecision = 0,
kDoublePrecision = 1
kDoublePrecision = 1,
kSimd128Precision = 2
};
// VFP FPSCR constants.
enum VFPConversionMode {
kFPSCRRounding = 0,
@ -667,15 +667,22 @@ class Instruction {
private:
// Join split register codes, depending on single or double precision.
// Join split register codes, depending on register precision.
// four_bit is the position of the least-significant bit of the four
// bit specifier. one_bit is the position of the additional single bit
// specifier.
inline int VFPGlueRegValue(VFPRegPrecision pre, int four_bit, int one_bit) {
if (pre == kSinglePrecision) {
return (Bits(four_bit + 3, four_bit) << 1) | Bit(one_bit);
} else {
int reg_num = (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit);
if (pre == kDoublePrecision) {
return reg_num;
}
DCHECK_EQ(kSimd128Precision, pre);
DCHECK_EQ(reg_num & 1, 0);
return reg_num / 2;
}
return (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit);
}
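Concretely, the glue places the extra bit at the bottom for single precision (s0..s31), at the top for double precision (d0..d31), and halves an even d-number for quads (q0..q15). A standalone rendering with example encodings:

#include <cassert>

enum Precision { kSingle, kDouble, kSimd128 };

int GlueRegValue(Precision pre, int four_bits, int one_bit) {
  if (pre == kSingle) return (four_bits << 1) | one_bit;
  int reg_num = (one_bit << 4) | four_bits;
  if (pre == kDouble) return reg_num;
  assert((reg_num & 1) == 0);  // quads alias even d-register pairs
  return reg_num / 2;
}

int main() {
  assert(GlueRegValue(kSingle, 10, 1) == 21);  // s21
  assert(GlueRegValue(kDouble, 5, 1) == 21);   // d21
  assert(GlueRegValue(kSimd128, 6, 1) == 11);  // q11 == d22/d23
  return 0;
}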
// We need to prevent the creation of instances of class Instruction.

379
deps/v8/src/arm/disasm-arm.cc

@ -1419,6 +1419,9 @@ int Decoder::DecodeType7(Instruction* instr) {
// Sd = vsqrt(Sm)
// vmrs
// vmsr
// Qd = vdup.size(Qd, Rt)
// vmov.size: Dd[i] = Rt
// vmov.sign.size: Rt = Dn[i]
void Decoder::DecodeTypeVFP(Instruction* instr) {
VERIFY((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0));
VERIFY(instr->Bits(11, 9) == 0x5);
@ -1531,21 +1534,71 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
} else if ((instr->VLValue() == 0x0) &&
(instr->VCValue() == 0x1) &&
(instr->Bit(23) == 0x0)) {
if (instr->Bit(21) == 0x0) {
Format(instr, "vmov'cond.32 'Dd[0], 'rt");
} else if ((instr->VLValue() == 0x0) && (instr->VCValue() == 0x1)) {
if (instr->Bit(23) == 0) {
int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
if ((opc1_opc2 & 0xb) == 0) {
// NeonS32/NeonU32
if (instr->Bit(21) == 0x0) {
Format(instr, "vmov'cond.32 'Dd[0], 'rt");
} else {
Format(instr, "vmov'cond.32 'Dd[1], 'rt");
}
} else {
int vd = instr->VFPNRegValue(kDoublePrecision);
int rt = instr->RtValue();
if ((opc1_opc2 & 0x8) != 0) {
// NeonS8 / NeonU8
int i = opc1_opc2 & 0x7;
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmov.8 d%d[%d], r%d", vd, i, rt);
} else if ((opc1_opc2 & 0x1) != 0) {
// NeonS16 / NeonU16
int i = (opc1_opc2 >> 1) & 0x3;
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmov.16 d%d[%d], r%d", vd, i, rt);
} else {
Unknown(instr);
}
}
} else {
Format(instr, "vmov'cond.32 'Dd[1], 'rt");
}
} else if ((instr->VLValue() == 0x1) &&
(instr->VCValue() == 0x1) &&
(instr->Bit(23) == 0x0)) {
if (instr->Bit(21) == 0x0) {
Format(instr, "vmov'cond.32 'rt, 'Dd[0]");
int size = 32;
if (instr->Bit(5) != 0)
size = 16;
else if (instr->Bit(22) != 0)
size = 8;
int Vd = instr->VFPNRegValue(kSimd128Precision);
int Rt = instr->RtValue();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vdup.%i q%d, r%d", size, Vd, Rt);
}
} else if ((instr->VLValue() == 0x1) && (instr->VCValue() == 0x1)) {
int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
if ((opc1_opc2 & 0xb) == 0) {
// NeonS32 / NeonU32
if (instr->Bit(21) == 0x0) {
Format(instr, "vmov'cond.32 'rt, 'Dd[0]");
} else {
Format(instr, "vmov'cond.32 'rt, 'Dd[1]");
}
} else {
Format(instr, "vmov'cond.32 'rt, 'Dd[1]");
const char* sign = instr->Bit(23) != 0 ? "u" : "s";
int rt = instr->RtValue();
int vn = instr->VFPNRegValue(kDoublePrecision);
if ((opc1_opc2 & 0x8) != 0) {
// NeonS8 / NeonU8
int i = opc1_opc2 & 0x7;
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmov.%s8 r%d, d%d[%d]", sign, rt, vn, i);
} else if ((opc1_opc2 & 0x1) != 0) {
// NeonS16 / NeonU16
int i = (opc1_opc2 >> 1) & 0x3;
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vmov.%s16 r%d, d%d[%d]",
sign, rt, vn, i);
} else {
Unknown(instr);
}
}
} else if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x7) &&
@ -1563,6 +1616,8 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
Format(instr, "vmrs'cond 'rt, FPSCR");
}
}
} else {
Unknown(instr); // Not used by V8.
}
}
}
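The lane selection in the new branches comes from the four-bit opc1:opc2 field, (Bits(22, 21) << 2) | Bits(6, 5): bit 3 set selects an 8-bit lane indexed by the low three bits, otherwise bit 0 set selects a 16-bit lane indexed by bits 2:1, and with both clear bit 2 indexes a 32-bit lane. A sketch of the same decode:

#include <cassert>

struct LaneInfo {
  int size_bits;
  int index;
};

LaneInfo DecodeVmovLane(int opc1_opc2) {
  if (opc1_opc2 & 0x8) return {8, opc1_opc2 & 0x7};          // NeonS8/U8
  if (opc1_opc2 & 0x1) return {16, (opc1_opc2 >> 1) & 0x3};  // NeonS16/U16
  return {32, (opc1_opc2 >> 2) & 0x1};                       // NeonS32/U32
}

int main() {
  LaneInfo a = DecodeVmovLane(0xD);  // 0b1101 -> vmov.8 d_n[5]
  assert(a.size_bits == 8 && a.index == 5);
  LaneInfo b = DecodeVmovLane(0x5);  // 0b0101 -> vmov.16 d_n[2]
  assert(b.size_bits == 16 && b.index == 2);
  return 0;
}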
@ -1801,6 +1856,104 @@ static const char* const barrier_option_names[] = {
void Decoder::DecodeSpecialCondition(Instruction* instr) {
switch (instr->SpecialValue()) {
case 4:
if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 2 &&
instr->Bit(6) == 1 && instr->Bit(4) == 1) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
if (Vm == Vn) {
// vmov Qd, Qm
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vmov q%d, q%d", Vd, Vm);
} else {
// vorr Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vorr q%d, q%d, q%d", Vd, Vn, Vm);
}
} else if (instr->Bits(11, 8) == 8) {
const char* op = (instr->Bit(4) == 0) ? "vadd" : "vtst";
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
// vadd/vtst.i<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.i%d q%d, q%d, q%d", op,
size, Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0xd && instr->Bit(4) == 0) {
const char* op = (instr->Bits(21, 20) == 0) ? "vadd" : "vsub";
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
// vadd/vsub.f32 Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0x9 && instr->Bit(6) == 1 &&
instr->Bit(4) == 1) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
// vmul.i<size> Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmul.i%d q%d, q%d, q%d", size, Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0xe && instr->Bits(21, 20) == 0 &&
instr->Bit(4) == 0) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
// vceq.f32 Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vceq.f32 q%d, q%d, q%d", Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 0 &&
instr->Bit(6) == 1 && instr->Bit(4) == 1) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
// vand Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vand q%d, q%d, q%d", Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0x3) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
// vcge/vcgt.s<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d q%d, q%d, q%d", op,
size, Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0xf && instr->Bit(20) == 0 &&
instr->Bit(6) == 1) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
if (instr->Bit(4) == 1) {
// vrecps/vrsqrts.f32 Qd, Qm, Qn.
const char* op = instr->Bit(21) == 0 ? "vrecps" : "vrsqrts";
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
} else {
// vmin/max.f32 Qd, Qm, Qn.
const char* op = instr->Bit(21) == 1 ? "vmin" : "vmax";
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
}
} else if (instr->Bits(11, 8) == 0x6) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
// vmin/vmax.s<size> Qd, Qm, Qn.
const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d q%d, q%d, q%d", op,
size, Vd, Vn, Vm);
} else {
Unknown(instr);
}
break;
case 5:
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
@ -1811,6 +1964,96 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmovl.s%d q%d, d%d", imm3*8, Vd, Vm);
} else if (instr->Bits(21, 20) == 3 && instr->Bit(4) == 0) {
// vext.8 Qd, Qm, Qn, imm4
int imm4 = instr->Bits(11, 8);
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vext.8 q%d, q%d, q%d, #%d",
Vd, Vn, Vm, imm4);
} else {
Unknown(instr);
}
break;
case 6:
if (instr->Bits(11, 8) == 8) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
if (instr->Bit(4) == 0) {
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vsub.i%d q%d, q%d, q%d",
size, Vd, Vn, Vm);
} else {
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vceq.i%d q%d, q%d, q%d",
size, Vd, Vn, Vm);
}
} else if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 1 &&
instr->Bit(4) == 1) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vbsl q%d, q%d, q%d", Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 0 &&
instr->Bit(4) == 1) {
if (instr->Bit(6) == 0) {
// veor Dd, Dn, Dm
int Vd = instr->VFPDRegValue(kDoublePrecision);
int Vn = instr->VFPNRegValue(kDoublePrecision);
int Vm = instr->VFPMRegValue(kDoublePrecision);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"veor d%d, d%d, d%d", Vd, Vn, Vm);
} else {
// veor Qd, Qn, Qm
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"veor q%d, q%d, q%d", Vd, Vn, Vm);
}
} else if (instr->Bits(11, 8) == 0xd && instr->Bit(21) == 0 &&
instr->Bit(6) == 1 && instr->Bit(4) == 1) {
// vmul.f32 Qd, Qn, Qm
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmul.f32 q%d, q%d, q%d", Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0xe && instr->Bit(20) == 0 &&
instr->Bit(4) == 0) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
const char* op = (instr->Bit(21) == 0) ? "vcge" : "vcgt";
// vcge/vcgt.f32 Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0x3) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
// vcge/vcgt.u<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d q%d, q%d, q%d", op,
size, Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0x6) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
// vmin/vmax.u<size> Qd, Qm, Qn.
const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d q%d, q%d, q%d", op,
size, Vd, Vn, Vm);
} else {
Unknown(instr);
}
@ -1825,13 +2068,109 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmovl.u%d q%d, d%d", imm3*8, Vd, Vm);
} else if ((instr->Bits(21, 16) == 0x32) && (instr->Bits(11, 7) == 0) &&
(instr->Bit(4) == 0)) {
int Vd = instr->VFPDRegValue(kDoublePrecision);
int Vm = instr->VFPMRegValue(kDoublePrecision);
char rtype = (instr->Bit(6) == 0) ? 'd' : 'q';
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vswp %c%d, %c%d", rtype, Vd, rtype, Vm);
} else if (instr->Opc1Value() == 7 && instr->Bits(21, 20) == 0x3 &&
instr->Bit(4) == 0) {
if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0) {
if (instr->Bit(6) == 0) {
int Vd = instr->VFPDRegValue(kDoublePrecision);
int Vm = instr->VFPMRegValue(kDoublePrecision);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vswp d%d, d%d", Vd, Vm);
} else {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vswp q%d, q%d", Vd, Vm);
}
} else if (instr->Bits(11, 7) == 0x18) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kDoublePrecision);
int index = instr->Bit(19);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vdup q%d, d%d[%d]", Vd, Vm, index);
} else if (instr->Bits(19, 16) == 0 && instr->Bits(11, 6) == 0x17) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vmvn q%d, q%d", Vd, Vm);
} else if (instr->Bits(19, 16) == 0xB && instr->Bits(11, 9) == 0x3 &&
instr->Bit(6) == 1) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
const char* suffix = nullptr;
int op = instr->Bits(8, 7);
switch (op) {
case 0:
suffix = "f32.s32";
break;
case 1:
suffix = "f32.u32";
break;
case 2:
suffix = "s32.f32";
break;
case 3:
suffix = "u32.f32";
break;
}
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vcvt.%s q%d, q%d", suffix, Vd, Vm);
} else if (instr->Bits(11, 10) == 0x2) {
int Vd = instr->VFPDRegValue(kDoublePrecision);
int Vn = instr->VFPNRegValue(kDoublePrecision);
int Vm = instr->VFPMRegValue(kDoublePrecision);
int len = instr->Bits(9, 8);
NeonListOperand list(DwVfpRegister::from_code(Vn), len + 1);
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s d%d, ",
instr->Bit(6) == 0 ? "vtbl.8" : "vtbx.8", Vd);
FormatNeonList(Vn, list.type());
Print(", ");
PrintDRegister(Vm);
} else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 6) == 0x7) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int size = kBitsPerByte * (1 << instr->Bits(19, 18));
// vzip.<size> Qd, Qm.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vzip.%d q%d, q%d", size, Vd, Vm);
} else if (instr->Bits(17, 16) == 0 && instr->Bits(11, 9) == 0) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int size = kBitsPerByte * (1 << instr->Bits(19, 18));
int op = kBitsPerByte
<< (static_cast<int>(Neon64) - instr->Bits(8, 7));
// vrev<op>.<size> Qd, Qm.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vrev%d.%d q%d, q%d", op, size, Vd, Vm);
} else if (instr->Bits(17, 16) == 0x1 && instr->Bit(11) == 0) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int size = kBitsPerByte * (1 << instr->Bits(19, 18));
const char* type = instr->Bit(10) != 0 ? "f" : "s";
if (instr->Bits(9, 6) == 0xd) {
// vabs<type>.<size> Qd, Qm.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vabs.%s%d q%d, q%d",
type, size, Vd, Vm);
} else if (instr->Bits(9, 6) == 0xf) {
// vneg<type>.<size> Qd, Qm.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vneg.%s%d q%d, q%d",
type, size, Vd, Vm);
} else {
Unknown(instr);
}
} else if (instr->Bits(19, 18) == 0x2 && instr->Bits(11, 8) == 0x5) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
const char* op = instr->Bit(7) == 0 ? "vrecpe" : "vrsqrte";
// vrecpe/vrsqrte.f32 Qd, Qm.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.f32 q%d, q%d", op, Vd, Vm);
} else {
Unknown(instr);
}
} else {
Unknown(instr);
}
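A recurring idiom in these SIMD cases is kBitsPerByte * (1 << instr->Bits(21, 20)) (or Bits(19, 18)), which maps the two-bit size field straight to a lane width in bits:

constexpr int kBitsPerByte = 8;

constexpr int LaneSizeInBits(int size_field) {  // size_field in [0, 3]
  return kBitsPerByte * (1 << size_field);
}

static_assert(LaneSizeInBits(0) == 8, "i8 lanes");
static_assert(LaneSizeInBits(1) == 16, "i16 lanes");
static_assert(LaneSizeInBits(2) == 32, "i32 lanes");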

8
deps/v8/src/arm/interface-descriptors-arm.cc

@ -66,13 +66,7 @@ const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r3};
Register registers[] = {r1, r2, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}

430
deps/v8/src/arm/macro-assembler-arm.cc

@ -264,6 +264,35 @@ void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src,
}
}
void MacroAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
if (!dst.is(src)) {
vmov(dst, src);
}
}
void MacroAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
if (srcdst0.is(srcdst1)) return; // Swapping aliased registers emits nothing.
DCHECK(VfpRegisterIsAvailable(srcdst0));
DCHECK(VfpRegisterIsAvailable(srcdst1));
if (CpuFeatures::IsSupported(NEON)) {
vswp(srcdst0, srcdst1);
} else {
DCHECK(!srcdst0.is(kScratchDoubleReg));
DCHECK(!srcdst1.is(kScratchDoubleReg));
vmov(kScratchDoubleReg, srcdst0);
vmov(srcdst0, srcdst1);
vmov(srcdst1, kScratchDoubleReg);
}
}
void MacroAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
if (!srcdst0.is(srcdst1)) {
vswp(srcdst0, srcdst1);
}
}
void MacroAssembler::Mls(Register dst, Register src1, Register src2,
Register srcA, Condition cond) {
if (CpuFeatures::IsSupported(ARMv7)) {
@ -1052,8 +1081,8 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
}
void MacroAssembler::VmovExtended(Register dst, int src_code) {
DCHECK_LE(32, src_code);
DCHECK_GT(64, src_code);
DCHECK_LE(SwVfpRegister::kMaxNumRegisters, src_code);
DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, src_code);
if (src_code & 0x1) {
VmovHigh(dst, DwVfpRegister::from_code(src_code / 2));
} else {
@ -1062,8 +1091,8 @@ void MacroAssembler::VmovExtended(Register dst, int src_code) {
}
void MacroAssembler::VmovExtended(int dst_code, Register src) {
DCHECK_LE(32, dst_code);
DCHECK_GT(64, dst_code);
DCHECK_LE(SwVfpRegister::kMaxNumRegisters, dst_code);
DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, dst_code);
if (dst_code & 0x1) {
VmovHigh(DwVfpRegister::from_code(dst_code / 2), src);
} else {
@ -1073,22 +1102,23 @@ void MacroAssembler::VmovExtended(int dst_code, Register src) {
void MacroAssembler::VmovExtended(int dst_code, int src_code,
Register scratch) {
if (src_code < 32 && dst_code < 32) {
if (src_code < SwVfpRegister::kMaxNumRegisters &&
dst_code < SwVfpRegister::kMaxNumRegisters) {
// src and dst are both s-registers.
vmov(SwVfpRegister::from_code(dst_code),
SwVfpRegister::from_code(src_code));
} else if (src_code < 32) {
} else if (src_code < SwVfpRegister::kMaxNumRegisters) {
// src is an s-register.
vmov(scratch, SwVfpRegister::from_code(src_code));
VmovExtended(dst_code, scratch);
} else if (dst_code < 32) {
} else if (dst_code < SwVfpRegister::kMaxNumRegisters) {
// dst is an s-register.
VmovExtended(scratch, src_code);
vmov(SwVfpRegister::from_code(dst_code), scratch);
} else {
// Neither src nor dst is an s-register.
DCHECK_GT(64, src_code);
DCHECK_GT(64, dst_code);
DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, src_code);
DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, dst_code);
VmovExtended(scratch, src_code);
VmovExtended(dst_code, scratch);
}
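The "extended" codes generalize s-register numbering past the architectural s0..s31: a code n >= 32 names the low (n even) or high (n odd) 32 bits of d(n/2), which is how the upper bank d16..d31 becomes addressable. For example:

#include <cassert>

struct DoubleHalf {
  int d_register;
  bool high_half;
};

DoubleHalf DecodeExtendedSCode(int code) {
  assert(code >= 32 && code < 64);  // upper-bank codes only
  return {code / 2, (code & 1) != 0};
}

int main() {
  DoubleHalf h = DecodeExtendedSCode(45);  // high 32 bits of d22
  assert(h.d_register == 22 && h.high_half);
  DoubleHalf l = DecodeExtendedSCode(44);  // low 32 bits of d22
  assert(l.d_register == 22 && !l.high_half);
  return 0;
}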
@ -1096,7 +1126,7 @@ void MacroAssembler::VmovExtended(int dst_code, int src_code,
void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src,
Register scratch) {
if (dst_code >= 32) {
if (dst_code >= SwVfpRegister::kMaxNumRegisters) {
ldr(scratch, src);
VmovExtended(dst_code, scratch);
} else {
@ -1106,7 +1136,7 @@ void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src,
void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code,
Register scratch) {
if (src_code >= 32) {
if (src_code >= SwVfpRegister::kMaxNumRegisters) {
VmovExtended(scratch, src_code);
str(scratch, dst);
} else {
@ -1114,6 +1144,105 @@ void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code,
}
}
void MacroAssembler::ExtractLane(Register dst, QwNeonRegister src,
NeonDataType dt, int lane) {
int bytes_per_lane = dt & NeonDataTypeSizeMask; // 1, 2, 4
int log2_bytes_per_lane = bytes_per_lane / 2; // 0, 1, 2
int byte = lane << log2_bytes_per_lane;
int double_word = byte >> kDoubleSizeLog2;
int double_byte = byte & (kDoubleSize - 1);
int double_lane = double_byte >> log2_bytes_per_lane;
DwVfpRegister double_source =
DwVfpRegister::from_code(src.code() * 2 + double_word);
vmov(dt, dst, double_source, double_lane);
}
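Tracing the lane arithmetic above for one case: extracting a NeonS16 lane 5 from q3 gives byte offset 10, which lands in the second double word (d7) at 16-bit lane 1. The same computation, standalone:

#include <cassert>

struct DoubleLane {
  int d_register;
  int lane;
};

DoubleLane LocateQuadLane(int q_register, int bytes_per_lane, int lane) {
  int log2_bytes = bytes_per_lane / 2;  // 1 -> 0, 2 -> 1, 4 -> 2
  int byte = lane << log2_bytes;
  int double_word = byte >> 3;          // kDoubleSizeLog2
  int double_lane = (byte & 7) >> log2_bytes;
  return {q_register * 2 + double_word, double_lane};
}

int main() {
  DoubleLane loc = LocateQuadLane(3, 2, 5);  // q3, 16-bit lanes, lane 5
  assert(loc.d_register == 7 && loc.lane == 1);
  return 0;
}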
void MacroAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
Register scratch, int lane) {
int s_code = src.code() * 4 + lane;
VmovExtended(dst.code(), s_code, scratch);
}
void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
Register src_lane, NeonDataType dt, int lane) {
Move(dst, src);
int bytes_per_lane = dt & NeonDataTypeSizeMask; // 1, 2, 4
int log2_bytes_per_lane = bytes_per_lane / 2; // 0, 1, 2
int byte = lane << log2_bytes_per_lane;
int double_word = byte >> kDoubleSizeLog2;
int double_byte = byte & (kDoubleSize - 1);
int double_lane = double_byte >> log2_bytes_per_lane;
DwVfpRegister double_dst =
DwVfpRegister::from_code(dst.code() * 2 + double_word);
vmov(dt, double_dst, double_lane, src_lane);
}
void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
SwVfpRegister src_lane, Register scratch,
int lane) {
Move(dst, src);
int s_code = dst.code() * 4 + lane;
VmovExtended(s_code, src_lane.code(), scratch);
}
void MacroAssembler::Swizzle(QwNeonRegister dst, QwNeonRegister src,
Register scratch, NeonSize size, uint32_t lanes) {
// TODO(bbudge) Handle Int16x8, Int8x16 vectors.
DCHECK_EQ(Neon32, size);
DCHECK_IMPLIES(size == Neon32, lanes < 0xFFFFu);
if (size == Neon32) {
switch (lanes) {
// TODO(bbudge) Handle more special cases.
case 0x3210: // Identity.
Move(dst, src);
return;
case 0x1032: // Swap top and bottom.
vext(dst, src, src, 8);
return;
case 0x2103: // Rotation.
vext(dst, src, src, 12);
return;
case 0x0321: // Rotation.
vext(dst, src, src, 4);
return;
case 0x0000: // Equivalent to vdup.
case 0x1111:
case 0x2222:
case 0x3333: {
int lane_code = src.code() * 4 + (lanes & 0xF);
if (lane_code >= SwVfpRegister::kMaxNumRegisters) {
// TODO(bbudge) use vdup (vdup.32 dst, D<src>[lane]) once implemented.
int temp_code = kScratchDoubleReg.code() * 2;
VmovExtended(temp_code, lane_code, scratch);
lane_code = temp_code;
}
vdup(dst, SwVfpRegister::from_code(lane_code));
return;
}
case 0x2301: // Swap lanes 0, 1 and lanes 2, 3.
vrev64(Neon32, dst, src);
return;
default: // Handle all other cases with vmovs.
int src_code = src.code() * 4;
int dst_code = dst.code() * 4;
bool in_place = src.is(dst);
if (in_place) {
vmov(kScratchQuadReg, src);
src_code = kScratchQuadReg.code() * 4;
}
for (int i = 0; i < 4; i++) {
int lane = (lanes >> (i * 4) & 0xF);
VmovExtended(dst_code + i, src_code + lane, scratch);
}
if (in_place) {
// Restore zero reg.
veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
}
return;
}
}
}
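The lanes argument packs one source-lane selector per nibble, destination lane 0 in the lowest nibble: 0x3210 is the identity, 0x1032 swaps the two halves, and 0x2301 swaps lanes within each half. Extracting a selector:

#include <cassert>
#include <cstdint>

int SourceLane(uint32_t lanes, int dst_lane) {
  return (lanes >> (dst_lane * 4)) & 0xF;
}

int main() {
  assert(SourceLane(0x2301, 0) == 1);  // dst lane 0 reads src lane 1
  assert(SourceLane(0x2301, 3) == 2);  // dst lane 3 reads src lane 2
  assert(SourceLane(0x3210, 2) == 2);  // identity mapping
  return 0;
}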
void MacroAssembler::LslPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
@ -1629,18 +1758,16 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_flooding;
ExternalReference last_step_action =
ExternalReference::debug_last_step_action_address(isolate());
STATIC_ASSERT(StepFrame > StepIn);
mov(r4, Operand(last_step_action));
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_hook;
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
mov(r4, Operand(debug_hook_active));
ldrsb(r4, MemOperand(r4));
cmp(r4, Operand(StepIn));
b(lt, &skip_flooding);
cmp(r4, Operand(0));
b(eq, &skip_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@ -1657,7 +1784,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@ -1671,7 +1798,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
SmiUntag(expected.reg());
}
}
bind(&skip_flooding);
bind(&skip_hook);
}
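The new hook check is deliberately simpler than the stepping check it replaces: instead of an ordered comparison against the last step action, it loads one byte and branches on zero. In effect:

#include <cstdint>

// Sketch: the debugger publishes a single byte that is nonzero whenever
// the on-function-call hook should run.
bool ShouldCallDebugHook(const int8_t* debug_hook_flag) {
  return *debug_hook_flag != 0;  // cmp r4, #0; b eq, &skip_hook
}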
@ -1685,8 +1812,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(function.is(r1));
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r3));
if (call_wrapper.NeedsDebugStepCheck()) {
FloodFunctionIfStepping(function, new_target, expected, actual);
if (call_wrapper.NeedsDebugHookCheck()) {
CheckDebugHook(function, new_target, expected, actual);
}
// Clear the new.target register if not given.
@ -2177,112 +2304,6 @@ void MacroAssembler::FastAllocate(int object_size, Register result,
add(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
add(scratch1, scratch1,
Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate two-byte string in new space.
Allocate(scratch1, result, scratch2, scratch3, gc_required,
NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result,
length,
Heap::kStringMapRootIndex,
scratch1,
scratch2);
}
void MacroAssembler::AllocateOneByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
DCHECK(kCharSize == 1);
add(scratch1, length,
Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate one-byte string in new space.
Allocate(scratch1, result, scratch2, scratch3, gc_required,
NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
scratch1, scratch2);
}
void MacroAssembler::AllocateTwoByteConsString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
Heap::kConsStringMapRootIndex,
scratch1,
scratch2);
}
void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
scratch1, scratch2);
}
void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
Heap::kSlicedStringMapRootIndex,
scratch1,
scratch2);
}
void MacroAssembler::AllocateOneByteSlicedString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
scratch1, scratch2);
}
void MacroAssembler::CompareObjectType(Register object,
Register map,
Register type_reg,
@ -2314,68 +2335,6 @@ void MacroAssembler::CompareRoot(Register obj,
cmp(obj, ip);
}
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
b(ls, fail);
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
b(hi, fail);
}
void MacroAssembler::CheckFastSmiElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
b(hi, fail);
}
void MacroAssembler::StoreNumberToDoubleElements(
Register value_reg,
Register key_reg,
Register elements_reg,
Register scratch1,
LowDwVfpRegister double_scratch,
Label* fail,
int elements_offset) {
DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
Label smi_value, store;
// Handle smi values specially.
JumpIfSmi(value_reg, &smi_value);
// Ensure that the object is a heap number
CheckMap(value_reg,
scratch1,
isolate()->factory()->heap_number_map(),
fail,
DONT_DO_SMI_CHECK);
vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
VFPCanonicalizeNaN(double_scratch);
b(&store);
bind(&smi_value);
SmiToDouble(double_scratch, value_reg);
bind(&store);
add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
vstr(double_scratch,
FieldMemOperand(scratch1,
FixedDoubleArray::kHeaderSize - elements_offset));
}
void MacroAssembler::CompareMap(Register obj,
Register scratch,
Handle<Map> map,
@ -2878,28 +2837,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
}
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match) {
DCHECK(IsFastElementsKind(expected_kind));
DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
ldr(scratch, NativeContextMemOperand());
ldr(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
cmp(map_in_out, ip);
b(ne, no_map_match);
// Use the transitioned cached map.
ldr(map_in_out,
ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
ldr(dst, NativeContextMemOperand());
ldr(dst, ContextMemOperand(dst, index));
@ -2962,15 +2899,6 @@ void MacroAssembler::UntagAndJumpIfSmi(
b(cc, smi_case); // Shifter carry is not set for a smi.
}
void MacroAssembler::UntagAndJumpIfNotSmi(
Register dst, Register src, Label* non_smi_case) {
STATIC_ASSERT(kSmiTag == 0);
SmiUntag(dst, src, SetCC);
b(cs, non_smi_case); // Shifter carry is set for a non-smi.
}
void MacroAssembler::JumpIfEitherSmi(Register reg1,
Register reg2,
Label* on_either_smi) {
@ -3411,19 +3339,6 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
b(ne, failure);
}
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
Register scratch,
Label* failure) {
const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
and_(scratch, type, Operand(kFlatOneByteStringMask));
cmp(scratch, Operand(kFlatOneByteStringTag));
b(ne, failure);
}
static const int kRegisterPassedArguments = 4;
@ -3861,45 +3776,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
return no_reg;
}
void MacroAssembler::JumpIfDictionaryInPrototypeChain(
Register object,
Register scratch0,
Register scratch1,
Label* found) {
DCHECK(!scratch1.is(scratch0));
Register current = scratch0;
Label loop_again, end;
// scratch contained elements pointer.
mov(current, object);
ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
CompareRoot(current, Heap::kNullValueRootIndex);
b(eq, &end);
// Loop based on the map going up the prototype chain.
bind(&loop_again);
ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
ldrb(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
cmp(scratch1, Operand(JS_OBJECT_TYPE));
b(lo, found);
ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
DecodeField<Map::ElementsKindBits>(scratch1);
cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
b(eq, found);
ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
CompareRoot(current, Heap::kNullValueRootIndex);
b(ne, &loop_again);
bind(&end);
}
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,

104
deps/v8/src/arm/macro-assembler-arm.h

@ -184,6 +184,10 @@ class MacroAssembler: public Assembler {
}
void Move(SwVfpRegister dst, SwVfpRegister src, Condition cond = al);
void Move(DwVfpRegister dst, DwVfpRegister src, Condition cond = al);
void Move(QwNeonRegister dst, QwNeonRegister src);
// Register swap.
void Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
void Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1);
void Load(Register dst, const MemOperand& src, Representation r);
void Store(Register src, const MemOperand& dst, Representation r);
@ -557,6 +561,16 @@ class MacroAssembler: public Assembler {
void VmovExtended(int dst_code, const MemOperand& src, Register scratch);
void VmovExtended(const MemOperand& dst, int src_code, Register scratch);
void ExtractLane(Register dst, QwNeonRegister src, NeonDataType dt, int lane);
void ExtractLane(SwVfpRegister dst, QwNeonRegister src, Register scratch,
int lane);
void ReplaceLane(QwNeonRegister dst, QwNeonRegister src, Register src_lane,
NeonDataType dt, int lane);
void ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
SwVfpRegister src_lane, Register scratch, int lane);
void Swizzle(QwNeonRegister dst, QwNeonRegister src, Register scratch,
NeonSize size, uint32_t lanes);
void LslPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
void LslPair(Register dst_low, Register dst_high, Register src_low,
@ -635,17 +649,6 @@ class MacroAssembler: public Assembler {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
}
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match);
void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
@ -678,9 +681,10 @@ class MacroAssembler: public Assembler {
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
void FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual);
// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
@ -794,32 +798,6 @@ class MacroAssembler: public Assembler {
void FastAllocate(Register object_size, Register result, Register result_end,
Register scratch, AllocationFlags flags);
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required);
void AllocateOneByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3, Label* gc_required);
void AllocateTwoByteConsString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
void AllocateOneByteConsString(Register result, Register length,
Register scratch1, Register scratch2,
Label* gc_required);
void AllocateTwoByteSlicedString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
void AllocateOneByteSlicedString(Register result, Register length,
Register scratch1, Register scratch2,
Label* gc_required);
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
// when control continues at the gc_required label.
@ -884,29 +862,6 @@ class MacroAssembler: public Assembler {
Register type_reg,
InstanceType type);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map,
Register scratch,
Label* fail);
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
void CheckFastSmiElements(Register map,
Register scratch,
Label* fail);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
// the FastDoubleElements array elements. Otherwise jump to fail.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register elements_reg,
Register scratch1,
LowDwVfpRegister double_scratch,
Label* fail,
int elements_offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
// set with result of map compare. If multiple map compares are required, the
@@ -1287,10 +1242,6 @@ class MacroAssembler: public Assembler {
// Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
// Untag the source value into destination and jump if source is not a smi.
// Source and destination can be the same register.
void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
// Test if the register contains a smi (Z == 0 (eq) if true).
inline void SmiTst(Register value) {
tst(value, Operand(kSmiTagMask));
@@ -1380,11 +1331,6 @@ class MacroAssembler: public Assembler {
Register first_object_instance_type, Register second_object_instance_type,
Register scratch1, Register scratch2, Label* failure);
// Check if instance type is sequential one-byte string and jump to label if
// it is not.
void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
Label* failure);
void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
void EmitSeqStringSetCharCheck(Register string,
@@ -1464,20 +1410,6 @@ class MacroAssembler: public Assembler {
Register scratch_reg,
Label* no_memento_found);
void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
Register scratch_reg,
Label* memento_found) {
Label no_memento_found;
TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
&no_memento_found);
b(eq, memento_found);
bind(&no_memento_found);
}
// Jumps to found label if a prototype map has dictionary elements.
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
Register scratch1, Label* found);
// Loads the constant pool pointer (pp) register.
void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address);
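[Editor's note] As an aside on the pair-shift helpers declared earlier in this header: LslPair performs a 64-bit logical shift left across a (high, low) pair of 32-bit registers. A minimal C++ sketch of the computation it implements (illustrative only; the real helper also has a register-shift variant with its own scratch-register constraints, and this sketch assumes shift amounts in 0..63):

#include <cstdint>

// Shift the 64-bit value (src_high:src_low) left by `shift` and split
// the result back into two 32-bit halves, as LslPair does.
void LslPair(uint32_t* dst_low, uint32_t* dst_high,
             uint32_t src_low, uint32_t src_high, uint32_t shift) {
  uint64_t value = (static_cast<uint64_t>(src_high) << 32) | src_low;
  value <<= (shift & 63u);  // mask to the architectural shift range
  *dst_low = static_cast<uint32_t>(value & 0xFFFFFFFFu);
  *dst_high = static_cast<uint32_t>(value >> 32);
}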

1201
deps/v8/src/arm/simulator-arm.cc

File diff suppressed because it is too large

11
deps/v8/src/arm/simulator-arm.h

@@ -151,10 +151,11 @@ class Simulator {
void set_d_register(int dreg, const uint64_t* value);
void get_d_register(int dreg, uint32_t* value);
void set_d_register(int dreg, const uint32_t* value);
void get_q_register(int qreg, uint64_t* value);
void set_q_register(int qreg, const uint64_t* value);
void get_q_register(int qreg, uint32_t* value);
void set_q_register(int qreg, const uint32_t* value);
// Support for NEON.
template <typename T>
void get_q_register(int qreg, T* value);
template <typename T>
void set_q_register(int qreg, const T* value);
void set_s_register(int reg, unsigned int value);
unsigned int get_s_register(int reg) const;
@@ -339,6 +340,8 @@ class Simulator {
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
void DecodeVCMP(Instruction* instr);
void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
int32_t ConvertDoubleToInt(double val, bool unsigned_integer,
VFPRoundingMode mode);
void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
// Executes one instruction.
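[Editor's note] The simulator change above collapses the width-specific get_q_register/set_q_register overloads into a single templated pair for NEON support. A minimal sketch of the same pattern, assuming a 128-bit Q register stored as raw bytes (QRegister here is illustrative, not the simulator's real type):

#include <cstdint>
#include <cstring>

class QRegister {
 public:
  // Copy all 16 bytes out, reinterpreted as lanes of T (e.g. 4 x uint32_t).
  template <typename T>
  void get(T* value) const {
    static_assert(sizeof(T) <= 16, "lane type too wide");
    std::memcpy(value, bytes_, sizeof(bytes_));
  }
  template <typename T>
  void set(const T* value) {
    static_assert(sizeof(T) <= 16, "lane type too wide");
    std::memcpy(bytes_, value, sizeof(bytes_));
  }

 private:
  uint8_t bytes_[16];
};

A call such as `uint32_t lanes[4]; reg.get(lanes);` then replaces the former uint32_t* overload, and the same template serves every lane width.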

15
deps/v8/src/arm64/assembler-arm64.cc

@@ -194,13 +194,18 @@ Address RelocInfo::wasm_global_reference() {
return Memory::Address_at(Assembler::target_pointer_address_at(pc_));
}
uint32_t RelocInfo::wasm_function_table_size_reference() {
DCHECK(IsWasmFunctionTableSizeReference(rmode_));
return Memory::uint32_at(Assembler::target_pointer_address_at(pc_));
}
void RelocInfo::unchecked_update_wasm_memory_reference(
Address address, ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
}
void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
ICacheFlushMode flush_mode) {
void RelocInfo::unchecked_update_wasm_size(uint32_t size,
ICacheFlushMode flush_mode) {
Memory::uint32_at(Assembler::target_pointer_address_at(pc_)) = size;
}
@@ -2950,15 +2955,13 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
(rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
(rmode == RelocInfo::DEOPT_SCRIPT_OFFSET) ||
(rmode == RelocInfo::DEOPT_INLINING_ID) ||
(rmode == RelocInfo::DEOPT_REASON) || (rmode == RelocInfo::DEOPT_ID) ||
(rmode == RelocInfo::GENERATOR_CONTINUATION)) {
(rmode == RelocInfo::DEOPT_REASON) || (rmode == RelocInfo::DEOPT_ID)) {
// Adjust code for new modes.
DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsComment(rmode) ||
RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsDeoptId(rmode) ||
RelocInfo::IsDeoptPosition(rmode) ||
RelocInfo::IsInternalReference(rmode) ||
RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode) ||
RelocInfo::IsGeneratorContinuation(rmode));
RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode));
// These modes do not need an entry in the constant pool.
} else {
constpool_.RecordEntry(data, rmode);
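[Editor's note] For context on unchecked_update_wasm_size above: the relocated value is a raw uint32 stored at the target pointer address, so the update is a direct in-place store. A sketch, assuming `target` already points at the encoded size (cache flushing, the ICacheFlushMode argument, is omitted here):

#include <cstdint>
#include <cstring>

// Overwrite a 32-bit size embedded in generated code at `target`.
void UpdateEmbeddedSize(uint8_t* target, uint32_t new_size) {
  std::memcpy(target, &new_size, sizeof(new_size));
}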

3
deps/v8/src/arm64/assembler-arm64.h

@@ -938,9 +938,6 @@ class Assembler : public AssemblerBase {
int buffer_space() const;
// Mark generator continuation.
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode);

452
deps/v8/src/arm64/code-stubs-arm64.cc

@@ -33,17 +33,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
void FastFunctionBindStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
@@ -590,8 +579,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cond == eq) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(lhs, rhs);
__ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
__ Push(cp);
__ Call(strict() ? isolate()->builtins()->StrictEqual()
: isolate()->builtins()->Equal(),
RelocInfo::CODE_TARGET);
__ Pop(cp);
}
// Turn true into 0 and false into some non-zero value.
STATIC_ASSERT(EQUAL == 0);
@@ -2980,234 +2972,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
static void HandleArrayCases(MacroAssembler* masm, Register feedback,
Register receiver_map, Register scratch1,
Register scratch2, bool is_polymorphic,
Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label load_smi_map, compare_map;
Label start_polymorphic;
Register cached_map = scratch1;
__ Ldr(cached_map,
FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
__ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
__ Cmp(receiver_map, cached_map);
__ B(ne, &start_polymorphic);
// found, now call handler.
Register handler = feedback;
__ Ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
__ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
__ Jump(feedback);
Register length = scratch2;
__ Bind(&start_polymorphic);
__ Ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
if (!is_polymorphic) {
__ Cmp(length, Operand(Smi::FromInt(2)));
__ B(eq, miss);
}
Register too_far = length;
Register pointer_reg = feedback;
// +-----+------+------+-----+-----+ ... ----+
// | map | len | wm0 | h0 | wm1 | hN |
// +-----+------+------+-----+-----+ ... ----+
// 0 1 2 len-1
// ^ ^
// | |
// pointer_reg too_far
// aka feedback scratch2
// also need receiver_map
// use cached_map (scratch1) to look in the weak map values.
__ Add(too_far, feedback,
Operand::UntagSmiAndScale(length, kPointerSizeLog2));
__ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
__ Add(pointer_reg, feedback,
FixedArray::OffsetOfElementAt(2) - kHeapObjectTag);
__ Bind(&next_loop);
__ Ldr(cached_map, MemOperand(pointer_reg));
__ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
__ Cmp(receiver_map, cached_map);
__ B(ne, &prepare_next);
__ Ldr(handler, MemOperand(pointer_reg, kPointerSize));
__ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
__ Jump(handler);
__ Bind(&prepare_next);
__ Add(pointer_reg, pointer_reg, kPointerSize * 2);
__ Cmp(pointer_reg, too_far);
__ B(lt, &next_loop);
// We exhausted our array of map handler pairs.
__ jmp(miss);
}
static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
Register receiver_map, Register feedback,
Register vector, Register slot,
Register scratch, Label* compare_map,
Label* load_smi_map, Label* try_array) {
__ JumpIfSmi(receiver, load_smi_map);
__ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ bind(compare_map);
Register cached_map = scratch;
// Move the weak map into the weak_cell register.
__ Ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
__ Cmp(cached_map, receiver_map);
__ B(ne, try_array);
Register handler = feedback;
__ Add(handler, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
__ Ldr(handler,
FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
__ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
__ Jump(handler);
}
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
Register receiver_map, Register scratch1,
Register scratch2, Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label start_polymorphic;
Label transition_call;
Register cached_map = scratch1;
Register too_far = scratch2;
Register pointer_reg = feedback;
__ Ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
// +-----+------+------+-----+-----+-----+ ... ----+
// | map | len | wm0 | wt0 | h0 | wm1 | hN |
// +-----+------+------+-----+-----+ ----+ ... ----+
// 0 1 2 len-1
// ^ ^
// | |
// pointer_reg too_far
// aka feedback scratch2
// also need receiver_map
// use cached_map (scratch1) to look in the weak map values.
__ Add(too_far, feedback,
Operand::UntagSmiAndScale(too_far, kPointerSizeLog2));
__ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
__ Add(pointer_reg, feedback,
FixedArray::OffsetOfElementAt(0) - kHeapObjectTag);
__ Bind(&next_loop);
__ Ldr(cached_map, MemOperand(pointer_reg));
__ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
__ Cmp(receiver_map, cached_map);
__ B(ne, &prepare_next);
// Is it a transitioning store?
__ Ldr(too_far, MemOperand(pointer_reg, kPointerSize));
__ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
__ B(ne, &transition_call);
__ Ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
__ Add(pointer_reg, pointer_reg, Code::kHeaderSize - kHeapObjectTag);
__ Jump(pointer_reg);
__ Bind(&transition_call);
__ Ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
__ JumpIfSmi(too_far, miss);
__ Ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
// Load the map into the correct register.
DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
__ mov(feedback, too_far);
__ Add(receiver_map, receiver_map, Code::kHeaderSize - kHeapObjectTag);
__ Jump(receiver_map);
__ Bind(&prepare_next);
__ Add(pointer_reg, pointer_reg, kPointerSize * 3);
__ Cmp(pointer_reg, too_far);
__ B(lt, &next_loop);
// We exhausted our array of map handler pairs.
__ jmp(miss);
}
void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // x1
Register key = StoreWithVectorDescriptor::NameRegister(); // x2
Register vector = StoreWithVectorDescriptor::VectorRegister(); // x3
Register slot = StoreWithVectorDescriptor::SlotRegister(); // x4
DCHECK(StoreWithVectorDescriptor::ValueRegister().is(x0)); // x0
Register feedback = x5;
Register receiver_map = x6;
Register scratch1 = x7;
__ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
__ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak cell in feedback. We do know it's safe to look
// at WeakCell::kValueOffset.
Label try_array, load_smi_map, compare_map;
Label not_array, miss;
HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
scratch1, &compare_map, &load_smi_map, &try_array);
__ Bind(&try_array);
// Is it a fixed array?
__ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
// We have a polymorphic element handler.
Label try_poly_name;
HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, x8, &miss);
__ Bind(&not_array);
// Is it generic?
__ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
&try_poly_name);
Handle<Code> megamorphic_stub =
KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ Bind(&try_poly_name);
// We might have a name in feedback, and a fixed array in the next slot.
__ Cmp(key, feedback);
__ B(ne, &miss);
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
__ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
__ Ldr(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, feedback, receiver_map, scratch1, x8, false, &miss);
__ Bind(&miss);
KeyedStoreIC::GenerateMiss(masm);
__ Bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&compare_map);
}
// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
// a "Push lr" instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
@@ -3309,91 +3073,6 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
__ Blr(lr);
}
// Probe the name dictionary in the 'elements' register.
// Jump to the 'done' label if a property with the given name is found.
// Jump to the 'miss' label otherwise.
//
// If lookup was successful 'scratch2' will be equal to elements + 4 * index.
// 'elements' and 'name' registers are preserved on miss.
void NameDictionaryLookupStub::GeneratePositiveLookup(
MacroAssembler* masm,
Label* miss,
Label* done,
Register elements,
Register name,
Register scratch1,
Register scratch2) {
DCHECK(!AreAliased(elements, name, scratch1, scratch2));
// Assert that name contains a string.
__ AssertName(name);
// Compute the capacity mask.
__ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
__ Sub(scratch1, scratch1, 1);
// Generate an unrolled loop that performs a few probes before giving up.
for (int i = 0; i < kInlinedProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
__ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
if (i > 0) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
DCHECK(NameDictionary::GetProbeOffset(i) <
1 << (32 - Name::kHashFieldOffset));
__ Add(scratch2, scratch2, Operand(
NameDictionary::GetProbeOffset(i) << Name::kHashShift));
}
__ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
// Scale the index by multiplying by the element size.
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
// Check if the key is identical to the name.
UseScratchRegisterScope temps(masm);
Register scratch3 = temps.AcquireX();
__ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
__ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset));
__ Cmp(name, scratch3);
__ B(eq, done);
}
// The inlined probes didn't find the entry.
// Call the complete stub to scan the whole dictionary.
CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
spill_list.Combine(lr);
spill_list.Remove(scratch1);
spill_list.Remove(scratch2);
__ PushCPURegList(spill_list);
if (name.is(x0)) {
DCHECK(!elements.is(x1));
__ Mov(x1, name);
__ Mov(x0, elements);
} else {
__ Mov(x0, elements);
__ Mov(x1, name);
}
Label not_found;
NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
__ CallStub(&stub);
__ Cbz(x0, &not_found);
__ Mov(scratch2, x2); // Move entry index into scratch2.
__ PopCPURegList(spill_list);
__ B(done);
__ Bind(&not_found);
__ PopCPURegList(spill_list);
__ B(miss);
}
void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
Label* done,
@@ -3875,127 +3554,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
void FastNewObjectStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x1 : target
// -- x3 : new target
// -- cp : context
// -- lr : return address
// -----------------------------------
__ AssertFunction(x1);
__ AssertReceiver(x3);
// Verify that the new target is a JSFunction.
Label new_object;
__ JumpIfNotObjectType(x3, x2, x2, JS_FUNCTION_TYPE, &new_object);
// Load the initial map and verify that it's in fact a map.
__ Ldr(x2, FieldMemOperand(x3, JSFunction::kPrototypeOrInitialMapOffset));
__ JumpIfSmi(x2, &new_object);
__ JumpIfNotObjectType(x2, x0, x0, MAP_TYPE, &new_object);
// Fall back to runtime if the target differs from the new target's
// initial map constructor.
__ Ldr(x0, FieldMemOperand(x2, Map::kConstructorOrBackPointerOffset));
__ CompareAndBranch(x0, x1, ne, &new_object);
// Allocate the JSObject on the heap.
Label allocate, done_allocate;
__ Ldrb(x4, FieldMemOperand(x2, Map::kInstanceSizeOffset));
__ Allocate(x4, x0, x5, x6, &allocate, SIZE_IN_WORDS);
__ Bind(&done_allocate);
// Initialize the JSObject fields.
STATIC_ASSERT(JSObject::kMapOffset == 0 * kPointerSize);
__ Str(x2, FieldMemOperand(x0, JSObject::kMapOffset));
__ LoadRoot(x3, Heap::kEmptyFixedArrayRootIndex);
STATIC_ASSERT(JSObject::kPropertiesOffset == 1 * kPointerSize);
STATIC_ASSERT(JSObject::kElementsOffset == 2 * kPointerSize);
__ Str(x3, FieldMemOperand(x0, JSObject::kPropertiesOffset));
__ Str(x3, FieldMemOperand(x0, JSObject::kElementsOffset));
STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
__ Add(x1, x0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
// ----------- S t a t e -------------
// -- x0 : result (tagged)
// -- x1 : result fields (untagged)
// -- x5 : result end (untagged)
// -- x2 : initial map
// -- cp : context
// -- lr : return address
// -----------------------------------
// Perform in-object slack tracking if requested.
Label slack_tracking;
STATIC_ASSERT(Map::kNoSlackTracking == 0);
__ LoadRoot(x6, Heap::kUndefinedValueRootIndex);
__ Ldr(w3, FieldMemOperand(x2, Map::kBitField3Offset));
__ TestAndBranchIfAnySet(w3, Map::ConstructionCounter::kMask,
&slack_tracking);
{
// Initialize all in-object fields with undefined.
__ InitializeFieldsWithFiller(x1, x5, x6);
__ Ret();
}
__ Bind(&slack_tracking);
{
// Decrease generous allocation count.
STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
__ Sub(w3, w3, 1 << Map::ConstructionCounter::kShift);
__ Str(w3, FieldMemOperand(x2, Map::kBitField3Offset));
// Initialize the in-object fields with undefined.
__ Ldrb(x4, FieldMemOperand(x2, Map::kUnusedPropertyFieldsOffset));
__ Sub(x4, x5, Operand(x4, LSL, kPointerSizeLog2));
__ InitializeFieldsWithFiller(x1, x4, x6);
// Initialize the remaining (reserved) fields with one pointer filler map.
__ LoadRoot(x6, Heap::kOnePointerFillerMapRootIndex);
__ InitializeFieldsWithFiller(x1, x5, x6);
// Check if we can finalize the instance size.
Label finalize;
STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
__ TestAndBranchIfAllClear(w3, Map::ConstructionCounter::kMask, &finalize);
__ Ret();
// Finalize the instance size.
__ Bind(&finalize);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(x0, x2);
__ CallRuntime(Runtime::kFinalizeInstanceSize);
__ Pop(x0);
}
__ Ret();
}
// Fall back to %AllocateInNewSpace.
__ Bind(&allocate);
{
FrameScope scope(masm, StackFrame::INTERNAL);
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
__ Mov(x4,
Operand(x4, LSL, kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
__ Push(x2, x4);
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Pop(x2);
}
__ Ldrb(x5, FieldMemOperand(x2, Map::kInstanceSizeOffset));
__ Add(x5, x0, Operand(x5, LSL, kPointerSizeLog2));
STATIC_ASSERT(kHeapObjectTag == 1);
__ Sub(x5, x5, kHeapObjectTag); // Subtract the tag from end.
__ B(&done_allocate);
// Fall back to %NewObject.
__ Bind(&new_object);
__ Push(x1, x3);
__ TailCallRuntime(Runtime::kNewObject);
}
void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x1 : function
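[Editor's note] The removed NameDictionaryLookupStub::GeneratePositiveLookup above unrolls a few probes of V8's name dictionary; the masked-index formula its comments describe is ordinary quadratic probing. The arithmetic in plain C++ (a sketch; capacity is assumed to be a power of two, and the entry size of 3 words comes from the STATIC_ASSERT in the removed code):

#include <cstdint>

// Probe i of the lookup visits index (hash + i + i*i) & mask, then
// scales by the 3-word entry size to get the offset into the store.
uint32_t ProbeOffset(uint32_t hash, uint32_t i, uint32_t capacity) {
  uint32_t mask = capacity - 1;       // valid because capacity is 2^k
  uint32_t index = (hash + i + i * i) & mask;
  return index * 3;                   // NameDictionary::kEntrySize == 3
}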

8
deps/v8/src/arm64/code-stubs-arm64.h

@@ -355,14 +355,6 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
Handle<Name> name,
Register scratch0);
static void GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register elements,
Register name,
Register scratch1,
Register scratch2);
bool SometimesSetsUpAFrame() override { return false; }
private:

290
deps/v8/src/arm64/codegen-arm64.cc

@@ -40,272 +40,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* allocation_memento_found) {
ASM_LOCATION(
"ElementsTransitionGenerator::GenerateMapChangeElementsTransition");
DCHECK(!AreAliased(receiver, key, value, target_map));
if (mode == TRACK_ALLOCATION_SITE) {
DCHECK(allocation_memento_found != NULL);
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
allocation_memento_found);
}
// Set transitioned map.
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
target_map,
x10,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
Label gc_required, only_change_map;
Register elements = x4;
Register length = x5;
Register array_size = x6;
Register array = x7;
Register scratch = x6;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map,
elements, length, array_size, array));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
__ Push(lr);
__ Ldrsw(length, UntagSmiFieldMemOperand(elements,
FixedArray::kLengthOffset));
// Allocate new FixedDoubleArray.
__ Lsl(array_size, length, kDoubleSizeLog2);
__ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
__ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
// Register array is non-tagged heap object.
// Set the destination FixedDoubleArray's length and map.
Register map_root = array_size;
__ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
__ SmiTag(x11, length);
__ Str(x11, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
__ Str(map_root, FieldMemOperand(array, HeapObject::kMapOffset));
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
__ Move(x10, array);
__ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver, JSObject::kElementsOffset, x10, scratch,
kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Prepare for conversion loop.
Register src_elements = x10;
Register dst_elements = x11;
Register dst_end = x12;
__ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_elements, array, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));
FPRegister nan_d = d1;
__ Fmov(nan_d, rawbits_to_double(kHoleNanInt64));
Label entry, done;
__ B(&entry);
__ Bind(&only_change_map);
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ B(&done);
// Call into runtime if GC is required.
__ Bind(&gc_required);
__ Pop(lr);
__ B(fail);
// Iterate over the array, copying and converting smis to doubles. If an
// element is non-smi, write a hole to the destination.
{
Label loop;
__ Bind(&loop);
__ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
__ SmiUntagToDouble(d0, x13, kSpeculativeUntag);
__ Tst(x13, kSmiTagMask);
__ Fcsel(d0, d0, nan_d, eq);
__ Str(d0, MemOperand(dst_elements, kDoubleSize, PostIndex));
__ Bind(&entry);
__ Cmp(dst_elements, dst_end);
__ B(lt, &loop);
}
__ Pop(lr);
__ Bind(&done);
}
void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
Register elements = x4;
Register array_size = x6;
Register array = x7;
Register length = x5;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map,
elements, array_size, array, length));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
Label only_change_map;
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
__ Push(lr);
// TODO(all): These registers may not need to be pushed. Examine
// RecordWriteStub and check whether it's needed.
__ Push(target_map, receiver, key, value);
__ Ldrsw(length, UntagSmiFieldMemOperand(elements,
FixedArray::kLengthOffset));
// Allocate new FixedArray.
Label gc_required;
__ Mov(array_size, FixedDoubleArray::kHeaderSize);
__ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
__ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);
// Set destination FixedDoubleArray's length and map.
Register map_root = array_size;
__ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
__ SmiTag(x11, length);
__ Str(x11, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
__ Str(map_root, FieldMemOperand(array, HeapObject::kMapOffset));
// Prepare for conversion loop.
Register src_elements = x10;
Register dst_elements = x11;
Register dst_end = x12;
Register the_hole = x14;
__ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
__ Add(src_elements, elements,
FixedDoubleArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_elements, array, FixedArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));
// Allocating heap numbers in the loop below can fail and cause a jump to
// gc_required. We can't leave a partly initialized FixedArray behind,
// so pessimistically fill it with holes now.
Label initialization_loop, initialization_loop_entry;
__ B(&initialization_loop_entry);
__ bind(&initialization_loop);
__ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
__ bind(&initialization_loop_entry);
__ Cmp(dst_elements, dst_end);
__ B(lt, &initialization_loop);
__ Add(dst_elements, array, FixedArray::kHeaderSize - kHeapObjectTag);
Register heap_num_map = x15;
__ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);
Label entry;
__ B(&entry);
// Call into runtime if GC is required.
__ Bind(&gc_required);
__ Pop(value, key, receiver, target_map);
__ Pop(lr);
__ B(fail);
{
Label loop, convert_hole;
__ Bind(&loop);
__ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
__ Cmp(x13, kHoleNanInt64);
__ B(eq, &convert_hole);
// Non-hole double, copy value into a heap number.
Register heap_num = length;
Register scratch = array_size;
Register scratch2 = elements;
__ AllocateHeapNumber(heap_num, &gc_required, scratch, scratch2,
x13, heap_num_map);
__ Mov(x13, dst_elements);
__ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
__ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ B(&entry);
// Replace the-hole NaN with the-hole pointer.
__ Bind(&convert_hole);
__ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
__ Bind(&entry);
__ Cmp(dst_elements, dst_end);
__ B(lt, &loop);
}
__ Pop(value, key, receiver, target_map);
// Replace receiver's backing store with newly created and filled FixedArray.
__ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver, JSObject::kElementsOffset, array, x13,
kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Pop(lr);
__ Bind(&only_change_map);
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x13,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
@@ -338,30 +72,22 @@ bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
return MacroAssembler::IsYoungSequence(isolate, sequence);
}
Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(isolate, sequence)) {
*age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
byte* target = sequence + kCodeAgeStubEntryOffset;
Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
GetCodeAgeAndParity(stub, age, parity);
}
byte* target = sequence + kCodeAgeStubEntryOffset;
Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
return GetAgeOfCodeAgeStub(stub);
}
void Code::PatchPlatformCodeAge(Isolate* isolate,
byte* sequence,
Code::Age age,
MarkingParity parity) {
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
Code::Age age) {
PatchingAssembler patcher(isolate, sequence,
kNoCodeAgeSequenceLength / kInstructionSize);
if (age == kNoAgeCodeAge) {
MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
} else {
Code * stub = GetCodeAgeStub(isolate, age, parity);
Code* stub = GetCodeAgeStub(isolate, age);
MacroAssembler::EmitCodeAgeSequence(&patcher, stub);
}
}
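[Editor's note] The removed GenerateSmiToDouble loop above copies each element, widening smis to doubles and writing the hole NaN for anything else. The same logic in plain C++ (a sketch: the arm64 smi layout — payload in the upper 32 bits, tag bit 0 clear — and the exact kHoleNanInt64 bit pattern are stated assumptions, not taken from this diff):

#include <cstdint>
#include <cstring>

// Assumed hole-NaN bit pattern; V8 treats it as "no element here".
constexpr uint64_t kHoleNanInt64 = 0x7FF7FFFFFFF7FFFFull;

void SmisToDoubles(const uint64_t* src, uint64_t* dst, int length) {
  for (int i = 0; i < length; ++i) {
    if ((src[i] & 1) == 0) {  // smi: tag bit clear, payload in high word
      double d = static_cast<double>(static_cast<int64_t>(src[i]) >> 32);
      std::memcpy(&dst[i], &d, sizeof(d));
    } else {
      dst[i] = kHoleNanInt64;  // non-smi element becomes a hole
    }
  }
}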

12
deps/v8/src/arm64/interface-descriptors-arm64.cc

@@ -64,14 +64,10 @@ const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: function info
Register registers[] = {x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {x1, x3};
// x1: function info
// x2: feedback vector
// x3: slot
Register registers[] = {x1, x2, x3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}

316
deps/v8/src/arm64/macro-assembler-arm64.cc

@@ -2203,65 +2203,6 @@ void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Bind(&done);
}
void MacroAssembler::JumpIfEitherIsNotSequentialOneByteStrings(
Register first, Register second, Register scratch1, Register scratch2,
Label* failure, SmiCheckType smi_check) {
if (smi_check == DO_SMI_CHECK) {
JumpIfEitherSmi(first, second, failure);
} else if (emit_debug_code()) {
DCHECK(smi_check == DONT_DO_SMI_CHECK);
Label not_smi;
JumpIfEitherSmi(first, second, NULL, &not_smi);
// At least one input is a smi, but the flags indicated a smi check wasn't
// needed.
Abort(kUnexpectedSmi);
Bind(&not_smi);
}
// Test that both first and second are sequential one-byte strings.
Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
JumpIfEitherInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, scratch1,
scratch2, failure);
}
void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte(
Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
DCHECK(!AreAliased(scratch1, second));
DCHECK(!AreAliased(scratch1, scratch2));
const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
And(scratch1, first, kFlatOneByteStringMask);
And(scratch2, second, kFlatOneByteStringMask);
Cmp(scratch1, kFlatOneByteStringTag);
Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
B(ne, failure);
}
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
Register scratch,
Label* failure) {
const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
And(scratch, type, kFlatOneByteStringMask);
Cmp(scratch, kFlatOneByteStringTag);
B(ne, failure);
}
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
@@ -2425,17 +2366,15 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Bind(&regular_invoke);
}
void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_flooding;
ExternalReference last_step_action =
ExternalReference::debug_last_step_action_address(isolate());
STATIC_ASSERT(StepFrame > StepIn);
Mov(x4, Operand(last_step_action));
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_hook;
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
Mov(x4, Operand(debug_hook_active));
Ldrsb(x4, MemOperand(x4));
CompareAndBranch(x4, Operand(StepIn), lt, &skip_flooding);
CompareAndBranch(x4, Operand(0), eq, &skip_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -2452,7 +2391,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -2466,7 +2405,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
SmiUntag(expected.reg());
}
}
bind(&skip_flooding);
bind(&skip_hook);
}
@@ -2480,7 +2419,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(function.is(x1));
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3));
FloodFunctionIfStepping(function, new_target, expected, actual);
if (call_wrapper.NeedsDebugHookCheck()) {
CheckDebugHook(function, new_target, expected, actual);
}
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
@@ -2709,12 +2650,12 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
void MacroAssembler::EnterFrame(StackFrame::Type type) {
DCHECK(jssp.Is(StackPointer()));
UseScratchRegisterScope temps(this);
Register type_reg = temps.AcquireX();
Register code_reg = temps.AcquireX();
if (type == StackFrame::INTERNAL) {
DCHECK(jssp.Is(StackPointer()));
Mov(type_reg, Smi::FromInt(type));
Push(lr, fp);
Push(type_reg);
@@ -2725,7 +2666,18 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
// jssp[3] : fp
// jssp[1] : type
// jssp[0] : [code object]
} else if (type == StackFrame::WASM_COMPILED) {
DCHECK(csp.Is(StackPointer()));
Mov(type_reg, Smi::FromInt(type));
Push(xzr, lr);
Push(fp, type_reg);
Add(fp, csp, TypedFrameConstants::kFixedFrameSizeFromFp);
// csp[3] for alignment
// csp[2] : lr
// csp[1] : fp
// csp[0] : type
} else {
DCHECK(jssp.Is(StackPointer()));
Mov(type_reg, Smi::FromInt(type));
Push(lr, fp);
Push(type_reg);
@@ -3208,114 +3160,6 @@ void MacroAssembler::FastAllocate(Register object_size, Register result,
ObjectTag(result, result);
}
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required) {
DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
Add(scratch1, length, length); // Length in bytes, not chars.
Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
Bic(scratch1, scratch1, kObjectAlignmentMask);
// Allocate two-byte string in new space.
Allocate(scratch1, result, scratch2, scratch3, gc_required,
NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result,
length,
Heap::kStringMapRootIndex,
scratch1,
scratch2);
}
void MacroAssembler::AllocateOneByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3,
Label* gc_required) {
DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
STATIC_ASSERT(kCharSize == 1);
Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
Bic(scratch1, scratch1, kObjectAlignmentMask);
// Allocate one-byte string in new space.
Allocate(scratch1, result, scratch2, scratch3, gc_required,
NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
scratch1, scratch2);
}
void MacroAssembler::AllocateTwoByteConsString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
Heap::kConsStringMapRootIndex,
scratch1,
scratch2);
}
void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
scratch1, scratch2);
}
void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
DCHECK(!AreAliased(result, length, scratch1, scratch2));
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
Heap::kSlicedStringMapRootIndex,
scratch1,
scratch2);
}
void MacroAssembler::AllocateOneByteSlicedString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
DCHECK(!AreAliased(result, length, scratch1, scratch2));
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
scratch1, scratch2);
}
// Allocates a heap number or jumps to the need_gc label if the young space
// is full and a scavenge is needed.
void MacroAssembler::AllocateHeapNumber(Register result,
@@ -3664,59 +3508,6 @@ void MacroAssembler::TestAndSplit(const Register& reg,
}
}
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
// If cond==ls, set cond=hi, otherwise compare.
Ccmp(scratch,
Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
B(hi, fail);
}
// Note: The ARM version of this clobbers elements_reg, but this version does
// not. Some uses of this in ARM64 assume that elements_reg will be preserved.
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register elements_reg,
Register scratch1,
FPRegister fpscratch1,
Label* fail,
int elements_offset) {
DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
Label store_num;
// Speculatively convert the smi to a double - all smis can be exactly
// represented as a double.
SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
// If value_reg is a smi, we're done.
JumpIfSmi(value_reg, &store_num);
// Ensure that the object is a heap number.
JumpIfNotHeapNumber(value_reg, fail);
Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
// Canonicalize NaNs.
CanonicalizeNaN(fpscratch1);
// Store the result.
Bind(&store_num);
Add(scratch1, elements_reg,
Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
Str(fpscratch1,
FieldMemOperand(scratch1,
FixedDoubleArray::kHeaderSize - elements_offset));
}
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@@ -4276,39 +4067,6 @@ void MacroAssembler::JumpIfBlack(Register object,
HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
}
void MacroAssembler::JumpIfDictionaryInPrototypeChain(
Register object,
Register scratch0,
Register scratch1,
Label* found) {
DCHECK(!AreAliased(object, scratch0, scratch1));
Register current = scratch0;
Label loop_again, end;
// Scratch contains elements pointer.
Mov(current, object);
Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
CompareAndBranch(current, Heap::kNullValueRootIndex, eq, &end);
// Loop based on the map going up the prototype chain.
Bind(&loop_again);
Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
CompareInstanceType(current, scratch1, JS_OBJECT_TYPE);
B(lo, found);
Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
DecodeField<Map::ElementsKindBits>(scratch1);
CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
CompareAndBranch(current, Heap::kNullValueRootIndex, ne, &loop_again);
Bind(&end);
}
void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
Register shift_scratch, Register load_scratch,
Register length_scratch,
@@ -4471,30 +4229,6 @@ void MacroAssembler::Abort(BailoutReason reason) {
TmpList()->set_list(old_tmp_list);
}
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch1,
Register scratch2,
Label* no_map_match) {
DCHECK(IsFastElementsKind(expected_kind));
DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
Ldr(scratch1, NativeContextMemOperand());
Ldr(scratch2,
ContextMemOperand(scratch1, Context::ArrayMapIndex(expected_kind)));
Cmp(map_in_out, scratch2);
B(ne, no_map_match);
// Use the transitioned cached map.
Ldr(map_in_out,
ContextMemOperand(scratch1, Context::ArrayMapIndex(transitioned_kind)));
}
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
Ldr(dst, NativeContextMemOperand());
Ldr(dst, ContextMemOperand(dst, index));
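[Editor's note] The FloodFunctionIfStepping → CheckDebugHook rewrite above swaps a step-action comparison for a simple flag test: load the one-byte debug_hook_on_function_call cell and call into the runtime only when it is non-zero. The control flow, sketched in plain C++ (names are stand-ins, not V8's API):

#include <cstdint>

// Stand-ins for the isolate's flag cell and the runtime entry point.
int8_t debug_hook_on_function_call = 0;
void RuntimeDebugOnFunctionCall() { /* debugger sees the call first */ }

template <typename Fn>
void InvokeWithDebugHook(Fn&& call) {
  if (debug_hook_on_function_call != 0) {
    RuntimeDebugOnFunctionCall();  // equivalent of the skip_hook branch not taken
  }
  call();                          // normal invocation proceeds either way
}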

94
deps/v8/src/arm64/macro-assembler-arm64.h

@@ -1101,24 +1101,6 @@ class MacroAssembler : public Assembler {
// ---- String Utilities ----
// Jump to label if either object is not a sequential one-byte string.
// Optionally perform a smi check on the objects first.
void JumpIfEitherIsNotSequentialOneByteStrings(
Register first, Register second, Register scratch1, Register scratch2,
Label* failure, SmiCheckType smi_check = DO_SMI_CHECK);
// Check if instance type is sequential one-byte string and jump to label if
// it is not.
void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
Label* failure);
// Checks if both instance types are sequential one-byte strings and jumps to
// label if either is not.
void JumpIfEitherInstanceTypeIsNotSequentialOneByte(
Register first_object_instance_type, Register second_object_instance_type,
Register scratch1, Register scratch2, Label* failure);
// Checks if both instance types are sequential one-byte strings and jumps to
// label if either is not.
void JumpIfBothInstanceTypesAreNotSequentialOneByte(
@@ -1227,9 +1209,11 @@ class MacroAssembler : public Assembler {
InvokeFlag flag,
bool* definitely_mismatches,
const CallWrapper& call_wrapper);
void FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual);
// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual);
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
@@ -1360,32 +1344,6 @@ class MacroAssembler : public Assembler {
void FastAllocate(int object_size, Register result, Register scratch1,
Register scratch2, AllocationFlags flags);
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required);
void AllocateOneByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3, Label* gc_required);
void AllocateTwoByteConsString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
void AllocateOneByteConsString(Register result, Register length,
Register scratch1, Register scratch2,
Label* gc_required);
void AllocateTwoByteSlicedString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
void AllocateOneByteSlicedString(Register result, Register length,
Register scratch1, Register scratch2,
Label* gc_required);
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed.
// All registers are clobbered.
@@ -1566,21 +1524,6 @@ class MacroAssembler : public Assembler {
Label* if_any_set,
Label* fall_through);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map, Register scratch, Label* fail);
// Check to see if number can be stored as a double in FastDoubleElements.
// If it can, store it at the index specified by key_reg in the array,
// otherwise jump to fail.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register elements_reg,
Register scratch1,
FPRegister fpscratch1,
Label* fail,
int elements_offset = 0);
// ---------------------------------------------------------------------------
// Inline caching support.
@@ -1624,17 +1567,6 @@ class MacroAssembler : public Assembler {
Register scratch2,
Label* no_memento_found);
void JumpIfJSArrayHasAllocationMemento(Register receiver,
Register scratch1,
Register scratch2,
Label* memento_found) {
Label no_memento_found;
TestJSArrayForAllocationMemento(receiver, scratch1, scratch2,
&no_memento_found);
B(eq, memento_found);
Bind(&no_memento_found);
}
// The stack pointer has to switch between csp and jssp when setting up and
// destroying the exit frame. Hence preserving/restoring the registers is
// slightly more complicated than simple push/pop operations.
@@ -1902,18 +1834,6 @@ class MacroAssembler : public Assembler {
// Print a message to stderr and abort execution.
void Abort(BailoutReason reason);
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch1,
Register scratch2,
Label* no_map_match);
void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers function and
@@ -2002,10 +1922,6 @@ class MacroAssembler : public Assembler {
// sequence is a code age sequence (emitted by EmitCodeAgeSequence).
static bool IsYoungSequence(Isolate* isolate, byte* sequence);
// Jumps to found label if a prototype map has dictionary elements.
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
Register scratch1, Label* found);
// Perform necessary maintenance operations before a push or after a pop.
//
// Note that size is specified in bytes.

1
deps/v8/src/asmjs/OWNERS

@@ -4,6 +4,7 @@ set noparent
ahaas@chromium.org
bradnelson@chromium.org
clemensh@chromium.org
jpp@chromium.org
mtrofin@chromium.org
rossberg@chromium.org

129
deps/v8/src/asmjs/asm-js.cc

@@ -9,6 +9,7 @@
#include "src/asmjs/asm-typer.h"
#include "src/asmjs/asm-wasm-builder.h"
#include "src/assert-scope.h"
#include "src/compilation-info.h"
#include "src/execution.h"
#include "src/factory.h"
#include "src/handles.h"
@@ -31,6 +32,15 @@ namespace v8 {
namespace internal {
namespace {
enum WasmDataEntries {
kWasmDataCompiledModule,
kWasmDataForeignGlobals,
kWasmDataUsesArray,
kWasmDataScript,
kWasmDataScriptPosition,
kWasmDataEntryCount,
};
Handle<i::Object> StdlibMathMember(i::Isolate* isolate,
Handle<JSReceiver> stdlib,
Handle<Name> name) {
@@ -151,29 +161,38 @@ bool IsStdlibMemberValid(i::Isolate* isolate, Handle<JSReceiver> stdlib,
} // namespace
MaybeHandle<FixedArray> AsmJs::ConvertAsmToWasm(ParseInfo* info) {
MaybeHandle<FixedArray> AsmJs::CompileAsmViaWasm(CompilationInfo* info) {
ErrorThrower thrower(info->isolate(), "Asm.js -> WebAssembly conversion");
wasm::AsmTyper typer(info->isolate(), info->zone(), *(info->script()),
info->literal());
if (!typer.Validate()) {
base::ElapsedTimer asm_wasm_timer;
asm_wasm_timer.Start();
wasm::AsmWasmBuilder builder(info);
Handle<FixedArray> foreign_globals;
auto asm_wasm_result = builder.Run(&foreign_globals);
if (!asm_wasm_result.success) {
DCHECK(!info->isolate()->has_pending_exception());
PrintF("Validation of asm.js module failed: %s", typer.error_message());
if (!FLAG_suppress_asm_messages) {
MessageHandler::ReportMessage(info->isolate(),
builder.typer()->message_location(),
builder.typer()->error_message());
}
return MaybeHandle<FixedArray>();
}
v8::internal::wasm::AsmWasmBuilder builder(info->isolate(), info->zone(),
info->literal(), &typer);
i::Handle<i::FixedArray> foreign_globals;
auto asm_wasm_result = builder.Run(&foreign_globals);
double asm_wasm_time = asm_wasm_timer.Elapsed().InMillisecondsF();
wasm::ZoneBuffer* module = asm_wasm_result.module_bytes;
wasm::ZoneBuffer* asm_offsets = asm_wasm_result.asm_offset_table;
Vector<const byte> asm_offsets_vec(asm_offsets->begin(),
static_cast<int>(asm_offsets->size()));
i::MaybeHandle<i::JSObject> compiled = wasm::CreateModuleObjectFromBytes(
base::ElapsedTimer compile_timer;
compile_timer.Start();
MaybeHandle<JSObject> compiled = wasm::CreateModuleObjectFromBytes(
info->isolate(), module->begin(), module->end(), &thrower,
internal::wasm::kAsmJsOrigin, info->script(), asm_offsets->begin(),
asm_offsets->end());
internal::wasm::kAsmJsOrigin, info->script(), asm_offsets_vec);
DCHECK(!compiled.is_null());
double compile_time = compile_timer.Elapsed().InMillisecondsF();
wasm::AsmTyper::StdlibSet uses = typer.StdlibUses();
wasm::AsmTyper::StdlibSet uses = builder.typer()->StdlibUses();
Handle<FixedArray> uses_array =
info->isolate()->factory()->NewFixedArray(static_cast<int>(uses.size()));
int count = 0;
@@ -181,16 +200,45 @@ MaybeHandle<FixedArray> AsmJs::ConvertAsmToWasm(ParseInfo* info) {
uses_array->set(count++, Smi::FromInt(i));
}
Handle<FixedArray> result = info->isolate()->factory()->NewFixedArray(3);
result->set(0, *compiled.ToHandleChecked());
result->set(1, *foreign_globals);
result->set(2, *uses_array);
Handle<FixedArray> result =
info->isolate()->factory()->NewFixedArray(kWasmDataEntryCount);
result->set(kWasmDataCompiledModule, *compiled.ToHandleChecked());
result->set(kWasmDataForeignGlobals, *foreign_globals);
result->set(kWasmDataUsesArray, *uses_array);
result->set(kWasmDataScript, *info->script());
result->set(kWasmDataScriptPosition,
Smi::FromInt(info->literal()->position()));
MessageLocation location(info->script(), info->literal()->position(),
info->literal()->position());
char text[100];
int length;
if (FLAG_predictable) {
length = base::OS::SNPrintF(text, arraysize(text), "success");
} else {
length =
base::OS::SNPrintF(text, arraysize(text),
"success, asm->wasm: %0.3f ms, compile: %0.3f ms",
asm_wasm_time, compile_time);
}
DCHECK_NE(-1, length);
USE(length);
Handle<String> stext(info->isolate()->factory()->InternalizeUtf8String(text));
Handle<JSMessageObject> message = MessageHandler::MakeMessageObject(
info->isolate(), MessageTemplate::kAsmJsCompiled, &location, stext,
Handle<JSArray>::null());
message->set_error_level(v8::Isolate::kMessageInfo);
if (!FLAG_suppress_asm_messages && FLAG_trace_asm_time) {
MessageHandler::ReportMessage(info->isolate(), &location, message);
}
return result;
}
bool AsmJs::IsStdlibValid(i::Isolate* isolate, Handle<FixedArray> wasm_data,
Handle<JSReceiver> stdlib) {
i::Handle<i::FixedArray> uses(i::FixedArray::cast(wasm_data->get(2)));
i::Handle<i::FixedArray> uses(
i::FixedArray::cast(wasm_data->get(kWasmDataUsesArray)));
for (int i = 0; i < uses->length(); ++i) {
if (!IsStdlibMemberValid(isolate, stdlib,
uses->GetValueChecked<i::Object>(isolate, i))) {
@@ -204,14 +252,27 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(i::Isolate* isolate,
Handle<FixedArray> wasm_data,
Handle<JSArrayBuffer> memory,
Handle<JSReceiver> foreign) {
i::Handle<i::JSObject> module(i::JSObject::cast(wasm_data->get(0)));
base::ElapsedTimer instantiate_timer;
instantiate_timer.Start();
i::Handle<i::WasmModuleObject> module(
i::WasmModuleObject::cast(wasm_data->get(kWasmDataCompiledModule)));
i::Handle<i::FixedArray> foreign_globals(
i::FixedArray::cast(wasm_data->get(1)));
i::FixedArray::cast(wasm_data->get(kWasmDataForeignGlobals)));
ErrorThrower thrower(isolate, "Asm.js -> WebAssembly instantiation");
// Create the ffi object for foreign functions {"": foreign}.
Handle<JSObject> ffi_object;
if (!foreign.is_null()) {
Handle<JSFunction> object_function = Handle<JSFunction>(
isolate->native_context()->object_function(), isolate);
ffi_object = isolate->factory()->NewJSObject(object_function);
JSObject::AddProperty(ffi_object, isolate->factory()->empty_string(),
foreign, NONE);
}
i::MaybeHandle<i::JSObject> maybe_module_object =
i::wasm::WasmModule::Instantiate(isolate, &thrower, module, foreign,
i::wasm::WasmModule::Instantiate(isolate, &thrower, module, ffi_object,
memory);
if (maybe_module_object.is_null()) {
return MaybeHandle<Object>();
@@ -258,6 +319,32 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(i::Isolate* isolate,
!single_function.ToHandleChecked()->IsUndefined(isolate)) {
return single_function;
}
i::Handle<i::Script> script(i::Script::cast(wasm_data->get(kWasmDataScript)));
int32_t position = 0;
if (!wasm_data->get(kWasmDataScriptPosition)->ToInt32(&position)) {
UNREACHABLE();
}
MessageLocation location(script, position, position);
char text[50];
int length;
if (FLAG_predictable) {
length = base::OS::SNPrintF(text, arraysize(text), "success");
} else {
length = base::OS::SNPrintF(text, arraysize(text), "success, %0.3f ms",
instantiate_timer.Elapsed().InMillisecondsF());
}
DCHECK_NE(-1, length);
USE(length);
Handle<String> stext(isolate->factory()->InternalizeUtf8String(text));
Handle<JSMessageObject> message = MessageHandler::MakeMessageObject(
isolate, MessageTemplate::kAsmJsInstantiated, &location, stext,
Handle<JSArray>::null());
message->set_error_level(v8::Isolate::kMessageInfo);
if (!FLAG_suppress_asm_messages && FLAG_trace_asm_time) {
MessageHandler::ReportMessage(isolate, &location, message);
}
return module_object;
}

4
deps/v8/src/asmjs/asm-js.h

@ -10,13 +10,13 @@
namespace v8 {
namespace internal {
class CompilationInfo;
class JSArrayBuffer;
class ParseInfo;
// Interface to compile and instantiate for asm.js.
class AsmJs {
public:
static MaybeHandle<FixedArray> ConvertAsmToWasm(ParseInfo* info);
static MaybeHandle<FixedArray> CompileAsmViaWasm(CompilationInfo* info);
static bool IsStdlibValid(Isolate* isolate, Handle<FixedArray> wasm_data,
Handle<JSReceiver> stdlib);
static MaybeHandle<Object> InstantiateAsmWasm(Isolate* isolate,

407
deps/v8/src/asmjs/asm-typer.cc

@ -9,6 +9,7 @@
#include <memory>
#include <string>
#include "include/v8.h"
#include "src/v8.h"
#include "src/asmjs/asm-types.h"
@ -17,18 +18,33 @@
#include "src/base/bits.h"
#include "src/codegen.h"
#include "src/globals.h"
#include "src/messages.h"
#include "src/utils.h"
#include "src/vector.h"
#define FAIL_LOCATION_RAW(location, msg) \
do { \
Handle<String> message( \
isolate_->factory()->InternalizeOneByteString(msg)); \
error_message_ = MessageHandler::MakeMessageObject( \
isolate_, MessageTemplate::kAsmJsInvalid, (location), message, \
Handle<JSArray>::null()); \
error_message_->set_error_level(v8::Isolate::kMessageWarning); \
message_location_ = *(location); \
return AsmType::None(); \
} while (false)
#define FAIL(node, msg) \
do { \
int line = node->position() == kNoSourcePosition \
? -1 \
: script_->GetLineNumber(node->position()); \
base::OS::SNPrintF(error_message_, sizeof(error_message_), \
"asm: line %d: %s\n", line + 1, msg); \
return AsmType::None(); \
#define FAIL_RAW(node, msg) \
do { \
MessageLocation location(script_, node->position(), node->position()); \
FAIL_LOCATION_RAW(&location, msg); \
} while (false)
#define FAIL_LOCATION(location, msg) \
FAIL_LOCATION_RAW(location, STATIC_CHAR_VECTOR(msg))
#define FAIL(node, msg) FAIL_RAW(node, STATIC_CHAR_VECTOR(msg))
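With these macros a failed check now materializes as a JSMessageObject plus a MessageLocation instead of text printed into a fixed char buffer. A caller-side sketch using the accessors declared in asm-typer.h further down (assumes V8-internal types; not part of this patch):
// Sketch: surface a validation failure through the new message plumbing.
AsmTyper typer(isolate, zone, script, root);
if (!typer.Validate()) {
  MessageLocation location = *typer.message_location();
  MessageHandler::ReportMessage(isolate, &location, typer.error_message());
}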
#define RECURSE(call) \
do { \
if (GetCurrentStackPosition() < stack_limit_) { \
@ -90,6 +106,53 @@ Statement* AsmTyper::FlattenedStatements::Next() {
}
}
// ----------------------------------------------------------------------------
// Implementation of AsmTyper::SourceLayoutTracker
bool AsmTyper::SourceLayoutTracker::IsValid() const {
const Section* kAllSections[] = {&use_asm_, &globals_, &functions_, &tables_,
&exports_};
for (size_t ii = 0; ii < arraysize(kAllSections); ++ii) {
const auto& curr_section = *kAllSections[ii];
for (size_t jj = ii + 1; jj < arraysize(kAllSections); ++jj) {
if (curr_section.IsPrecededBy(*kAllSections[jj])) {
return false;
}
}
}
return true;
}
void AsmTyper::SourceLayoutTracker::Section::AddNewElement(
const AstNode& node) {
const int node_pos = node.position();
if (start_ == kNoSourcePosition) {
start_ = node_pos;
} else {
start_ = std::min(start_, node_pos);
}
if (end_ == kNoSourcePosition) {
end_ = node_pos;
} else {
end_ = std::max(end_, node_pos);
}
}
bool AsmTyper::SourceLayoutTracker::Section::IsPrecededBy(
const Section& other) const {
if (start_ == kNoSourcePosition) {
DCHECK_EQ(end_, kNoSourcePosition);
return false;
}
if (other.start_ == kNoSourcePosition) {
DCHECK_EQ(other.end_, kNoSourcePosition);
return false;
}
DCHECK_LE(start_, end_);
DCHECK_LE(other.start_, other.end_);
return other.start_ <= end_;
}
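In plain terms, a section A "is preceded by" section B when B starts at or before A's last element, so IsValid() rejects any module whose sections interleave. A standalone illustration with plain ints in place of AST nodes (kNoSourcePosition is -1 in V8; this compiles and runs outside V8):
#include <algorithm>
#include <cassert>

struct Section {
  int start = -1, end = -1;  // -1 plays the role of kNoSourcePosition
  void Add(int pos) {
    start = (start == -1) ? pos : std::min(start, pos);
    end = (end == -1) ? pos : std::max(end, pos);
  }
  bool IsPrecededBy(const Section& other) const {
    if (start == -1 || other.start == -1) return false;  // empty section
    return other.start <= end;
  }
};

int main() {
  Section globals, functions;
  globals.Add(10);    // a global assignment at source position 10
  functions.Add(20);  // a function declaration at position 20
  assert(!globals.IsPrecededBy(functions));  // layout valid so far
  globals.Add(25);    // another global, after the function
  assert(globals.IsPrecededBy(functions));   // 20 <= 25: layout invalid
  return 0;
}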
// ----------------------------------------------------------------------------
// Implementation of AsmTyper::VariableInfo
@ -112,16 +175,16 @@ AsmTyper::VariableInfo* AsmTyper::VariableInfo::Clone(Zone* zone) const {
return new_var_info;
}
void AsmTyper::VariableInfo::FirstForwardUseIs(VariableProxy* var) {
DCHECK(first_forward_use_ == nullptr);
void AsmTyper::VariableInfo::SetFirstForwardUse(
const MessageLocation& source_location) {
missing_definition_ = true;
first_forward_use_ = var;
source_location_ = source_location;
}
// ----------------------------------------------------------------------------
// Implementation of AsmTyper
AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Script* script,
AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Handle<Script> script,
FunctionLiteral* root)
: isolate_(isolate),
zone_(zone),
@ -137,9 +200,9 @@ AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Script* script,
local_scope_(ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)),
stack_limit_(isolate->stack_guard()->real_climit()),
node_types_(zone_),
fround_type_(AsmType::FroundType(zone_)),
ffi_type_(AsmType::FFIType(zone_)) {
ffi_type_(AsmType::FFIType(zone_)),
function_pointer_tables_(zone_) {
InitializeStdlib();
}
@ -283,6 +346,9 @@ void AsmTyper::InitializeStdlib() {
AsmTyper::VariableInfo* AsmTyper::ImportLookup(Property* import) {
auto* obj = import->obj();
auto* key = import->key()->AsLiteral();
if (key == nullptr) {
return nullptr;
}
ObjectTypeMap* stdlib = &stdlib_types_;
if (auto* obj_as_property = obj->AsProperty()) {
@ -345,7 +411,8 @@ AsmTyper::VariableInfo* AsmTyper::Lookup(Variable* variable) const {
}
void AsmTyper::AddForwardReference(VariableProxy* proxy, VariableInfo* info) {
info->FirstForwardUseIs(proxy);
MessageLocation location(script_, proxy->position(), proxy->position());
info->SetFirstForwardUse(location);
forward_definitions_.push_back(info);
}
@ -390,22 +457,58 @@ bool AsmTyper::AddLocal(Variable* variable, VariableInfo* info) {
void AsmTyper::SetTypeOf(AstNode* node, AsmType* type) {
DCHECK_NE(type, AsmType::None());
DCHECK(node_types_.find(node) == node_types_.end());
node_types_.insert(std::make_pair(node, type));
if (in_function_) {
DCHECK(function_node_types_.find(node) == function_node_types_.end());
function_node_types_.insert(std::make_pair(node, type));
} else {
DCHECK(module_node_types_.find(node) == module_node_types_.end());
module_node_types_.insert(std::make_pair(node, type));
}
}
namespace {
bool IsLiteralDouble(Literal* literal) {
return literal->raw_value()->IsNumber() &&
literal->raw_value()->ContainsDot();
}
bool IsLiteralInt(Literal* literal) {
return literal->raw_value()->IsNumber() &&
!literal->raw_value()->ContainsDot();
}
bool IsLiteralMinus1(Literal* literal) {
return IsLiteralInt(literal) && literal->raw_value()->AsNumber() == -1.0;
}
bool IsLiteral1Dot0(Literal* literal) {
return IsLiteralDouble(literal) && literal->raw_value()->AsNumber() == 1.0;
}
bool IsLiteral0(Literal* literal) {
return IsLiteralInt(literal) && literal->raw_value()->AsNumber() == 0.0;
}
} // namespace
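These helpers centralize asm.js literal classification: a numeric literal is a double iff its source text contains a dot, an int otherwise. A minimal sketch of the dot rule on plain strings (assumption: ContainsDot() on the raw AST value tracks the presence of a textual dot):
#include <cassert>
#include <string>

bool ContainsDot(const std::string& literal) {
  return literal.find('.') != std::string::npos;
}

int main() {
  assert(ContainsDot("1.0"));  // IsLiteralDouble territory: asm.js double
  assert(!ContainsDot("1"));   // IsLiteralInt territory: asm.js int
  assert(!ContainsDot("-1"));  // IsLiteralMinus1 builds on the int rule
  return 0;
}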
AsmType* AsmTyper::TypeOf(AstNode* node) const {
auto node_type_iter = node_types_.find(node);
if (node_type_iter != node_types_.end()) {
auto node_type_iter = function_node_types_.find(node);
if (node_type_iter != function_node_types_.end()) {
return node_type_iter->second;
}
node_type_iter = module_node_types_.find(node);
if (node_type_iter != module_node_types_.end()) {
return node_type_iter->second;
}
// Sometimes literal nodes are not added to the node_type_ map simply because
// they are not visited with ValidateExpression().
if (auto* literal = node->AsLiteral()) {
if (literal->raw_value()->ContainsDot()) {
if (IsLiteralDouble(literal)) {
return AsmType::Double();
}
if (!IsLiteralInt(literal)) {
return AsmType::None();
}
uint32_t u;
if (literal->value()->ToUint32(&u)) {
if (u > LargestFixNum) {
@ -433,13 +536,39 @@ AsmTyper::StandardMember AsmTyper::VariableAsStandardMember(Variable* var) {
return member;
}
AsmType* AsmTyper::FailWithMessage(const char* text) {
FAIL_RAW(root_, OneByteVector(text));
}
bool AsmTyper::Validate() {
if (!AsmType::None()->IsExactly(ValidateModule(root_))) {
return ValidateBeforeFunctionsPhase() &&
!AsmType::None()->IsExactly(ValidateModuleFunctions(root_)) &&
ValidateAfterFunctionsPhase();
}
bool AsmTyper::ValidateBeforeFunctionsPhase() {
if (!AsmType::None()->IsExactly(ValidateModuleBeforeFunctionsPhase(root_))) {
return true;
}
return false;
}
bool AsmTyper::ValidateInnerFunction(FunctionDeclaration* fun_decl) {
if (!AsmType::None()->IsExactly(ValidateModuleFunction(fun_decl))) {
return true;
}
return false;
}
bool AsmTyper::ValidateAfterFunctionsPhase() {
if (!AsmType::None()->IsExactly(ValidateModuleAfterFunctionsPhase(root_))) {
return true;
}
return false;
}
void AsmTyper::ClearFunctionNodeTypes() { function_node_types_.clear(); }
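Taken together, these entry points replace the old single-shot Validate() with a protocol the asm-wasm builder can drive function by function. A caller-side sketch mirroring AsmWasmBuilderImpl::Build() in asm-wasm-builder.cc below (V8-internal types assumed; error reporting elided):
// Sketch of the intended phase order, not a drop-in implementation.
bool ValidateInterleaved(AsmTyper* typer, Declaration::List* decls) {
  if (!typer->ValidateBeforeFunctionsPhase()) return false;  // globals etc.
  for (Declaration* decl : *decls) {
    if (FunctionDeclaration* fun = decl->AsFunctionDeclaration()) {
      if (!typer->ValidateInnerFunction(fun)) return false;
      // ... generate wasm code for |fun| here ...
      typer->ClearFunctionNodeTypes();  // per-function node types are stale
    }
  }
  return typer->ValidateAfterFunctionsPhase();  // tables, export, fwd refs
}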
namespace {
bool IsUseAsmDirective(Statement* first_statement) {
ExpressionStatement* use_asm = first_statement->AsExpressionStatement();
@ -477,91 +606,12 @@ Assignment* ExtractInitializerExpression(Statement* statement) {
} // namespace
// 6.1 ValidateModule
namespace {
// SourceLayoutTracker keeps track of the start and end positions of each
// section in the asm.js source. The sections should not overlap, otherwise the
// asm.js source is invalid.
class SourceLayoutTracker {
public:
SourceLayoutTracker() = default;
bool IsValid() const {
const Section* kAllSections[] = {&use_asm_, &globals_, &functions_,
&tables_, &exports_};
for (size_t ii = 0; ii < arraysize(kAllSections); ++ii) {
const auto& curr_section = *kAllSections[ii];
for (size_t jj = ii + 1; jj < arraysize(kAllSections); ++jj) {
if (curr_section.OverlapsWith(*kAllSections[jj])) {
return false;
}
}
}
return true;
}
void AddUseAsm(const AstNode& node) { use_asm_.AddNewElement(node); }
void AddGlobal(const AstNode& node) { globals_.AddNewElement(node); }
void AddFunction(const AstNode& node) { functions_.AddNewElement(node); }
void AddTable(const AstNode& node) { tables_.AddNewElement(node); }
void AddExport(const AstNode& node) { exports_.AddNewElement(node); }
private:
class Section {
public:
Section() = default;
Section(const Section&) = default;
Section& operator=(const Section&) = default;
void AddNewElement(const AstNode& node) {
const int node_pos = node.position();
if (start_ == kNoSourcePosition) {
start_ = node_pos;
} else {
start_ = std::max(start_, node_pos);
}
if (end_ == kNoSourcePosition) {
end_ = node_pos;
} else {
end_ = std::max(end_, node_pos);
}
}
bool OverlapsWith(const Section& other) const {
if (start_ == kNoSourcePosition) {
DCHECK_EQ(end_, kNoSourcePosition);
return false;
}
if (other.start_ == kNoSourcePosition) {
DCHECK_EQ(other.end_, kNoSourcePosition);
return false;
}
return other.start_ < end_ || other.end_ < start_;
}
private:
int start_ = kNoSourcePosition;
int end_ = kNoSourcePosition;
};
Section use_asm_;
Section globals_;
Section functions_;
Section tables_;
Section exports_;
DISALLOW_COPY_AND_ASSIGN(SourceLayoutTracker);
};
} // namespace
AsmType* AsmTyper::ValidateModule(FunctionLiteral* fun) {
SourceLayoutTracker source_layout;
AsmType* AsmTyper::ValidateModuleBeforeFunctionsPhase(FunctionLiteral* fun) {
DeclarationScope* scope = fun->scope();
if (!scope->is_function_scope()) FAIL(fun, "Not at function scope.");
if (scope->inner_scope_calls_eval()) {
FAIL(fun, "Invalid asm.js module using eval.");
}
if (!ValidAsmIdentifier(fun->name()))
FAIL(fun, "Invalid asm.js identifier in module name.");
module_name_ = fun->name();
@ -594,7 +644,6 @@ AsmType* AsmTyper::ValidateModule(FunctionLiteral* fun) {
}
}
ZoneVector<Assignment*> function_pointer_tables(zone_);
FlattenedStatements iter(zone_, fun->body());
auto* use_asm_directive = iter.Next();
if (use_asm_directive == nullptr) {
@ -616,8 +665,8 @@ AsmType* AsmTyper::ValidateModule(FunctionLiteral* fun) {
if (!IsUseAsmDirective(use_asm_directive)) {
FAIL(fun, "Missing \"use asm\".");
}
source_layout.AddUseAsm(*use_asm_directive);
ReturnStatement* module_return = nullptr;
source_layout_.AddUseAsm(*use_asm_directive);
module_return_ = nullptr;
// *VIOLATION* The spec states that globals should be followed by function
// declarations, which should be followed by function pointer tables, followed
@ -627,40 +676,57 @@ AsmType* AsmTyper::ValidateModule(FunctionLiteral* fun) {
if (auto* assign = ExtractInitializerExpression(current)) {
if (assign->value()->IsArrayLiteral()) {
// Save function tables for later validation.
function_pointer_tables.push_back(assign);
function_pointer_tables_.push_back(assign);
} else {
RECURSE(ValidateGlobalDeclaration(assign));
source_layout.AddGlobal(*assign);
source_layout_.AddGlobal(*assign);
}
continue;
}
if (auto* current_as_return = current->AsReturnStatement()) {
if (module_return != nullptr) {
if (module_return_ != nullptr) {
FAIL(fun, "Multiple export statements.");
}
module_return = current_as_return;
source_layout.AddExport(*module_return);
module_return_ = current_as_return;
source_layout_.AddExport(*module_return_);
continue;
}
FAIL(current, "Invalid top-level statement in asm.js module.");
}
return AsmType::Int(); // Any type that is not AsmType::None();
}
AsmType* AsmTyper::ValidateModuleFunction(FunctionDeclaration* fun_decl) {
RECURSE(ValidateFunction(fun_decl));
source_layout_.AddFunction(*fun_decl);
return AsmType::Int(); // Any type that is not AsmType::None();
}
AsmType* AsmTyper::ValidateModuleFunctions(FunctionLiteral* fun) {
DeclarationScope* scope = fun->scope();
Declaration::List* decls = scope->declarations();
for (Declaration* decl : *decls) {
if (FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration()) {
RECURSE(ValidateFunction(fun_decl));
source_layout.AddFunction(*fun_decl);
RECURSE(ValidateModuleFunction(fun_decl));
continue;
}
}
for (auto* function_table : function_pointer_tables) {
return AsmType::Int(); // Any type that is not AsmType::None();
}
AsmType* AsmTyper::ValidateModuleAfterFunctionsPhase(FunctionLiteral* fun) {
for (auto* function_table : function_pointer_tables_) {
RECURSE(ValidateFunctionTable(function_table));
source_layout.AddTable(*function_table);
source_layout_.AddTable(*function_table);
}
DeclarationScope* scope = fun->scope();
Declaration::List* decls = scope->declarations();
for (Declaration* decl : *decls) {
if (decl->IsFunctionDeclaration()) {
continue;
@ -682,20 +748,20 @@ AsmType* AsmTyper::ValidateModule(FunctionLiteral* fun) {
}
// 6.2 ValidateExport
if (module_return == nullptr) {
if (module_return_ == nullptr) {
FAIL(fun, "Missing asm.js module export.");
}
for (auto* forward_def : forward_definitions_) {
if (forward_def->missing_definition()) {
FAIL(forward_def->first_forward_use(),
"Missing definition for forward declared identifier.");
FAIL_LOCATION(forward_def->source_location(),
"Missing definition for forward declared identifier.");
}
}
RECURSE(ValidateExport(module_return));
RECURSE(ValidateExport(module_return_));
if (!source_layout.IsValid()) {
if (!source_layout_.IsValid()) {
FAIL(fun, "Invalid asm.js source code layout.");
}
@ -714,8 +780,7 @@ bool IsDoubleAnnotation(BinaryOperation* binop) {
return false;
}
return right_as_literal->raw_value()->ContainsDot() &&
right_as_literal->raw_value()->AsNumber() == 1.0;
return IsLiteral1Dot0(right_as_literal);
}
bool IsIntAnnotation(BinaryOperation* binop) {
@ -728,8 +793,7 @@ bool IsIntAnnotation(BinaryOperation* binop) {
return false;
}
return !right_as_literal->raw_value()->ContainsDot() &&
right_as_literal->raw_value()->AsNumber() == 0.0;
return IsLiteral0(right_as_literal);
}
} // namespace
@ -894,6 +958,10 @@ AsmType* AsmTyper::ExportType(VariableProxy* fun_export) {
FAIL(fun_export, "Module export is not an asm.js function.");
}
if (!fun_export->var()->is_function()) {
FAIL(fun_export, "Module exports must be function declarations.");
}
return type;
}
@ -915,6 +983,10 @@ AsmType* AsmTyper::ValidateExport(ReturnStatement* exports) {
"Only normal object properties may be used in the export object "
"literal.");
}
if (!prop->key()->AsLiteral()->IsPropertyName()) {
FAIL(prop->key(),
"Exported functions must have valid identifier names.");
}
auto* export_obj = prop->value()->AsVariableProxy();
if (export_obj == nullptr) {
@ -1091,6 +1163,7 @@ AsmType* AsmTyper::ValidateFunction(FunctionDeclaration* fun_decl) {
parameter_types.push_back(type);
SetTypeOf(proxy, type);
SetTypeOf(expr, type);
SetTypeOf(expr->value(), type);
}
if (static_cast<int>(annotated_parameters) != fun->parameter_count()) {
@ -1442,7 +1515,7 @@ bool ExtractInt32CaseLabel(CaseClause* clause, int32_t* lbl) {
return false;
}
if (lbl_expr->raw_value()->ContainsDot()) {
if (!IsLiteralInt(lbl_expr)) {
return false;
}
@ -1539,8 +1612,7 @@ bool IsInvert(BinaryOperation* binop) {
return false;
}
return !right_as_literal->raw_value()->ContainsDot() &&
right_as_literal->raw_value()->AsNumber() == -1.0;
return IsLiteralMinus1(right_as_literal);
}
bool IsUnaryMinus(BinaryOperation* binop) {
@ -1554,8 +1626,7 @@ bool IsUnaryMinus(BinaryOperation* binop) {
return false;
}
return !right_as_literal->raw_value()->ContainsDot() &&
right_as_literal->raw_value()->AsNumber() == -1.0;
return IsLiteralMinus1(right_as_literal);
}
} // namespace
@ -1684,7 +1755,7 @@ AsmType* AsmTyper::ValidateNumericLiteral(Literal* literal) {
return AsmType::Void();
}
if (literal->raw_value()->ContainsDot()) {
if (IsLiteralDouble(literal)) {
return AsmType::Double();
}
@ -1864,7 +1935,7 @@ bool IsIntishLiteralFactor(Expression* expr, int32_t* factor) {
return false;
}
if (literal->raw_value()->ContainsDot()) {
if (!IsLiteralInt(literal)) {
return false;
}
@ -2204,12 +2275,12 @@ AsmType* AsmTyper::ValidateBitwiseORExpression(BinaryOperation* binop) {
RECURSE(type = ValidateCall(AsmType::Signed(), left_as_call));
return type;
}
// TODO(jpp): at this point we know that binop is expr|0. We could simply
//
// RECURSE(t = ValidateExpression(left));
// FAIL_IF(t->IsNotA(Intish));
// return Signed;
AsmType* left_type;
RECURSE(left_type = ValidateExpression(left));
if (!left_type->IsA(AsmType::Intish())) {
FAIL(left, "Left side of |0 annotation must be intish.");
}
return AsmType::Signed();
}
auto* right = binop->right();
@ -2273,7 +2344,7 @@ bool ExtractIndirectCallMask(Expression* expr, uint32_t* value) {
return false;
}
if (as_literal->raw_value()->ContainsDot()) {
if (!IsLiteralInt(as_literal)) {
return false;
}
@ -2329,6 +2400,9 @@ AsmType* AsmTyper::ValidateCall(AsmType* return_type, Call* call) {
DCHECK(false);
FAIL(call, "Redeclared global identifier.");
}
if (call->GetCallType() != Call::OTHER_CALL) {
FAIL(call, "Invalid call of existing global function.");
}
SetTypeOf(call_var_proxy, reinterpret_cast<AsmType*>(call_type));
SetTypeOf(call, return_type);
return return_type;
@ -2359,6 +2433,10 @@ AsmType* AsmTyper::ValidateCall(AsmType* return_type, Call* call) {
FAIL(call, "Function invocation does not match function type.");
}
if (call->GetCallType() != Call::OTHER_CALL) {
FAIL(call, "Invalid forward call of global function.");
}
SetTypeOf(call_var_proxy, call_var_info->type());
SetTypeOf(call, return_type);
return return_type;
@ -2417,6 +2495,9 @@ AsmType* AsmTyper::ValidateCall(AsmType* return_type, Call* call) {
DCHECK(false);
FAIL(call, "Redeclared global identifier.");
}
if (call->GetCallType() != Call::KEYED_PROPERTY_CALL) {
FAIL(call, "Invalid call of existing function table.");
}
SetTypeOf(call_property, reinterpret_cast<AsmType*>(call_type));
SetTypeOf(call, return_type);
return return_type;
@ -2441,6 +2522,9 @@ AsmType* AsmTyper::ValidateCall(AsmType* return_type, Call* call) {
"signature.");
}
if (call->GetCallType() != Call::KEYED_PROPERTY_CALL) {
FAIL(call, "Invalid forward call of function table.");
}
SetTypeOf(call_property, previous_type->signature());
SetTypeOf(call, return_type);
return return_type;
@ -2457,7 +2541,7 @@ bool ExtractHeapAccessShift(Expression* expr, uint32_t* value) {
return false;
}
if (as_literal->raw_value()->ContainsDot()) {
if (!IsLiteralInt(as_literal)) {
return false;
}
@ -2501,7 +2585,7 @@ AsmType* AsmTyper::ValidateHeapAccess(Property* heap,
SetTypeOf(obj, obj_type);
if (auto* key_as_literal = heap->key()->AsLiteral()) {
if (key_as_literal->raw_value()->ContainsDot()) {
if (!IsLiteralInt(key_as_literal)) {
FAIL(key_as_literal, "Heap access index must be int.");
}
@ -2685,9 +2769,9 @@ AsmType* AsmTyper::ReturnTypeAnnotations(ReturnStatement* statement) {
if (auto* literal = ret_expr->AsLiteral()) {
int32_t _;
if (literal->raw_value()->ContainsDot()) {
if (IsLiteralDouble(literal)) {
return AsmType::Double();
} else if (literal->value()->ToInt32(&_)) {
} else if (IsLiteralInt(literal) && literal->value()->ToInt32(&_)) {
return AsmType::Signed();
} else if (literal->IsUndefinedLiteral()) {
// *VIOLATION* The parser changes
@ -2728,13 +2812,15 @@ AsmType* AsmTyper::ReturnTypeAnnotations(ReturnStatement* statement) {
AsmType* AsmTyper::VariableTypeAnnotations(
Expression* initializer, VariableInfo::Mutability mutability_type) {
if (auto* literal = initializer->AsLiteral()) {
if (literal->raw_value()->ContainsDot()) {
if (IsLiteralDouble(literal)) {
SetTypeOf(initializer, AsmType::Double());
return AsmType::Double();
}
if (!IsLiteralInt(literal)) {
FAIL(initializer, "Invalid type annotation - forbidden literal.");
}
int32_t i32;
uint32_t u32;
AsmType* initializer_type = nullptr;
if (literal->value()->ToUint32(&u32)) {
if (u32 > LargestFixNum) {
@ -2793,13 +2879,17 @@ AsmType* AsmTyper::VariableTypeAnnotations(
"to fround.");
}
// Float constants must contain dots in local, but not in globals.
if (mutability_type == VariableInfo::kLocal) {
if (!src_expr->raw_value()->ContainsDot()) {
FAIL(initializer,
"Invalid float type annotation - expected literal argument to be a "
"floating point literal.");
}
// ERRATA: 5.4
// According to the spec: float constants must contain dots in local,
// but not in globals.
// However, the errata doc (and actual programs) use integer values
// with fround(..).
// Skipping the check that would go here to enforce this.
// Checking instead the literal expression is at least a number.
if (!src_expr->raw_value()->IsNumber()) {
FAIL(initializer,
"Invalid float type annotation - expected numeric literal for call "
"to fround.");
}
return AsmType::Float();
@ -2848,19 +2938,6 @@ AsmType* AsmTyper::NewHeapView(CallNew* new_heap_view) {
return heap_view_info->type();
}
bool IsValidAsm(Isolate* isolate, Zone* zone, Script* script,
FunctionLiteral* root, std::string* error_message) {
error_message->clear();
AsmTyper typer(isolate, zone, script, root);
if (typer.Validate()) {
return true;
}
*error_message = typer.error_message();
return false;
}
} // namespace wasm
} // namespace internal
} // namespace v8

80
deps/v8/src/asmjs/asm-typer.h

@ -7,6 +7,7 @@
#include <cstdint>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include "src/allocation.h"
@ -15,6 +16,7 @@
#include "src/ast/ast-types.h"
#include "src/ast/ast.h"
#include "src/effects.h"
#include "src/messages.h"
#include "src/type-info.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
@ -25,6 +27,7 @@ namespace wasm {
class AsmType;
class AsmTyperHarnessBuilder;
class SourceLayoutTracker;
class AsmTyper final {
public:
@ -66,16 +69,27 @@ class AsmTyper final {
};
~AsmTyper() = default;
AsmTyper(Isolate* isolate, Zone* zone, Script* script, FunctionLiteral* root);
AsmTyper(Isolate* isolate, Zone* zone, Handle<Script> script,
FunctionLiteral* root);
bool Validate();
// Do asm.js validation in phases (to interleave with conversion to wasm).
bool ValidateBeforeFunctionsPhase();
bool ValidateInnerFunction(FunctionDeclaration* decl);
bool ValidateAfterFunctionsPhase();
void ClearFunctionNodeTypes();
const char* error_message() const { return error_message_; }
Handle<JSMessageObject> error_message() const { return error_message_; }
const MessageLocation* message_location() const { return &message_location_; }
AsmType* TypeOf(AstNode* node) const;
AsmType* TypeOf(Variable* v) const;
StandardMember VariableAsStandardMember(Variable* var);
// Allow the asm-wasm-builder to trigger failures (for interleaved
// validating).
AsmType* FailWithMessage(const char* text);
typedef std::unordered_set<StandardMember, std::hash<int> > StdlibSet;
StdlibSet StdlibUses() const { return stdlib_uses_; }
@ -130,7 +144,7 @@ class AsmTyper final {
bool IsHeap() const { return standard_member_ == kHeap; }
void MarkDefined() { missing_definition_ = false; }
void FirstForwardUseIs(VariableProxy* var);
void SetFirstForwardUse(const MessageLocation& source_location);
StandardMember standard_member() const { return standard_member_; }
void set_standard_member(StandardMember standard_member) {
@ -145,7 +159,7 @@ class AsmTyper final {
bool missing_definition() const { return missing_definition_; }
VariableProxy* first_forward_use() const { return first_forward_use_; }
const MessageLocation* source_location() { return &source_location_; }
static VariableInfo* ForSpecialSymbol(Zone* zone,
StandardMember standard_member);
@ -157,9 +171,8 @@ class AsmTyper final {
// missing_definition_ is set to true for forward definition - i.e., use
// before definition.
bool missing_definition_ = false;
// first_forward_use_ holds the AST node that first referenced this
// VariableInfo. Used for error messages.
VariableProxy* first_forward_use_ = nullptr;
// Used for error messages.
MessageLocation source_location_;
};
// RAII-style manager for the in_function_ member variable.
@ -199,6 +212,40 @@ class AsmTyper final {
DISALLOW_IMPLICIT_CONSTRUCTORS(FlattenedStatements);
};
class SourceLayoutTracker {
public:
SourceLayoutTracker() = default;
bool IsValid() const;
void AddUseAsm(const AstNode& node) { use_asm_.AddNewElement(node); }
void AddGlobal(const AstNode& node) { globals_.AddNewElement(node); }
void AddFunction(const AstNode& node) { functions_.AddNewElement(node); }
void AddTable(const AstNode& node) { tables_.AddNewElement(node); }
void AddExport(const AstNode& node) { exports_.AddNewElement(node); }
private:
class Section {
public:
Section() = default;
Section(const Section&) = default;
Section& operator=(const Section&) = default;
void AddNewElement(const AstNode& node);
bool IsPrecededBy(const Section& other) const;
private:
int start_ = kNoSourcePosition;
int end_ = kNoSourcePosition;
};
Section use_asm_;
Section globals_;
Section functions_;
Section tables_;
Section exports_;
DISALLOW_COPY_AND_ASSIGN(SourceLayoutTracker);
};
using ObjectTypeMap = ZoneMap<std::string, VariableInfo*>;
void InitializeStdlib();
void SetTypeOf(AstNode* node, AsmType* type);
@ -220,7 +267,10 @@ class AsmTyper final {
// validation failure.
// 6.1 ValidateModule
AsmType* ValidateModule(FunctionLiteral* fun);
AsmType* ValidateModuleBeforeFunctionsPhase(FunctionLiteral* fun);
AsmType* ValidateModuleFunction(FunctionDeclaration* fun_decl);
AsmType* ValidateModuleFunctions(FunctionLiteral* fun);
AsmType* ValidateModuleAfterFunctionsPhase(FunctionLiteral* fun);
AsmType* ValidateGlobalDeclaration(Assignment* assign);
// 6.2 ValidateExport
AsmType* ExportType(VariableProxy* fun_export);
@ -323,7 +373,7 @@ class AsmTyper final {
Isolate* isolate_;
Zone* zone_;
Script* script_;
Handle<Script> script_;
FunctionLiteral* root_;
bool in_function_ = false;
@ -345,13 +395,19 @@ class AsmTyper final {
std::uintptr_t stack_limit_;
bool stack_overflow_ = false;
ZoneMap<AstNode*, AsmType*> node_types_;
static const int kErrorMessageLimit = 100;
std::unordered_map<AstNode*, AsmType*> module_node_types_;
std::unordered_map<AstNode*, AsmType*> function_node_types_;
static const int kErrorMessageLimit = 128;
AsmType* fround_type_;
AsmType* ffi_type_;
char error_message_[kErrorMessageLimit];
Handle<JSMessageObject> error_message_;
MessageLocation message_location_;
StdlibSet stdlib_uses_;
SourceLayoutTracker source_layout_;
ReturnStatement* module_return_;
ZoneVector<Assignment*> function_pointer_tables_;
DISALLOW_IMPLICIT_CONSTRUCTORS(AsmTyper);
};

1
deps/v8/src/asmjs/asm-types.cc

@ -6,6 +6,7 @@
#include <cinttypes>
#include "src/utils.h"
#include "src/v8.h"
namespace v8 {

465
deps/v8/src/asmjs/asm-wasm-builder.cc

@ -19,6 +19,11 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/codegen.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/isolate.h"
#include "src/parsing/parse-info.h"
namespace v8 {
namespace internal {
@ -37,13 +42,14 @@ enum ValueFate { kDrop, kLeaveOnStack };
struct ForeignVariable {
Handle<Name> name;
Variable* var;
LocalType type;
ValueType type;
};
class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
public:
AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, FunctionLiteral* literal,
AsmTyper* typer)
AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, CompilationInfo* info,
AstValueFactory* ast_value_factory, Handle<Script> script,
FunctionLiteral* literal, AsmTyper* typer)
: local_variables_(ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)),
functions_(ZoneHashMap::kDefaultHashMapCapacity,
@ -56,15 +62,20 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
literal_(literal),
isolate_(isolate),
zone_(zone),
info_(info),
ast_value_factory_(ast_value_factory),
script_(script),
typer_(typer),
typer_failed_(false),
typer_finished_(false),
breakable_blocks_(zone),
foreign_variables_(zone),
init_function_(nullptr),
foreign_init_function_(nullptr),
next_table_index_(0),
function_tables_(ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)),
imported_function_table_(this) {
imported_function_table_(this),
parent_binop_(nullptr) {
InitializeAstVisitor(isolate);
}
@ -90,10 +101,11 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
uint32_t index = LookupOrInsertGlobal(fv->var, fv->type);
foreign_init_function_->EmitWithVarInt(kExprSetGlobal, index);
}
foreign_init_function_->Emit(kExprEnd);
}
i::Handle<i::FixedArray> GetForeignArgs() {
i::Handle<FixedArray> ret = isolate_->factory()->NewFixedArray(
Handle<FixedArray> GetForeignArgs() {
Handle<FixedArray> ret = isolate_->factory()->NewFixedArray(
static_cast<int>(foreign_variables_.size()));
for (size_t i = 0; i < foreign_variables_.size(); ++i) {
ForeignVariable* fv = &foreign_variables_[i];
@ -102,10 +114,26 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
return ret;
}
void Build() {
bool Build() {
InitializeInitFunction();
RECURSE(VisitFunctionLiteral(literal_));
if (!typer_->ValidateBeforeFunctionsPhase()) {
return false;
}
DCHECK(!HasStackOverflow());
VisitFunctionLiteral(literal_);
if (HasStackOverflow()) {
return false;
}
if (!typer_finished_ && !typer_failed_) {
typer_->FailWithMessage("Module missing export section.");
typer_failed_ = true;
}
if (typer_failed_) {
return false;
}
BuildForeignInitFunction();
init_function_->Emit(kExprEnd); // finish init function.
return true;
}
void VisitVariableDeclaration(VariableDeclaration* decl) {}
@ -113,12 +141,65 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitFunctionDeclaration(FunctionDeclaration* decl) {
DCHECK_EQ(kModuleScope, scope_);
DCHECK_NULL(current_function_builder_);
FunctionLiteral* old_func = decl->fun();
Zone zone(isolate_->allocator(), ZONE_NAME);
DeclarationScope* new_func_scope = nullptr;
if (decl->fun()->body() == nullptr) {
// TODO(titzer/bradnelson): Reuse SharedFunctionInfos used here when
// compiling the wasm module.
Handle<SharedFunctionInfo> shared =
Compiler::GetSharedFunctionInfo(decl->fun(), script_, info_);
shared->set_is_toplevel(false);
ParseInfo info(&zone, script_);
info.set_shared_info(shared);
info.set_toplevel(false);
info.set_language_mode(decl->fun()->scope()->language_mode());
info.set_allow_lazy_parsing(false);
info.set_function_literal_id(shared->function_literal_id());
info.set_ast_value_factory(ast_value_factory_);
info.set_ast_value_factory_owned(false);
// Create fresh function scope to use to parse the function in.
new_func_scope = new (info.zone()) DeclarationScope(
info.zone(), decl->fun()->scope()->outer_scope(), FUNCTION_SCOPE);
info.set_asm_function_scope(new_func_scope);
if (!Compiler::ParseAndAnalyze(&info)) {
typer_failed_ = true;
return;
}
FunctionLiteral* func = info.literal();
DCHECK_NOT_NULL(func);
decl->set_fun(func);
}
if (!typer_->ValidateInnerFunction(decl)) {
typer_failed_ = true;
decl->set_fun(old_func);
if (new_func_scope != nullptr) {
DCHECK_EQ(new_func_scope, decl->scope()->inner_scope());
if (!decl->scope()->RemoveInnerScope(new_func_scope)) {
UNREACHABLE();
}
}
return;
}
current_function_builder_ = LookupOrInsertFunction(decl->proxy()->var());
scope_ = kFuncScope;
// Record start of the function, used as position for the stack check.
current_function_builder_->SetAsmFunctionStartPosition(
decl->fun()->start_position());
RECURSE(Visit(decl->fun()));
decl->set_fun(old_func);
if (new_func_scope != nullptr) {
DCHECK_EQ(new_func_scope, decl->scope()->inner_scope());
if (!decl->scope()->RemoveInnerScope(new_func_scope)) {
UNREACHABLE();
}
}
scope_ = kModuleScope;
current_function_builder_ = nullptr;
local_variables_.Clear();
typer_->ClearFunctionNodeTypes();
}
void VisitStatements(ZoneList<Statement*>* stmts) {
@ -129,7 +210,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
continue;
}
RECURSE(Visit(stmt));
if (stmt->IsJump()) break;
if (typer_failed_) break;
}
}
@ -204,6 +285,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitEmptyParentheses(EmptyParentheses* paren) { UNREACHABLE(); }
void VisitGetIterator(GetIterator* expr) { UNREACHABLE(); }
void VisitIfStatement(IfStatement* stmt) {
DCHECK_EQ(kFuncScope, scope_);
RECURSE(Visit(stmt->condition()));
@ -245,6 +328,16 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitReturnStatement(ReturnStatement* stmt) {
if (scope_ == kModuleScope) {
if (typer_finished_) {
typer_->FailWithMessage("Module has multiple returns.");
typer_failed_ = true;
return;
}
if (!typer_->ValidateAfterFunctionsPhase()) {
typer_failed_ = true;
return;
}
typer_finished_ = true;
scope_ = kExportScope;
RECURSE(Visit(stmt->expression()));
scope_ = kModuleScope;
@ -440,16 +533,21 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
// Add the parameters for the function.
const auto& arguments = func_type->Arguments();
for (int i = 0; i < expr->parameter_count(); ++i) {
LocalType type = TypeFrom(arguments[i]);
DCHECK_NE(kAstStmt, type);
ValueType type = TypeFrom(arguments[i]);
DCHECK_NE(kWasmStmt, type);
InsertParameter(scope->parameter(i), type, i);
}
} else {
UNREACHABLE();
}
}
RECURSE(VisitStatements(expr->body()));
RECURSE(VisitDeclarations(scope->declarations()));
if (typer_failed_) return;
RECURSE(VisitStatements(expr->body()));
if (scope_ == kFuncScope) {
// Finish the function-body scope block.
current_function_builder_->Emit(kExprEnd);
}
}
void VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
@ -461,18 +559,18 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
RECURSE(Visit(expr->condition()));
// WASM ifs come with implicit blocks for both arms.
breakable_blocks_.push_back(std::make_pair(nullptr, false));
LocalTypeCode type;
ValueTypeCode type;
switch (TypeOf(expr)) {
case kAstI32:
case kWasmI32:
type = kLocalI32;
break;
case kAstI64:
case kWasmI64:
type = kLocalI64;
break;
case kAstF32:
case kWasmF32:
type = kLocalF32;
break;
case kAstF64:
case kWasmF64:
type = kLocalF64;
break;
default:
@ -544,8 +642,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (VisitStdlibConstant(var)) {
return;
}
LocalType var_type = TypeOf(expr);
DCHECK_NE(kAstStmt, var_type);
ValueType var_type = TypeOf(expr);
DCHECK_NE(kWasmStmt, var_type);
if (var->IsContextSlot()) {
current_function_builder_->EmitWithVarInt(
kExprGetGlobal, LookupOrInsertGlobal(var, var_type));
@ -638,12 +736,13 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
Literal* name = prop->key()->AsLiteral();
DCHECK_NOT_NULL(name);
DCHECK(name->IsPropertyName());
const AstRawString* raw_name = name->AsRawPropertyName();
Handle<String> function_name = name->AsPropertyName();
int length;
std::unique_ptr<char[]> utf8 = function_name->ToCString(
DISALLOW_NULLS, FAST_STRING_TRAVERSAL, &length);
if (var->is_function()) {
WasmFunctionBuilder* function = LookupOrInsertFunction(var);
function->Export();
function->SetName({reinterpret_cast<const char*>(raw_name->raw_data()),
raw_name->length()});
function->ExportAs({utf8.get(), length});
}
}
}
@ -660,53 +759,67 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
current_function_builder_ = nullptr;
}
void AddFunctionTable(VariableProxy* table, ArrayLiteral* funcs) {
auto* func_tbl_type = typer_->TypeOf(funcs)->AsFunctionTableType();
DCHECK_NOT_NULL(func_tbl_type);
auto* func_type = func_tbl_type->signature()->AsFunctionType();
struct FunctionTableIndices : public ZoneObject {
uint32_t start_index;
uint32_t signature_index;
};
FunctionTableIndices* LookupOrAddFunctionTable(VariableProxy* table,
Property* p) {
FunctionTableIndices* indices = LookupFunctionTable(table->var());
if (indices != nullptr) {
// Already set up.
return indices;
}
indices = new (zone()) FunctionTableIndices();
auto* func_type = typer_->TypeOf(p)->AsFunctionType();
auto* func_table_type = typer_->TypeOf(p->obj()->AsVariableProxy()->var())
->AsFunctionTableType();
const auto& arguments = func_type->Arguments();
LocalType return_type = TypeFrom(func_type->ReturnType());
FunctionSig::Builder sig(zone(), return_type == kAstStmt ? 0 : 1,
ValueType return_type = TypeFrom(func_type->ReturnType());
FunctionSig::Builder sig(zone(), return_type == kWasmStmt ? 0 : 1,
arguments.size());
if (return_type != kAstStmt) {
if (return_type != kWasmStmt) {
sig.AddReturn(return_type);
}
for (auto* arg : arguments) {
sig.AddParam(TypeFrom(arg));
}
uint32_t signature_index = builder_->AddSignature(sig.Build());
InsertFunctionTable(table->var(), next_table_index_, signature_index);
next_table_index_ += funcs->values()->length();
for (int i = 0; i < funcs->values()->length(); ++i) {
VariableProxy* func = funcs->values()->at(i)->AsVariableProxy();
DCHECK_NOT_NULL(func);
builder_->AddIndirectFunction(
LookupOrInsertFunction(func->var())->func_index());
}
}
struct FunctionTableIndices : public ZoneObject {
uint32_t start_index;
uint32_t signature_index;
};
void InsertFunctionTable(Variable* v, uint32_t start_index,
uint32_t signature_index) {
FunctionTableIndices* container = new (zone()) FunctionTableIndices();
container->start_index = start_index;
container->signature_index = signature_index;
indices->start_index = builder_->AllocateIndirectFunctions(
static_cast<uint32_t>(func_table_type->length()));
indices->signature_index = signature_index;
ZoneHashMap::Entry* entry = function_tables_.LookupOrInsert(
v, ComputePointerHash(v), ZoneAllocationPolicy(zone()));
entry->value = container;
table->var(), ComputePointerHash(table->var()),
ZoneAllocationPolicy(zone()));
entry->value = indices;
return indices;
}
FunctionTableIndices* LookupFunctionTable(Variable* v) {
ZoneHashMap::Entry* entry =
function_tables_.Lookup(v, ComputePointerHash(v));
DCHECK_NOT_NULL(entry);
if (entry == nullptr) {
return nullptr;
}
return reinterpret_cast<FunctionTableIndices*>(entry->value);
}
void PopulateFunctionTable(VariableProxy* table, ArrayLiteral* funcs) {
FunctionTableIndices* indices = LookupFunctionTable(table->var());
// Ignore unused function tables.
if (indices == nullptr) {
return;
}
for (int i = 0; i < funcs->values()->length(); ++i) {
VariableProxy* func = funcs->values()->at(i)->AsVariableProxy();
DCHECK_NOT_NULL(func);
builder_->SetIndirectFunction(
indices->start_index + i,
LookupOrInsertFunction(func->var())->func_index());
}
}
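The table machinery is now demand-driven rather than built eagerly at the assignment site; in outline (summarizing the two functions above):
// table[i & mask](args);    --> LookupOrAddFunctionTable(): on the first
//                                call through the table, build the signature
//                                and reserve index space with
//                                AllocateIndirectFunctions().
// var table = [f, g, ...];  --> PopulateFunctionTable(): fill the reserved
//                                range with each function's index. Tables
//                                never called through are skipped.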
class ImportedFunctionTable {
private:
class ImportedFunctionIndices : public ZoneObject {
@ -727,20 +840,33 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
ZoneAllocationPolicy(builder->zone())),
builder_(builder) {}
void AddImport(Variable* v, const char* name, int name_length) {
ImportedFunctionIndices* indices = new (builder_->zone())
ImportedFunctionIndices(name, name_length, builder_->zone());
ImportedFunctionIndices* LookupOrInsertImport(Variable* v) {
auto* entry = table_.LookupOrInsert(
v, ComputePointerHash(v), ZoneAllocationPolicy(builder_->zone()));
entry->value = indices;
ImportedFunctionIndices* indices;
if (entry->value == nullptr) {
indices = new (builder_->zone())
ImportedFunctionIndices(nullptr, 0, builder_->zone());
entry->value = indices;
} else {
indices = reinterpret_cast<ImportedFunctionIndices*>(entry->value);
}
return indices;
}
void SetImportName(Variable* v, const char* name, int name_length) {
auto* indices = LookupOrInsertImport(v);
indices->name_ = name;
indices->name_length_ = name_length;
for (auto i : indices->signature_to_index_) {
builder_->builder_->SetImportName(i.second, indices->name_,
indices->name_length_);
}
}
// Get a function's index (or allocate if new).
uint32_t LookupOrInsertImport(Variable* v, FunctionSig* sig) {
ZoneHashMap::Entry* entry = table_.Lookup(v, ComputePointerHash(v));
DCHECK_NOT_NULL(entry);
ImportedFunctionIndices* indices =
reinterpret_cast<ImportedFunctionIndices*>(entry->value);
uint32_t LookupOrInsertImportUse(Variable* v, FunctionSig* sig) {
auto* indices = LookupOrInsertImport(v);
WasmModuleBuilder::SignatureMap::iterator pos =
indices->signature_to_index_.find(sig);
if (pos != indices->signature_to_index_.end()) {
@ -819,8 +945,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (target_var != nullptr) {
// Left hand side is a local or a global variable.
Variable* var = target_var->var();
LocalType var_type = TypeOf(expr);
DCHECK_NE(kAstStmt, var_type);
ValueType var_type = TypeOf(expr);
DCHECK_NE(kWasmStmt, var_type);
if (var->IsContextSlot()) {
uint32_t index = LookupOrInsertGlobal(var, var_type);
current_function_builder_->EmitWithVarInt(kExprSetGlobal, index);
@ -841,7 +967,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
Property* target_prop = expr->target()->AsProperty();
if (target_prop != nullptr) {
// Left hand side is a property access, i.e. the asm.js heap.
if (TypeOf(expr->value()) == kAstF64 && expr->target()->IsProperty() &&
if (TypeOf(expr->value()) == kWasmF64 && expr->target()->IsProperty() &&
typer_->TypeOf(expr->target()->AsProperty()->obj())
->IsA(AsmType::Float32Array())) {
current_function_builder_->Emit(kExprF32ConvertF64);
@ -901,7 +1027,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (typer_->TypeOf(target)->AsFFIType() != nullptr) {
const AstRawString* name =
prop->key()->AsLiteral()->AsRawPropertyName();
imported_function_table_.AddImport(
imported_function_table_.SetImportName(
target->var(), reinterpret_cast<const char*>(name->raw_data()),
name->length());
}
@ -910,14 +1036,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
return;
}
ArrayLiteral* funcs = expr->value()->AsArrayLiteral();
if (funcs != nullptr &&
typer_->TypeOf(funcs)
->AsFunctionTableType()
->signature()
->AsFunctionType()) {
if (funcs != nullptr) {
VariableProxy* target = expr->target()->AsVariableProxy();
DCHECK_NOT_NULL(target);
AddFunctionTable(target, funcs);
PopulateFunctionTable(target, funcs);
// Only add to the function table. No init needed.
return;
}
@ -952,8 +1074,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
DCHECK_NOT_NULL(key_literal);
if (!key_literal->value().is_null()) {
Handle<Name> name =
i::Object::ToName(isolate_, key_literal->value()).ToHandleChecked();
LocalType type = is_float ? kAstF64 : kAstI32;
Object::ToName(isolate_, key_literal->value()).ToHandleChecked();
ValueType type = is_float ? kWasmF64 : kWasmI32;
foreign_variables_.push_back({name, var, type});
}
}
@ -961,7 +1083,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitPropertyAndEmitIndex(Property* expr, AsmType** atype) {
Expression* obj = expr->obj();
*atype = typer_->TypeOf(obj);
int size = (*atype)->ElementSizeInBytes();
int32_t size = (*atype)->ElementSizeInBytes();
if (size == 1) {
// Allow more general expressions in byte arrays than the spec
// strictly permits.
@ -974,7 +1096,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
Literal* value = expr->key()->AsLiteral();
if (value) {
DCHECK(value->raw_value()->IsNumber());
DCHECK_EQ(kAstI32, TypeOf(value));
DCHECK_EQ(kWasmI32, TypeOf(value));
int32_t val = static_cast<int32_t>(value->raw_value()->AsNumber());
// TODO(titzer): handle overflow here.
current_function_builder_->EmitI32Const(val * size);
@ -984,14 +1106,13 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (binop) {
DCHECK_EQ(Token::SAR, binop->op());
DCHECK(binop->right()->AsLiteral()->raw_value()->IsNumber());
DCHECK(kAstI32 == TypeOf(binop->right()->AsLiteral()));
DCHECK(kWasmI32 == TypeOf(binop->right()->AsLiteral()));
DCHECK_EQ(size,
1 << static_cast<int>(
binop->right()->AsLiteral()->raw_value()->AsNumber()));
// Mask bottom bits to match asm.js behavior.
byte mask = static_cast<byte>(~(size - 1));
RECURSE(Visit(binop->left()));
current_function_builder_->EmitWithU8(kExprI8Const, mask);
current_function_builder_->EmitI32Const(~(size - 1));
current_function_builder_->Emit(kExprI32And);
return;
}
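Replacing the old 8-bit kExprI8Const mask with a full i32 constant keeps the emitted constant identical to the computed one. A standalone check of the ~(size - 1) alignment trick (plain C++, no V8 types):
#include <cassert>
#include <cstdint>

int main() {
  const int32_t size = 8;            // e.g. a Float64Array element
  const int32_t mask = ~(size - 1);  // 0xFFFFFFF8: clears the low 3 bits
  const int32_t byte_offset = 29;    // a deliberately unaligned offset
  assert((byte_offset & mask) == 24);  // snapped to an 8-byte boundary
  return 0;
}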
@ -1030,7 +1151,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
AsmTyper::StandardMember standard_object =
typer_->VariableAsStandardMember(var);
ZoneList<Expression*>* args = call->arguments();
LocalType call_type = TypeOf(call);
ValueType call_type = TypeOf(call);
switch (standard_object) {
case AsmTyper::kNone: {
@ -1038,57 +1159,57 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
case AsmTyper::kMathAcos: {
VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type);
DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Acos);
break;
}
case AsmTyper::kMathAsin: {
VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type);
DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Asin);
break;
}
case AsmTyper::kMathAtan: {
VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type);
DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Atan);
break;
}
case AsmTyper::kMathCos: {
VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type);
DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Cos);
break;
}
case AsmTyper::kMathSin: {
VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type);
DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Sin);
break;
}
case AsmTyper::kMathTan: {
VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type);
DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Tan);
break;
}
case AsmTyper::kMathExp: {
VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type);
DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Exp);
break;
}
case AsmTyper::kMathLog: {
VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type);
DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Log);
break;
}
case AsmTyper::kMathCeil: {
VisitCallArgs(call);
if (call_type == kAstF32) {
if (call_type == kWasmF32) {
current_function_builder_->Emit(kExprF32Ceil);
} else if (call_type == kAstF64) {
} else if (call_type == kWasmF64) {
current_function_builder_->Emit(kExprF64Ceil);
} else {
UNREACHABLE();
@ -1097,9 +1218,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
case AsmTyper::kMathFloor: {
VisitCallArgs(call);
if (call_type == kAstF32) {
if (call_type == kWasmF32) {
current_function_builder_->Emit(kExprF32Floor);
} else if (call_type == kAstF64) {
} else if (call_type == kWasmF64) {
current_function_builder_->Emit(kExprF64Floor);
} else {
UNREACHABLE();
@ -1108,9 +1229,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
case AsmTyper::kMathSqrt: {
VisitCallArgs(call);
if (call_type == kAstF32) {
if (call_type == kWasmF32) {
current_function_builder_->Emit(kExprF32Sqrt);
} else if (call_type == kAstF64) {
} else if (call_type == kWasmF64) {
current_function_builder_->Emit(kExprF64Sqrt);
} else {
UNREACHABLE();
@ -1119,18 +1240,18 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
case AsmTyper::kMathClz32: {
VisitCallArgs(call);
DCHECK(call_type == kAstI32);
DCHECK(call_type == kWasmI32);
current_function_builder_->Emit(kExprI32Clz);
break;
}
case AsmTyper::kMathAbs: {
if (call_type == kAstI32) {
WasmTemporary tmp(current_function_builder_, kAstI32);
if (call_type == kWasmI32) {
WasmTemporary tmp(current_function_builder_, kWasmI32);
// if set_local(tmp, x) < 0
Visit(call->arguments()->at(0));
current_function_builder_->EmitTeeLocal(tmp.index());
byte code[] = {WASM_I8(0)};
byte code[] = {WASM_ZERO};
current_function_builder_->EmitCode(code, sizeof(code));
current_function_builder_->Emit(kExprI32LtS);
current_function_builder_->EmitWithU8(kExprIf, kLocalI32);
@ -1146,10 +1267,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
// end
current_function_builder_->Emit(kExprEnd);
} else if (call_type == kAstF32) {
} else if (call_type == kWasmF32) {
VisitCallArgs(call);
current_function_builder_->Emit(kExprF32Abs);
} else if (call_type == kAstF64) {
} else if (call_type == kWasmF64) {
VisitCallArgs(call);
current_function_builder_->Emit(kExprF64Abs);
} else {
@ -1159,9 +1280,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
case AsmTyper::kMathMin: {
// TODO(bradnelson): Change wasm to match Math.min in asm.js mode.
if (call_type == kAstI32) {
WasmTemporary tmp_x(current_function_builder_, kAstI32);
WasmTemporary tmp_y(current_function_builder_, kAstI32);
if (call_type == kWasmI32) {
WasmTemporary tmp_x(current_function_builder_, kWasmI32);
WasmTemporary tmp_y(current_function_builder_, kWasmI32);
// if set_local(tmp_x, x) < set_local(tmp_y, y)
Visit(call->arguments()->at(0));
@ -1181,10 +1302,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
current_function_builder_->EmitGetLocal(tmp_y.index());
current_function_builder_->Emit(kExprEnd);
} else if (call_type == kAstF32) {
} else if (call_type == kWasmF32) {
VisitCallArgs(call);
current_function_builder_->Emit(kExprF32Min);
} else if (call_type == kAstF64) {
} else if (call_type == kWasmF64) {
VisitCallArgs(call);
current_function_builder_->Emit(kExprF64Min);
} else {
@ -1194,9 +1315,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
case AsmTyper::kMathMax: {
// TODO(bradnelson): Change wasm to match Math.max in asm.js mode.
if (call_type == kAstI32) {
WasmTemporary tmp_x(current_function_builder_, kAstI32);
WasmTemporary tmp_y(current_function_builder_, kAstI32);
if (call_type == kWasmI32) {
WasmTemporary tmp_x(current_function_builder_, kWasmI32);
WasmTemporary tmp_y(current_function_builder_, kWasmI32);
// if set_local(tmp_x, x) < set_local(tmp_y, y)
Visit(call->arguments()->at(0));
@ -1217,10 +1338,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
current_function_builder_->EmitGetLocal(tmp_x.index());
current_function_builder_->Emit(kExprEnd);
} else if (call_type == kAstF32) {
} else if (call_type == kWasmF32) {
VisitCallArgs(call);
current_function_builder_->Emit(kExprF32Max);
} else if (call_type == kAstF64) {
} else if (call_type == kWasmF64) {
VisitCallArgs(call);
current_function_builder_->Emit(kExprF64Max);
} else {
@ -1230,13 +1351,13 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
case AsmTyper::kMathAtan2: {
VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type);
DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Atan2);
break;
}
case AsmTyper::kMathPow: {
VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type);
DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Pow);
break;
}
@ -1298,6 +1419,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
bool VisitCallExpression(Call* expr) {
Call::CallType call_type = expr->GetCallType();
bool returns_value = true;
// Save the parent now; it might be overwritten in VisitCallArgs.
BinaryOperation* parent_binop = parent_binop_;
switch (call_type) {
case Call::OTHER_CALL: {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
@ -1313,11 +1438,11 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
VariableProxy* vp = expr->expression()->AsVariableProxy();
DCHECK_NOT_NULL(vp);
if (typer_->TypeOf(vp)->AsFFIType() != nullptr) {
LocalType return_type = TypeOf(expr);
ValueType return_type = TypeOf(expr);
ZoneList<Expression*>* args = expr->arguments();
FunctionSig::Builder sig(zone(), return_type == kAstStmt ? 0 : 1,
FunctionSig::Builder sig(zone(), return_type == kWasmStmt ? 0 : 1,
args->length());
if (return_type != kAstStmt) {
if (return_type != kWasmStmt) {
sig.AddReturn(return_type);
} else {
returns_value = false;
@ -1325,16 +1450,23 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
for (int i = 0; i < args->length(); ++i) {
sig.AddParam(TypeOf(args->at(i)));
}
uint32_t index = imported_function_table_.LookupOrInsertImport(
uint32_t index = imported_function_table_.LookupOrInsertImportUse(
vp->var(), sig.Build());
VisitCallArgs(expr);
current_function_builder_->AddAsmWasmOffset(expr->position());
// For non-void functions, we must know the parent node.
DCHECK_IMPLIES(returns_value, parent_binop != nullptr);
DCHECK_IMPLIES(returns_value, parent_binop->left() == expr ||
parent_binop->right() == expr);
int pos = expr->position();
int parent_pos = returns_value ? parent_binop->position() : pos;
current_function_builder_->AddAsmWasmOffset(pos, parent_pos);
current_function_builder_->Emit(kExprCallFunction);
current_function_builder_->EmitVarInt(index);
} else {
WasmFunctionBuilder* function = LookupOrInsertFunction(vp->var());
VisitCallArgs(expr);
current_function_builder_->AddAsmWasmOffset(expr->position());
current_function_builder_->AddAsmWasmOffset(expr->position(),
expr->position());
current_function_builder_->Emit(kExprCallFunction);
current_function_builder_->EmitDirectCallIndex(
function->func_index());
@ -1348,19 +1480,20 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
DCHECK_NOT_NULL(p);
VariableProxy* var = p->obj()->AsVariableProxy();
DCHECK_NOT_NULL(var);
FunctionTableIndices* indices = LookupFunctionTable(var->var());
FunctionTableIndices* indices = LookupOrAddFunctionTable(var, p);
Visit(p->key()); // TODO(titzer): should use RECURSE()
// We have to use a temporary for the correct order of evaluation.
current_function_builder_->EmitI32Const(indices->start_index);
current_function_builder_->Emit(kExprI32Add);
WasmTemporary tmp(current_function_builder_, kAstI32);
WasmTemporary tmp(current_function_builder_, kWasmI32);
current_function_builder_->EmitSetLocal(tmp.index());
VisitCallArgs(expr);
current_function_builder_->EmitGetLocal(tmp.index());
current_function_builder_->AddAsmWasmOffset(expr->position());
current_function_builder_->AddAsmWasmOffset(expr->position(),
expr->position());
current_function_builder_->Emit(kExprCallIndirect);
current_function_builder_->EmitVarInt(indices->signature_index);
current_function_builder_->EmitVarInt(0); // table index
@ -1383,7 +1516,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
RECURSE(Visit(expr->expression()));
switch (expr->op()) {
case Token::NOT: {
DCHECK_EQ(kAstI32, TypeOf(expr->expression()));
DCHECK_EQ(kWasmI32, TypeOf(expr->expression()));
current_function_builder_->Emit(kExprI32Eqz);
break;
}
@ -1398,10 +1531,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
int32_t val) {
DCHECK_NOT_NULL(expr->right());
if (expr->op() == op && expr->right()->IsLiteral() &&
TypeOf(expr) == kAstI32) {
TypeOf(expr) == kWasmI32) {
Literal* right = expr->right()->AsLiteral();
DCHECK(right->raw_value()->IsNumber());
if (static_cast<int32_t>(right->raw_value()->AsNumber()) == val) {
if (right->raw_value()->IsNumber() &&
static_cast<int32_t>(right->raw_value()->AsNumber()) == val) {
return true;
}
}
@ -1412,7 +1545,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
double val) {
DCHECK_NOT_NULL(expr->right());
if (expr->op() == op && expr->right()->IsLiteral() &&
TypeOf(expr) == kAstF64) {
TypeOf(expr) == kWasmF64) {
Literal* right = expr->right()->AsLiteral();
DCHECK(right->raw_value()->IsNumber());
if (right->raw_value()->AsNumber() == val) {
@ -1426,7 +1559,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
ConvertOperation MatchOr(BinaryOperation* expr) {
if (MatchIntBinaryOperation(expr, Token::BIT_OR, 0) &&
(TypeOf(expr->left()) == kAstI32)) {
(TypeOf(expr->left()) == kWasmI32)) {
return kAsIs;
} else {
return kNone;
@ -1436,7 +1569,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
ConvertOperation MatchShr(BinaryOperation* expr) {
if (MatchIntBinaryOperation(expr, Token::SHR, 0)) {
// TODO(titzer): this probably needs to be kToUint
return (TypeOf(expr->left()) == kAstI32) ? kAsIs : kToInt;
return (TypeOf(expr->left()) == kWasmI32) ? kAsIs : kToInt;
} else {
return kNone;
}
@ -1444,13 +1577,13 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
ConvertOperation MatchXor(BinaryOperation* expr) {
if (MatchIntBinaryOperation(expr, Token::BIT_XOR, 0xffffffff)) {
DCHECK_EQ(kAstI32, TypeOf(expr->left()));
DCHECK_EQ(kAstI32, TypeOf(expr->right()));
DCHECK_EQ(kWasmI32, TypeOf(expr->left()));
DCHECK_EQ(kWasmI32, TypeOf(expr->right()));
BinaryOperation* op = expr->left()->AsBinaryOperation();
if (op != nullptr) {
if (MatchIntBinaryOperation(op, Token::BIT_XOR, 0xffffffff)) {
DCHECK_EQ(kAstI32, TypeOf(op->right()));
if (TypeOf(op->left()) != kAstI32) {
DCHECK_EQ(kWasmI32, TypeOf(op->right()));
if (TypeOf(op->left()) != kWasmI32) {
return kToInt;
} else {
return kAsIs;
@ -1463,8 +1596,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
ConvertOperation MatchMul(BinaryOperation* expr) {
if (MatchDoubleBinaryOperation(expr, Token::MUL, 1.0)) {
DCHECK_EQ(kAstF64, TypeOf(expr->right()));
if (TypeOf(expr->left()) != kAstF64) {
DCHECK_EQ(kWasmF64, TypeOf(expr->right()));
if (TypeOf(expr->left()) != kWasmF64) {
return kToDouble;
} else {
return kAsIs;
@ -1532,6 +1665,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitBinaryOperation(BinaryOperation* expr) {
ConvertOperation convertOperation = MatchBinaryOperation(expr);
static const bool kDontIgnoreSign = false;
parent_binop_ = expr;
if (convertOperation == kToDouble) {
RECURSE(Visit(expr->left()));
TypeIndex type = TypeIndexOf(expr->left(), kDontIgnoreSign);
@ -1694,6 +1828,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitDeclarations(Declaration::List* decls) {
for (Declaration* decl : *decls) {
RECURSE(Visit(decl));
if (typer_failed_) {
return;
}
}
}
@ -1719,7 +1856,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
uint32_t index;
};
uint32_t LookupOrInsertLocal(Variable* v, LocalType type) {
uint32_t LookupOrInsertLocal(Variable* v, ValueType type) {
DCHECK_NOT_NULL(current_function_builder_);
ZoneHashMap::Entry* entry =
local_variables_.Lookup(v, ComputePointerHash(v));
@ -1736,7 +1873,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
return (reinterpret_cast<IndexContainer*>(entry->value))->index;
}
void InsertParameter(Variable* v, LocalType type, uint32_t index) {
void InsertParameter(Variable* v, ValueType type, uint32_t index) {
DCHECK(v->IsParameter());
DCHECK_NOT_NULL(current_function_builder_);
ZoneHashMap::Entry* entry =
@ -1749,7 +1886,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
entry->value = container;
}
uint32_t LookupOrInsertGlobal(Variable* v, LocalType type) {
uint32_t LookupOrInsertGlobal(Variable* v, ValueType type) {
ZoneHashMap::Entry* entry =
global_variables_.Lookup(v, ComputePointerHash(v));
if (entry == nullptr) {
@ -1770,14 +1907,14 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
auto* func_type = typer_->TypeOf(v)->AsFunctionType();
DCHECK_NOT_NULL(func_type);
// Build the signature for the function.
LocalType return_type = TypeFrom(func_type->ReturnType());
ValueType return_type = TypeFrom(func_type->ReturnType());
const auto& arguments = func_type->Arguments();
FunctionSig::Builder b(zone(), return_type == kAstStmt ? 0 : 1,
FunctionSig::Builder b(zone(), return_type == kWasmStmt ? 0 : 1,
arguments.size());
if (return_type != kAstStmt) b.AddReturn(return_type);
if (return_type != kWasmStmt) b.AddReturn(return_type);
for (int i = 0; i < static_cast<int>(arguments.size()); ++i) {
LocalType type = TypeFrom(arguments[i]);
DCHECK_NE(kAstStmt, type);
ValueType type = TypeFrom(arguments[i]);
DCHECK_NE(kWasmStmt, type);
b.AddParam(type);
}
@ -1792,22 +1929,22 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
return (reinterpret_cast<WasmFunctionBuilder*>(entry->value));
}
LocalType TypeOf(Expression* expr) { return TypeFrom(typer_->TypeOf(expr)); }
ValueType TypeOf(Expression* expr) { return TypeFrom(typer_->TypeOf(expr)); }
LocalType TypeFrom(AsmType* type) {
ValueType TypeFrom(AsmType* type) {
if (type->IsA(AsmType::Intish())) {
return kAstI32;
return kWasmI32;
}
if (type->IsA(AsmType::Floatish())) {
return kAstF32;
return kWasmF32;
}
if (type->IsA(AsmType::DoubleQ())) {
return kAstF64;
return kWasmF64;
}
return kAstStmt;
return kWasmStmt;
}
Zone* zone() { return zone_; }
@ -1821,7 +1958,12 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
FunctionLiteral* literal_;
Isolate* isolate_;
Zone* zone_;
CompilationInfo* info_;
AstValueFactory* ast_value_factory_;
Handle<Script> script_;
AsmTyper* typer_;
bool typer_failed_;
bool typer_finished_;
ZoneVector<std::pair<BreakableStatement*, bool>> breakable_blocks_;
ZoneVector<ForeignVariable> foreign_variables_;
WasmFunctionBuilder* init_function_;
@ -1829,6 +1971,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
uint32_t next_table_index_;
ZoneHashMap function_tables_;
ImportedFunctionTable imported_function_table_;
// Remember the parent node for reporting the correct location for ToNumber
// conversions after calls.
BinaryOperation* parent_binop_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@ -1836,22 +1981,24 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
DISALLOW_COPY_AND_ASSIGN(AsmWasmBuilderImpl);
};
AsmWasmBuilder::AsmWasmBuilder(Isolate* isolate, Zone* zone,
FunctionLiteral* literal, AsmTyper* typer)
: isolate_(isolate), zone_(zone), literal_(literal), typer_(typer) {}
AsmWasmBuilder::AsmWasmBuilder(CompilationInfo* info)
: info_(info),
typer_(info->isolate(), info->zone(), info->script(), info->literal()) {}
// TODO(aseemgarg): probably should take the zone (to write wasm to) as input
// so that the zone in the constructor may be thrown away once the wasm module
// is written.
AsmWasmBuilder::Result AsmWasmBuilder::Run(
i::Handle<i::FixedArray>* foreign_args) {
AsmWasmBuilderImpl impl(isolate_, zone_, literal_, typer_);
impl.Build();
AsmWasmBuilder::Result AsmWasmBuilder::Run(Handle<FixedArray>* foreign_args) {
Zone* zone = info_->zone();
AsmWasmBuilderImpl impl(info_->isolate(), zone, info_,
info_->parse_info()->ast_value_factory(),
info_->script(), info_->literal(), &typer_);
bool success = impl.Build();
*foreign_args = impl.GetForeignArgs();
ZoneBuffer* module_buffer = new (zone_) ZoneBuffer(zone_);
ZoneBuffer* module_buffer = new (zone) ZoneBuffer(zone);
impl.builder_->WriteTo(*module_buffer);
ZoneBuffer* asm_offsets_buffer = new (zone_) ZoneBuffer(zone_);
ZoneBuffer* asm_offsets_buffer = new (zone) ZoneBuffer(zone);
impl.builder_->WriteAsmJsOffsetTable(*asm_offsets_buffer);
return {module_buffer, asm_offsets_buffer};
return {module_buffer, asm_offsets_buffer, success};
}
const char* AsmWasmBuilder::foreign_init_name = "__foreign_init__";
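The kAst* to kWasm* renames throughout this file track V8's switch from the old LocalType aliases to the wasm ValueType names. As a rough standalone sketch of what TypeFrom() above reduces to (the enum and class names here are invented for illustration, not V8's):

#include <cassert>

// Invented stand-ins for the AsmType classes and wasm value types.
enum class AsmClass { kIntish, kFloatish, kDoubleQ, kVoid };
enum class WasmType { kI32, kF32, kF64, kStmt };

// Mirrors the TypeFrom() mapping: intish -> i32, floatish -> f32,
// double? -> f64, anything else is a statement (produces no value).
WasmType TypeFrom(AsmClass type) {
  switch (type) {
    case AsmClass::kIntish:   return WasmType::kI32;  // e.g. (x | 0)
    case AsmClass::kFloatish: return WasmType::kF32;  // e.g. fround(x)
    case AsmClass::kDoubleQ:  return WasmType::kF64;  // e.g. +x
    case AsmClass::kVoid:     break;
  }
  return WasmType::kStmt;
}

int main() {
  assert(TypeFrom(AsmClass::kIntish) == WasmType::kI32);
  assert(TypeFrom(AsmClass::kVoid) == WasmType::kStmt);
}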

14
deps/v8/src/asmjs/asm-wasm-builder.h

@ -14,7 +14,7 @@
namespace v8 {
namespace internal {
class FunctionLiteral;
class CompilationInfo;
namespace wasm {
@ -23,20 +23,20 @@ class AsmWasmBuilder {
struct Result {
ZoneBuffer* module_bytes;
ZoneBuffer* asm_offset_table;
bool success;
};
explicit AsmWasmBuilder(Isolate* isolate, Zone* zone, FunctionLiteral* root,
AsmTyper* typer);
explicit AsmWasmBuilder(CompilationInfo* info);
Result Run(Handle<FixedArray>* foreign_args);
static const char* foreign_init_name;
static const char* single_function_name;
const AsmTyper* typer() { return &typer_; }
private:
Isolate* isolate_;
Zone* zone_;
FunctionLiteral* literal_;
AsmTyper* typer_;
CompilationInfo* info_;
AsmTyper typer_;
};
} // namespace wasm
} // namespace internal
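The Result struct now carries a success flag, and the builder owns its AsmTyper instead of borrowing one through the constructor. A minimal model of the new calling convention (all types here are invented for the sketch; the real Result holds ZoneBuffer pointers):

#include <cassert>
#include <cstdint>
#include <vector>

struct Result {
  std::vector<uint8_t>* module_bytes;
  std::vector<uint8_t>* asm_offset_table;
  bool success;
};

// Models AsmWasmBuilder::Run(): the buffers are produced unconditionally,
// but the flag reports whether the embedded typer accepted the module.
Result Run(bool typer_ok) {
  static std::vector<uint8_t> module_buffer, offset_buffer;
  return {&module_buffer, &offset_buffer, typer_ok};
}

int main() {
  Result r = Run(/*typer_ok=*/false);
  // Callers must now check success before trusting the module bytes.
  assert(!r.success);
}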

32
deps/v8/src/assembler-inl.h

@ -0,0 +1,32 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ASSEMBLER_INL_H_
#define V8_ASSEMBLER_INL_H_
#include "src/assembler.h"
#if V8_TARGET_ARCH_IA32
#include "src/ia32/assembler-ia32-inl.h"
#elif V8_TARGET_ARCH_X64
#include "src/x64/assembler-x64-inl.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/arm64/assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "src/arm/assembler-arm-inl.h"
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/assembler-ppc-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/assembler-mips-inl.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/assembler-mips64-inl.h"
#elif V8_TARGET_ARCH_S390
#include "src/s390/assembler-s390-inl.h"
#elif V8_TARGET_ARCH_X87
#include "src/x87/assembler-x87-inl.h"
#else
#error Unknown architecture.
#endif
#endif // V8_ASSEMBLER_INL_H_

74
deps/v8/src/assembler.cc

@ -35,8 +35,11 @@
#include "src/assembler.h"
#include <math.h>
#include <string.h>
#include <cmath>
#include "src/api.h"
#include "src/assembler-inl.h"
#include "src/base/cpu.h"
#include "src/base/functional.h"
#include "src/base/ieee754.h"
@ -62,28 +65,6 @@
#include "src/snapshot/serializer-common.h"
#include "src/wasm/wasm-external-refs.h"
#if V8_TARGET_ARCH_IA32
#include "src/ia32/assembler-ia32-inl.h" // NOLINT
#elif V8_TARGET_ARCH_X64
#include "src/x64/assembler-x64-inl.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
#include "src/arm64/assembler-arm64-inl.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/assembler-arm-inl.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/assembler-ppc-inl.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/assembler-mips-inl.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/assembler-mips64-inl.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/s390/assembler-s390-inl.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/assembler-x87-inl.h" // NOLINT
#else
#error "Unknown architecture."
#endif
// Include native regexp-macro-assembler.
#ifndef V8_INTERPRETED_REGEXP
#if V8_TARGET_ARCH_IA32
@ -353,8 +334,7 @@ void RelocInfo::update_wasm_memory_reference(
uint32_t current_size_reference = wasm_memory_size_reference();
uint32_t updated_size_reference =
new_size + (current_size_reference - old_size);
unchecked_update_wasm_memory_size(updated_size_reference,
icache_flush_mode);
unchecked_update_wasm_size(updated_size_reference, icache_flush_mode);
} else {
UNREACHABLE();
}
@ -378,6 +358,18 @@ void RelocInfo::update_wasm_global_reference(
}
}
void RelocInfo::update_wasm_function_table_size_reference(
uint32_t old_size, uint32_t new_size, ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmFunctionTableSizeReference(rmode_));
uint32_t current_size_reference = wasm_function_table_size_reference();
uint32_t updated_size_reference =
new_size + (current_size_reference - old_size);
unchecked_update_wasm_size(updated_size_reference, icache_flush_mode);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
}
}
void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
@ -782,14 +774,14 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "debug break slot at tail call";
case CODE_AGE_SEQUENCE:
return "code age sequence";
case GENERATOR_CONTINUATION:
return "generator continuation";
case WASM_MEMORY_REFERENCE:
return "wasm memory reference";
case WASM_MEMORY_SIZE_REFERENCE:
return "wasm memory size reference";
case WASM_GLOBAL_REFERENCE:
return "wasm global value reference";
case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
return "wasm function table size reference";
case NUMBER_OF_MODES:
case PC_JUMP:
UNREACHABLE();
@ -884,10 +876,10 @@ void RelocInfo::Verify(Isolate* isolate) {
case DEBUG_BREAK_SLOT_AT_RETURN:
case DEBUG_BREAK_SLOT_AT_CALL:
case DEBUG_BREAK_SLOT_AT_TAIL_CALL:
case GENERATOR_CONTINUATION:
case WASM_MEMORY_REFERENCE:
case WASM_MEMORY_SIZE_REFERENCE:
case WASM_GLOBAL_REFERENCE:
case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
case NONE32:
case NONE64:
break;
@ -1204,6 +1196,12 @@ ExternalReference ExternalReference::f64_mod_wrapper_function(
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_mod_wrapper)));
}
ExternalReference ExternalReference::wasm_call_trap_callback_for_testing(
Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(wasm::call_trap_callback_for_testing)));
}
ExternalReference ExternalReference::log_enter_external_function(
Isolate* isolate) {
return ExternalReference(
@ -1548,6 +1546,14 @@ ExternalReference ExternalReference::ieee754_tanh_function(Isolate* isolate) {
Redirect(isolate, FUNCTION_ADDR(base::ieee754::tanh), BUILTIN_FP_CALL));
}
void* libc_memchr(void* string, int character, size_t search_length) {
return memchr(string, character, search_length);
}
ExternalReference ExternalReference::libc_memchr_function(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memchr)));
}
ExternalReference ExternalReference::page_flags(Page* page) {
return ExternalReference(reinterpret_cast<Address>(page) +
MemoryChunk::kFlagsOffset);
@ -1569,11 +1575,19 @@ ExternalReference ExternalReference::is_tail_call_elimination_enabled_address(
return ExternalReference(isolate->is_tail_call_elimination_enabled_address());
}
ExternalReference ExternalReference::promise_hook_address(Isolate* isolate) {
return ExternalReference(isolate->promise_hook_address());
}
ExternalReference ExternalReference::debug_is_active_address(
Isolate* isolate) {
return ExternalReference(isolate->debug()->is_active_address());
}
ExternalReference ExternalReference::debug_hook_on_function_call_address(
Isolate* isolate) {
return ExternalReference(isolate->debug()->hook_on_function_call_address());
}
ExternalReference ExternalReference::debug_after_break_target_address(
Isolate* isolate) {
@ -1914,12 +1928,6 @@ void Assembler::RecordComment(const char* msg) {
}
void Assembler::RecordGeneratorContinuation() {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::GENERATOR_CONTINUATION);
}
void Assembler::RecordDebugBreakSlot(RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
DCHECK(RelocInfo::IsDebugBreakSlot(mode));
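The new update_wasm_function_table_size_reference() uses the same delta-preserving patch arithmetic as the memory-size path, which is why both now share unchecked_update_wasm_size(). A standalone model of that arithmetic:

#include <cassert>
#include <cstdint>

// Sketch of the patch: an encoded size constant may carry an offset
// relative to the old size, and the update preserves that delta instead
// of writing new_size verbatim.
uint32_t UpdatedSizeReference(uint32_t encoded, uint32_t old_size,
                              uint32_t new_size) {
  return new_size + (encoded - old_size);
}

int main() {
  // A reference that encoded old_size + 8 keeps its +8 delta after growth.
  assert(UpdatedSizeReference(/*encoded=*/72, /*old_size=*/64,
                              /*new_size=*/128) == 136);
}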

47
deps/v8/src/assembler.h

@ -395,6 +395,7 @@ class RelocInfo {
WASM_MEMORY_REFERENCE,
WASM_GLOBAL_REFERENCE,
WASM_MEMORY_SIZE_REFERENCE,
WASM_FUNCTION_TABLE_SIZE_REFERENCE,
CELL,
// Everything after runtime_entry (inclusive) is not GC'ed.
@ -413,9 +414,6 @@ class RelocInfo {
// Encoded internal reference, used only on MIPS, MIPS64 and PPC.
INTERNAL_REFERENCE_ENCODED,
// Continuation points for a generator yield.
GENERATOR_CONTINUATION,
// Marks constant and veneer pools. Only used on ARM and ARM64.
// They use a custom noncompact encoding.
CONST_POOL,
@ -440,7 +438,7 @@ class RelocInfo {
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
LAST_CODE_ENUM = DEBUGGER_STATEMENT,
LAST_GCED_ENUM = WASM_MEMORY_SIZE_REFERENCE,
LAST_GCED_ENUM = WASM_FUNCTION_TABLE_SIZE_REFERENCE,
FIRST_SHAREABLE_RELOC_MODE = CELL,
};
@ -524,9 +522,6 @@ class RelocInfo {
static inline bool IsCodeAgeSequence(Mode mode) {
return mode == CODE_AGE_SEQUENCE;
}
static inline bool IsGeneratorContinuation(Mode mode) {
return mode == GENERATOR_CONTINUATION;
}
static inline bool IsWasmMemoryReference(Mode mode) {
return mode == WASM_MEMORY_REFERENCE;
}
@ -536,6 +531,22 @@ class RelocInfo {
static inline bool IsWasmGlobalReference(Mode mode) {
return mode == WASM_GLOBAL_REFERENCE;
}
static inline bool IsWasmFunctionTableSizeReference(Mode mode) {
return mode == WASM_FUNCTION_TABLE_SIZE_REFERENCE;
}
static inline bool IsWasmReference(Mode mode) {
return mode == WASM_MEMORY_REFERENCE || mode == WASM_GLOBAL_REFERENCE ||
mode == WASM_MEMORY_SIZE_REFERENCE ||
mode == WASM_FUNCTION_TABLE_SIZE_REFERENCE;
}
static inline bool IsWasmSizeReference(Mode mode) {
return mode == WASM_MEMORY_SIZE_REFERENCE ||
mode == WASM_FUNCTION_TABLE_SIZE_REFERENCE;
}
static inline bool IsWasmPtrReference(Mode mode) {
return mode == WASM_MEMORY_REFERENCE || mode == WASM_GLOBAL_REFERENCE;
}
static inline int ModeMask(Mode mode) { return 1 << mode; }
// Accessors
@ -564,6 +575,7 @@ class RelocInfo {
Address wasm_memory_reference();
Address wasm_global_reference();
uint32_t wasm_function_table_size_reference();
uint32_t wasm_memory_size_reference();
void update_wasm_memory_reference(
Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
@ -571,6 +583,9 @@ class RelocInfo {
void update_wasm_global_reference(
Address old_base, Address new_base,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void update_wasm_function_table_size_reference(
uint32_t old_base, uint32_t new_base,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_target_address(
Address target,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
@ -679,8 +694,7 @@ class RelocInfo {
private:
void unchecked_update_wasm_memory_reference(Address address,
ICacheFlushMode flush_mode);
void unchecked_update_wasm_memory_size(uint32_t size,
ICacheFlushMode flush_mode);
void unchecked_update_wasm_size(uint32_t size, ICacheFlushMode flush_mode);
Isolate* isolate_;
// On ARM, note that pc_ is the address of the constant pool entry
@ -949,6 +963,10 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference f64_asin_wrapper_function(Isolate* isolate);
static ExternalReference f64_mod_wrapper_function(Isolate* isolate);
// Trap callback function for cctest/wasm/wasm-run-utils.h
static ExternalReference wasm_call_trap_callback_for_testing(
Isolate* isolate);
// Log support.
static ExternalReference log_enter_external_function(Isolate* isolate);
static ExternalReference log_leave_external_function(Isolate* isolate);
@ -1031,6 +1049,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference ieee754_tan_function(Isolate* isolate);
static ExternalReference ieee754_tanh_function(Isolate* isolate);
static ExternalReference libc_memchr_function(Isolate* isolate);
static ExternalReference page_flags(Page* page);
static ExternalReference ForDeoptEntry(Address entry);
@ -1041,12 +1061,16 @@ class ExternalReference BASE_EMBEDDED {
Isolate* isolate);
static ExternalReference debug_is_active_address(Isolate* isolate);
static ExternalReference debug_hook_on_function_call_address(
Isolate* isolate);
static ExternalReference debug_after_break_target_address(Isolate* isolate);
static ExternalReference is_profiling_address(Isolate* isolate);
static ExternalReference invoke_function_callback(Isolate* isolate);
static ExternalReference invoke_accessor_getter_callback(Isolate* isolate);
static ExternalReference promise_hook_address(Isolate* isolate);
V8_EXPORT_PRIVATE static ExternalReference runtime_function_table_address(
Isolate* isolate);
@ -1117,6 +1141,7 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ExternalReference);
// -----------------------------------------------------------------------------
// Utility functions
void* libc_memchr(void* string, int character, size_t search_length);
inline int NumberOfBitsSet(uint32_t x) {
unsigned int num_bits_set;
@ -1144,7 +1169,7 @@ class CallWrapper {
// Called just after emitting a call, i.e., at the return site for the call.
virtual void AfterCall() const = 0;
// Return whether call needs to check for debug stepping.
virtual bool NeedsDebugStepCheck() const { return false; }
virtual bool NeedsDebugHookCheck() const { return false; }
};
@ -1163,7 +1188,7 @@ class CheckDebugStepCallWrapper : public CallWrapper {
virtual ~CheckDebugStepCallWrapper() {}
virtual void BeforeCall(int call_size) const {}
virtual void AfterCall() const {}
virtual bool NeedsDebugStepCheck() const { return true; }
virtual bool NeedsDebugHookCheck() const { return true; }
};
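The grouped IsWasm*Reference predicates added above pair with the existing one-bit-per-mode ModeMask() scheme, so an iteration over relocation entries can filter on a union of modes. A self-contained sketch of the pattern, using a pruned, non-V8 enum:

#include <cassert>

enum Mode {
  WASM_MEMORY_REFERENCE,
  WASM_GLOBAL_REFERENCE,
  WASM_MEMORY_SIZE_REFERENCE,
  WASM_FUNCTION_TABLE_SIZE_REFERENCE,
};

// One bit per mode, as in RelocInfo::ModeMask().
constexpr int ModeMask(Mode mode) { return 1 << mode; }

// Grouped predicate, shaped like the new IsWasmSizeReference().
constexpr bool IsWasmSizeReference(Mode mode) {
  return mode == WASM_MEMORY_SIZE_REFERENCE ||
         mode == WASM_FUNCTION_TABLE_SIZE_REFERENCE;
}

int main() {
  // A relocation iterator would be constructed with a mask like this to
  // visit only the size references.
  int mask = ModeMask(WASM_MEMORY_SIZE_REFERENCE) |
             ModeMask(WASM_FUNCTION_TABLE_SIZE_REFERENCE);
  assert(mask & ModeMask(WASM_FUNCTION_TABLE_SIZE_REFERENCE));
  assert(IsWasmSizeReference(WASM_FUNCTION_TABLE_SIZE_REFERENCE));
  assert(!IsWasmSizeReference(WASM_GLOBAL_REFERENCE));
}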

10
deps/v8/src/assert-scope.cc

@ -83,15 +83,21 @@ PerThreadAssertScope<kType, kAllow>::PerThreadAssertScope()
template <PerThreadAssertType kType, bool kAllow>
PerThreadAssertScope<kType, kAllow>::~PerThreadAssertScope() {
if (data_ == nullptr) return;
Release();
}
template <PerThreadAssertType kType, bool kAllow>
void PerThreadAssertScope<kType, kAllow>::Release() {
DCHECK_NOT_NULL(data_);
data_->Set(kType, old_state_);
if (data_->DecrementLevel()) {
PerThreadAssertData::SetCurrent(NULL);
delete data_;
}
data_ = nullptr;
}
// static
template <PerThreadAssertType kType, bool kAllow>
bool PerThreadAssertScope<kType, kAllow>::IsAllowed() {
@ -149,6 +155,8 @@ template class PerIsolateAssertScope<DEOPTIMIZATION_ASSERT, false>;
template class PerIsolateAssertScope<DEOPTIMIZATION_ASSERT, true>;
template class PerIsolateAssertScope<COMPILATION_ASSERT, false>;
template class PerIsolateAssertScope<COMPILATION_ASSERT, true>;
template class PerIsolateAssertScope<NO_EXCEPTION_ASSERT, false>;
template class PerIsolateAssertScope<NO_EXCEPTION_ASSERT, true>;
} // namespace internal
} // namespace v8
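The new Release() lets a per-thread assert scope be torn down before its destructor runs, with the destructor degrading to a no-op afterwards. A toy model of that contract (not the V8 class, just its shape):

#include <cassert>

class Scope {
 public:
  explicit Scope(int* level) : level_(level) { ++*level_; }
  ~Scope() {
    if (level_ != nullptr) Release();
  }
  void Release() {
    --*level_;
    level_ = nullptr;  // Mirrors data_ = nullptr; a double release would be a bug.
  }

 private:
  int* level_;
};

int main() {
  int level = 0;
  {
    Scope s(&level);
    s.Release();        // Early release...
    assert(level == 0);
  }                     // ...and the destructor then does nothing extra.
  assert(level == 0);
}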

23
deps/v8/src/assert-scope.h

@ -26,12 +26,12 @@ enum PerThreadAssertType {
LAST_PER_THREAD_ASSERT_TYPE
};
enum PerIsolateAssertType {
JAVASCRIPT_EXECUTION_ASSERT,
JAVASCRIPT_EXECUTION_THROWS,
DEOPTIMIZATION_ASSERT,
COMPILATION_ASSERT
COMPILATION_ASSERT,
NO_EXCEPTION_ASSERT
};
template <PerThreadAssertType kType, bool kAllow>
@ -42,6 +42,8 @@ class PerThreadAssertScope {
V8_EXPORT_PRIVATE static bool IsAllowed();
void Release();
private:
PerThreadAssertData* data_;
bool old_state_;
@ -76,6 +78,7 @@ class PerThreadAssertScopeDebugOnly : public
class PerThreadAssertScopeDebugOnly {
public:
PerThreadAssertScopeDebugOnly() { }
void Release() {}
#endif
};
@ -147,6 +150,14 @@ typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, false>
typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, true>
AllowJavascriptExecution;
// Scope to document where we do not expect javascript execution (debug only)
typedef PerIsolateAssertScopeDebugOnly<JAVASCRIPT_EXECUTION_ASSERT, false>
DisallowJavascriptExecutionDebugOnly;
// Scope to introduce an exception to DisallowJavascriptExecutionDebugOnly.
typedef PerIsolateAssertScopeDebugOnly<JAVASCRIPT_EXECUTION_ASSERT, true>
AllowJavascriptExecutionDebugOnly;
// Scope in which javascript execution leads to exception being thrown.
typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, false>
ThrowOnJavascriptExecution;
@ -170,6 +181,14 @@ typedef PerIsolateAssertScopeDebugOnly<COMPILATION_ASSERT, false>
// Scope to introduce an exception to DisallowDeoptimization.
typedef PerIsolateAssertScopeDebugOnly<COMPILATION_ASSERT, true>
AllowCompilation;
// Scope to document where we do not expect exceptions.
typedef PerIsolateAssertScopeDebugOnly<NO_EXCEPTION_ASSERT, false>
DisallowExceptions;
// Scope to introduce an exception to DisallowExceptions.
typedef PerIsolateAssertScopeDebugOnly<NO_EXCEPTION_ASSERT, true>
AllowExceptions;
} // namespace internal
} // namespace v8
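The new DisallowExceptions/AllowExceptions pair follows the same save-set-restore shape as the other paired scopes in this file. A toy, non-V8 model of how such scopes nest (the real ones are per-isolate and debug-only):

#include <cassert>

template <bool kAllow>
class ExceptionAssertScope {
 public:
  explicit ExceptionAssertScope(bool* flag) : flag_(flag), old_(*flag) {
    *flag_ = kAllow;
  }
  ~ExceptionAssertScope() { *flag_ = old_; }  // Restore on scope exit.

 private:
  bool* flag_;
  bool old_;
};

using DisallowExceptions = ExceptionAssertScope<false>;
using AllowExceptions = ExceptionAssertScope<true>;

int main() {
  bool exceptions_allowed = true;
  {
    DisallowExceptions no_throw(&exceptions_allowed);
    assert(!exceptions_allowed);
    {
      AllowExceptions yes_throw(&exceptions_allowed);  // scoped exception
      assert(exceptions_allowed);
    }
    assert(!exceptions_allowed);
  }
  assert(exceptions_allowed);
}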

6
deps/v8/src/ast/ast-expression-rewriter.cc

@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/ast/ast.h"
#include "src/ast/ast-expression-rewriter.h"
#include "src/ast/ast.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@ -372,6 +373,9 @@ void AstExpressionRewriter::VisitEmptyParentheses(EmptyParentheses* node) {
NOTHING();
}
void AstExpressionRewriter::VisitGetIterator(GetIterator* node) {
AST_REWRITE_PROPERTY(Expression, node, iterable);
}
void AstExpressionRewriter::VisitDoExpression(DoExpression* node) {
REWRITE_THIS(node);

29
deps/v8/src/ast/ast-function-literal-id-reindexer.cc

@ -0,0 +1,29 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/ast/ast-function-literal-id-reindexer.h"
#include "src/objects-inl.h"
#include "src/ast/ast.h"
namespace v8 {
namespace internal {
AstFunctionLiteralIdReindexer::AstFunctionLiteralIdReindexer(size_t stack_limit,
int delta)
: AstTraversalVisitor(stack_limit), delta_(delta) {}
AstFunctionLiteralIdReindexer::~AstFunctionLiteralIdReindexer() {}
void AstFunctionLiteralIdReindexer::Reindex(Expression* pattern) {
Visit(pattern);
}
void AstFunctionLiteralIdReindexer::VisitFunctionLiteral(FunctionLiteral* lit) {
AstTraversalVisitor::VisitFunctionLiteral(lit);
lit->set_function_literal_id(lit->function_literal_id() + delta_);
}
} // namespace internal
} // namespace v8

36
deps/v8/src/ast/ast-function-literal-id-reindexer.h

@ -0,0 +1,36 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER
#define V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER
#include "src/ast/ast-traversal-visitor.h"
#include "src/base/macros.h"
namespace v8 {
namespace internal {
// Changes the ID of all FunctionLiterals in the given Expression by adding the
// given delta.
class AstFunctionLiteralIdReindexer final
: public AstTraversalVisitor<AstFunctionLiteralIdReindexer> {
public:
AstFunctionLiteralIdReindexer(size_t stack_limit, int delta);
~AstFunctionLiteralIdReindexer();
void Reindex(Expression* pattern);
// AstTraversalVisitor implementation.
void VisitFunctionLiteral(FunctionLiteral* lit);
private:
int delta_;
DISALLOW_COPY_AND_ASSIGN(AstFunctionLiteralIdReindexer);
};
} // namespace internal
} // namespace v8
#endif // V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER
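The reindexer walks an expression and shifts every FunctionLiteral id by a fixed delta. A toy model of that traversal (the real class rides on AstTraversalVisitor and respects a stack limit; the struct here is invented):

#include <cassert>
#include <vector>

struct FunctionLiteral {
  int id;
  std::vector<FunctionLiteral*> inner;
};

// Visit the literal and everything nested inside it, adding the delta.
void Reindex(FunctionLiteral* lit, int delta) {
  lit->id += delta;
  for (FunctionLiteral* f : lit->inner) Reindex(f, delta);
}

int main() {
  FunctionLiteral inner{1, {}};
  FunctionLiteral outer{0, {&inner}};
  Reindex(&outer, 10);
  assert(outer.id == 10 && inner.id == 11);
}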

4
deps/v8/src/ast/ast-literal-reindexer.cc

@ -6,6 +6,7 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@ -186,6 +187,9 @@ void AstLiteralReindexer::VisitSpread(Spread* node) {
void AstLiteralReindexer::VisitEmptyParentheses(EmptyParentheses* node) {}
void AstLiteralReindexer::VisitGetIterator(GetIterator* node) {
Visit(node->iterable());
}
void AstLiteralReindexer::VisitForInStatement(ForInStatement* node) {
Visit(node->each());

119
deps/v8/src/ast/ast-numbering.cc

@ -6,22 +6,26 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/compiler.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
public:
AstNumberingVisitor(Isolate* isolate, Zone* zone)
: isolate_(isolate),
zone_(zone),
AstNumberingVisitor(uintptr_t stack_limit, Zone* zone,
Compiler::EagerInnerFunctionLiterals* eager_literals)
: zone_(zone),
eager_literals_(eager_literals),
next_id_(BailoutId::FirstUsable().ToInt()),
yield_count_(0),
properties_(zone),
slot_cache_(zone),
disable_crankshaft_reason_(kNoReason),
dont_optimize_reason_(kNoReason),
catch_prediction_(HandlerTable::UNCAUGHT) {
InitializeAstVisitor(isolate);
InitializeAstVisitor(stack_limit);
}
bool Renumber(FunctionLiteral* node);
@ -55,25 +59,28 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
dont_optimize_reason_ = reason;
DisableSelfOptimization();
}
void DisableCrankshaft(BailoutReason reason) {
properties_.flags() |= AstProperties::kDontCrankshaft;
void DisableFullCodegenAndCrankshaft(BailoutReason reason) {
disable_crankshaft_reason_ = reason;
properties_.flags() |= AstProperties::kMustUseIgnitionTurbo;
}
template <typename Node>
void ReserveFeedbackSlots(Node* node) {
node->AssignFeedbackVectorSlots(isolate_, properties_.get_spec(),
&slot_cache_);
node->AssignFeedbackVectorSlots(properties_.get_spec(), &slot_cache_);
}
BailoutReason dont_optimize_reason() const { return dont_optimize_reason_; }
Isolate* isolate_;
Zone* zone() const { return zone_; }
Zone* zone_;
Compiler::EagerInnerFunctionLiterals* eager_literals_;
int next_id_;
int yield_count_;
AstProperties properties_;
// The slot cache allows us to reuse certain feedback vector slots.
FeedbackVectorSlotCache slot_cache_;
BailoutReason disable_crankshaft_reason_;
BailoutReason dont_optimize_reason_;
HandlerTable::CatchPrediction catch_prediction_;
@ -122,6 +129,7 @@ void AstNumberingVisitor::VisitNativeFunctionLiteral(
IncrementNodeCount();
DisableOptimization(kNativeFunctionLiteral);
node->set_base_id(ReserveIdRange(NativeFunctionLiteral::num_ids()));
ReserveFeedbackSlots(node);
}
@ -149,10 +157,11 @@ void AstNumberingVisitor::VisitVariableProxyReference(VariableProxy* node) {
IncrementNodeCount();
switch (node->var()->location()) {
case VariableLocation::LOOKUP:
DisableCrankshaft(kReferenceToAVariableWhichRequiresDynamicLookup);
DisableFullCodegenAndCrankshaft(
kReferenceToAVariableWhichRequiresDynamicLookup);
break;
case VariableLocation::MODULE:
DisableCrankshaft(kReferenceToModuleVariable);
DisableFullCodegenAndCrankshaft(kReferenceToModuleVariable);
break;
default:
break;
@ -176,7 +185,7 @@ void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
void AstNumberingVisitor::VisitSuperPropertyReference(
SuperPropertyReference* node) {
IncrementNodeCount();
DisableCrankshaft(kSuperReference);
DisableFullCodegenAndCrankshaft(kSuperReference);
node->set_base_id(ReserveIdRange(SuperPropertyReference::num_ids()));
Visit(node->this_var());
Visit(node->home_object());
@ -185,7 +194,7 @@ void AstNumberingVisitor::VisitSuperPropertyReference(
void AstNumberingVisitor::VisitSuperCallReference(SuperCallReference* node) {
IncrementNodeCount();
DisableCrankshaft(kSuperReference);
DisableFullCodegenAndCrankshaft(kSuperReference);
node->set_base_id(ReserveIdRange(SuperCallReference::num_ids()));
Visit(node->this_var());
Visit(node->new_target_var());
@ -282,8 +291,7 @@ void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
IncrementNodeCount();
DisableCrankshaft(kWithStatement);
node->set_base_id(ReserveIdRange(WithStatement::num_ids()));
DisableFullCodegenAndCrankshaft(kWithStatement);
Visit(node->expression());
Visit(node->statement());
}
@ -313,7 +321,7 @@ void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
IncrementNodeCount();
DisableCrankshaft(kTryCatchStatement);
DisableFullCodegenAndCrankshaft(kTryCatchStatement);
{
const HandlerTable::CatchPrediction old_prediction = catch_prediction_;
// This node uses its own prediction, unless it's "uncaught", in which case
@ -332,7 +340,7 @@ void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
IncrementNodeCount();
DisableCrankshaft(kTryFinallyStatement);
DisableFullCodegenAndCrankshaft(kTryFinallyStatement);
// We can't know whether the finally block will override ("catch") an
// exception thrown in the try block, so we just adopt the outer prediction.
node->set_catch_prediction(catch_prediction_);
@ -393,14 +401,25 @@ void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitSpread(Spread* node) { UNREACHABLE(); }
void AstNumberingVisitor::VisitSpread(Spread* node) {
IncrementNodeCount();
// We can only get here from super calls currently.
DisableFullCodegenAndCrankshaft(kSuperReference);
node->set_base_id(ReserveIdRange(Spread::num_ids()));
Visit(node->expression());
}
void AstNumberingVisitor::VisitEmptyParentheses(EmptyParentheses* node) {
UNREACHABLE();
}
void AstNumberingVisitor::VisitGetIterator(GetIterator* node) {
IncrementNodeCount();
DisableFullCodegenAndCrankshaft(kGetIterator);
node->set_base_id(ReserveIdRange(GetIterator::num_ids()));
Visit(node->iterable());
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
IncrementNodeCount();
@ -417,7 +436,7 @@ void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
IncrementNodeCount();
DisableCrankshaft(kForOfStatement);
DisableFullCodegenAndCrankshaft(kForOfStatement);
node->set_base_id(ReserveIdRange(ForOfStatement::num_ids()));
Visit(node->assign_iterator()); // Not part of loop.
node->set_first_yield_id(yield_count_);
@ -484,8 +503,8 @@ void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
IncrementNodeCount();
DisableCrankshaft(kClassLiteral);
node->set_base_id(ReserveIdRange(node->num_ids()));
DisableFullCodegenAndCrankshaft(kClassLiteral);
node->set_base_id(ReserveIdRange(ClassLiteral::num_ids()));
if (node->extends()) Visit(node->extends());
if (node->constructor()) Visit(node->constructor());
if (node->class_variable_proxy()) {
@ -504,7 +523,7 @@ void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
for (int i = 0; i < node->properties()->length(); i++) {
VisitLiteralProperty(node->properties()->at(i));
}
node->BuildConstantProperties(isolate_);
node->InitDepthAndFlags();
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code will be emitted.
@ -513,7 +532,8 @@ void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
}
void AstNumberingVisitor::VisitLiteralProperty(LiteralProperty* node) {
if (node->is_computed_name()) DisableCrankshaft(kComputedPropertyName);
if (node->is_computed_name())
DisableFullCodegenAndCrankshaft(kComputedPropertyName);
Visit(node->key());
Visit(node->value());
}
@ -524,12 +544,15 @@ void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
for (int i = 0; i < node->values()->length(); i++) {
Visit(node->values()->at(i));
}
node->BuildConstantElements(isolate_);
node->InitDepthAndFlags();
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitCall(Call* node) {
if (node->is_possibly_eval()) {
DisableFullCodegenAndCrankshaft(kFunctionCallsEval);
}
IncrementNodeCount();
ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(Call::num_ids()));
@ -569,8 +592,13 @@ void AstNumberingVisitor::VisitArguments(ZoneList<Expression*>* arguments) {
void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(FunctionLiteral::num_ids()));
if (eager_literals_ && node->ShouldEagerCompile()) {
eager_literals_->Add(new (zone())
ThreadedListZoneEntry<FunctionLiteral*>(node));
}
// We don't recurse into the declarations or body of the function literal:
// you have to separately Renumber() each FunctionLiteral that you compile.
ReserveFeedbackSlots(node);
}
@ -584,22 +612,26 @@ void AstNumberingVisitor::VisitRewritableExpression(
bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
DeclarationScope* scope = node->scope();
if (scope->new_target_var()) DisableCrankshaft(kSuperReference);
if (scope->calls_eval()) DisableCrankshaft(kFunctionCallsEval);
if (scope->arguments() != NULL && !scope->arguments()->IsStackAllocated()) {
DisableCrankshaft(kContextAllocatedArguments);
if (scope->new_target_var() != nullptr ||
scope->this_function_var() != nullptr) {
DisableFullCodegenAndCrankshaft(kSuperReference);
}
if (scope->arguments() != nullptr &&
!scope->arguments()->IsStackAllocated()) {
DisableFullCodegenAndCrankshaft(kContextAllocatedArguments);
}
if (scope->rest_parameter() != nullptr) {
DisableCrankshaft(kRestParameter);
DisableFullCodegenAndCrankshaft(kRestParameter);
}
if (IsGeneratorFunction(node->kind()) || IsAsyncFunction(node->kind())) {
DisableCrankshaft(kGenerator);
if (IsResumableFunction(node->kind())) {
DisableFullCodegenAndCrankshaft(kGenerator);
}
if (IsClassConstructor(node->kind())) {
DisableCrankshaft(kClassConstructorFunction);
DisableFullCodegenAndCrankshaft(kClassConstructorFunction);
}
VisitDeclarations(scope->declarations());
@ -608,13 +640,26 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
node->set_ast_properties(&properties_);
node->set_dont_optimize_reason(dont_optimize_reason());
node->set_yield_count(yield_count_);
if (FLAG_trace_opt) {
if (disable_crankshaft_reason_ != kNoReason) {
PrintF("[enforcing Ignition and TurboFan for %s because: %s\n",
node->debug_name()->ToCString().get(),
GetBailoutReason(disable_crankshaft_reason_));
}
}
return !HasStackOverflow();
}
bool AstNumbering::Renumber(
uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
Compiler::EagerInnerFunctionLiterals* eager_literals) {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
bool AstNumbering::Renumber(Isolate* isolate, Zone* zone,
FunctionLiteral* function) {
AstNumberingVisitor visitor(isolate, zone);
AstNumberingVisitor visitor(stack_limit, zone, eager_literals);
return visitor.Renumber(function);
}
} // namespace internal
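Renumber() now takes a raw stack limit instead of an Isolate and can collect the inner literals that should be compiled eagerly (filled in VisitFunctionLiteral above). A minimal model of the new contract, with invented types standing in for the ThreadedList machinery:

#include <cassert>
#include <vector>

struct FunctionLiteral {
  bool eager;
  std::vector<FunctionLiteral*> inner;
};

// Sketch: number the tree, and if an out-list is supplied, record every
// inner literal marked for eager compilation.
bool Renumber(FunctionLiteral* root,
              std::vector<FunctionLiteral*>* eager_literals) {
  for (FunctionLiteral* f : root->inner) {
    if (eager_literals != nullptr && f->eager) eager_literals->push_back(f);
  }
  return true;  // The real visitor reports stack overflow here instead.
}

int main() {
  FunctionLiteral lazy{false, {}}, eager{true, {}};
  FunctionLiteral root{false, {&lazy, &eager}};
  std::vector<FunctionLiteral*> out;
  assert(Renumber(&root, &out));
  assert(out.size() == 1 && out[0] == &eager);
}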

15
deps/v8/src/ast/ast-numbering.h

@ -5,6 +5,8 @@
#ifndef V8_AST_AST_NUMBERING_H_
#define V8_AST_AST_NUMBERING_H_
#include <stdint.h>
namespace v8 {
namespace internal {
@ -12,11 +14,20 @@ namespace internal {
class FunctionLiteral;
class Isolate;
class Zone;
template <typename T>
class ThreadedList;
template <typename T>
class ThreadedListZoneEntry;
template <typename T>
class ZoneVector;
namespace AstNumbering {
// Assign type feedback IDs, bailout IDs, and generator yield IDs to an AST node
// tree; perform catch prediction for TryStatements.
bool Renumber(Isolate* isolate, Zone* zone, FunctionLiteral* function);
// tree; perform catch prediction for TryStatements. If |eager_literals| is
// non-null, adds any eager inner literal functions into it.
bool Renumber(
uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>* eager_literals);
}
// Some details on yield IDs

8
deps/v8/src/ast/ast-traversal-visitor.h

@ -288,7 +288,7 @@ void AstTraversalVisitor<Subclass>::VisitFunctionLiteral(
DeclarationScope* scope = expr->scope();
RECURSE_EXPRESSION(VisitDeclarations(scope->declarations()));
// A lazily parsed function literal won't have a body.
if (expr->scope()->is_lazily_parsed()) return;
if (expr->scope()->was_lazily_parsed()) return;
RECURSE_EXPRESSION(VisitStatements(expr->body()));
}
@ -470,6 +470,12 @@ void AstTraversalVisitor<Subclass>::VisitEmptyParentheses(
PROCESS_EXPRESSION(expr);
}
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitGetIterator(GetIterator* expr) {
PROCESS_EXPRESSION(expr);
RECURSE_EXPRESSION(Visit(expr->iterable()));
}
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitSuperPropertyReference(
SuperPropertyReference* expr) {

7
deps/v8/src/ast/ast-types.cc

@ -7,6 +7,7 @@
#include "src/ast/ast-types.h"
#include "src/handles-inl.h"
#include "src/objects-inl.h"
#include "src/ostreams.h"
namespace v8 {
@ -209,7 +210,6 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
case JS_FIXED_ARRAY_ITERATOR_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_ARRAY_TYPE:
case JS_REGEXP_TYPE: // TODO(rossberg): there should be a RegExp type.
@ -259,6 +259,7 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
case JS_PROMISE_CAPABILITY_TYPE:
case JS_PROMISE_TYPE:
case JS_BOUND_FUNCTION_TYPE:
DCHECK(!map->is_undetectable());
@ -304,8 +305,6 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case PROMISE_REACTION_JOB_INFO_TYPE:
case FUNCTION_TEMPLATE_INFO_TYPE:
case OBJECT_TEMPLATE_INFO_TYPE:
case SIGNATURE_INFO_TYPE:
case TYPE_SWITCH_INFO_TYPE:
case ALLOCATION_MEMENTO_TYPE:
case TYPE_FEEDBACK_INFO_TYPE:
case ALIASED_ARGUMENTS_ENTRY_TYPE:
@ -315,8 +314,10 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case CELL_TYPE:
case WEAK_CELL_TYPE:
case PROTOTYPE_INFO_TYPE:
case TUPLE2_TYPE:
case TUPLE3_TYPE:
case CONTEXT_EXTENSION_TYPE:
case CONSTANT_ELEMENTS_PAIR_TYPE:
UNREACHABLE();
return kNone;
}

45
deps/v8/src/ast/ast-value-factory.cc

@ -28,6 +28,8 @@
#include "src/ast/ast-value-factory.h"
#include "src/api.h"
#include "src/char-predicates-inl.h"
#include "src/objects-inl.h"
#include "src/objects.h"
#include "src/utils.h"
@ -219,9 +221,17 @@ void AstValue::Internalize(Isolate* isolate) {
}
}
AstRawString* AstValueFactory::GetOneByteStringInternal(
Vector<const uint8_t> literal) {
if (literal.length() == 1 && IsInRange(literal[0], 'a', 'z')) {
int key = literal[0] - 'a';
if (one_character_strings_[key] == nullptr) {
uint32_t hash = StringHasher::HashSequentialString<uint8_t>(
literal.start(), literal.length(), hash_seed_);
one_character_strings_[key] = GetString(hash, true, literal);
}
return one_character_strings_[key];
}
uint32_t hash = StringHasher::HashSequentialString<uint8_t>(
literal.start(), literal.length(), hash_seed_);
return GetString(hash, true, literal);
@ -260,39 +270,6 @@ const AstConsString* AstValueFactory::NewConsString(
return new_string;
}
const AstRawString* AstValueFactory::ConcatStrings(const AstRawString* left,
const AstRawString* right) {
int left_length = left->length();
int right_length = right->length();
const unsigned char* left_data = left->raw_data();
const unsigned char* right_data = right->raw_data();
if (left->is_one_byte() && right->is_one_byte()) {
uint8_t* buffer = zone_->NewArray<uint8_t>(left_length + right_length);
memcpy(buffer, left_data, left_length);
memcpy(buffer + left_length, right_data, right_length);
Vector<const uint8_t> literal(buffer, left_length + right_length);
return GetOneByteStringInternal(literal);
} else {
uint16_t* buffer = zone_->NewArray<uint16_t>(left_length + right_length);
if (left->is_one_byte()) {
for (int i = 0; i < left_length; ++i) {
buffer[i] = left_data[i];
}
} else {
memcpy(buffer, left_data, 2 * left_length);
}
if (right->is_one_byte()) {
for (int i = 0; i < right_length; ++i) {
buffer[i + left_length] = right_data[i];
}
} else {
memcpy(buffer + left_length, right_data, 2 * right_length);
}
Vector<const uint16_t> literal(buffer, left_length + right_length);
return GetTwoByteStringInternal(literal);
}
}
void AstValueFactory::Internalize(Isolate* isolate) {
// Strings need to be internalized before values, because values refer to
// strings.
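The new 26-entry one_character_strings_ cache fast-paths single lowercase letters, which dominate identifier traffic in minified code: each letter is hashed and interned once, then reused. A standalone model of the idea (using std::optional storage in place of V8's zone-allocated strings):

#include <cassert>
#include <optional>
#include <string>

class OneCharCache {
 public:
  const std::string* Get(char c) {
    assert('a' <= c && c <= 'z');  // The real cache only spans 'a'..'z'.
    int key = c - 'a';
    if (!cached_[key]) cached_[key] = std::string(1, c);  // intern once
    return &*cached_[key];
  }

 private:
  std::optional<std::string> cached_[26];
};

int main() {
  OneCharCache cache;
  // Repeated lookups of "a" yield the identical interned object.
  assert(cache.Get('a') == cache.Get('a'));
}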

129
deps/v8/src/ast/ast-value-factory.h

@ -30,6 +30,7 @@
#include "src/api.h"
#include "src/base/hashmap.h"
#include "src/conversions.h"
#include "src/globals.h"
#include "src/utils.h"
@ -110,8 +111,9 @@ class AstRawString final : public AstString {
}
private:
friend class AstValueFactory;
friend class AstRawStringInternalizationKey;
friend class AstStringConstants;
friend class AstValueFactory;
AstRawString(bool is_one_byte, const Vector<const byte>& literal_bytes,
uint32_t hash)
@ -158,10 +160,7 @@ class AstValue : public ZoneObject {
return type_ == STRING;
}
bool IsNumber() const {
return type_ == NUMBER || type_ == NUMBER_WITH_DOT || type_ == SMI ||
type_ == SMI_WITH_DOT;
}
bool IsNumber() const { return IsSmi() || IsHeapNumber(); }
bool ContainsDot() const {
return type_ == NUMBER_WITH_DOT || type_ == SMI_WITH_DOT;
@ -173,19 +172,30 @@ class AstValue : public ZoneObject {
}
double AsNumber() const {
if (type_ == NUMBER || type_ == NUMBER_WITH_DOT)
return number_;
if (type_ == SMI || type_ == SMI_WITH_DOT)
return smi_;
if (IsHeapNumber()) return number_;
if (IsSmi()) return smi_;
UNREACHABLE();
return 0;
}
Smi* AsSmi() const {
CHECK(type_ == SMI || type_ == SMI_WITH_DOT);
CHECK(IsSmi());
return Smi::FromInt(smi_);
}
bool ToUint32(uint32_t* value) const {
if (IsSmi()) {
int num = smi_;
if (num < 0) return false;
*value = static_cast<uint32_t>(num);
return true;
}
if (IsHeapNumber()) {
return DoubleToUint32IfEqualToSelf(number_, value);
}
return false;
}
bool EqualsString(const AstRawString* string) const {
return type_ == STRING && string_ == string;
}
@ -195,6 +205,9 @@ class AstValue : public ZoneObject {
bool BooleanValue() const;
bool IsSmi() const { return type_ == SMI || type_ == SMI_WITH_DOT; }
bool IsHeapNumber() const {
return type_ == NUMBER || type_ == NUMBER_WITH_DOT;
}
bool IsFalse() const { return type_ == BOOLEAN && !bool_; }
bool IsTrue() const { return type_ == BOOLEAN && bool_; }
bool IsUndefined() const { return type_ == UNDEFINED; }
@ -280,7 +293,6 @@ class AstValue : public ZoneObject {
};
};
// For generating constants.
#define STRING_CONSTANTS(F) \
F(anonymous_function, "(anonymous function)") \
@ -291,7 +303,6 @@ class AstValue : public ZoneObject {
F(default, "default") \
F(done, "done") \
F(dot, ".") \
F(dot_class_field_init, ".class-field-init") \
F(dot_for, ".for") \
F(dot_generator_object, ".generator_object") \
F(dot_iterator, ".iterator") \
@ -304,6 +315,7 @@ class AstValue : public ZoneObject {
F(get_space, "get ") \
F(length, "length") \
F(let, "let") \
F(name, "name") \
F(native, "native") \
F(new_target, ".new.target") \
F(next, "next") \
@ -320,6 +332,45 @@ class AstValue : public ZoneObject {
F(use_strict, "use strict") \
F(value, "value")
class AstStringConstants final {
public:
AstStringConstants(Isolate* isolate, uint32_t hash_seed)
: zone_(isolate->allocator(), ZONE_NAME), hash_seed_(hash_seed) {
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
#define F(name, str) \
{ \
const char* data = str; \
Vector<const uint8_t> literal(reinterpret_cast<const uint8_t*>(data), \
static_cast<int>(strlen(data))); \
uint32_t hash = StringHasher::HashSequentialString<uint8_t>( \
literal.start(), literal.length(), hash_seed_); \
name##_string_ = new (&zone_) AstRawString(true, literal, hash); \
/* The Handle returned by the factory is located on the roots */ \
/* array, not on the temporary HandleScope, so this is safe. */ \
name##_string_->set_string(isolate->factory()->name##_string()); \
}
STRING_CONSTANTS(F)
#undef F
}
#define F(name, str) \
AstRawString* name##_string() { return name##_string_; }
STRING_CONSTANTS(F)
#undef F
uint32_t hash_seed() const { return hash_seed_; }
private:
Zone zone_;
uint32_t hash_seed_;
#define F(name, str) AstRawString* name##_string_;
STRING_CONSTANTS(F)
#undef F
DISALLOW_COPY_AND_ASSIGN(AstStringConstants);
};
#define OTHER_CONSTANTS(F) \
F(true_value) \
F(false_value) \
@ -329,21 +380,24 @@ class AstValue : public ZoneObject {
class AstValueFactory {
public:
AstValueFactory(Zone* zone, uint32_t hash_seed)
AstValueFactory(Zone* zone, AstStringConstants* string_constants,
uint32_t hash_seed)
: string_table_(AstRawStringCompare),
values_(nullptr),
smis_(),
strings_(nullptr),
strings_end_(&strings_),
string_constants_(string_constants),
zone_(zone),
hash_seed_(hash_seed) {
#define F(name, str) name##_string_ = NULL;
STRING_CONSTANTS(F)
#undef F
#define F(name) name##_ = NULL;
#define F(name) name##_ = nullptr;
OTHER_CONSTANTS(F)
#undef F
DCHECK_EQ(hash_seed, string_constants->hash_seed());
std::fill(smis_, smis_ + arraysize(smis_), nullptr);
std::fill(one_character_strings_,
one_character_strings_ + arraysize(one_character_strings_),
nullptr);
InitializeStringConstants();
}
Zone* zone() const { return zone_; }
@ -361,20 +415,12 @@ class AstValueFactory {
const AstRawString* GetString(Handle<String> literal);
const AstConsString* NewConsString(const AstString* left,
const AstString* right);
const AstRawString* ConcatStrings(const AstRawString* left,
const AstRawString* right);
void Internalize(Isolate* isolate);
#define F(name, str) \
const AstRawString* name##_string() { \
if (name##_string_ == NULL) { \
const char* data = str; \
name##_string_ = GetOneByteString( \
Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(data), \
static_cast<int>(strlen(data)))); \
} \
return name##_string_; \
#define F(name, str) \
const AstRawString* name##_string() { \
return string_constants_->name##_string(); \
}
STRING_CONSTANTS(F)
#undef F
@ -415,6 +461,17 @@ class AstValueFactory {
AstRawString* GetString(uint32_t hash, bool is_one_byte,
Vector<const byte> literal_bytes);
void InitializeStringConstants() {
#define F(name, str) \
AstRawString* raw_string_##name = string_constants_->name##_string(); \
base::HashMap::Entry* entry_##name = string_table_.LookupOrInsert( \
raw_string_##name, raw_string_##name->hash()); \
DCHECK(entry_##name->value == nullptr); \
entry_##name->value = reinterpret_cast<void*>(1);
STRING_CONSTANTS(F)
#undef F
}
static bool AstRawStringCompare(void* a, void* b);
// All strings are copied here, one after another (no NULLs in between).
@ -423,19 +480,23 @@ class AstValueFactory {
// they can be internalized later).
AstValue* values_;
AstValue* smis_[kMaxCachedSmi + 1];
// We need to keep track of strings_ in order since cons strings require their
// members to be internalized first.
AstString* strings_;
AstString** strings_end_;
// Holds constant string values which are shared across the isolate.
AstStringConstants* string_constants_;
// Caches for faster access: small numbers, one character lowercase strings
// (for minified code).
AstValue* smis_[kMaxCachedSmi + 1];
AstRawString* one_character_strings_[26];
Zone* zone_;
uint32_t hash_seed_;
#define F(name, str) const AstRawString* name##_string_;
STRING_CONSTANTS(F)
#undef F
#define F(name) AstValue* name##_;
OTHER_CONSTANTS(F)
#undef F
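The new AstValue::ToUint32() above gates array-index treatment: non-negative Smis pass directly, and heap numbers pass only if they round-trip through uint32_t exactly. A sketch of the heap-number half, assuming those round-trip semantics for DoubleToUint32IfEqualToSelf (callers such as ObjectLiteral::InitDepthAndFlags additionally reject kMaxUInt32):

#include <cassert>
#include <cstdint>

// Assumed semantics: succeed only for doubles that convert to uint32_t
// and back without loss (7.0 is an index; 7.5 and -1.0 are not).
bool DoubleToUint32IfEqualToSelf(double value, uint32_t* out) {
  if (value < 0.0 || value > 4294967295.0) return false;  // out of range
  uint32_t candidate = static_cast<uint32_t>(value);
  if (static_cast<double>(candidate) != value) return false;
  *out = candidate;
  return true;
}

int main() {
  uint32_t index = 0;
  assert(DoubleToUint32IfEqualToSelf(7.0, &index) && index == 7);
  assert(!DoubleToUint32IfEqualToSelf(7.5, &index));   // fractional
  assert(!DoubleToUint32IfEqualToSelf(-1.0, &index));  // negative
}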

226
deps/v8/src/ast/ast.cc

@ -10,6 +10,7 @@
#include "src/ast/prettyprinter.h"
#include "src/ast/scopes.h"
#include "src/base/hashmap.h"
#include "src/builtins/builtins-constructor.h"
#include "src/builtins/builtins.h"
#include "src/code-stubs.h"
#include "src/contexts.h"
@ -28,6 +29,8 @@ namespace internal {
#ifdef DEBUG
void AstNode::Print() { Print(Isolate::Current()); }
void AstNode::Print(Isolate* isolate) {
AstPrinter::PrintOut(isolate, this);
}
@ -70,6 +73,10 @@ bool Expression::IsSmiLiteral() const {
return IsLiteral() && AsLiteral()->raw_value()->IsSmi();
}
bool Expression::IsNumberLiteral() const {
return IsLiteral() && AsLiteral()->raw_value()->IsNumber();
}
bool Expression::IsStringLiteral() const {
return IsLiteral() && AsLiteral()->raw_value()->IsString();
}
@ -197,9 +204,7 @@ void VariableProxy::BindTo(Variable* var) {
var->set_is_used();
}
void VariableProxy::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
void VariableProxy::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
if (UsesVariableFeedbackSlot()) {
// VariableProxies that point to the same Variable within a function can
@ -211,7 +216,7 @@ void VariableProxy::AssignFeedbackVectorSlots(Isolate* isolate,
static_cast<int>(reinterpret_cast<intptr_t>(entry->value)));
return;
}
variable_feedback_slot_ = spec->AddLoadGlobalICSlot(var()->name());
variable_feedback_slot_ = spec->AddLoadGlobalICSlot();
cache->Put(var(), variable_feedback_slot_);
} else {
variable_feedback_slot_ = spec->AddLoadICSlot();
@ -235,8 +240,7 @@ static void AssignVectorSlots(Expression* expr, FeedbackVectorSpec* spec,
}
}
void ForInStatement::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
void ForInStatement::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
AssignVectorSlots(each(), spec, &each_slot_);
for_in_feedback_slot_ = spec->AddGeneralSlot();
@ -253,15 +257,12 @@ Assignment::Assignment(Token::Value op, Expression* target, Expression* value,
StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op);
}
void Assignment::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
void Assignment::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
AssignVectorSlots(target(), spec, &slot_);
}
void CountOperation::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
void CountOperation::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
AssignVectorSlots(expression(), spec, &slot_);
// Assign a slot to collect feedback about binary operations. Used only in
@ -346,6 +347,16 @@ ObjectLiteralProperty::ObjectLiteralProperty(AstValueFactory* ast_value_factory,
}
}
FeedbackVectorSlot LiteralProperty::GetStoreDataPropertySlot() const {
int offset = FunctionLiteral::NeedsHomeObject(value_) ? 1 : 0;
return GetSlot(offset);
}
void LiteralProperty::SetStoreDataPropertySlot(FeedbackVectorSlot slot) {
int offset = FunctionLiteral::NeedsHomeObject(value_) ? 1 : 0;
return SetSlot(slot, offset);
}
bool LiteralProperty::NeedsSetFunctionName() const {
return is_computed_name_ &&
(value_->IsAnonymousFunctionDefinition() ||
@ -360,12 +371,14 @@ ClassLiteralProperty::ClassLiteralProperty(Expression* key, Expression* value,
kind_(kind),
is_static_(is_static) {}
void ClassLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
void ClassLiteral::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
// This logic that computes the number of slots needed for vector store
// ICs must mirror FullCodeGenerator::VisitClassLiteral.
prototype_slot_ = spec->AddLoadICSlot();
// ICs must mirror BytecodeGenerator::VisitClassLiteral.
if (FunctionLiteral::NeedsHomeObject(constructor())) {
home_object_slot_ = spec->AddStoreICSlot();
}
if (NeedsProxySlot()) {
proxy_slot_ = spec->AddStoreICSlot();
}
@ -376,6 +389,8 @@ void ClassLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
if (FunctionLiteral::NeedsHomeObject(value)) {
property->SetSlot(spec->AddStoreICSlot());
}
property->SetStoreDataPropertySlot(
spec->AddStoreDataPropertyInLiteralICSlot());
}
}
@ -392,8 +407,7 @@ void ObjectLiteral::Property::set_emit_store(bool emit_store) {
bool ObjectLiteral::Property::emit_store() const { return emit_store_; }
void ObjectLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
void ObjectLiteral::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
// This logic that computes the number of slots needed for vector store
// ics must mirror FullCodeGenerator::VisitObjectLiteral.
@ -406,6 +420,7 @@ void ObjectLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
switch (property->kind()) {
case ObjectLiteral::Property::SPREAD:
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@ -413,7 +428,7 @@ void ObjectLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
case ObjectLiteral::Property::COMPUTED:
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
if (key->value()->IsInternalizedString()) {
if (key->IsStringLiteral()) {
if (property->emit_store()) {
property->SetSlot(spec->AddStoreICSlot());
if (FunctionLiteral::NeedsHomeObject(value)) {
@ -450,6 +465,8 @@ void ObjectLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
property->SetSlot(spec->AddStoreICSlot());
}
}
property->SetStoreDataPropertySlot(
spec->AddStoreDataPropertyInLiteralICSlot());
}
}
@ -491,13 +508,8 @@ bool ObjectLiteral::IsBoilerplateProperty(ObjectLiteral::Property* property) {
property->kind() != ObjectLiteral::Property::PROTOTYPE;
}
void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
if (!constant_properties_.is_null()) return;
// Allocate a fixed array to hold all the constant properties.
Handle<FixedArray> constant_properties = isolate->factory()->NewFixedArray(
boilerplate_properties_ * 2, TENURED);
void ObjectLiteral::InitDepthAndFlags() {
if (depth_ > 0) return;
int position = 0;
// Accumulate the value in local variables and store it at the end.
@ -521,50 +533,43 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
if (m_literal != NULL) {
m_literal->BuildConstants(isolate);
m_literal->InitDepthAndFlags();
if (m_literal->depth() >= depth_acc) depth_acc = m_literal->depth() + 1;
}
// Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
// value for COMPUTED properties, the real value is filled in at
// runtime. The enumeration order is maintained.
Handle<Object> key = property->key()->AsLiteral()->value();
Handle<Object> value = GetBoilerplateValue(property->value(), isolate);
const AstValue* key = property->key()->AsLiteral()->raw_value();
Expression* value = property->value();
bool is_compile_time_value = CompileTimeValue::IsCompileTimeValue(value);
// Ensure objects that may, at any point in time, contain fields with double
// representation are always treated as nested objects. This is true for
// computed fields (value is undefined), and smi and double literals
// (value->IsNumber()).
// computed fields, and smi and double literals.
// TODO(verwaest): Remove once we can store them inline.
if (FLAG_track_double_fields &&
(value->IsNumber() || value->IsUninitialized(isolate))) {
(value->IsNumberLiteral() || !is_compile_time_value)) {
bit_field_ = MayStoreDoublesField::update(bit_field_, true);
}
is_simple = is_simple && !value->IsUninitialized(isolate);
is_simple = is_simple && is_compile_time_value;
// Keep track of the number of elements in the object literal and
// the largest element index. If the largest element index is
// much larger than the number of elements, creating an object
// literal with fast elements will be a waste of space.
uint32_t element_index = 0;
if (key->IsString() && String::cast(*key)->AsArrayIndex(&element_index)) {
if (key->IsString() && key->AsString()->AsArrayIndex(&element_index)) {
max_element_index = Max(element_index, max_element_index);
elements++;
key = isolate->factory()->NewNumberFromUint(element_index);
} else if (key->ToArrayIndex(&element_index)) {
} else if (key->ToUint32(&element_index) && element_index != kMaxUInt32) {
max_element_index = Max(element_index, max_element_index);
elements++;
} else if (key->IsNumber()) {
key = isolate->factory()->NumberToString(key);
}
// Add name, value pair to the fixed array.
constant_properties->set(position++, *key);
constant_properties->set(position++, *value);
// Increment the position for the key and the value.
position += 2;
}
constant_properties_ = constant_properties;
bit_field_ = FastElementsField::update(
bit_field_,
(max_element_index <= 32) || ((2 * elements) >= max_element_index));
@@ -574,6 +579,91 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
set_depth(depth_acc);
}
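The old BuildConstantProperties did two jobs; this patch splits them. InitDepthAndFlags computes depth, is_simple, fast-elements and may-store-doubles purely from the AST, without an Isolate or any allocation, while BuildConstantProperties (below) only materializes the boilerplate array. A hypothetical caller, to illustrate the intended division of labor:

// Hypothetical caller, assuming the methods introduced in this patch.
literal->InitDepthAndFlags();  // cheap, heap-free analysis
if (literal->IsFastCloningSupported()) {
  // Only literals that will actually be cloned pay for the allocation.
  Handle<FixedArray> props = literal->GetOrBuildConstantProperties(isolate);
  USE(props);
}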
void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
if (!constant_properties_.is_null()) return;
// Allocate a fixed array to hold all the constant properties.
Handle<FixedArray> constant_properties =
isolate->factory()->NewFixedArray(boilerplate_properties_ * 2, TENURED);
int position = 0;
for (int i = 0; i < properties()->length(); i++) {
ObjectLiteral::Property* property = properties()->at(i);
if (!IsBoilerplateProperty(property)) {
continue;
}
if (static_cast<uint32_t>(position) == boilerplate_properties_ * 2) {
DCHECK(property->is_computed_name());
break;
}
DCHECK(!property->is_computed_name());
MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
if (m_literal != NULL) {
m_literal->BuildConstants(isolate);
}
// Add CONSTANT and COMPUTED properties to the boilerplate. Use the
// undefined value for COMPUTED properties; the real value is filled in
// at runtime. The enumeration order is maintained.
Handle<Object> key = property->key()->AsLiteral()->value();
Handle<Object> value = GetBoilerplateValue(property->value(), isolate);
uint32_t element_index = 0;
if (key->IsString() && String::cast(*key)->AsArrayIndex(&element_index)) {
key = isolate->factory()->NewNumberFromUint(element_index);
} else if (key->IsNumber() && !key->ToArrayIndex(&element_index)) {
key = isolate->factory()->NumberToString(key);
}
// Add name, value pair to the fixed array.
constant_properties->set(position++, *key);
constant_properties->set(position++, *value);
}
constant_properties_ = constant_properties;
}
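Key normalization in this loop mirrors what property definition would do at runtime, so the boilerplate can be cloned verbatim. Concretely (illustrative inputs; the two branches are the ones above):

// {"1":   v} -> key stored as the number 1     (string that is an array index)
// {2.5:   v} -> key stored as the string "2.5" (number that is not an index)
// {"foo": v} -> key stored as "foo"            (plain name, unchanged)
uint32_t element_index = 0;
if (key->IsString() && String::cast(*key)->AsArrayIndex(&element_index)) {
  key = isolate->factory()->NewNumberFromUint(element_index);
} else if (key->IsNumber() && !key->ToArrayIndex(&element_index)) {
  key = isolate->factory()->NumberToString(key);
}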
bool ObjectLiteral::IsFastCloningSupported() const {
// The FastCloneShallowObject builtin doesn't copy elements, and object
// literals don't support copy-on-write (COW) elements for now.
// TODO(mvstanton): make object literals support COW elements.
return fast_elements() && has_shallow_properties() &&
properties_count() <= ConstructorBuiltinsAssembler::
kMaximumClonedShallowObjectProperties;
}
void ArrayLiteral::InitDepthAndFlags() {
DCHECK_LT(first_spread_index_, 0);
if (depth_ > 0) return;
int constants_length = values()->length();
// Fill in the literals.
bool is_simple = true;
int depth_acc = 1;
int array_index = 0;
for (; array_index < constants_length; array_index++) {
Expression* element = values()->at(array_index);
DCHECK(!element->IsSpread());
MaterializedLiteral* m_literal = element->AsMaterializedLiteral();
if (m_literal != NULL) {
m_literal->InitDepthAndFlags();
if (m_literal->depth() + 1 > depth_acc) {
depth_acc = m_literal->depth() + 1;
}
}
if (!CompileTimeValue::IsCompileTimeValue(element)) {
is_simple = false;
}
}
set_is_simple(is_simple);
set_depth(depth_acc);
}
void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
DCHECK_LT(first_spread_index_, 0);
@@ -586,8 +676,6 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
isolate->factory()->NewFixedArrayWithHoles(constants_length);
// Fill in the literals.
bool is_simple = true;
int depth_acc = 1;
bool is_holey = false;
int array_index = 0;
for (; array_index < constants_length; array_index++) {
@@ -596,9 +684,6 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
MaterializedLiteral* m_literal = element->AsMaterializedLiteral();
if (m_literal != NULL) {
m_literal->BuildConstants(isolate);
if (m_literal->depth() + 1 > depth_acc) {
depth_acc = m_literal->depth() + 1;
}
}
// New handle scope here; it needs to come after BuildConstants().
@@ -611,7 +696,6 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
if (boilerplate_value->IsUninitialized(isolate)) {
boilerplate_value = handle(Smi::kZero, isolate);
is_simple = false;
}
kind = GetMoreGeneralElementsKind(kind,
@@ -623,7 +707,7 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
// Simple and shallow arrays can be lazily copied, we transform the
// elements array to a copy-on-write array.
if (is_simple && depth_acc == 1 && array_index > 0 &&
if (is_simple() && depth() == 1 && array_index > 0 &&
IsFastSmiOrObjectElementsKind(kind)) {
fixed_array->set_map(isolate->heap()->fixed_cow_array_map());
}
@@ -637,20 +721,20 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
accessor->CopyElements(fixed_array, from_kind, elements, constants_length);
}
// Remember both the literal's constant values as well as the ElementsKind
// in a 2-element FixedArray.
Handle<FixedArray> literals = isolate->factory()->NewFixedArray(2, TENURED);
literals->set(0, Smi::FromInt(kind));
literals->set(1, *elements);
// Remember both the literal's constant values as well as the ElementsKind.
Handle<ConstantElementsPair> literals =
isolate->factory()->NewConstantElementsPair(kind, elements);
constant_elements_ = literals;
set_is_simple(is_simple);
set_depth(depth_acc);
}
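The boilerplate for array literals used to be an untyped 2-element FixedArray holding the ElementsKind in slot 0 and the elements in slot 1; it is now a dedicated ConstantElementsPair struct. Consumer-side access becomes self-describing, roughly:

// Before: Smi::cast(fixed_array->get(0))->value() plus fixed_array->get(1).
// After, with the accessors used elsewhere in this patch:
Handle<ConstantElementsPair> pair =
    literal->GetOrBuildConstantElements(isolate);
ElementsKind kind = static_cast<ElementsKind>(pair->elements_kind());
USE(kind);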
bool ArrayLiteral::IsFastCloningSupported() const {
return depth() <= 1 &&
values()->length() <=
ConstructorBuiltinsAssembler::kMaximumClonedShallowArrayElements;
}
void ArrayLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
void ArrayLiteral::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
// This logic that computes the number of slots needed for vector store
// ICs must mirror FullCodeGenerator::VisitArrayLiteral.
@@ -678,6 +762,16 @@ Handle<Object> MaterializedLiteral::GetBoilerplateValue(Expression* expression,
return isolate->factory()->uninitialized_value();
}
void MaterializedLiteral::InitDepthAndFlags() {
if (IsArrayLiteral()) {
return AsArrayLiteral()->InitDepthAndFlags();
}
if (IsObjectLiteral()) {
return AsObjectLiteral()->InitDepthAndFlags();
}
DCHECK(IsRegExpLiteral());
DCHECK_LE(1, depth()); // Depth should be initialized.
}
void MaterializedLiteral::BuildConstants(Isolate* isolate) {
if (IsArrayLiteral()) {
@@ -687,7 +781,6 @@ void MaterializedLiteral::BuildConstants(Isolate* isolate) {
return AsObjectLiteral()->BuildConstantProperties(isolate);
}
DCHECK(IsRegExpLiteral());
DCHECK(depth() >= 1); // Depth should be initialized.
}
@@ -711,8 +804,7 @@ void BinaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
}
void BinaryOperation::AssignFeedbackVectorSlots(
Isolate* isolate, FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
FeedbackVectorSpec* spec, FeedbackVectorSlotCache* cache) {
// The feedback vector slot is only used by the interpreter for binary
// operations. Full-codegen uses the AstId to record type feedback.
switch (op()) {
@@ -733,8 +825,7 @@ static bool IsTypeof(Expression* expr) {
}
void CompareOperation::AssignFeedbackVectorSlots(
Isolate* isolate, FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache_) {
FeedbackVectorSpec* spec, FeedbackVectorSlotCache* cache_) {
// The feedback vector slot is only used by the interpreter for compare
// operations. Full-codegen uses the AstId to record type feedback.
switch (op()) {
@@ -892,7 +983,7 @@ bool Expression::IsMonomorphic() const {
}
}
void Call::AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
void Call::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
ic_slot_ = spec->AddCallICSlot();
}
@@ -931,8 +1022,7 @@ CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements,
statements_(statements),
compare_type_(AstType::None()) {}
void CaseClause::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
void CaseClause::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
type_feedback_slot_ = spec->AddInterpreterCompareICSlot();
}

353
deps/v8/src/ast/ast.h

@@ -5,6 +5,7 @@
#ifndef V8_AST_AST_H_
#define V8_AST_AST_H_
#include "src/assembler.h"
#include "src/ast/ast-types.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/modules.h"
@@ -102,6 +103,7 @@ namespace internal {
V(SuperCallReference) \
V(CaseClause) \
V(EmptyParentheses) \
V(GetIterator) \
V(DoExpression) \
V(RewritableExpression)
@@ -154,7 +156,7 @@ class AstProperties final BASE_EMBEDDED {
enum Flag {
kNoFlags = 0,
kDontSelfOptimize = 1 << 0,
kDontCrankshaft = 1 << 1
kMustUseIgnitionTurbo = 1 << 1
};
typedef base::Flags<Flag> Flags;
@@ -190,6 +192,7 @@ class AstNode: public ZoneObject {
int position() const { return position_; }
#ifdef DEBUG
void Print();
void Print(Isolate* isolate);
#endif // DEBUG
@@ -317,6 +320,9 @@ class Expression : public AstNode {
// True iff the expression is a literal represented as a smi.
bool IsSmiLiteral() const;
// True iff the expression is a literal represented as a number.
bool IsNumberLiteral() const;
// True iff the expression is a string literal.
bool IsStringLiteral() const;
@@ -466,9 +472,6 @@ class Block final : public BreakableStatement {
class IgnoreCompletionField
: public BitField<bool, BreakableStatement::kNextBitFieldIndex, 1> {};
protected:
static const uint8_t kNextBitFieldIndex = IgnoreCompletionField::kNext;
};
@@ -484,9 +487,6 @@ class DoExpression final : public Expression {
}
bool IsAnonymousFunctionDefinition() const;
protected:
static const uint8_t kNextBitFieldIndex = Expression::kNextBitFieldIndex;
private:
friend class AstNodeFactory;
@@ -518,8 +518,6 @@ class Declaration : public AstNode {
Declaration(VariableProxy* proxy, Scope* scope, int pos, NodeType type)
: AstNode(pos, type), proxy_(proxy), scope_(scope), next_(nullptr) {}
static const uint8_t kNextBitFieldIndex = AstNode::kNextBitFieldIndex;
private:
VariableProxy* proxy_;
// Nested scope from which the declaration originated.
@@ -734,7 +732,7 @@ class ForInStatement final : public ForEachStatement {
void set_subject(Expression* e) { subject_ = e; }
// Type feedback information.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot EachFeedbackSlot() const { return each_slot_; }
FeedbackVectorSlot ForInFeedbackSlot() {
@@ -778,9 +776,6 @@ class ForInStatement final : public ForEachStatement {
class ForInTypeField
: public BitField<ForInType, ForEachStatement::kNextBitFieldIndex, 1> {};
protected:
static const uint8_t kNextBitFieldIndex = ForInTypeField::kNext;
};
@@ -826,12 +821,6 @@ class ForOfStatement final : public ForEachStatement {
void set_result_done(Expression* e) { result_done_ = e; }
void set_assign_each(Expression* e) { assign_each_ = e; }
BailoutId ContinueId() const { return EntryId(); }
BailoutId StackCheckId() const { return BackEdgeId(); }
static int num_ids() { return parent_num_ids() + 1; }
BailoutId BackEdgeId() const { return BailoutId(local_id(0)); }
private:
friend class AstNodeFactory;
@@ -842,8 +831,6 @@ class ForOfStatement final : public ForEachStatement {
next_result_(NULL),
result_done_(NULL),
assign_each_(NULL) {}
static int parent_num_ids() { return ForEachStatement::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
Variable* iterator_;
Expression* assign_iterator_;
@@ -930,30 +917,16 @@ class WithStatement final : public Statement {
Statement* statement() const { return statement_; }
void set_statement(Statement* s) { statement_ = s; }
void set_base_id(int id) { base_id_ = id; }
static int num_ids() { return parent_num_ids() + 2; }
BailoutId ToObjectId() const { return BailoutId(local_id(0)); }
BailoutId EntryId() const { return BailoutId(local_id(1)); }
private:
friend class AstNodeFactory;
WithStatement(Scope* scope, Expression* expression, Statement* statement,
int pos)
: Statement(pos, kWithStatement),
base_id_(BailoutId::None().ToInt()),
scope_(scope),
expression_(expression),
statement_(statement) {}
static int parent_num_ids() { return 0; }
int base_id() const {
DCHECK(!BailoutId(base_id_).IsNone());
return base_id_;
}
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
int base_id_;
Scope* scope_;
Expression* expression_;
Statement* statement_;
@@ -981,7 +954,7 @@ class CaseClause final : public Expression {
// CaseClause will have both a slot in the feedback vector and the
// TypeFeedbackId to record the type information. TypeFeedbackId is used by
// full codegen and the feedback vector slot is used by interpreter.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot CompareOperationFeedbackSlot() {
@@ -1212,22 +1185,15 @@ class SloppyBlockFunctionStatement final : public Statement {
public:
Statement* statement() const { return statement_; }
void set_statement(Statement* statement) { statement_ = statement; }
Scope* scope() const { return scope_; }
SloppyBlockFunctionStatement* next() { return next_; }
void set_next(SloppyBlockFunctionStatement* next) { next_ = next; }
private:
friend class AstNodeFactory;
SloppyBlockFunctionStatement(Statement* statement, Scope* scope)
explicit SloppyBlockFunctionStatement(Statement* statement)
: Statement(kNoSourcePosition, kSloppyBlockFunctionStatement),
statement_(statement),
scope_(scope),
next_(nullptr) {}
statement_(statement) {}
Statement* statement_;
Scope* const scope_;
SloppyBlockFunctionStatement* next_;
};
@@ -1317,6 +1283,9 @@ class MaterializedLiteral : public Expression {
depth_ = depth;
}
// Populate the depth field and any flags the literal has.
void InitDepthAndFlags();
// Populate the constant properties/elements fixed array.
void BuildConstants(Isolate* isolate);
friend class ArrayLiteral;
@@ -1347,11 +1316,15 @@ class LiteralProperty : public ZoneObject {
return slots_[offset];
}
FeedbackVectorSlot GetStoreDataPropertySlot() const;
void SetSlot(FeedbackVectorSlot slot, int offset = 0) {
DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
slots_[offset] = slot;
}
void SetStoreDataPropertySlot(FeedbackVectorSlot slot);
bool NeedsSetFunctionName() const;
protected:
@@ -1374,8 +1347,9 @@ class ObjectLiteralProperty final : public LiteralProperty {
COMPUTED, // Property with computed value (execution time).
MATERIALIZED_LITERAL, // Property value is a materialized literal.
GETTER,
SETTER, // Property is an accessor function.
PROTOTYPE // Property is __proto__.
SETTER, // Property is an accessor function.
PROTOTYPE, // Property is __proto__.
SPREAD
};
Kind kind() const { return kind_; }
@@ -1412,6 +1386,7 @@ class ObjectLiteral final : public MaterializedLiteral {
typedef ObjectLiteralProperty Property;
Handle<FixedArray> constant_properties() const {
DCHECK(!constant_properties_.is_null());
return constant_properties_;
}
int properties_count() const { return boilerplate_properties_; }
@@ -1428,6 +1403,17 @@ class ObjectLiteral final : public MaterializedLiteral {
// Decide if a property should be in the object boilerplate.
static bool IsBoilerplateProperty(Property* property);
// Populate the depth field and flags.
void InitDepthAndFlags();
// Get the constant properties fixed array, populating it if necessary.
Handle<FixedArray> GetOrBuildConstantProperties(Isolate* isolate) {
if (constant_properties_.is_null()) {
BuildConstantProperties(isolate);
}
return constant_properties();
}
// Populate the constant properties fixed array.
void BuildConstantProperties(Isolate* isolate);
@@ -1436,6 +1422,9 @@ class ObjectLiteral final : public MaterializedLiteral {
// marked expressions, no store code is emitted.
void CalculateEmitStore(Zone* zone);
// Determines whether the {FastCloneShallowObject} builtin can be used.
bool IsFastCloningSupported() const;
// Assemble bitfield of flags for the CreateObjectLiteral helper.
int ComputeFlags(bool disable_mementos = false) const {
int flags = fast_elements() ? kFastElements : kNoFlags;
@@ -1465,22 +1454,15 @@ class ObjectLiteral final : public MaterializedLiteral {
BailoutId CreateLiteralId() const { return BailoutId(local_id(0)); }
// Return an AST id for a property that is used in simulate instructions.
BailoutId GetIdForPropertyName(int i) {
return BailoutId(local_id(2 * i + 1));
}
BailoutId GetIdForPropertySet(int i) {
return BailoutId(local_id(2 * i + 2));
}
BailoutId GetIdForPropertySet(int i) { return BailoutId(local_id(i + 1)); }
// Unlike other AST nodes, this number of bailout IDs allocated for an
// ObjectLiteral can vary, so num_ids() is not a static method.
int num_ids() const {
return parent_num_ids() + 1 + 2 * properties()->length();
}
int num_ids() const { return parent_num_ids() + 1 + properties()->length(); }
// Object literals need one feedback slot for each non-trivial value, as well
// as some slots for home objects.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
private:
@@ -1500,7 +1482,6 @@ class ObjectLiteral final : public MaterializedLiteral {
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
uint32_t boilerplate_properties_;
FeedbackVectorSlot slot_;
Handle<FixedArray> constant_properties_;
ZoneList<Property*>* properties_;
@@ -1510,9 +1491,6 @@ class ObjectLiteral final : public MaterializedLiteral {
};
class MayStoreDoublesField
: public BitField<bool, HasElementsField::kNext, 1> {};
protected:
static const uint8_t kNextBitFieldIndex = MayStoreDoublesField::kNext;
};
@@ -1565,11 +1543,11 @@ class RegExpLiteral final : public MaterializedLiteral {
// for minimizing the work when constructing it at runtime.
class ArrayLiteral final : public MaterializedLiteral {
public:
Handle<FixedArray> constant_elements() const { return constant_elements_; }
Handle<ConstantElementsPair> constant_elements() const {
return constant_elements_;
}
ElementsKind constant_elements_kind() const {
DCHECK_EQ(2, constant_elements_->length());
return static_cast<ElementsKind>(
Smi::cast(constant_elements_->get(0))->value());
return static_cast<ElementsKind>(constant_elements()->elements_kind());
}
ZoneList<Expression*>* values() const { return values_; }
@@ -1583,9 +1561,23 @@ class ArrayLiteral final : public MaterializedLiteral {
// ArrayLiteral can vary, so num_ids() is not a static method.
int num_ids() const { return parent_num_ids() + 1 + values()->length(); }
// Populate the depth field and flags.
void InitDepthAndFlags();
// Get the constant elements fixed array, populating it if necessary.
Handle<ConstantElementsPair> GetOrBuildConstantElements(Isolate* isolate) {
if (constant_elements_.is_null()) {
BuildConstantElements(isolate);
}
return constant_elements();
}
// Populate the constant elements fixed array.
void BuildConstantElements(Isolate* isolate);
// Determines whether the {FastCloneShallowArray} builtin can be used.
bool IsFastCloningSupported() const;
// Assemble bitfield of flags for the CreateArrayLiteral helper.
int ComputeFlags(bool disable_mementos = false) const {
int flags = depth() == 1 ? kShallowElements : kNoFlags;
@@ -1614,7 +1606,7 @@ class ArrayLiteral final : public MaterializedLiteral {
kDisableMementos = 1 << 1
};
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot LiteralFeedbackSlot() const { return literal_slot_; }
@@ -1632,7 +1624,7 @@ class ArrayLiteral final : public MaterializedLiteral {
int first_spread_index_;
FeedbackVectorSlot literal_slot_;
Handle<FixedArray> constant_elements_;
Handle<ConstantElementsPair> constant_elements_;
ZoneList<Expression*>* values_;
};
@@ -1663,6 +1655,9 @@ class VariableProxy final : public Expression {
bool is_assigned() const { return IsAssignedField::decode(bit_field_); }
void set_is_assigned() {
bit_field_ = IsAssignedField::update(bit_field_, true);
if (is_resolved()) {
var()->set_maybe_assigned();
}
}
bool is_resolved() const { return IsResolvedField::decode(bit_field_); }
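set_is_assigned now forwards the flag to the underlying Variable whenever the proxy is already resolved, so maybe-assigned information is not lost when assignment marking happens after resolution. A sketch (the kMaybeAssigned enum value is assumed from globals.h, not part of this hunk):

proxy->set_is_assigned();
if (proxy->is_resolved()) {
  // The flag has already been propagated to the Variable.
  DCHECK_EQ(kMaybeAssigned, proxy->var()->maybe_assigned());
}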
@@ -1690,7 +1685,7 @@ class VariableProxy final : public Expression {
return var()->IsUnallocated() || var()->IsLookupSlot();
}
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot VariableFeedbackSlot() { return variable_feedback_slot_; }
@@ -1786,7 +1781,7 @@ class Property final : public Expression {
bool IsSuperAccess() { return obj()->IsSuperPropertyReference(); }
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
FeedbackVectorSlotKind kind = key()->IsPropertyName()
? FeedbackVectorSlotKind::LOAD_IC
@@ -1844,7 +1839,7 @@ class Call final : public Expression {
void set_expression(Expression* e) { expression_ = e; }
// Type feedback information.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot CallFeedbackICSlot() const { return ic_slot_; }
@@ -1876,11 +1871,9 @@ class Call final : public Expression {
allocation_site_ = site;
}
static int num_ids() { return parent_num_ids() + 4; }
static int num_ids() { return parent_num_ids() + 2; }
BailoutId ReturnId() const { return BailoutId(local_id(0)); }
BailoutId EvalId() const { return BailoutId(local_id(1)); }
BailoutId LookupId() const { return BailoutId(local_id(2)); }
BailoutId CallId() const { return BailoutId(local_id(3)); }
BailoutId CallId() const { return BailoutId(local_id(1)); }
bool is_uninitialized() const {
return IsUninitializedField::decode(bit_field_);
@@ -1964,7 +1957,7 @@ class CallNew final : public Expression {
void set_expression(Expression* e) { expression_ = e; }
// Type feedback information.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
// CallNew stores feedback in the exact same way as Call. We can
// piggyback on the type feedback infrastructure for calls.
@@ -2138,7 +2131,7 @@ class BinaryOperation final : public Expression {
// BinaryOperation will have both a slot in the feedback vector and the
// TypeFeedbackId to record the type information. TypeFeedbackId is used
// by full codegen and the feedback vector slot is used by interpreter.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot BinaryOperationFeedbackSlot() const {
@@ -2231,7 +2224,7 @@ class CountOperation final : public Expression {
return binary_operation_slot_;
}
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot CountSlot() const { return slot_; }
@@ -2283,7 +2276,7 @@ class CompareOperation final : public Expression {
// CompareOperation will have both a slot in the feedback vector and the
// TypeFeedbackId to record the type information. TypeFeedbackId is used
// by full codegen and the feedback vector slot is used by interpreter.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot CompareOperationFeedbackSlot() const {
@@ -2429,7 +2422,7 @@ class Assignment final : public Expression {
bit_field_ = StoreModeField::update(bit_field_, mode);
}
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot AssignmentSlot() const { return slot_; }
@@ -2571,6 +2564,8 @@ class FunctionLiteral final : public Expression {
kAccessorOrMethod
};
enum IdType { kIdTypeInvalid = -1, kIdTypeTopLevel = 0 };
enum ParameterFlag { kNoDuplicateParameters, kHasDuplicateParameters };
enum EagerCompileHint { kShouldEagerCompile, kShouldLazyCompile };
@@ -2594,6 +2589,18 @@ class FunctionLiteral final : public Expression {
}
LanguageMode language_mode() const;
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
// The + 1 is because we need an array with room for the literals
// as well as the feedback vector.
literal_feedback_slot_ =
spec->AddCreateClosureSlot(materialized_literal_count_ + 1);
}
FeedbackVectorSlot LiteralFeedbackSlot() const {
return literal_feedback_slot_;
}
static bool NeedsHomeObject(Expression* expr);
int materialized_literal_count() { return materialized_literal_count_; }
@@ -2644,8 +2651,6 @@ class FunctionLiteral final : public Expression {
return HasDuplicateParameters::decode(bit_field_);
}
bool is_function() const { return IsFunction::decode(bit_field_); }
// This is used as a heuristic for when to eagerly compile a function
// literal. We consider the following constructs as hints that the
// function will be called immediately:
@@ -2691,25 +2696,15 @@ class FunctionLiteral final : public Expression {
int yield_count() { return yield_count_; }
void set_yield_count(int yield_count) { yield_count_ = yield_count; }
bool requires_class_field_init() {
return RequiresClassFieldInit::decode(bit_field_);
}
void set_requires_class_field_init(bool requires_class_field_init) {
bit_field_ =
RequiresClassFieldInit::update(bit_field_, requires_class_field_init);
}
bool is_class_field_initializer() {
return IsClassFieldInitializer::decode(bit_field_);
}
void set_is_class_field_initializer(bool is_class_field_initializer) {
bit_field_ =
IsClassFieldInitializer::update(bit_field_, is_class_field_initializer);
}
int return_position() {
return std::max(start_position(), end_position() - (has_braces_ ? 1 : 0));
}
int function_literal_id() const { return function_literal_id_; }
void set_function_literal_id(int function_literal_id) {
function_literal_id_ = function_literal_id;
}
private:
friend class AstNodeFactory;
@@ -2720,7 +2715,7 @@ class FunctionLiteral final : public Expression {
int function_length, FunctionType function_type,
ParameterFlag has_duplicate_parameters,
EagerCompileHint eager_compile_hint, int position,
bool is_function, bool has_braces)
bool has_braces, int function_literal_id)
: Expression(position, kFunctionLiteral),
materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
@@ -2733,16 +2728,14 @@ class FunctionLiteral final : public Expression {
scope_(scope),
body_(body),
raw_inferred_name_(ast_value_factory->empty_string()),
ast_properties_(zone) {
bit_field_ |=
FunctionTypeBits::encode(function_type) | Pretenure::encode(false) |
HasDuplicateParameters::encode(has_duplicate_parameters ==
kHasDuplicateParameters) |
IsFunction::encode(is_function) |
RequiresClassFieldInit::encode(false) |
ShouldNotBeUsedOnceHintField::encode(false) |
DontOptimizeReasonField::encode(kNoReason) |
IsClassFieldInitializer::encode(false);
ast_properties_(zone),
function_literal_id_(function_literal_id) {
bit_field_ |= FunctionTypeBits::encode(function_type) |
Pretenure::encode(false) |
HasDuplicateParameters::encode(has_duplicate_parameters ==
kHasDuplicateParameters) |
ShouldNotBeUsedOnceHintField::encode(false) |
DontOptimizeReasonField::encode(kNoReason);
if (eager_compile_hint == kShouldEagerCompile) SetShouldEagerCompile();
}
@@ -2750,15 +2743,11 @@ class FunctionLiteral final : public Expression {
: public BitField<FunctionType, Expression::kNextBitFieldIndex, 2> {};
class Pretenure : public BitField<bool, FunctionTypeBits::kNext, 1> {};
class HasDuplicateParameters : public BitField<bool, Pretenure::kNext, 1> {};
class IsFunction : public BitField<bool, HasDuplicateParameters::kNext, 1> {};
class ShouldNotBeUsedOnceHintField
: public BitField<bool, IsFunction::kNext, 1> {};
class RequiresClassFieldInit
: public BitField<bool, ShouldNotBeUsedOnceHintField::kNext, 1> {};
class IsClassFieldInitializer
: public BitField<bool, RequiresClassFieldInit::kNext, 1> {};
: public BitField<bool, HasDuplicateParameters::kNext, 1> {};
class DontOptimizeReasonField
: public BitField<BailoutReason, IsClassFieldInitializer::kNext, 8> {};
: public BitField<BailoutReason, ShouldNotBeUsedOnceHintField::kNext, 8> {
};
int materialized_literal_count_;
int expected_property_count_;
@@ -2774,6 +2763,8 @@ class FunctionLiteral final : public Expression {
const AstString* raw_inferred_name_;
Handle<String> inferred_name_;
AstProperties ast_properties_;
int function_literal_id_;
FeedbackVectorSlot literal_feedback_slot_;
};
// Property is used for passing information
@@ -2808,27 +2799,16 @@ class ClassLiteral final : public Expression {
ZoneList<Property*>* properties() const { return properties_; }
int start_position() const { return position(); }
int end_position() const { return end_position_; }
VariableProxy* static_initializer_proxy() const {
return static_initializer_proxy_;
bool has_name_static_property() const {
return HasNameStaticProperty::decode(bit_field_);
}
void set_static_initializer_proxy(VariableProxy* proxy) {
static_initializer_proxy_ = proxy;
bool has_static_computed_names() const {
return HasStaticComputedNames::decode(bit_field_);
}
BailoutId CreateLiteralId() const { return BailoutId(local_id(0)); }
BailoutId PrototypeId() { return BailoutId(local_id(1)); }
// Return an AST id for a property that is used in simulate instructions.
BailoutId GetIdForProperty(int i) { return BailoutId(local_id(i + 2)); }
// Unlike other AST nodes, this number of bailout IDs allocated for an
// ClassLiteral can vary, so num_ids() is not a static method.
int num_ids() const { return parent_num_ids() + 2 + properties()->length(); }
// Object literals need one feedback slot for each non-trivial value, as well
// as some slots for home objects.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
bool NeedsProxySlot() const {
@@ -2836,7 +2816,7 @@ class ClassLiteral final : public Expression {
class_variable_proxy()->var()->IsUnallocated();
}
FeedbackVectorSlot PrototypeSlot() const { return prototype_slot_; }
FeedbackVectorSlot HomeObjectSlot() const { return home_object_slot_; }
FeedbackVectorSlot ProxySlot() const { return proxy_slot_; }
private:
@@ -2844,26 +2824,30 @@ class ClassLiteral final : public Expression {
ClassLiteral(VariableProxy* class_variable_proxy, Expression* extends,
FunctionLiteral* constructor, ZoneList<Property*>* properties,
int start_position, int end_position)
int start_position, int end_position,
bool has_name_static_property, bool has_static_computed_names)
: Expression(start_position, kClassLiteral),
end_position_(end_position),
class_variable_proxy_(class_variable_proxy),
extends_(extends),
constructor_(constructor),
properties_(properties),
static_initializer_proxy_(nullptr) {}
static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
properties_(properties) {
bit_field_ |= HasNameStaticProperty::encode(has_name_static_property) |
HasStaticComputedNames::encode(has_static_computed_names);
}
int end_position_;
FeedbackVectorSlot prototype_slot_;
FeedbackVectorSlot home_object_slot_;
FeedbackVectorSlot proxy_slot_;
VariableProxy* class_variable_proxy_;
Expression* extends_;
FunctionLiteral* constructor_;
ZoneList<Property*>* properties_;
VariableProxy* static_initializer_proxy_;
class HasNameStaticProperty
: public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
class HasStaticComputedNames
: public BitField<bool, HasNameStaticProperty::kNext, 1> {};
};
@@ -2871,6 +2855,19 @@ class NativeFunctionLiteral final : public Expression {
public:
Handle<String> name() const { return name_->string(); }
v8::Extension* extension() const { return extension_; }
FeedbackVectorSlot LiteralFeedbackSlot() const {
return literal_feedback_slot_;
}
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
// 0 is a magic number here. It means we are holding the literals
// array for a native function literal, which needs to be
// the empty literals array.
// TODO(mvstanton): The FeedbackVectorSlotCache can be adapted
// to always return the same slot for this case.
literal_feedback_slot_ = spec->AddCreateClosureSlot(0);
}
private:
friend class AstNodeFactory;
@@ -2883,6 +2880,7 @@ class NativeFunctionLiteral final : public Expression {
const AstRawString* name_;
v8::Extension* extension_;
FeedbackVectorSlot literal_feedback_slot_;
};
@@ -2955,7 +2953,43 @@ class EmptyParentheses final : public Expression {
explicit EmptyParentheses(int pos) : Expression(pos, kEmptyParentheses) {}
};
// Represents the spec operation `GetIterator()`
// (defined at https://tc39.github.io/ecma262/#sec-getiterator). Ignition
// desugars this into a LoadIC / JSLoadNamed, a CallIC, and a type-check to
// validate the return value of the Symbol.iterator() call.
class GetIterator final : public Expression {
public:
Expression* iterable() const { return iterable_; }
void set_iterable(Expression* iterable) { iterable_ = iterable; }
static int num_ids() { return parent_num_ids(); }
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
iterator_property_feedback_slot_ =
spec->AddSlot(FeedbackVectorSlotKind::LOAD_IC);
iterator_call_feedback_slot_ =
spec->AddSlot(FeedbackVectorSlotKind::CALL_IC);
}
FeedbackVectorSlot IteratorPropertyFeedbackSlot() const {
return iterator_property_feedback_slot_;
}
FeedbackVectorSlot IteratorCallFeedbackSlot() const {
return iterator_call_feedback_slot_;
}
private:
friend class AstNodeFactory;
explicit GetIterator(Expression* iterable, int pos)
: Expression(pos, kGetIterator), iterable_(iterable) {}
Expression* iterable_;
FeedbackVectorSlot iterator_property_feedback_slot_;
FeedbackVectorSlot iterator_call_feedback_slot_;
};
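Making GetIterator a first-class expression node, instead of parser-synthesized property loads and calls, lets Ignition give the load and the call their own feedback slots. Roughly, the operation it stands for, sketched under the assumption of the usual iteration protocol:

// For an iterable `obj`, GetIterator corresponds to:
//   method   = obj[Symbol.iterator];  // uses IteratorPropertyFeedbackSlot
//   iterator = method.call(obj);      // uses IteratorCallFeedbackSlot
//   if iterator is not an object: throw TypeError
GetIterator* node = factory->NewGetIterator(iterable, pos);
node->AssignFeedbackVectorSlots(spec, cache);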
// ----------------------------------------------------------------------------
// Basic visitor
@@ -3217,15 +3251,6 @@ class AstNodeFactory final BASE_EMBEDDED {
try_block, scope, variable, catch_block, HandlerTable::UNCAUGHT, pos);
}
TryCatchStatement* NewTryCatchStatementForPromiseReject(Block* try_block,
Scope* scope,
Variable* variable,
Block* catch_block,
int pos) {
return new (zone_) TryCatchStatement(
try_block, scope, variable, catch_block, HandlerTable::PROMISE, pos);
}
TryCatchStatement* NewTryCatchStatementForDesugaring(Block* try_block,
Scope* scope,
Variable* variable,
@@ -3258,9 +3283,9 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) EmptyStatement(pos);
}
SloppyBlockFunctionStatement* NewSloppyBlockFunctionStatement(Scope* scope) {
return new (zone_) SloppyBlockFunctionStatement(
NewEmptyStatement(kNoSourcePosition), scope);
SloppyBlockFunctionStatement* NewSloppyBlockFunctionStatement() {
return new (zone_)
SloppyBlockFunctionStatement(NewEmptyStatement(kNoSourcePosition));
}
CaseClause* NewCaseClause(
@@ -3437,9 +3462,13 @@ class AstNodeFactory final BASE_EMBEDDED {
Expression* value,
int pos) {
DCHECK(Token::IsAssignmentOp(op));
if (op != Token::INIT && target->IsVariableProxy()) {
target->AsVariableProxy()->set_is_assigned();
}
Assignment* assign = new (zone_) Assignment(op, target, value, pos);
if (assign->is_compound()) {
DCHECK(Token::IsAssignmentOp(op));
assign->binary_operation_ =
NewBinaryOperation(assign->binary_op(), target, value, pos + 1);
}
@@ -3463,12 +3492,12 @@ class AstNodeFactory final BASE_EMBEDDED {
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
FunctionLiteral::EagerCompileHint eager_compile_hint, int position,
bool has_braces) {
bool has_braces, int function_literal_id) {
return new (zone_) FunctionLiteral(
zone_, name, ast_value_factory_, scope, body,
materialized_literal_count, expected_property_count, parameter_count,
function_length, function_type, has_duplicate_parameters,
eager_compile_hint, position, true, has_braces);
eager_compile_hint, position, has_braces, function_literal_id);
}
// Creates a FunctionLiteral representing a top-level script, the
@@ -3483,7 +3512,8 @@ class AstNodeFactory final BASE_EMBEDDED {
body, materialized_literal_count, expected_property_count,
parameter_count, parameter_count, FunctionLiteral::kAnonymousExpression,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kShouldLazyCompile, 0, false, true);
FunctionLiteral::kShouldLazyCompile, 0, true,
FunctionLiteral::kIdTypeTopLevel);
}
ClassLiteral::Property* NewClassLiteralProperty(
@@ -3496,9 +3526,12 @@ class AstNodeFactory final BASE_EMBEDDED {
ClassLiteral* NewClassLiteral(VariableProxy* proxy, Expression* extends,
FunctionLiteral* constructor,
ZoneList<ClassLiteral::Property*>* properties,
int start_position, int end_position) {
return new (zone_) ClassLiteral(proxy, extends, constructor, properties,
start_position, end_position);
int start_position, int end_position,
bool has_name_static_property,
bool has_static_computed_names) {
return new (zone_) ClassLiteral(
proxy, extends, constructor, properties, start_position, end_position,
has_name_static_property, has_static_computed_names);
}
NativeFunctionLiteral* NewNativeFunctionLiteral(const AstRawString* name,
@@ -3534,6 +3567,10 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) EmptyParentheses(pos);
}
GetIterator* NewGetIterator(Expression* iterable, int pos) {
return new (zone_) GetIterator(iterable, pos);
}
Zone* zone() const { return zone_; }
void set_zone(Zone* zone) { zone_ = zone; }

4
deps/v8/src/ast/compile-time-value.cc

@@ -48,8 +48,8 @@ CompileTimeValue::LiteralType CompileTimeValue::GetLiteralType(
return static_cast<LiteralType>(literal_type->value());
}
Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
return Handle<FixedArray>(FixedArray::cast(value->get(kElementsSlot)));
Handle<HeapObject> CompileTimeValue::GetElements(Handle<FixedArray> value) {
return Handle<HeapObject>(HeapObject::cast(value->get(kElementsSlot)));
}
} // namespace internal
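The return type widens from FixedArray to HeapObject because the elements slot of an array-literal compile-time value now holds a ConstantElementsPair, while object literals keep storing a FixedArray of properties there. Callers presumably dispatch on the literal type, along these lines (the ARRAY_LITERAL enumerator name is an assumption, not shown in this hunk):

Handle<HeapObject> elements = CompileTimeValue::GetElements(value);
if (CompileTimeValue::GetLiteralType(value) ==
    CompileTimeValue::ARRAY_LITERAL) {
  Handle<ConstantElementsPair> pair =
      Handle<ConstantElementsPair>::cast(elements);
  USE(pair);
}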

4
deps/v8/src/ast/compile-time-value.h

@@ -31,8 +31,8 @@ class CompileTimeValue : public AllStatic {
// Get the type of a compile time value returned by GetValue().
static LiteralType GetLiteralType(Handle<FixedArray> value);
// Get the elements array of a compile time value returned by GetValue().
static Handle<FixedArray> GetElements(Handle<FixedArray> value);
// Get the elements of a compile time value returned by GetValue().
static Handle<HeapObject> GetElements(Handle<FixedArray> value);
private:
static const int kLiteralTypeSlot = 0;

2
deps/v8/src/ast/modules.cc

@@ -5,6 +5,8 @@
#include "src/ast/modules.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/scopes.h"
#include "src/objects-inl.h"
#include "src/objects/module-info.h"
namespace v8 {
namespace internal {

53
deps/v8/src/ast/prettyprinter.cc

@@ -10,18 +10,19 @@
#include "src/ast/scopes.h"
#include "src/base/platform/platform.h"
#include "src/globals.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
CallPrinter::CallPrinter(Isolate* isolate, bool is_builtin)
CallPrinter::CallPrinter(Isolate* isolate, bool is_user_js)
: builder_(isolate) {
isolate_ = isolate;
position_ = 0;
num_prints_ = 0;
found_ = false;
done_ = false;
is_builtin_ = is_builtin;
is_user_js_ = is_user_js;
InitializeAstVisitor(isolate);
}
@@ -239,11 +240,11 @@ void CallPrinter::VisitArrayLiteral(ArrayLiteral* node) {
void CallPrinter::VisitVariableProxy(VariableProxy* node) {
if (is_builtin_) {
// Variable names of builtins are meaningless due to minification.
Print("(var)");
} else {
if (is_user_js_) {
PrintLiteral(node->name(), false);
} else {
// Variable names of non-user code are meaningless due to minification.
Print("(var)");
}
}
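The rename from is_builtin to is_user_js flips the flag's polarity: the printer now asks whether the code is user-authored, which also covers non-builtin native sources such as extensions. Construction-side sketch (the Print signature is assumed from the header's comment below; only the constructor appears in this diff):

CallPrinter printer(isolate, /* is_user_js = */ true);
Handle<String> text = printer.Print(program, error_position);
USE(text);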
@@ -279,9 +280,9 @@ void CallPrinter::VisitProperty(Property* node) {
void CallPrinter::VisitCall(Call* node) {
bool was_found = !found_ && node->position() == position_;
if (was_found) {
// Bail out if the error is caused by a direct call to a variable in builtin
// code. The variable name is meaningless due to minification.
if (is_builtin_ && node->expression()->IsVariableProxy()) {
// Bail out if the error is caused by a direct call to a variable in
// non-user JS code. The variable name is meaningless due to minification.
if (!is_user_js_ && node->expression()->IsVariableProxy()) {
done_ = true;
return;
}
@@ -297,9 +298,9 @@ void CallPrinter::VisitCall(Call* node) {
void CallPrinter::VisitCallNew(CallNew* node) {
bool was_found = !found_ && node->position() == position_;
if (was_found) {
// Bail out if the error is caused by a direct call to a variable in builtin
// code. The variable name is meaningless due to minification.
if (is_builtin_ && node->expression()->IsVariableProxy()) {
// Bail out if the error is caused by a direct call to a variable in
// non-user JS code. The variable name is meaningless due to minification.
if (!is_user_js_ && node->expression()->IsVariableProxy()) {
done_ = true;
return;
}
@@ -370,6 +371,11 @@ void CallPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
UNREACHABLE();
}
void CallPrinter::VisitGetIterator(GetIterator* node) {
Print("GetIterator(");
Find(node->iterable(), true);
Print(")");
}
void CallPrinter::VisitThisFunction(ThisFunction* node) {}
@@ -874,15 +880,16 @@ void AstPrinter::PrintTryStatement(TryStatement* node) {
case HandlerTable::CAUGHT:
prediction = "CAUGHT";
break;
case HandlerTable::PROMISE:
prediction = "PROMISE";
break;
case HandlerTable::DESUGARING:
prediction = "DESUGARING";
break;
case HandlerTable::ASYNC_AWAIT:
prediction = "ASYNC_AWAIT";
break;
case HandlerTable::PROMISE:
// Catch predictions resulting in promise rejections aren't
// produced by the parser.
UNREACHABLE();
}
Print(" %s\n", prediction);
}
@@ -1019,6 +1026,9 @@ void AstPrinter::PrintObjectProperties(
case ObjectLiteral::Property::SETTER:
prop_kind = "SETTER";
break;
case ObjectLiteral::Property::SPREAD:
prop_kind = "SPREAD";
break;
}
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "PROPERTY - %s", prop_kind);
@@ -1136,7 +1146,14 @@ void AstPrinter::VisitCallNew(CallNew* node) {
void AstPrinter::VisitCallRuntime(CallRuntime* node) {
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "CALL RUNTIME %s", node->debug_name());
if (node->is_jsruntime()) {
SNPrintF(
buf, "CALL RUNTIME %s code = %p", node->debug_name(),
static_cast<void*>(isolate_->context()->get(node->context_index())));
} else {
SNPrintF(buf, "CALL RUNTIME %s", node->debug_name());
}
IndentedScope indent(this, buf.start(), node->position());
PrintArguments(node->arguments());
}
@@ -1181,6 +1198,10 @@ void AstPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
IndentedScope indent(this, "()", node->position());
}
void AstPrinter::VisitGetIterator(GetIterator* node) {
IndentedScope indent(this, "GET-ITERATOR", node->position());
Visit(node->iterable());
}
void AstPrinter::VisitThisFunction(ThisFunction* node) {
IndentedScope indent(this, "THIS-FUNCTION", node->position());

4
deps/v8/src/ast/prettyprinter.h

@@ -15,7 +15,7 @@ namespace internal {
class CallPrinter final : public AstVisitor<CallPrinter> {
public:
explicit CallPrinter(Isolate* isolate, bool is_builtin);
explicit CallPrinter(Isolate* isolate, bool is_user_js);
// The following routine prints the node with position |position| into a
// string.
@@ -38,7 +38,7 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
int position_; // position of ast node to print
bool found_;
bool done_;
bool is_builtin_;
bool is_user_js_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();

511
deps/v8/src/ast/scopes.cc

@@ -9,12 +9,27 @@
#include "src/accessors.h"
#include "src/ast/ast.h"
#include "src/bootstrapper.h"
#include "src/counters.h"
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/objects/module-info.h"
#include "src/parsing/parse-info.h"
namespace v8 {
namespace internal {
namespace {
void* kDummyPreParserVariable = reinterpret_cast<void*>(0x1);
void* kDummyPreParserLexicalVariable = reinterpret_cast<void*>(0x2);
bool IsLexical(Variable* variable) {
if (variable == kDummyPreParserLexicalVariable) return true;
if (variable == kDummyPreParserVariable) return false;
return IsLexicalVariableMode(variable->mode());
}
} // namespace
// ----------------------------------------------------------------------------
// Implementation of LocalsMap
//
@@ -49,6 +64,19 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope,
return reinterpret_cast<Variable*>(p->value);
}
void VariableMap::DeclareName(Zone* zone, const AstRawString* name,
VariableMode mode) {
Entry* p =
ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->hash(),
ZoneAllocationPolicy(zone));
if (p->value == nullptr) {
// The variable has not been declared yet -> insert it.
DCHECK_EQ(name, p->key);
p->value =
mode == VAR ? kDummyPreParserVariable : kDummyPreParserLexicalVariable;
}
}
void VariableMap::Remove(Variable* var) {
const AstRawString* name = var->raw_name();
ZoneHashMap::Remove(const_cast<AstRawString*>(name), name->hash());
@@ -74,21 +102,27 @@ Variable* VariableMap::Lookup(const AstRawString* name) {
return NULL;
}
void SloppyBlockFunctionMap::Delegate::set_statement(Statement* statement) {
if (statement_ != nullptr) {
statement_->set_statement(statement);
}
}
SloppyBlockFunctionMap::SloppyBlockFunctionMap(Zone* zone)
: ZoneHashMap(8, ZoneAllocationPolicy(zone)) {}
void SloppyBlockFunctionMap::Declare(Zone* zone, const AstRawString* name,
SloppyBlockFunctionStatement* stmt) {
void SloppyBlockFunctionMap::Declare(
Zone* zone, const AstRawString* name,
SloppyBlockFunctionMap::Delegate* delegate) {
// AstRawStrings are unambiguous, i.e., the same string is always represented
// by the same AstRawString*.
Entry* p =
ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->hash(),
ZoneAllocationPolicy(zone));
stmt->set_next(static_cast<SloppyBlockFunctionStatement*>(p->value));
p->value = stmt;
delegate->set_next(static_cast<SloppyBlockFunctionMap::Delegate*>(p->value));
p->value = delegate;
}
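The map now chains lightweight Delegate objects (a scope plus an optional statement) per name, instead of the statements themselves. That lets the preparser record hoisting candidates without materializing statements: a delegate whose statement is null is simply skipped by the set_statement guard above. Declaration-side sketch, mirroring DeclareSloppyBlockFunction below:

// Full parser: `statement` is the placeholder to be rewritten during
// hoisting. Preparser (assumed): nullptr; only the name/scope pair matters.
auto* delegate =
    new (zone) SloppyBlockFunctionMap::Delegate(scope, statement);
map->Declare(zone, name, delegate);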
// ----------------------------------------------------------------------------
// Implementation of Scope
@@ -243,8 +277,7 @@ Scope::Scope(Zone* zone, const AstRawString* catch_variable_name,
// Cache the catch variable, even though it's also available via the
// scope_info, as the parser expects that a catch scope always has the catch
// variable as first and only variable.
Variable* variable = Declare(zone, this, catch_variable_name, VAR,
NORMAL_VARIABLE, kCreatedInitialized);
Variable* variable = Declare(zone, catch_variable_name, VAR);
AllocateHeapSlot(variable);
}
@@ -263,7 +296,14 @@ void DeclarationScope::SetDefaults() {
arguments_ = nullptr;
this_function_ = nullptr;
should_eager_compile_ = false;
is_lazily_parsed_ = false;
was_lazily_parsed_ = false;
#ifdef DEBUG
DeclarationScope* outer_declaration_scope =
outer_scope_ ? outer_scope_->GetDeclarationScope() : nullptr;
is_being_lazily_parsed_ =
outer_declaration_scope ? outer_declaration_scope->is_being_lazily_parsed_
: false;
#endif
}
void Scope::SetDefaults() {
@ -305,7 +345,7 @@ bool DeclarationScope::ShouldEagerCompile() const {
}
void DeclarationScope::set_should_eager_compile() {
should_eager_compile_ = !is_lazily_parsed_;
should_eager_compile_ = !was_lazily_parsed_;
}
void DeclarationScope::set_asm_module() {
@@ -354,17 +394,16 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
}
DCHECK(!scope_info->HasOuterScopeInfo());
break;
} else if (scope_info->scope_type() == FUNCTION_SCOPE ||
scope_info->scope_type() == EVAL_SCOPE) {
// TODO(neis): For an eval scope, we currently create an ordinary function
// context. This is wrong and needs to be fixed.
// https://bugs.chromium.org/p/v8/issues/detail?id=5295
} else if (scope_info->scope_type() == FUNCTION_SCOPE) {
outer_scope =
new (zone) DeclarationScope(zone, FUNCTION_SCOPE, handle(scope_info));
if (scope_info->IsAsmFunction())
outer_scope->AsDeclarationScope()->set_asm_function();
if (scope_info->IsAsmModule())
outer_scope->AsDeclarationScope()->set_asm_module();
} else if (scope_info->scope_type() == EVAL_SCOPE) {
outer_scope =
new (zone) DeclarationScope(zone, EVAL_SCOPE, handle(scope_info));
} else if (scope_info->scope_type() == BLOCK_SCOPE) {
if (scope_info->is_declaration_scope()) {
outer_scope =
@@ -424,11 +463,21 @@ int Scope::num_parameters() const {
return is_declaration_scope() ? AsDeclarationScope()->num_parameters() : 0;
}
void DeclarationScope::DeclareSloppyBlockFunction(
const AstRawString* name, Scope* scope,
SloppyBlockFunctionStatement* statement) {
auto* delegate =
new (zone()) SloppyBlockFunctionMap::Delegate(scope, statement);
sloppy_block_function_map_.Declare(zone(), name, delegate);
}
void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
DCHECK(is_sloppy(language_mode()));
DCHECK(is_function_scope() || is_eval_scope() || is_script_scope() ||
(is_block_scope() && outer_scope()->is_function_scope()));
DCHECK(HasSimpleParameters() || is_block_scope());
DCHECK(HasSimpleParameters() || is_block_scope() || is_being_lazily_parsed_);
DCHECK_EQ(factory == nullptr, is_being_lazily_parsed_);
bool has_simple_parameters = HasSimpleParameters();
// For each variable which is used as a function declaration in a sloppy
// block,
@@ -460,7 +509,7 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
bool var_created = false;
// Write in assignments to var for each block-scoped function declaration
auto delegates = static_cast<SloppyBlockFunctionStatement*>(p->value);
auto delegates = static_cast<SloppyBlockFunctionMap::Delegate*>(p->value);
DeclarationScope* decl_scope = this;
while (decl_scope->is_eval_scope()) {
@@ -468,7 +517,7 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
}
Scope* outer_scope = decl_scope->outer_scope();
for (SloppyBlockFunctionStatement* delegate = delegates;
for (SloppyBlockFunctionMap::Delegate* delegate = delegates;
delegate != nullptr; delegate = delegate->next()) {
// Check if there's a conflict with a lexical declaration
Scope* query_scope = delegate->scope()->outer_scope();
@@ -482,7 +531,7 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
// `{ let e; try {} catch (e) { function e(){} } }`
do {
var = query_scope->LookupLocal(name);
if (var != nullptr && IsLexicalVariableMode(var->mode())) {
if (var != nullptr && IsLexical(var)) {
should_hoist = false;
break;
}
@@ -494,30 +543,39 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
// Declare a var-style binding for the function in the outer scope
if (!var_created) {
var_created = true;
VariableProxy* proxy = factory->NewVariableProxy(name, NORMAL_VARIABLE);
Declaration* declaration =
factory->NewVariableDeclaration(proxy, this, kNoSourcePosition);
// Based on the preceding check, it doesn't matter what we pass as
// allow_harmony_restrictive_generators and
// sloppy_mode_block_scope_function_redefinition.
bool ok = true;
DeclareVariable(declaration, VAR,
Variable::DefaultInitializationFlag(VAR), false,
nullptr, &ok);
CHECK(ok); // Based on the preceding check, this should not fail
if (factory) {
VariableProxy* proxy =
factory->NewVariableProxy(name, NORMAL_VARIABLE);
auto declaration =
factory->NewVariableDeclaration(proxy, this, kNoSourcePosition);
// Based on the preceding check, it doesn't matter what we pass as
// allow_harmony_restrictive_generators and
// sloppy_mode_block_scope_function_redefinition.
bool ok = true;
DeclareVariable(declaration, VAR,
Variable::DefaultInitializationFlag(VAR), false,
nullptr, &ok);
CHECK(ok); // Based on the preceding check, this should not fail
} else {
DeclareVariableName(name, VAR);
}
}
Expression* assignment = factory->NewAssignment(
Token::ASSIGN, NewUnresolved(factory, name),
delegate->scope()->NewUnresolved(factory, name), kNoSourcePosition);
Statement* statement =
factory->NewExpressionStatement(assignment, kNoSourcePosition);
delegate->set_statement(statement);
if (factory) {
Expression* assignment = factory->NewAssignment(
Token::ASSIGN, NewUnresolved(factory, name),
delegate->scope()->NewUnresolved(factory, name), kNoSourcePosition);
Statement* statement =
factory->NewExpressionStatement(assignment, kNoSourcePosition);
delegate->set_statement(statement);
}
}
}
}
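Hoisting now runs in two modes, selected by whether an AstNodeFactory is available: with a factory (full parse) it declares a real var and synthesizes the name = function assignment; without one (preparse, per the DCHECK at the top of the function) it only records the name. Condensed, the branch is:

// Condensed from the function above; `declaration` and `ok` as in the
// full-parse path.
if (factory != nullptr) {
  DeclareVariable(declaration, VAR, Variable::DefaultInitializationFlag(VAR),
                  false, nullptr, &ok);  // real Variable + later assignment
} else {
  DeclareVariableName(name, VAR);  // preparse: name only, dummy Variable*
}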
void DeclarationScope::Analyze(ParseInfo* info, AnalyzeMode mode) {
RuntimeCallTimerScope runtimeTimer(info->isolate(),
&RuntimeCallStats::CompileScopeAnalysis);
DCHECK(info->literal() != NULL);
DeclarationScope* scope = info->literal()->scope();
@@ -542,13 +600,15 @@ void DeclarationScope::Analyze(ParseInfo* info, AnalyzeMode mode) {
scope->HoistSloppyBlockFunctions(&factory);
}
// We are compiling one of three cases:
// We are compiling one of four cases:
// 1) top-level code,
// 2) a function/eval/module on the top-level,
// 3) a function/eval in a scope that was already resolved.
// 4) an asm.js function
DCHECK(scope->scope_type() == SCRIPT_SCOPE ||
scope->outer_scope()->scope_type() == SCRIPT_SCOPE ||
scope->outer_scope()->already_resolved_);
scope->outer_scope()->already_resolved_ ||
(info->asm_function_scope() && scope->is_function_scope()));
// The outer scope is never lazy.
scope->set_should_eager_compile();
@@ -577,11 +637,11 @@ void DeclarationScope::DeclareThis(AstValueFactory* ast_value_factory) {
DCHECK(is_declaration_scope());
DCHECK(has_this_declaration());
bool subclass_constructor = IsSubclassConstructor(function_kind_);
Variable* var = Declare(
zone(), this, ast_value_factory->this_string(),
subclass_constructor ? CONST : VAR, THIS_VARIABLE,
subclass_constructor ? kNeedsInitialization : kCreatedInitialized);
bool derived_constructor = IsDerivedConstructor(function_kind_);
Variable* var =
Declare(zone(), ast_value_factory->this_string(),
derived_constructor ? CONST : VAR, THIS_VARIABLE,
derived_constructor ? kNeedsInitialization : kCreatedInitialized);
receiver_ = var;
}
@@ -594,8 +654,7 @@ void DeclarationScope::DeclareArguments(AstValueFactory* ast_value_factory) {
// Declare the 'arguments' variable, which exists in all non-arrow functions.
// Note that it might never be accessed, in which case it won't be
// allocated during variable allocation.
arguments_ = Declare(zone(), this, ast_value_factory->arguments_string(),
VAR, NORMAL_VARIABLE, kCreatedInitialized);
arguments_ = Declare(zone(), ast_value_factory->arguments_string(), VAR);
} else if (IsLexicalVariableMode(arguments_->mode())) {
// Check if there's lexically declared variable named arguments to avoid
// redeclaration. See ES#sec-functiondeclarationinstantiation, step 20.
@@ -609,14 +668,12 @@ void DeclarationScope::DeclareDefaultFunctionVariables(
DCHECK(!is_arrow_scope());
DeclareThis(ast_value_factory);
new_target_ = Declare(zone(), this, ast_value_factory->new_target_string(),
CONST, NORMAL_VARIABLE, kCreatedInitialized);
new_target_ = Declare(zone(), ast_value_factory->new_target_string(), CONST);
if (IsConciseMethod(function_kind_) || IsClassConstructor(function_kind_) ||
IsAccessorFunction(function_kind_)) {
this_function_ =
Declare(zone(), this, ast_value_factory->this_function_string(), CONST,
NORMAL_VARIABLE, kCreatedInitialized);
Declare(zone(), ast_value_factory->this_function_string(), CONST);
}
}
@@ -637,23 +694,12 @@ Variable* DeclarationScope::DeclareFunctionVar(const AstRawString* name) {
}
bool Scope::HasBeenRemoved() const {
// TODO(neis): Store this information somewhere instead of calculating it.
if (!is_block_scope()) return false; // Shortcut.
Scope* parent = outer_scope();
if (parent == nullptr) {
DCHECK(is_script_scope());
return false;
}
Scope* sibling = parent->inner_scope();
for (; sibling != nullptr; sibling = sibling->sibling()) {
if (sibling == this) return false;
if (sibling() == this) {
DCHECK_NULL(inner_scope_);
DCHECK(is_block_scope());
return true;
}
DCHECK_NULL(inner_scope_);
return true;
return false;
}
Scope* Scope::GetUnremovedScope() {
@@ -667,6 +713,7 @@ Scope* Scope::GetUnremovedScope() {
Scope* Scope::FinalizeBlockScope() {
DCHECK(is_block_scope());
DCHECK(!HasBeenRemoved());
if (variables_.occupancy() > 0 ||
(is_declaration_scope() && calls_sloppy_eval())) {
@@ -705,7 +752,12 @@ Scope* Scope::FinalizeBlockScope() {
PropagateUsageFlagsToScope(outer_scope_);
// This block does not need a context.
num_heap_slots_ = 0;
return NULL;
// Mark scope as removed by making it its own sibling.
sibling_ = this;
DCHECK(HasBeenRemoved());
return nullptr;
}
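The new removal marker above deserves a closer look: rather than deciding whether a block scope was removed by walking the parent's list of inner scopes, a removed scope is made its own sibling, a state no live scope can be in, which turns HasBeenRemoved() into a constant-time check. A minimal self-contained sketch of the trick, using a hypothetical Node type rather than V8's Scope:

#include <cassert>

// Sketch only (not V8's Scope): nodes form a singly linked sibling list;
// a removed node is marked by pointing its sibling link at itself, which
// no node in a live list can do, so the removal check is O(1).
struct Node {
  Node* sibling = nullptr;

  void MarkRemoved() { sibling = this; }
  bool HasBeenRemoved() const { return sibling == this; }
};

int main() {
  Node a, b;
  a.sibling = &b;  // a -> b, both live
  assert(!a.HasBeenRemoved());
  a.MarkRemoved();  // self-link marks removal
  assert(a.HasBeenRemoved());
}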
void DeclarationScope::AddLocal(Variable* var) {
@@ -715,13 +767,13 @@ void DeclarationScope::AddLocal(Variable* var) {
locals_.Add(var);
}
Variable* Scope::Declare(Zone* zone, Scope* scope, const AstRawString* name,
Variable* Scope::Declare(Zone* zone, const AstRawString* name,
VariableMode mode, VariableKind kind,
InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag) {
bool added;
Variable* var =
variables_.Declare(zone, scope, name, mode, kind, initialization_flag,
variables_.Declare(zone, this, name, mode, kind, initialization_flag,
maybe_assigned_flag, &added);
if (added) locals_.Add(var);
return var;
@@ -796,6 +848,7 @@ void Scope::PropagateUsageFlagsToScope(Scope* other) {
DCHECK(!already_resolved_);
DCHECK(!other->already_resolved_);
if (calls_eval()) other->RecordEvalCall();
if (inner_scope_calls_eval_) other->inner_scope_calls_eval_ = true;
}
Variable* Scope::LookupInScopeInfo(const AstRawString* name) {
@@ -869,12 +922,13 @@ Variable* DeclarationScope::DeclareParameter(
DCHECK(is_function_scope() || is_module_scope());
DCHECK(!has_rest_);
DCHECK(!is_optional || !is_rest);
DCHECK(!is_being_lazily_parsed_);
DCHECK(!was_lazily_parsed_);
Variable* var;
if (mode == TEMPORARY) {
var = NewTemporary(name);
} else {
var =
Declare(zone(), this, name, mode, NORMAL_VARIABLE, kCreatedInitialized);
var = Declare(zone(), name, mode);
// TODO(wingo): Avoid O(n^2) check.
*is_duplicate = IsDeclaredParameter(name);
}
@@ -894,8 +948,9 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
// introduced during variable allocation, and TEMPORARY variables are
// allocated via NewTemporary().
DCHECK(IsDeclaredVariableMode(mode));
return Declare(zone(), this, name, mode, kind, init_flag,
maybe_assigned_flag);
DCHECK(!GetDeclarationScope()->is_being_lazily_parsed());
DCHECK(!GetDeclarationScope()->was_lazily_parsed());
return Declare(zone(), name, mode, kind, init_flag, maybe_assigned_flag);
}
Variable* Scope::DeclareVariable(
@@ -904,6 +959,8 @@ Variable* Scope::DeclareVariable(
bool* sloppy_mode_block_scope_function_redefinition, bool* ok) {
DCHECK(IsDeclaredVariableMode(mode));
DCHECK(!already_resolved_);
DCHECK(!GetDeclarationScope()->is_being_lazily_parsed());
DCHECK(!GetDeclarationScope()->was_lazily_parsed());
if (mode == VAR && !is_declaration_scope()) {
return GetDeclarationScope()->DeclareVariable(
@@ -1002,6 +1059,28 @@ Variable* Scope::DeclareVariable(
return var;
}
void Scope::DeclareVariableName(const AstRawString* name, VariableMode mode) {
DCHECK(IsDeclaredVariableMode(mode));
DCHECK(!already_resolved_);
DCHECK(GetDeclarationScope()->is_being_lazily_parsed());
if (mode == VAR && !is_declaration_scope()) {
return GetDeclarationScope()->DeclareVariableName(name, mode);
}
DCHECK(!is_with_scope());
DCHECK(!is_eval_scope());
// Unlike DeclareVariable, DeclareVariableName allows declaring variables in
// catch scopes: Parser::RewriteCatchPattern bypasses DeclareVariable by
// calling DeclareLocal directly, and it doesn't make sense to add a similar
// bypass mechanism for PreParser.
DCHECK(is_declaration_scope() || (IsLexicalVariableMode(mode) &&
(is_block_scope() || is_catch_scope())));
DCHECK(scope_info_.is_null());
// Declare the variable in the declaration scope.
variables_.DeclareName(zone(), name, mode);
}
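DeclareVariableName feeds VariableMap::DeclareName, which records only that a name is bound instead of materializing a Variable object, keeping lazily parsed functions cheap. A rough sketch of the two declaration paths, with a hypothetical ScopeSketch class and std containers standing in for V8's zone-allocated structures:

#include <cassert>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>

struct Variable { std::string name; };

class ScopeSketch {
 public:
  // Eager-parse path: build a full Variable for the name.
  Variable* Declare(const std::string& name) {
    auto& slot = variables_[name];
    if (!slot) slot = std::make_unique<Variable>(Variable{name});
    return slot.get();
  }
  // Preparse path: remember only that the name is bound.
  void DeclareName(const std::string& name) { names_.insert(name); }
  bool Knows(const std::string& name) const {
    return variables_.count(name) || names_.count(name);
  }

 private:
  std::unordered_map<std::string, std::unique_ptr<Variable>> variables_;
  std::unordered_set<std::string> names_;
};

int main() {
  ScopeSketch s;
  s.Declare("x");      // eager parse: full Variable
  s.DeclareName("y");  // preparse: name only
  assert(s.Knows("x") && s.Knows("y"));
}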
VariableProxy* Scope::NewUnresolved(AstNodeFactory* factory,
const AstRawString* name,
int start_position, VariableKind kind) {
@@ -1009,7 +1088,7 @@ VariableProxy* Scope::NewUnresolved(AstNodeFactory* factory,
// the same name because they may be removed selectively via
// RemoveUnresolved().
DCHECK(!already_resolved_);
DCHECK_EQ(!needs_migration_, factory->zone() == zone());
DCHECK_EQ(factory->zone(), zone());
VariableProxy* proxy = factory->NewVariableProxy(name, kind, start_position);
proxy->set_next_unresolved(unresolved_);
unresolved_ = proxy;
@@ -1026,8 +1105,7 @@ void Scope::AddUnresolved(VariableProxy* proxy) {
Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name,
VariableKind kind) {
DCHECK(is_script_scope());
return variables_.Declare(zone(), this, name, DYNAMIC_GLOBAL, kind,
kCreatedInitialized);
return variables_.Declare(zone(), this, name, DYNAMIC_GLOBAL, kind);
}
@@ -1050,26 +1128,6 @@ bool Scope::RemoveUnresolved(VariableProxy* var) {
return false;
}
bool Scope::RemoveUnresolved(const AstRawString* name) {
if (unresolved_ != nullptr && unresolved_->raw_name() == name) {
VariableProxy* removed = unresolved_;
unresolved_ = unresolved_->next_unresolved();
removed->set_next_unresolved(nullptr);
return true;
}
VariableProxy* current = unresolved_;
while (current != nullptr) {
VariableProxy* next = current->next_unresolved();
if (next != nullptr && next->raw_name() == name) {
current->set_next_unresolved(next->next_unresolved());
next->set_next_unresolved(nullptr);
return true;
}
current = next;
}
return false;
}
Variable* Scope::NewTemporary(const AstRawString* name) {
DeclarationScope* scope = GetClosureScope();
Variable* var = new (zone())
@@ -1157,9 +1215,9 @@ bool Scope::AllowsLazyParsingWithoutUnresolvedVariables(
// guaranteed to be correct.
for (const Scope* s = this; s != outer; s = s->outer_scope_) {
// Eval forces context allocation on all outer scopes, so we don't need to
// look at those scopes. Sloppy eval makes all top-level variables dynamic,
// whereas strict-mode requires context allocation.
if (s->is_eval_scope()) return !is_strict(s->language_mode());
// look at those scopes. Sloppy eval makes top-level non-lexical variables
// dynamic, whereas strict-mode requires context allocation.
if (s->is_eval_scope()) return is_sloppy(s->language_mode());
// Catch scopes force context allocation of all variables.
if (s->is_catch_scope()) continue;
// With scopes do not introduce variables that need allocation.
@@ -1276,7 +1334,7 @@ Scope* Scope::GetOuterScopeWithContext() {
Handle<StringSet> DeclarationScope::CollectNonLocals(
ParseInfo* info, Handle<StringSet> non_locals) {
VariableProxy* free_variables = FetchFreeVariables(this, true, info);
VariableProxy* free_variables = FetchFreeVariables(this, info);
for (VariableProxy* proxy = free_variables; proxy != nullptr;
proxy = proxy->next_unresolved()) {
non_locals = StringSet::Add(non_locals, proxy->name());
@@ -1292,21 +1350,30 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
params_.Clear();
decls_.Clear();
locals_.Clear();
sloppy_block_function_map_.Clear();
variables_.Clear();
// Make sure we won't walk the scope tree from here on.
inner_scope_ = nullptr;
unresolved_ = nullptr;
if (aborted && !IsArrowFunction(function_kind_)) {
DeclareDefaultFunctionVariables(ast_value_factory);
if (aborted) {
// Prepare scope for use in the outer zone.
zone_ = ast_value_factory->zone();
variables_.Reset(ZoneAllocationPolicy(zone_));
sloppy_block_function_map_.Reset(ZoneAllocationPolicy(zone_));
if (!IsArrowFunction(function_kind_)) {
DeclareDefaultFunctionVariables(ast_value_factory);
}
} else {
// Make sure this scope isn't used for allocation anymore.
zone_ = nullptr;
variables_.Invalidate();
sloppy_block_function_map_.Invalidate();
}
#ifdef DEBUG
needs_migration_ = false;
is_being_lazily_parsed_ = false;
#endif
is_lazily_parsed_ = !aborted;
was_lazily_parsed_ = !aborted;
}
void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
@@ -1317,9 +1384,8 @@ void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
// Try to resolve unresolved variables for this Scope and migrate those
// which cannot be resolved inside. It doesn't make sense to try to resolve
// them in the outer Scopes here, because they are incomplete.
for (VariableProxy* proxy =
FetchFreeVariables(this, !FLAG_lazy_inner_functions);
proxy != nullptr; proxy = proxy->next_unresolved()) {
for (VariableProxy* proxy = FetchFreeVariables(this); proxy != nullptr;
proxy = proxy->next_unresolved()) {
DCHECK(!proxy->is_resolved());
VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy);
copy->set_next_unresolved(unresolved);
@@ -1339,8 +1405,10 @@ void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
}
#ifdef DEBUG
static const char* Header(ScopeType scope_type, FunctionKind function_kind,
bool is_declaration_scope) {
namespace {
const char* Header(ScopeType scope_type, FunctionKind function_kind,
bool is_declaration_scope) {
switch (scope_type) {
case EVAL_SCOPE: return "eval";
// TODO(adamk): Should we print concise method scopes specially?
@@ -1359,18 +1427,13 @@ static const char* Header(ScopeType scope_type, FunctionKind function_kind,
return NULL;
}
void Indent(int n, const char* str) { PrintF("%*s%s", n, "", str); }
static void Indent(int n, const char* str) {
PrintF("%*s%s", n, "", str);
}
static void PrintName(const AstRawString* name) {
void PrintName(const AstRawString* name) {
PrintF("%.*s", name->length(), name->raw_data());
}
static void PrintLocation(Variable* var) {
void PrintLocation(Variable* var) {
switch (var->location()) {
case VariableLocation::UNALLOCATED:
break;
@@ -1392,45 +1455,48 @@ static void PrintLocation(Variable* var) {
}
}
static void PrintVar(int indent, Variable* var) {
if (var->is_used() || !var->IsUnallocated()) {
Indent(indent, VariableMode2String(var->mode()));
PrintF(" ");
if (var->raw_name()->IsEmpty())
PrintF(".%p", reinterpret_cast<void*>(var));
else
PrintName(var->raw_name());
PrintF("; // ");
PrintLocation(var);
bool comma = !var->IsUnallocated();
if (var->has_forced_context_allocation()) {
if (comma) PrintF(", ");
PrintF("forced context allocation");
comma = true;
}
if (var->maybe_assigned() == kNotAssigned) {
if (comma) PrintF(", ");
PrintF("never assigned");
}
PrintF("\n");
void PrintVar(int indent, Variable* var) {
Indent(indent, VariableMode2String(var->mode()));
PrintF(" ");
if (var->raw_name()->IsEmpty())
PrintF(".%p", reinterpret_cast<void*>(var));
else
PrintName(var->raw_name());
PrintF("; // ");
PrintLocation(var);
bool comma = !var->IsUnallocated();
if (var->has_forced_context_allocation()) {
if (comma) PrintF(", ");
PrintF("forced context allocation");
comma = true;
}
if (var->maybe_assigned() == kNotAssigned) {
if (comma) PrintF(", ");
PrintF("never assigned");
}
PrintF("\n");
}
static void PrintMap(int indent, VariableMap* map, bool locals) {
void PrintMap(int indent, const char* label, VariableMap* map, bool locals,
Variable* function_var) {
bool printed_label = false;
for (VariableMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
if (var == function_var) continue;
bool local = !IsDynamicVariableMode(var->mode());
if (locals ? local : !local) {
if (var == nullptr) {
Indent(indent, "<?>\n");
} else {
PrintVar(indent, var);
if ((locals ? local : !local) &&
(var->is_used() || !var->IsUnallocated())) {
if (!printed_label) {
Indent(indent, label);
printed_label = true;
}
PrintVar(indent, var);
}
}
}
} // anonymous namespace
void DeclarationScope::PrintParameters() {
PrintF(" (");
for (int i = 0; i < params_.length(); i++) {
@@ -1487,9 +1553,12 @@ void Scope::Print(int n) {
if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
if (is_declaration_scope()) {
DeclarationScope* scope = AsDeclarationScope();
if (scope->is_lazily_parsed()) Indent(n1, "// lazily parsed\n");
if (scope->was_lazily_parsed()) Indent(n1, "// lazily parsed\n");
if (scope->ShouldEagerCompile()) Indent(n1, "// will be compiled\n");
}
if (has_forced_context_allocation()) {
Indent(n1, "// forces context allocation\n");
}
if (num_stack_slots_ > 0) {
Indent(n1, "// ");
PrintF("%d stack slots\n", num_stack_slots_);
@@ -1505,12 +1574,22 @@ void Scope::Print(int n) {
PrintVar(n1, function);
}
if (variables_.Start() != NULL) {
Indent(n1, "// local vars:\n");
PrintMap(n1, &variables_, true);
// Print temporaries.
{
bool printed_header = false;
for (Variable* local : locals_) {
if (local->mode() != TEMPORARY) continue;
if (!printed_header) {
printed_header = true;
Indent(n1, "// temporary vars:\n");
}
PrintVar(n1, local);
}
}
Indent(n1, "// dynamic vars:\n");
PrintMap(n1, &variables_, false);
if (variables_.occupancy() > 0) {
PrintMap(n1, "// local vars:\n", &variables_, true, function);
PrintMap(n1, "// dynamic vars:\n", &variables_, false, function);
}
// Print inner scopes (disable by providing negative n).
@@ -1539,6 +1618,12 @@ void Scope::CheckScopePositions() {
void Scope::CheckZones() {
DCHECK(!needs_migration_);
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
if (scope->is_declaration_scope() &&
scope->AsDeclarationScope()->was_lazily_parsed()) {
DCHECK_NULL(scope->zone());
DCHECK_NULL(scope->inner_scope_);
continue;
}
CHECK_EQ(scope->zone(), zone());
scope->CheckZones();
}
@@ -1548,8 +1633,7 @@ void Scope::CheckZones() {
Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
// Declare a new non-local.
DCHECK(IsDynamicVariableMode(mode));
Variable* var = variables_.Declare(zone(), NULL, name, mode, NORMAL_VARIABLE,
kCreatedInitialized);
Variable* var = variables_.Declare(zone(), nullptr, name, mode);
// Allocate it by giving it a dynamic lookup.
var->AllocateTo(VariableLocation::LOOKUP, -1);
return var;
@@ -1590,6 +1674,13 @@ Variable* Scope::LookupRecursive(VariableProxy* proxy, Scope* outer_scope_end) {
// The variable could not be resolved statically.
if (var == nullptr) return var;
// TODO(marja): Separate LookupRecursive for preparsed scopes better.
if (var == kDummyPreParserVariable || var == kDummyPreParserLexicalVariable) {
DCHECK(GetDeclarationScope()->is_being_lazily_parsed());
DCHECK(FLAG_lazy_inner_functions);
return var;
}
if (is_function_scope() && !var->is_dynamic()) {
var->ForceContextAllocation();
}
@@ -1641,34 +1732,20 @@ void Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy) {
DCHECK(!proxy->is_resolved());
Variable* var = LookupRecursive(proxy, nullptr);
ResolveTo(info, proxy, var);
if (FLAG_lazy_inner_functions) {
if (info != nullptr && info->is_native()) return;
// Pessimistically force context allocation for all variables to which inner
// scope variables could potentially resolve to.
Scope* scope = GetClosureScope()->outer_scope_;
while (scope != nullptr && scope->scope_info_.is_null()) {
var = scope->LookupLocal(proxy->raw_name());
if (var != nullptr) {
// Since we don't lazy parse inner arrow functions, inner functions
// cannot refer to the outer "this".
if (!var->is_dynamic() && !var->is_this() &&
!var->has_forced_context_allocation()) {
var->ForceContextAllocation();
var->set_is_used();
// We don't know what the (potentially lazy parsed) inner function
// does with the variable; pessimistically assume that it's assigned.
var->set_maybe_assigned();
}
}
scope = scope->outer_scope_;
}
}
}
namespace {
bool AccessNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) {
if (var->mode() == DYNAMIC_LOCAL) {
// Dynamically introduced variables never need a hole check (since they're
// VAR bindings, either from var or function declarations), but the variable
// they shadow might need a hole check, which we want to do if we decide
// that no shadowing variable was dynamically introduced.
DCHECK(!var->binding_needs_init());
return AccessNeedsHoleCheck(var->local_if_not_shadowed(), proxy, scope);
}
if (!var->binding_needs_init()) {
return false;
}
@@ -1703,8 +1780,7 @@ bool AccessNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) {
}
if (var->is_this()) {
DCHECK(
IsSubclassConstructor(scope->GetDeclarationScope()->function_kind()));
DCHECK(IsDerivedConstructor(scope->GetDeclarationScope()->function_kind()));
// TODO(littledan): implement 'this' hole check elimination.
return true;
}
@@ -1749,37 +1825,65 @@ void Scope::ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var) {
void Scope::ResolveVariablesRecursively(ParseInfo* info) {
DCHECK(info->script_scope()->is_script_scope());
// Lazy parsed declaration scopes are already partially analyzed. If there are
// unresolved references remaining, they just need to be resolved in outer
// scopes.
if (is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()) {
DCHECK(variables_.occupancy() == 0);
for (VariableProxy* proxy = unresolved_; proxy != nullptr;
proxy = proxy->next_unresolved()) {
Variable* var = outer_scope()->LookupRecursive(proxy, nullptr);
if (!var->is_dynamic()) {
var->set_is_used();
var->ForceContextAllocation();
if (proxy->is_assigned()) var->set_maybe_assigned();
}
}
} else {
// Resolve unresolved variables for this scope.
for (VariableProxy* proxy = unresolved_; proxy != nullptr;
proxy = proxy->next_unresolved()) {
ResolveVariable(info, proxy);
}
// Resolve unresolved variables for this scope.
for (VariableProxy* proxy = unresolved_; proxy != nullptr;
proxy = proxy->next_unresolved()) {
ResolveVariable(info, proxy);
}
// Resolve unresolved variables for inner scopes.
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
scope->ResolveVariablesRecursively(info);
// Resolve unresolved variables for inner scopes.
for (Scope* scope = inner_scope_; scope != nullptr;
scope = scope->sibling_) {
scope->ResolveVariablesRecursively(info);
}
}
}
VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
bool try_to_resolve, ParseInfo* info,
ParseInfo* info,
VariableProxy* stack) {
// Lazy parsed declaration scopes are already partially analyzed. If there are
// unresolved references remaining, they just need to be resolved in outer
// scopes.
Scope* lookup =
is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()
? outer_scope()
: this;
for (VariableProxy *proxy = unresolved_, *next = nullptr; proxy != nullptr;
proxy = next) {
next = proxy->next_unresolved();
DCHECK(!proxy->is_resolved());
Variable* var = nullptr;
if (try_to_resolve) {
var = LookupRecursive(proxy, max_outer_scope->outer_scope());
}
Variable* var =
lookup->LookupRecursive(proxy, max_outer_scope->outer_scope());
if (var == nullptr) {
proxy->set_next_unresolved(stack);
stack = proxy;
} else if (info != nullptr) {
ResolveTo(info, proxy, var);
} else {
var->set_is_used();
} else if (var != kDummyPreParserVariable &&
var != kDummyPreParserLexicalVariable) {
if (info != nullptr) {
// In this case we need to leave scopes in a way that they can be
// allocated. If we resolved variables from lazy parsed scopes, we need
// to context allocate the var.
ResolveTo(info, proxy, var);
if (!var->is_dynamic() && lookup != this) var->ForceContextAllocation();
} else {
var->set_is_used();
}
}
}
@@ -1787,8 +1891,7 @@ VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
unresolved_ = nullptr;
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
stack =
scope->FetchFreeVariables(max_outer_scope, try_to_resolve, info, stack);
stack = scope->FetchFreeVariables(max_outer_scope, info, stack);
}
return stack;
@@ -1823,7 +1926,10 @@ bool Scope::MustAllocateInContext(Variable* var) {
if (has_forced_context_allocation()) return true;
if (var->mode() == TEMPORARY) return false;
if (is_catch_scope()) return true;
if (is_script_scope() && IsLexicalVariableMode(var->mode())) return true;
if ((is_script_scope() || is_eval_scope()) &&
IsLexicalVariableMode(var->mode())) {
return true;
}
return var->has_forced_context_allocation() || inner_scope_calls_eval_;
}
@@ -1880,6 +1986,7 @@ void DeclarationScope::AllocateParameterLocals() {
DCHECK_EQ(this, var->scope());
if (uses_sloppy_arguments) {
var->set_is_used();
var->set_maybe_assigned();
var->ForceContextAllocation();
}
AllocateParameter(var, i);
@@ -1969,7 +2076,7 @@ void Scope::AllocateVariablesRecursively() {
DCHECK(!already_resolved_);
DCHECK_EQ(0, num_stack_slots_);
// Don't allocate variables of preparsed scopes.
if (is_declaration_scope() && AsDeclarationScope()->is_lazily_parsed()) {
if (is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()) {
return;
}
@@ -1994,9 +2101,9 @@ void Scope::AllocateVariablesRecursively() {
// Force allocation of a context for this scope if necessary. For a 'with'
// scope and for a function scope that makes an 'eval' call we need a context,
// even if no local variables were statically allocated in the scope.
// Likewise for modules.
// Likewise for modules and function scopes representing asm.js modules.
bool must_have_context =
is_with_scope() || is_module_scope() ||
is_with_scope() || is_module_scope() || IsAsmModule() ||
(is_function_scope() && calls_sloppy_eval()) ||
(is_block_scope() && is_declaration_scope() && calls_sloppy_eval());

118
deps/v8/src/ast/scopes.h

@@ -9,6 +9,7 @@
#include "src/base/hashmap.h"
#include "src/globals.h"
#include "src/objects.h"
#include "src/objects/scope-info.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -20,6 +21,7 @@ class AstRawString;
class Declaration;
class ParseInfo;
class SloppyBlockFunctionStatement;
class Statement;
class StringSet;
class VariableProxy;
@@ -28,11 +30,16 @@ class VariableMap: public ZoneHashMap {
public:
explicit VariableMap(Zone* zone);
Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name,
VariableMode mode, VariableKind kind,
InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
bool* added = nullptr);
Variable* Declare(
Zone* zone, Scope* scope, const AstRawString* name, VariableMode mode,
VariableKind kind = NORMAL_VARIABLE,
InitializationFlag initialization_flag = kCreatedInitialized,
MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
bool* added = nullptr);
// Records that "name" exists (if not recorded yet) but doesn't create a
// Variable. Useful for preparsing.
void DeclareName(Zone* zone, const AstRawString* name, VariableMode mode);
Variable* Lookup(const AstRawString* name);
void Remove(Variable* var);
@@ -43,9 +50,24 @@ class VariableMap: public ZoneHashMap {
// Sloppy block-scoped function declarations to var-bind
class SloppyBlockFunctionMap : public ZoneHashMap {
public:
class Delegate : public ZoneObject {
public:
explicit Delegate(Scope* scope,
SloppyBlockFunctionStatement* statement = nullptr)
: scope_(scope), statement_(statement), next_(nullptr) {}
void set_statement(Statement* statement);
void set_next(Delegate* next) { next_ = next; }
Delegate* next() const { return next_; }
Scope* scope() const { return scope_; }
private:
Scope* scope_;
SloppyBlockFunctionStatement* statement_;
Delegate* next_;
};
explicit SloppyBlockFunctionMap(Zone* zone);
void Declare(Zone* zone, const AstRawString* name,
SloppyBlockFunctionStatement* statement);
void Declare(Zone* zone, const AstRawString* name, Delegate* delegate);
};
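The Delegate class above chains every sloppy block function declaration of a given name into an intrusive singly linked list hanging off the map entry, so the later hoisting pass can visit all of them. A simplified sketch of that chaining idea, with std::unordered_map and a plain int scope id standing in for the zone-allocated map and Scope*:

#include <cassert>
#include <string>
#include <unordered_map>

struct Delegate {
  int scope_id;             // stand-in for Scope*
  Delegate* next = nullptr;
};

class SloppyBlockFunctionMapSketch {
 public:
  void Declare(const std::string& name, Delegate* d) {
    d->next = heads_[name];  // push onto the per-name list
    heads_[name] = d;
  }
  Delegate* Lookup(const std::string& name) {
    auto it = heads_.find(name);
    return it == heads_.end() ? nullptr : it->second;
  }

 private:
  std::unordered_map<std::string, Delegate*> heads_;
};

int main() {
  SloppyBlockFunctionMapSketch map;
  Delegate a{1}, b{2};
  map.Declare("f", &a);
  map.Declare("f", &b);  // both declarations of "f" are kept
  assert(map.Lookup("f") == &b && map.Lookup("f")->next == &a);
}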
enum class AnalyzeMode { kRegular, kDebugger };
@@ -148,7 +170,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// Declare a local variable in this scope. If the variable has been
// declared before, the previously declared variable is returned.
Variable* DeclareLocal(const AstRawString* name, VariableMode mode,
InitializationFlag init_flag, VariableKind kind,
InitializationFlag init_flag = kCreatedInitialized,
VariableKind kind = NORMAL_VARIABLE,
MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
Variable* DeclareVariable(Declaration* declaration, VariableMode mode,
@@ -157,6 +180,2 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
bool* sloppy_mode_block_scope_function_redefinition,
bool* ok);
void DeclareVariableName(const AstRawString* name, VariableMode mode);
// Declarations list.
ThreadedList<Declaration>* declarations() { return &decls_; }
@@ -177,7 +202,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// allocated globally as a "ghost" variable. RemoveUnresolved removes
// such a variable again if it was added; otherwise this is a no-op.
bool RemoveUnresolved(VariableProxy* var);
bool RemoveUnresolved(const AstRawString* name);
// Creates a new temporary variable in this scope's TemporaryScope. The
// name is only used for printing and cannot be used to find the variable.
@@ -207,14 +231,11 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// Scope-specific info.
// Inform the scope and outer scopes that the corresponding code contains an
// eval call. We don't record eval calls from inner scopes in the outermost
// script scope, as we only see those when parsing eagerly. If we recorded the
// calls then, the outermost script scope would look different depending on
// whether we parsed eagerly or not, which is undesirable.
// eval call.
void RecordEvalCall() {
scope_calls_eval_ = true;
inner_scope_calls_eval_ = true;
for (Scope* scope = outer_scope(); scope && !scope->is_script_scope();
for (Scope* scope = outer_scope(); scope != nullptr;
scope = scope->outer_scope()) {
scope->inner_scope_calls_eval_ = true;
}
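The rewritten RecordEvalCall no longer stops at the script scope: the flag now propagates through every enclosing scope. A compact sketch of that propagation pattern on a hypothetical Scope struct:

#include <cassert>

struct Scope {
  Scope* outer = nullptr;
  bool calls_eval = false;
  bool inner_scope_calls_eval = false;

  void RecordEvalCall() {
    calls_eval = true;
    inner_scope_calls_eval = true;
    // Walk the whole outer-scope chain, marking each enclosing scope.
    for (Scope* s = outer; s != nullptr; s = s->outer) {
      s->inner_scope_calls_eval = true;
    }
  }
};

int main() {
  Scope script, fn, block;
  fn.outer = &script;
  block.outer = &fn;
  block.RecordEvalCall();
  assert(fn.inner_scope_calls_eval && script.inner_scope_calls_eval);
  assert(!script.calls_eval);  // only the calling scope itself calls eval
}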
@@ -303,6 +324,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
bool calls_sloppy_eval() const {
return scope_calls_eval_ && is_sloppy(language_mode());
}
bool inner_scope_calls_eval() const { return inner_scope_calls_eval_; }
bool IsAsmModule() const;
bool IsAsmFunction() const;
// Does this scope have the potential to execute declarations non-linearly?
@@ -423,6 +445,22 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
void set_is_debug_evaluate_scope() { is_debug_evaluate_scope_ = true; }
bool is_debug_evaluate_scope() const { return is_debug_evaluate_scope_; }
bool RemoveInnerScope(Scope* inner_scope) {
DCHECK_NOT_NULL(inner_scope);
if (inner_scope == inner_scope_) {
inner_scope_ = inner_scope_->sibling_;
return true;
}
for (Scope* scope = inner_scope_; scope != nullptr;
scope = scope->sibling_) {
if (scope->sibling_ == inner_scope) {
scope->sibling_ = scope->sibling_->sibling_;
return true;
}
}
return false;
}
protected:
explicit Scope(Zone* zone);
@@ -431,10 +469,11 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
}
private:
Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name,
VariableMode mode, VariableKind kind,
InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
Variable* Declare(
Zone* zone, const AstRawString* name, VariableMode mode,
VariableKind kind = NORMAL_VARIABLE,
InitializationFlag initialization_flag = kCreatedInitialized,
MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
// This method should only be invoked on scopes created during parsing (i.e.,
// not deserialized from a context). Also, since NeedsContext() is only
@@ -527,7 +566,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// list along the way, so full resolution cannot be done afterwards.
// If a ParseInfo* is passed, non-free variables will be resolved.
VariableProxy* FetchFreeVariables(DeclarationScope* max_outer_scope,
bool try_to_resolve = true,
ParseInfo* info = nullptr,
VariableProxy* stack = nullptr);
@@ -556,30 +594,15 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
Handle<ScopeInfo> scope_info);
void AddInnerScope(Scope* inner_scope) {
DCHECK_EQ(!needs_migration_, inner_scope->zone() == zone());
inner_scope->sibling_ = inner_scope_;
inner_scope_ = inner_scope;
inner_scope->outer_scope_ = this;
}
void RemoveInnerScope(Scope* inner_scope) {
DCHECK_NOT_NULL(inner_scope);
if (inner_scope == inner_scope_) {
inner_scope_ = inner_scope_->sibling_;
return;
}
for (Scope* scope = inner_scope_; scope != nullptr;
scope = scope->sibling_) {
if (scope->sibling_ == inner_scope) {
scope->sibling_ = scope->sibling_->sibling_;
return;
}
}
}
void SetDefaults();
friend class DeclarationScope;
friend class ScopeTestHelper;
};
class DeclarationScope : public Scope {
@@ -616,7 +639,15 @@ class DeclarationScope : public Scope {
IsClassConstructor(function_kind())));
}
bool is_lazily_parsed() const { return is_lazily_parsed_; }
bool was_lazily_parsed() const { return was_lazily_parsed_; }
#ifdef DEBUG
void set_is_being_lazily_parsed(bool is_being_lazily_parsed) {
is_being_lazily_parsed_ = is_being_lazily_parsed;
}
bool is_being_lazily_parsed() const { return is_being_lazily_parsed_; }
#endif
bool ShouldEagerCompile() const;
void set_should_eager_compile();
@@ -629,7 +660,7 @@ class DeclarationScope : public Scope {
bool asm_module() const { return asm_module_; }
void set_asm_module();
bool asm_function() const { return asm_function_; }
void set_asm_function() { asm_module_ = true; }
void set_asm_function() { asm_function_ = true; }
void DeclareThis(AstValueFactory* ast_value_factory);
void DeclareArguments(AstValueFactory* ast_value_factory);
@@ -736,10 +767,9 @@ class DeclarationScope : public Scope {
// initializers.
void AddLocal(Variable* var);
void DeclareSloppyBlockFunction(const AstRawString* name,
SloppyBlockFunctionStatement* statement) {
sloppy_block_function_map_.Declare(zone(), name, statement);
}
void DeclareSloppyBlockFunction(
const AstRawString* name, Scope* scope,
SloppyBlockFunctionStatement* statement = nullptr);
// Go through sloppy_block_function_map_ and hoist those (into this scope)
// which should be hoisted.
@@ -819,7 +849,11 @@ class DeclarationScope : public Scope {
// This scope uses "super" property ('super.foo').
bool scope_uses_super_property_ : 1;
bool should_eager_compile_ : 1;
bool is_lazily_parsed_ : 1;
// Set to true after we have finished lazy parsing the scope.
bool was_lazily_parsed_ : 1;
#if DEBUG
bool is_being_lazily_parsed_ : 1;
#endif
// Parameter list in source order.
ZoneList<Variable*> params_;

1
deps/v8/src/ast/variables.cc

@@ -6,6 +6,7 @@
#include "src/ast/scopes.h"
#include "src/globals.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {

9
deps/v8/src/bailout-reason.h

@@ -88,6 +88,7 @@ namespace internal {
"The function_data field should be a BytecodeArray on interpreter entry") \
V(kGeneratedCodeIsTooLarge, "Generated code is too large") \
V(kGenerator, "Generator") \
V(kGetIterator, "GetIterator") \
V(kGlobalFunctionsMustHaveInitialMap, \
"Global functions must have initial map") \
V(kGraphBuildingFailed, "Optimized graph construction failed") \
@@ -125,6 +126,7 @@ namespace internal {
V(kLookupVariableInCountOperation, "Lookup variable in count operation") \
V(kMapBecameDeprecated, "Map became deprecated") \
V(kMapBecameUnstable, "Map became unstable") \
V(kMissingBytecodeArray, "Missing bytecode array from function") \
V(kNativeFunctionLiteral, "Native function literal") \
V(kNeedSmiLiteral, "Need a Smi literal here") \
V(kNoCasesLeft, "No cases left") \
@@ -138,7 +140,6 @@ namespace internal {
V(kNotEnoughSpillSlotsForOsr, "Not enough spill slots for OSR") \
V(kNotEnoughVirtualRegistersRegalloc, \
"Not enough virtual registers (regalloc)") \
V(kObjectFoundInSmiOnlyArray, "Object found in smi-only array") \
V(kObjectLiteralWithComplexProperty, "Object literal with complex property") \
V(kOffsetOutOfRange, "Offset out of range") \
V(kOperandIsANumber, "Operand is a number") \
@@ -165,7 +166,7 @@ namespace internal {
V(kObjectNotTagged, "The object is not tagged") \
V(kOptimizationDisabled, "Optimization disabled") \
V(kOptimizationDisabledForTest, "Optimization disabled for test") \
V(kOptimizedTooManyTimes, "Optimized too many times") \
V(kDeoptimizedTooManyTimes, "Deoptimized too many times") \
V(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister, \
"Out of virtual registers while trying to allocate temp register") \
V(kParseScopeError, "Parse/scope error") \
@@ -263,7 +264,9 @@ namespace internal {
V(kWrongArgumentCountForInvokeIntrinsic, \
"Wrong number of arguments for intrinsic") \
V(kShouldNotDirectlyEnterOsrFunction, \
"Should not directly enter OSR-compiled function")
"Should not directly enter OSR-compiled function") \
V(kUnexpectedReturnFromWasmTrap, \
"Should not return after throwing a wasm trap")
#define ERROR_MESSAGES_CONSTANTS(C, T) C,
enum BailoutReason {

3
deps/v8/src/base.isolate

@@ -4,6 +4,9 @@
{
'includes': [
'../third_party/icu/icu.isolate',
# MSVS runtime libraries.
'../gypfiles/win/msvs_dependencies.isolate',
],
'conditions': [
['v8_use_snapshot=="true" and v8_use_external_startup_data==1', {

10
deps/v8/src/base/cpu.cc

@@ -24,6 +24,9 @@
#ifndef POWER_8
#define POWER_8 0x10000
#endif
#ifndef POWER_9
#define POWER_9 0x20000
#endif
#endif
#if V8_OS_POSIX
#include <unistd.h> // sysconf()
@@ -670,7 +673,9 @@ CPU::CPU()
part_ = -1;
if (auxv_cpu_type) {
if (strcmp(auxv_cpu_type, "power8") == 0) {
if (strcmp(auxv_cpu_type, "power9") == 0) {
part_ = PPC_POWER9;
} else if (strcmp(auxv_cpu_type, "power8") == 0) {
part_ = PPC_POWER8;
} else if (strcmp(auxv_cpu_type, "power7") == 0) {
part_ = PPC_POWER7;
@@ -689,6 +694,9 @@ CPU::CPU()
#elif V8_OS_AIX
switch (_system_configuration.implementation) {
case POWER_9:
part_ = PPC_POWER9;
break;
case POWER_8:
part_ = PPC_POWER8;
break;

1
deps/v8/src/base/cpu.h

@@ -69,6 +69,7 @@ class V8_BASE_EXPORT CPU final {
PPC_POWER6,
PPC_POWER7,
PPC_POWER8,
PPC_POWER9,
PPC_G4,
PPC_G5,
PPC_PA6T

17
deps/v8/src/base/hashmap.h

@@ -70,6 +70,14 @@ class TemplateHashMapImpl {
// Empties the hash map (occupancy() == 0).
void Clear();
// Empties the map and makes it unusable for allocation.
void Invalidate() {
AllocationPolicy::Delete(map_);
map_ = nullptr;
occupancy_ = 0;
capacity_ = 0;
}
// The number of (non-empty) entries in the table.
uint32_t occupancy() const { return occupancy_; }
@@ -89,6 +97,14 @@ class TemplateHashMapImpl {
Entry* Start() const;
Entry* Next(Entry* entry) const;
void Reset(AllocationPolicy allocator) {
Initialize(capacity_, allocator);
occupancy_ = 0;
}
protected:
void Initialize(uint32_t capacity, AllocationPolicy allocator);
private:
Entry* map_;
uint32_t capacity_;
@@ -102,7 +118,6 @@ class TemplateHashMapImpl {
Entry* FillEmptyEntry(Entry* entry, const Key& key, const Value& value,
uint32_t hash,
AllocationPolicy allocator = AllocationPolicy());
void Initialize(uint32_t capacity, AllocationPolicy allocator);
void Resize(AllocationPolicy allocator);
};
template <typename Key, typename Value, typename MatchFun,
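Invalidate() and Reset() support the ResetAfterPreparsing paths shown earlier: after a successful lazy parse the map is invalidated so any accidental reuse fails loudly, while an aborted parse resets it for reuse in the outer zone. A small sketch of that lifecycle, using a hypothetical VariableNames class backed by std::unordered_map rather than the zone-allocated table:

#include <cassert>
#include <string>
#include <unordered_map>

class VariableNames {
 public:
  void Declare(const std::string& name) {
    // After Invalidate(), any further use is a loud failure, not silent reuse.
    assert(map_ != nullptr && "map was invalidated");
    (*map_)[name]++;
  }
  // Free the table and leave the object deliberately unusable.
  void Invalidate() { delete map_; map_ = nullptr; }
  // Re-create an empty table so the object can be used again.
  void Reset() { delete map_; map_ = new std::unordered_map<std::string, int>(); }

 private:
  std::unordered_map<std::string, int>* map_ =
      new std::unordered_map<std::string, int>();
};

int main() {
  VariableNames names;
  names.Declare("x");
  names.Reset();       // aborted preparse: start over, still usable
  names.Declare("y");
  names.Invalidate();  // successful preparse: further use would assert
}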

2
deps/v8/src/base/iterator.h

@@ -7,8 +7,6 @@
#include <iterator>
#include "src/base/macros.h"
namespace v8 {
namespace base {

15
deps/v8/src/base/logging.cc

@@ -14,9 +14,8 @@ namespace v8 {
namespace base {
// Explicit instantiations for commonly used comparisons.
#define DEFINE_MAKE_CHECK_OP_STRING(type) \
template std::string* MakeCheckOpString<type, type>( \
type const&, type const&, char const*);
#define DEFINE_MAKE_CHECK_OP_STRING(type) \
template std::string* MakeCheckOpString<type, type>(type, type, char const*);
DEFINE_MAKE_CHECK_OP_STRING(int)
DEFINE_MAKE_CHECK_OP_STRING(long) // NOLINT(runtime/int)
DEFINE_MAKE_CHECK_OP_STRING(long long) // NOLINT(runtime/int)
@@ -29,11 +28,11 @@ DEFINE_MAKE_CHECK_OP_STRING(void const*)
// Explicit instantiations for floating point checks.
#define DEFINE_CHECK_OP_IMPL(NAME) \
template std::string* Check##NAME##Impl<float, float>( \
float const& lhs, float const& rhs, char const* msg); \
template std::string* Check##NAME##Impl<double, double>( \
double const& lhs, double const& rhs, char const* msg);
#define DEFINE_CHECK_OP_IMPL(NAME) \
template std::string* Check##NAME##Impl<float, float>(float lhs, float rhs, \
char const* msg); \
template std::string* Check##NAME##Impl<double, double>( \
double lhs, double rhs, char const* msg);
DEFINE_CHECK_OP_IMPL(EQ)
DEFINE_CHECK_OP_IMPL(NE)
DEFINE_CHECK_OP_IMPL(LE)

105
deps/v8/src/base/logging.h

@@ -55,13 +55,14 @@ namespace base {
// Helper macro for binary operators.
// Don't use this macro directly in your code, use CHECK_EQ et al below.
#define CHECK_OP(name, op, lhs, rhs) \
do { \
if (std::string* _msg = ::v8::base::Check##name##Impl( \
(lhs), (rhs), #lhs " " #op " " #rhs)) { \
V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", _msg->c_str()); \
delete _msg; \
} \
#define CHECK_OP(name, op, lhs, rhs) \
do { \
if (std::string* _msg = \
::v8::base::Check##name##Impl<decltype(lhs), decltype(rhs)>( \
(lhs), (rhs), #lhs " " #op " " #rhs)) { \
V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", _msg->c_str()); \
delete _msg; \
} \
} while (0)
#else
@@ -73,13 +74,22 @@ namespace base {
#endif
// Helper to determine how to pass values: Pass scalars and arrays by value,
// others by const reference. std::decay<T> provides the type which should be
// used to pass T by value, e.g. converts array to pointer and removes const,
// volatile and reference.
template <typename T>
struct PassType : public std::conditional<
std::is_scalar<typename std::decay<T>::type>::value,
typename std::decay<T>::type, T const&> {};
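A few static_asserts make the effect of PassType concrete; this sketch re-declares the same trait locally so that it compiles on its own:

#include <string>
#include <type_traits>

// Same std::conditional/std::decay combination as above: scalars travel
// by value, everything else by const reference.
template <typename T>
struct PassType
    : std::conditional<std::is_scalar<typename std::decay<T>::type>::value,
                       typename std::decay<T>::type, T const&> {};

static_assert(std::is_same<PassType<int&>::type, int>::value,
              "scalar: passed by value");
static_assert(std::is_same<PassType<char[4]>::type, char*>::value,
              "array decays to pointer, a scalar");
static_assert(std::is_same<PassType<std::string>::type, std::string const&>::value,
              "class type: passed by const reference");

int main() {}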
// Build the error message string. This is separate from the "Impl"
// function template because it is not performance critical and so can
// be out of line, while the "Impl" code should be inline. Caller
// takes ownership of the returned string.
template <typename Lhs, typename Rhs>
std::string* MakeCheckOpString(Lhs const& lhs, Rhs const& rhs,
std::string* MakeCheckOpString(typename PassType<Lhs>::type lhs,
typename PassType<Rhs>::type rhs,
char const* msg) {
std::ostringstream ss;
ss << msg << " (" << lhs << " vs. " << rhs << ")";
@@ -90,7 +100,7 @@ std::string* MakeCheckOpString(Lhs const& lhs, Rhs const& rhs,
// in logging.cc.
#define DEFINE_MAKE_CHECK_OP_STRING(type) \
extern template V8_BASE_EXPORT std::string* MakeCheckOpString<type, type>( \
type const&, type const&, char const*);
type, type, char const*);
DEFINE_MAKE_CHECK_OP_STRING(int)
DEFINE_MAKE_CHECK_OP_STRING(long) // NOLINT(runtime/int)
DEFINE_MAKE_CHECK_OP_STRING(long long) // NOLINT(runtime/int)
@@ -101,27 +111,77 @@ DEFINE_MAKE_CHECK_OP_STRING(char const*)
DEFINE_MAKE_CHECK_OP_STRING(void const*)
#undef DEFINE_MAKE_CHECK_OP_STRING
// is_signed_vs_unsigned::value is true if both types are integral, Lhs is
// signed, and Rhs is unsigned. False in all other cases.
template <typename Lhs, typename Rhs>
struct is_signed_vs_unsigned {
enum : bool {
value = std::is_integral<Lhs>::value && std::is_integral<Rhs>::value &&
std::is_signed<Lhs>::value && std::is_unsigned<Rhs>::value
};
};
// Same thing, other way around: Lhs is unsigned, Rhs signed.
template <typename Lhs, typename Rhs>
struct is_unsigned_vs_signed : public is_signed_vs_unsigned<Rhs, Lhs> {};
// Specialize the compare functions for signed vs. unsigned comparisons.
// std::enable_if ensures that this template is only instantiable if both Lhs
// and Rhs are integral types, and their signedness does not match.
#define MAKE_UNSIGNED(Type, value) \
static_cast<typename std::make_unsigned<Type>::type>(value)
#define DEFINE_SIGNED_MISMATCH_COMP(CHECK, NAME, IMPL) \
template <typename Lhs, typename Rhs> \
V8_INLINE typename std::enable_if<CHECK<Lhs, Rhs>::value, bool>::type \
Cmp##NAME##Impl(Lhs const& lhs, Rhs const& rhs) { \
return IMPL; \
}
DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, EQ,
lhs >= 0 && MAKE_UNSIGNED(Lhs, lhs) == rhs)
DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, LT,
lhs < 0 || MAKE_UNSIGNED(Lhs, lhs) < rhs)
DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, LE,
lhs <= 0 || MAKE_UNSIGNED(Lhs, lhs) <= rhs)
DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, NE, !CmpEQImpl(lhs, rhs))
DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, GT, !CmpLEImpl(lhs, rhs))
DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, GE, !CmpLTImpl(lhs, rhs))
DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, EQ, CmpEQImpl(rhs, lhs))
DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, NE, CmpNEImpl(rhs, lhs))
DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, LT, CmpGTImpl(rhs, lhs))
DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, LE, CmpGEImpl(rhs, lhs))
DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, GT, CmpLTImpl(rhs, lhs))
DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, GE, CmpLEImpl(rhs, lhs))
#undef MAKE_UNSIGNED
#undef DEFINE_SIGNED_MISMATCH_COMP
// Helper functions for CHECK_OP macro.
// The (int, int) specialization works around the issue that the compiler
// will not instantiate the template version of the function on values of
// unnamed enum type - see comment below.
// The (float, float) and (double, double) instantiations are explicitly
// externialized to ensure proper 32/64-bit comparisons on x86.
// externalized to ensure proper 32/64-bit comparisons on x86.
// The Cmp##NAME##Impl function is only instantiable if one of the two types is
// not integral or their signedness matches (i.e. whenever no specialization is
// required, see above). Otherwise it is disabled by the enable_if construct,
// and the compiler will pick a specialization from above.
#define DEFINE_CHECK_OP_IMPL(NAME, op) \
template <typename Lhs, typename Rhs> \
V8_INLINE std::string* Check##NAME##Impl(Lhs const& lhs, Rhs const& rhs, \
char const* msg) { \
return V8_LIKELY(lhs op rhs) ? nullptr : MakeCheckOpString(lhs, rhs, msg); \
V8_INLINE \
typename std::enable_if<!is_signed_vs_unsigned<Lhs, Rhs>::value && \
!is_unsigned_vs_signed<Lhs, Rhs>::value, \
bool>::type \
Cmp##NAME##Impl(typename PassType<Lhs>::type lhs, \
typename PassType<Rhs>::type rhs) { \
return lhs op rhs; \
} \
V8_INLINE std::string* Check##NAME##Impl(int lhs, int rhs, \
template <typename Lhs, typename Rhs> \
V8_INLINE std::string* Check##NAME##Impl(typename PassType<Lhs>::type lhs, \
typename PassType<Rhs>::type rhs, \
char const* msg) { \
return V8_LIKELY(lhs op rhs) ? nullptr : MakeCheckOpString(lhs, rhs, msg); \
bool cmp = Cmp##NAME##Impl<Lhs, Rhs>(lhs, rhs); \
return V8_LIKELY(cmp) ? nullptr \
: MakeCheckOpString<Lhs, Rhs>(lhs, rhs, msg); \
} \
extern template V8_BASE_EXPORT std::string* Check##NAME##Impl<float, float>( \
float const& lhs, float const& rhs, char const* msg); \
float lhs, float rhs, char const* msg); \
extern template V8_BASE_EXPORT std::string* \
Check##NAME##Impl<double, double>(double const& lhs, double const& rhs, \
Check##NAME##Impl<double, double>(double lhs, double rhs, \
char const* msg);
DEFINE_CHECK_OP_IMPL(EQ, ==)
DEFINE_CHECK_OP_IMPL(NE, !=)
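The net effect of the Cmp helpers above is that mixed-signedness checks compare mathematically instead of after the usual arithmetic conversions, under which -1 == 0xffffffffu is true. A standalone sketch of the same idea, written with C++17 if constexpr rather than the enable_if machinery used above (CmpEQ is a hypothetical name, not V8's API):

#include <cassert>
#include <type_traits>

// Only cast the signed side to unsigned after checking it is non-negative,
// so -1 == 0xffffffffu evaluates to false instead of true.
template <typename Lhs, typename Rhs>
bool CmpEQ(Lhs lhs, Rhs rhs) {
  if constexpr (std::is_signed_v<Lhs> && std::is_unsigned_v<Rhs>) {
    return lhs >= 0 && static_cast<std::make_unsigned_t<Lhs>>(lhs) == rhs;
  } else if constexpr (std::is_unsigned_v<Lhs> && std::is_signed_v<Rhs>) {
    return CmpEQ(rhs, lhs);  // mirror case: swap and reuse the branch above
  } else {
    return lhs == rhs;  // same signedness: the plain compare is already safe
  }
}

int main() {
  assert(!CmpEQ(-1, 0xffffffffu));  // a raw == would report true here
  assert(CmpEQ(42, 42u));
  assert(CmpEQ(3u, 3));
}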
@@ -141,11 +201,6 @@ DEFINE_CHECK_OP_IMPL(GT, > )
#define CHECK_NOT_NULL(val) CHECK((val) != nullptr)
#define CHECK_IMPLIES(lhs, rhs) CHECK(!(lhs) || (rhs))
// Exposed for making debugging easier (to see where your function is being
// called, just add a call to DumpBacktrace).
void DumpBacktrace();
} // namespace base
} // namespace v8

19
deps/v8/src/base/macros.h

@@ -282,23 +282,4 @@ inline T RoundUp(T x, intptr_t m) {
return RoundDown<T>(static_cast<T>(x + m - 1), m);
}
namespace v8 {
namespace base {
// TODO(yangguo): This is a poor man's replacement for std::is_fundamental,
// which requires C++11. Switch to std::is_fundamental once possible.
template <typename T>
inline bool is_fundamental() {
return false;
}
template <>
inline bool is_fundamental<uint8_t>() {
return true;
}
} // namespace base
} // namespace v8
#endif // V8_BASE_MACROS_H_

Some files were not shown because too many files changed in this diff
