
deps: update V8 to 5.9.211.32

PR-URL: https://github.com/nodejs/node/pull/13263
Reviewed-By: Gibson Fahnestock <gibfahn@gmail.com>
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Branch: v6
Author: Michaël Zasso
Commit: 3dc8c3bed4
deps/v8/.gitignore | 46
deps/v8/.gn | 6
deps/v8/AUTHORS | 3
deps/v8/BUILD.gn | 444
deps/v8/ChangeLog | 1145
deps/v8/DEPS | 20
deps/v8/OWNERS | 1
deps/v8/PRESUBMIT.py | 28
deps/v8/gni/isolate.gni | 7
deps/v8/gni/v8.gni | 6
deps/v8/gypfiles/all.gyp | 12
deps/v8/gypfiles/features.gypi | 3
deps/v8/gypfiles/isolate.gypi | 1
deps/v8/gypfiles/standalone.gypi | 25
deps/v8/gypfiles/toolchain.gypi | 1
deps/v8/include/libplatform/libplatform.h | 8
deps/v8/include/v8-debug.h | 43
deps/v8/include/v8-experimental.h | 58
deps/v8/include/v8-inspector.h | 2
deps/v8/include/v8-platform.h | 8
deps/v8/include/v8-profiler.h | 5
deps/v8/include/v8-version.h | 6
deps/v8/include/v8.h | 650
deps/v8/infra/config/cq.cfg | 6
deps/v8/infra/mb/mb_config.pyl | 69
deps/v8/src/DEPS | 3
deps/v8/src/OWNERS | 1
deps/v8/src/api-experimental.cc | 139
deps/v8/src/api-experimental.h | 28
deps/v8/src/api-natives.cc | 52
deps/v8/src/api.cc | 797
deps/v8/src/api.h | 15
deps/v8/src/arm/assembler-arm-inl.h | 67
deps/v8/src/arm/assembler-arm.cc | 576
deps/v8/src/arm/assembler-arm.h | 503
deps/v8/src/arm/code-stubs-arm.cc | 406
deps/v8/src/arm/code-stubs-arm.h | 4
deps/v8/src/arm/codegen-arm.cc | 16
deps/v8/src/arm/deoptimizer-arm.cc | 40
deps/v8/src/arm/disasm-arm.cc | 230
deps/v8/src/arm/interface-descriptors-arm.cc | 57
deps/v8/src/arm/macro-assembler-arm.cc | 225
deps/v8/src/arm/macro-assembler-arm.h | 104
deps/v8/src/arm/simulator-arm.cc | 1202
deps/v8/src/arm/simulator-arm.h | 8
deps/v8/src/arm64/assembler-arm64-inl.h | 55
deps/v8/src/arm64/assembler-arm64.cc | 63
deps/v8/src/arm64/assembler-arm64.h | 182
deps/v8/src/arm64/code-stubs-arm64.cc | 487
deps/v8/src/arm64/code-stubs-arm64.h | 40
deps/v8/src/arm64/codegen-arm64.cc | 2
deps/v8/src/arm64/constants-arm64.h | 11
deps/v8/src/arm64/deoptimizer-arm64.cc | 53
deps/v8/src/arm64/disasm-arm64.cc | 11
deps/v8/src/arm64/eh-frame-arm64.cc | 1
deps/v8/src/arm64/instructions-arm64.cc | 21
deps/v8/src/arm64/instructions-arm64.h | 10
deps/v8/src/arm64/instrument-arm64.cc | 60
deps/v8/src/arm64/interface-descriptors-arm64.cc | 56
deps/v8/src/arm64/macro-assembler-arm64-inl.h | 27
deps/v8/src/arm64/macro-assembler-arm64.cc | 182
deps/v8/src/arm64/macro-assembler-arm64.h | 93
deps/v8/src/arm64/simulator-arm64.cc | 314
deps/v8/src/arm64/simulator-arm64.h | 91
deps/v8/src/asmjs/asm-js.cc | 138
deps/v8/src/asmjs/asm-names.h | 110
deps/v8/src/asmjs/asm-parser.cc | 2449
deps/v8/src/asmjs/asm-parser.h | 316
deps/v8/src/asmjs/asm-scanner.cc | 431
deps/v8/src/asmjs/asm-scanner.h | 165
deps/v8/src/asmjs/asm-wasm-builder.cc | 12
deps/v8/src/assembler.cc | 130
deps/v8/src/assembler.h | 81
deps/v8/src/ast/ast-expression-rewriter.cc | 9
deps/v8/src/ast/ast-numbering.cc | 75
deps/v8/src/ast/ast-numbering.h | 30
deps/v8/src/ast/ast-traversal-visitor.h | 9
deps/v8/src/ast/ast-types.cc | 4
deps/v8/src/ast/ast-value-factory.cc | 86
deps/v8/src/ast/ast-value-factory.h | 211
deps/v8/src/ast/ast.cc | 68
deps/v8/src/ast/ast.h | 175
deps/v8/src/ast/context-slot-cache.h | 2
deps/v8/src/ast/modules.h | 3
deps/v8/src/ast/prettyprinter.cc | 73
deps/v8/src/ast/scopes.cc | 264
deps/v8/src/ast/scopes.h | 77
deps/v8/src/ast/variables.h | 6
deps/v8/src/background-parsing-task.cc | 9
deps/v8/src/bailout-reason.h | 12
deps/v8/src/base/cpu.cc | 3
deps/v8/src/base/cpu.h | 2
deps/v8/src/base/debug/stack_trace.h | 2
deps/v8/src/base/iterator.h | 7
deps/v8/src/base/logging.cc | 16
deps/v8/src/base/logging.h | 3
deps/v8/src/base/platform/mutex.cc | 2
deps/v8/src/base/platform/platform-aix.cc | 15
deps/v8/src/base/platform/platform-cygwin.cc | 13
deps/v8/src/base/platform/platform-freebsd.cc | 23

46
deps/v8/.gitignore

@@ -1,3 +1,5 @@
+#*#
+*.Makefile
 *.a
 *.exe
 *.idb
@@ -18,9 +20,9 @@
 *.vcxproj
 *.vcxproj.filters
 *.xcodeproj
-#*#
 *~
 .#*
+.*.sw?
 .cpplint-cache
 .cproject
 .d8_history
@@ -30,26 +32,23 @@
 .project
 .pydevproject
 .settings
-.*.sw?
-bsuite
-compile_commands.json
-d8
-d8_g
-gccauses
-gcsuspects
-shell
-shell_g
 /_*
 /build
-/gypfiles/win_toolchain.json
 /buildtools
+/gypfiles/win_toolchain.json
 /hydrogen.cfg
 /obj
 /out
 /out.gn
 /perf.data
 /perf.data.old
+/src/inspector/build/closure-compiler
+/src/inspector/build/closure-compiler.tar.gz
 /test/benchmarks/data
+/test/fuzzer/wasm
+/test/fuzzer/wasm.tar.gz
+/test/fuzzer/wasm_asmjs
+/test/fuzzer/wasm_asmjs.tar.gz
 /test/mozilla/data
 /test/promises-aplus/promises-tests
 /test/promises-aplus/promises-tests.tar.gz
@@ -57,6 +56,7 @@ shell_g
 /test/test262/data
 /test/test262/data.tar
 /test/test262/harness
+/test/wasm-js
 /testing/gmock
 /testing/gtest/*
 !/testing/gtest/include
@@ -81,26 +81,26 @@ shell_g
 /tools/swarming_client
 /tools/visual_studio/Debug
 /tools/visual_studio/Release
-/test/fuzzer/wasm
-/test/fuzzer/wasm_asmjs
 /v8.log.ll
 /xcodebuild
-TAGS
-*.Makefile
-GTAGS
+GPATH
 GRTAGS
 GSYMS
-GPATH
-tags
+GTAGS
+TAGS
+bsuite
+compile_commands.json
+d8
+d8_g
+gccauses
+gcsuspects
 gtags.files
+shell
+shell_g
+tags
 turbo*.cfg
 turbo*.dot
 turbo*.json
 v8.ignition_dispatches_table.json
-/test/fuzzer/wasm.tar.gz
-/test/fuzzer/wasm_asmjs.tar.gz
-/src/inspector/build/closure-compiler.tar.gz
-/src/inspector/build/closure-compiler
-/test/wasm-js
 !/third_party/jinja2
 !/third_party/markupsafe

6
deps/v8/.gn

@@ -21,5 +21,7 @@ check_targets = []
 # These are the list of GN files that run exec_script. This whitelist exists
 # to force additional review for new uses of exec_script, which is strongly
 # discouraged except for gypi_to_gn calls.
-exec_script_whitelist =
-    build_dotfile_settings.exec_script_whitelist + [ "//test/test262/BUILD.gn" ]
+exec_script_whitelist = build_dotfile_settings.exec_script_whitelist + [
+  "//test/test262/BUILD.gn",
+  "//BUILD.gn",
+]

3
deps/v8/AUTHORS

@@ -1,4 +1,4 @@
 # Below is a list of people and organizations that have contributed
 # to the V8 project. Names should be added to the list like so:
 #
 #   Name/Organization <email address>
@@ -82,6 +82,7 @@ JunHo Seo <sejunho@gmail.com>
 Kang-Hao (Kenny) Lu <kennyluck@csail.mit.edu>
 Karl Skomski <karl@skomski.com>
 Kevin Gibbons <bakkot@gmail.com>
+Loo Rong Jie <loorongjie@gmail.com>
 Luis Reis <luis.m.reis@gmail.com>
 Luke Zarko <lukezarko@gmail.com>
 Maciej Małecki <me@mmalecki.com>

444
deps/v8/BUILD.gn

@@ -20,6 +20,12 @@ declare_args() {
   # Print to stdout on Android.
   v8_android_log_stdout = false
 
+  # Sets -DV8_ENABLE_FUTURE.
+  v8_enable_future = false
+
+  # Sets -DV8_DISABLE_TURBO.
+  v8_disable_turbo = false
+
   # Sets -DVERIFY_HEAP.
   v8_enable_verify_heap = ""
 
@@ -69,6 +75,9 @@ declare_args() {
   # Sets -dV8_ENABLE_CHECKS.
   v8_enable_v8_checks = ""
 
+  # Builds the snapshot with --trace-ignition
+  v8_trace_ignition = false
+
   # With post mortem support enabled, metadata is embedded into libv8 that
   # describes various parameters of the VM for use by debuggers. See
   # tools/gen-postmortem-metadata.py for details.
@@ -101,6 +110,19 @@ declare_args() {
   v8_enable_gdbjit = ((v8_current_cpu == "x86" || v8_current_cpu == "x64" ||
                        v8_current_cpu == "x87") && (is_linux || is_mac)) ||
                      (v8_current_cpu == "ppc64" && is_linux)
+
+  # Set v8_host_byteorder
+  v8_host_byteorder = "little"
+
+  # ppc64 can be either BE or LE
+  if (host_cpu == "ppc64") {
+    v8_host_byteorder =
+        exec_script("//tools/get_byteorder.py", [], "trim string")
+  }
+  if (host_cpu == "ppc" || host_cpu == "s390" || host_cpu == "s390x" ||
+      host_cpu == "mips" || host_cpu == "mips64") {
+    v8_host_byteorder = "big"
+  }
 }
 
 # Derived defaults.
@@ -125,7 +147,6 @@ if (v8_enable_v8_checks == "") {
 # snapshots.
 is_target_simulator = target_cpu != v8_target_cpu
 
-v8_generated_peephole_source = "$target_gen_dir/bytecode-peephole-table.cc"
 v8_random_seed = "314159265"
 v8_toolset_for_shell = "host"
 
@@ -178,10 +199,10 @@ config("external_config") {
   if (is_component_build) {
     defines = [ "USING_V8_SHARED" ]
   }
-  include_dirs = [ "include" ]
-  if (v8_enable_inspector) {
-    include_dirs += [ "$target_gen_dir/include" ]
-  }
+  include_dirs = [
+    "include",
+    "$target_gen_dir/include",
+  ]
 }
 
 # This config should only be applied to code that needs to be explicitly
@@ -204,6 +225,12 @@ config("features") {
     defines +=
         [ "V8_PROMISE_INTERNAL_FIELD_COUNT=${v8_promise_internal_field_count}" ]
   }
+  if (v8_enable_future) {
+    defines += [ "V8_ENABLE_FUTURE" ]
+  }
+  if (v8_disable_turbo) {
+    defines += [ "V8_DISABLE_TURBO" ]
+  }
   if (v8_enable_gdbjit) {
     defines += [ "ENABLE_GDB_JIT_INTERFACE" ]
   }
@@ -240,6 +267,9 @@ config("features") {
   if (v8_enable_handle_zapping) {
     defines += [ "ENABLE_HANDLE_ZAPPING" ]
   }
+  if (v8_use_snapshot) {
+    defines += [ "V8_USE_SNAPSHOT" ]
+  }
   if (v8_use_external_startup_data) {
     defines += [ "V8_USE_EXTERNAL_STARTUP_DATA" ]
   }
@@ -356,8 +386,31 @@ config("toolchain") {
     if (v8_current_cpu == "s390x") {
      defines += [ "V8_TARGET_ARCH_S390X" ]
     }
-    if (host_cpu == "x64" || host_cpu == "x86") {
+    if (v8_host_byteorder == "little") {
       defines += [ "V8_TARGET_ARCH_S390_LE_SIM" ]
+    } else {
+      cflags += [ "-march=z196" ]
     }
   }
+  if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
+    defines += [ "V8_TARGET_ARCH_PPC" ]
+    if (v8_current_cpu == "ppc64") {
+      defines += [ "V8_TARGET_ARCH_PPC64" ]
+    }
+    if (v8_host_byteorder == "little") {
+      defines += [ "V8_TARGET_ARCH_PPC_LE" ]
+    } else if (v8_host_byteorder == "big") {
+      defines += [ "V8_TARGET_ARCH_PPC_BE" ]
+      if (current_os == "aix") {
+        cflags += [
+          # Work around AIX ceil, trunc and round oddities.
+          "-mcpu=power5+",
+          "-mfprnd",
+
+          # Work around AIX assembler popcntb bug.
+          "-mno-popcntb",
+        ]
+      }
+    }
+  }
   if (v8_current_cpu == "x86") {
@@ -414,10 +467,25 @@ config("toolchain") {
       # TODO(hans): Remove once http://crbug.com/428099 is resolved.
       "-Winconsistent-missing-override",
     ]
-    #if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
-    #    v8_current_cpu == "mips64el") {
-    #  cflags += [ "-Wshorten-64-to-32" ]
-    #}
+    if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
+        v8_current_cpu == "mips64el") {
+      cflags += [ "-Wshorten-64-to-32" ]
+    }
   }
+
+  if (is_win) {
+    cflags += [
+      "/wd4245",  # Conversion with signed/unsigned mismatch.
+      "/wd4267",  # Conversion with possible loss of data.
+      "/wd4324",  # Padding structure due to alignment.
+      "/wd4701",  # Potentially uninitialized local variable.
+      "/wd4702",  # Unreachable code.
+      "/wd4703",  # Potentially uninitialized local pointer variable.
+      "/wd4709",  # Comma operator within array index expr (bugged).
+      "/wd4714",  # Function marked forceinline not inlined.
+      "/wd4718",  # Recursive call has no side-effect.
+      "/wd4800",  # Forcing value to bool.
+    ]
+  }
 }
@@ -445,7 +513,6 @@ action("js2c") {
     "src/js/v8natives.js",
     "src/js/array.js",
     "src/js/string.js",
-    "src/js/arraybuffer.js",
     "src/js/typedarray.js",
     "src/js/collection.js",
     "src/js/weak-collection.js",
@@ -483,43 +550,6 @@ action("js2c") {
   }
 }
 
-action("js2c_experimental") {
-  visibility = [ ":*" ]  # Only targets in this file can depend on this.
-
-  script = "tools/js2c.py"
-
-  # The script depends on this other script, this rule causes a rebuild if it
-  # changes.
-  inputs = [
-    "tools/jsmin.py",
-  ]
-
-  # NOSORT
-  sources = [
-    "src/js/macros.py",
-    "src/messages.h",
-    "src/js/harmony-atomics.js",
-  ]
-
-  outputs = [
-    "$target_gen_dir/experimental-libraries.cc",
-  ]
-
-  args = [
-    rebase_path("$target_gen_dir/experimental-libraries.cc",
-                root_build_dir),
-    "EXPERIMENTAL",
-  ] + rebase_path(sources, root_build_dir)
-
-  if (v8_use_external_startup_data) {
-    outputs += [ "$target_gen_dir/libraries_experimental.bin" ]
-    args += [
-      "--startup_blob",
-      rebase_path("$target_gen_dir/libraries_experimental.bin", root_build_dir),
-    ]
-  }
-}
-
 action("js2c_extras") {
   visibility = [ ":*" ]  # Only targets in this file can depend on this.
@@ -630,7 +660,6 @@ if (v8_use_external_startup_data) {
     deps = [
       ":js2c",
-      ":js2c_experimental",
       ":js2c_experimental_extras",
       ":js2c_extras",
     ]
@@ -638,7 +667,6 @@ if (v8_use_external_startup_data) {
     # NOSORT
     sources = [
       "$target_gen_dir/libraries.bin",
-      "$target_gen_dir/libraries_experimental.bin",
      "$target_gen_dir/libraries_extras.bin",
       "$target_gen_dir/libraries_experimental_extras.bin",
     ]
@@ -714,6 +742,10 @@ action("run_mksnapshot") {
     ]
   }
 
+  if (v8_trace_ignition) {
+    args += [ "--trace-ignition" ]
+  }
+
   if (v8_use_external_startup_data) {
     outputs += [ "$root_out_dir/snapshot_blob.bin" ]
     args += [
@@ -728,29 +760,6 @@ action("run_mksnapshot") {
   }
 }
 
-action("run_mkpeephole") {
-  visibility = [ ":*" ]  # Only targets in this file can depend on this.
-
-  deps = [
-    ":mkpeephole($v8_snapshot_toolchain)",
-  ]
-
-  outputs = [
-    v8_generated_peephole_source,
-  ]
-
-  sources = []
-
-  script = "tools/run.py"
-
-  args = [
-    "./" + rebase_path(get_label_info(":mkpeephole($v8_snapshot_toolchain)",
-                                      "root_out_dir") + "/mkpeephole",
-           root_build_dir),
-    rebase_path(v8_generated_peephole_source, root_build_dir),
-  ]
-}
-
 action("v8_dump_build_config") {
   script = "tools/testrunner/utils/dump_build_config.py"
   outputs = [
@@ -769,7 +778,6 @@ action("v8_dump_build_config") {
     "target_cpu=\"$target_cpu\"",
     "v8_current_cpu=\"$v8_current_cpu\"",
     "v8_enable_i18n_support=$v8_enable_i18n_support",
-    "v8_enable_inspector=$v8_enable_inspector",
     "v8_target_cpu=\"$v8_target_cpu\"",
     "v8_use_snapshot=$v8_use_snapshot",
   ]
@@ -791,6 +799,7 @@ source_set("v8_maybe_snapshot") {
   } else {
     # Ignore v8_use_external_startup_data setting if no snapshot is used.
     public_deps = [
+      ":v8_builtins_setup",
       ":v8_nosnapshot",
     ]
   }
@@ -801,7 +810,6 @@ v8_source_set("v8_nosnapshot") {
   deps = [
     ":js2c",
-    ":js2c_experimental",
     ":js2c_experimental_extras",
     ":js2c_extras",
     ":v8_base",
@@ -809,7 +817,6 @@ v8_source_set("v8_nosnapshot") {
   sources = [
     "$target_gen_dir/experimental-extras-libraries.cc",
-    "$target_gen_dir/experimental-libraries.cc",
     "$target_gen_dir/extras-libraries.cc",
     "$target_gen_dir/libraries.cc",
     "src/snapshot/snapshot-empty.cc",
@@ -828,7 +835,6 @@ v8_source_set("v8_snapshot") {
   deps = [
     ":js2c",
-    ":js2c_experimental",
     ":js2c_experimental_extras",
     ":js2c_extras",
     ":v8_base",
@@ -841,10 +847,10 @@ v8_source_set("v8_snapshot") {
   sources = [
     "$target_gen_dir/experimental-extras-libraries.cc",
-    "$target_gen_dir/experimental-libraries.cc",
     "$target_gen_dir/extras-libraries.cc",
     "$target_gen_dir/libraries.cc",
     "$target_gen_dir/snapshot.cc",
+    "src/setup-isolate-deserialize.cc",
   ]
 
   configs = [ ":internal_config" ]
@@ -856,7 +862,6 @@ if (v8_use_external_startup_data) {
     deps = [
       ":js2c",
-      ":js2c_experimental",
       ":js2c_experimental_extras",
       ":js2c_extras",
       ":v8_base",
@@ -867,6 +872,7 @@ if (v8_use_external_startup_data) {
     ]
 
     sources = [
+      "src/setup-isolate-deserialize.cc",
       "src/snapshot/natives-external.cc",
       "src/snapshot/snapshot-external.cc",
     ]
@@ -875,6 +881,138 @@ if (v8_use_external_startup_data) {
   }
 }
 
+v8_source_set("v8_builtins_generators") {
+  visibility = [
+    ":*",
+    "test/cctest:*",
+    "test/unittests:*",
+  ]
+
+  deps = [
+    ":v8_base",
+  ]
+
+  sources = [
+    ### gcmole(all) ###
+    "src/builtins/builtins-arguments-gen.cc",
+    "src/builtins/builtins-arguments-gen.h",
+    "src/builtins/builtins-array-gen.cc",
+    "src/builtins/builtins-async-function-gen.cc",
+    "src/builtins/builtins-async-gen.cc",
+    "src/builtins/builtins-async-gen.h",
+    "src/builtins/builtins-async-generator-gen.cc",
+    "src/builtins/builtins-async-iterator-gen.cc",
+    "src/builtins/builtins-boolean-gen.cc",
+    "src/builtins/builtins-call-gen.cc",
+    "src/builtins/builtins-constructor-gen.cc",
+    "src/builtins/builtins-constructor-gen.h",
+    "src/builtins/builtins-constructor.h",
+    "src/builtins/builtins-conversion-gen.cc",
+    "src/builtins/builtins-date-gen.cc",
+    "src/builtins/builtins-forin-gen.cc",
+    "src/builtins/builtins-forin-gen.h",
+    "src/builtins/builtins-function-gen.cc",
+    "src/builtins/builtins-generator-gen.cc",
+    "src/builtins/builtins-global-gen.cc",
+    "src/builtins/builtins-handler-gen.cc",
+    "src/builtins/builtins-ic-gen.cc",
+    "src/builtins/builtins-internal-gen.cc",
+    "src/builtins/builtins-interpreter-gen.cc",
+    "src/builtins/builtins-math-gen.cc",
+    "src/builtins/builtins-number-gen.cc",
+    "src/builtins/builtins-object-gen.cc",
+    "src/builtins/builtins-promise-gen.cc",
+    "src/builtins/builtins-promise-gen.h",
+    "src/builtins/builtins-regexp-gen.cc",
+    "src/builtins/builtins-regexp-gen.h",
+    "src/builtins/builtins-sharedarraybuffer-gen.cc",
+    "src/builtins/builtins-string-gen.cc",
+    "src/builtins/builtins-symbol-gen.cc",
+    "src/builtins/builtins-typedarray-gen.cc",
+    "src/builtins/builtins-utils-gen.h",
+    "src/builtins/builtins-wasm-gen.cc",
+    "src/builtins/setup-builtins-internal.cc",
+    "src/ic/accessor-assembler.cc",
+    "src/ic/accessor-assembler.h",
+    "src/ic/binary-op-assembler.cc",
+    "src/ic/binary-op-assembler.h",
+    "src/ic/keyed-store-generic.cc",
+    "src/ic/keyed-store-generic.h",
+    "src/interpreter/interpreter-assembler.cc",
+    "src/interpreter/interpreter-assembler.h",
+    "src/interpreter/interpreter-generator.cc",
+    "src/interpreter/interpreter-generator.h",
+    "src/interpreter/interpreter-intrinsics-generator.cc",
+    "src/interpreter/interpreter-intrinsics-generator.h",
+    "src/interpreter/setup-interpreter-internal.cc",
+    "src/interpreter/setup-interpreter.h",
+  ]
+
+  if (v8_current_cpu == "x86") {
+    sources += [
+      ### gcmole(arch:ia32) ###
+      "src/builtins/ia32/builtins-ia32.cc",
+    ]
+  } else if (v8_current_cpu == "x64") {
+    sources += [
+      ### gcmole(arch:x64) ###
+      "src/builtins/x64/builtins-x64.cc",
+    ]
+  } else if (v8_current_cpu == "arm") {
+    sources += [
+      ### gcmole(arch:arm) ###
+      "src/builtins/arm/builtins-arm.cc",
+    ]
+  } else if (v8_current_cpu == "arm64") {
+    sources += [
+      ### gcmole(arch:arm64) ###
+      "src/builtins/arm64/builtins-arm64.cc",
+    ]
+  } else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
+    sources += [
+      ### gcmole(arch:mipsel) ###
+      "src/builtins/mips/builtins-mips.cc",
+    ]
+  } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
+    sources += [
+      ### gcmole(arch:mips64el) ###
+      "src/builtins/mips64/builtins-mips64.cc",
+    ]
+  } else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
+    sources += [
+      ### gcmole(arch:ppc) ###
+      "src/builtins/ppc/builtins-ppc.cc",
+    ]
+  } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
+    sources += [
+      ### gcmole(arch:s390) ###
+      "src/builtins/s390/builtins-s390.cc",
+    ]
+  } else if (v8_current_cpu == "x87") {
+    sources += [
+      ### gcmole(arch:x87) ###
+      "src/builtins/x87/builtins-x87.cc",
+    ]
+  }
+
+  configs = [ ":internal_config" ]
+}
+
+v8_source_set("v8_builtins_setup") {
+  visibility = [ ":*" ]  # Only targets in this file can depend on this.
+
+  deps = [
+    ":v8_builtins_generators",
+  ]
+
+  sources = [
+    ### gcmole(all) ###
+    "src/setup-isolate-full.cc",
+  ]
+
+  configs = [ ":internal_config" ]
+}
+
 # This is split out to be a non-code containing target that the Chromium browser
 # DLL can depend upon to get only a version string.
 v8_header_set("v8_version") {
@@ -894,7 +1032,8 @@ v8_source_set("v8_base") {
     ### gcmole(all) ###
     "include/v8-debug.h",
-    "include/v8-experimental.h",
+    "include/v8-inspector-protocol.h",
+    "include/v8-inspector.h",
     "include/v8-platform.h",
     "include/v8-profiler.h",
     "include/v8-testing.h",
@@ -912,8 +1051,6 @@ v8_source_set("v8_base") {
     "src/api-arguments-inl.h",
     "src/api-arguments.cc",
     "src/api-arguments.h",
-    "src/api-experimental.cc",
-    "src/api-experimental.h",
     "src/api-natives.cc",
     "src/api-natives.h",
     "src/api.cc",
@@ -922,6 +1059,11 @@ v8_source_set("v8_base") {
     "src/arguments.h",
     "src/asmjs/asm-js.cc",
     "src/asmjs/asm-js.h",
+    "src/asmjs/asm-names.h",
+    "src/asmjs/asm-parser.cc",
+    "src/asmjs/asm-parser.h",
+    "src/asmjs/asm-scanner.cc",
+    "src/asmjs/asm-scanner.h",
     "src/asmjs/asm-typer.cc",
     "src/asmjs/asm-typer.h",
     "src/asmjs/asm-types.cc",
@@ -976,52 +1118,40 @@ v8_source_set("v8_base") {
     "src/bootstrapper.cc",
     "src/bootstrapper.h",
     "src/builtins/builtins-api.cc",
-    "src/builtins/builtins-arguments.cc",
-    "src/builtins/builtins-arguments.h",
     "src/builtins/builtins-array.cc",
     "src/builtins/builtins-arraybuffer.cc",
-    "src/builtins/builtins-async-function.cc",
-    "src/builtins/builtins-async-iterator.cc",
-    "src/builtins/builtins-async.cc",
-    "src/builtins/builtins-async.h",
     "src/builtins/builtins-boolean.cc",
     "src/builtins/builtins-call.cc",
     "src/builtins/builtins-callsite.cc",
-    "src/builtins/builtins-constructor.cc",
     "src/builtins/builtins-constructor.h",
-    "src/builtins/builtins-conversion.cc",
     "src/builtins/builtins-dataview.cc",
     "src/builtins/builtins-date.cc",
     "src/builtins/builtins-debug.cc",
+    "src/builtins/builtins-definitions.h",
+    "src/builtins/builtins-descriptors.h",
     "src/builtins/builtins-error.cc",
     "src/builtins/builtins-function.cc",
-    "src/builtins/builtins-generator.cc",
     "src/builtins/builtins-global.cc",
-    "src/builtins/builtins-handler.cc",
-    "src/builtins/builtins-ic.cc",
     "src/builtins/builtins-internal.cc",
     "src/builtins/builtins-interpreter.cc",
+    "src/builtins/builtins-intl.cc",
     "src/builtins/builtins-json.cc",
     "src/builtins/builtins-math.cc",
     "src/builtins/builtins-number.cc",
     "src/builtins/builtins-object.cc",
-    "src/builtins/builtins-object.h",
-    "src/builtins/builtins-promise.cc",
-    "src/builtins/builtins-promise.h",
     "src/builtins/builtins-proxy.cc",
     "src/builtins/builtins-reflect.cc",
     "src/builtins/builtins-regexp.cc",
-    "src/builtins/builtins-regexp.h",
     "src/builtins/builtins-sharedarraybuffer.cc",
     "src/builtins/builtins-string.cc",
     "src/builtins/builtins-symbol.cc",
     "src/builtins/builtins-typedarray.cc",
     "src/builtins/builtins-utils.h",
-    "src/builtins/builtins-wasm.cc",
     "src/builtins/builtins.cc",
     "src/builtins/builtins.h",
     "src/cached-powers.cc",
     "src/cached-powers.h",
+    "src/callable.h",
     "src/cancelable-task.cc",
     "src/cancelable-task.h",
     "src/char-predicates-inl.h",
@@ -1034,6 +1164,7 @@ v8_source_set("v8_base") {
     "src/code-stub-assembler.cc",
     "src/code-stub-assembler.h",
     "src/code-stubs-hydrogen.cc",
+    "src/code-stubs-utils.h",
     "src/code-stubs.cc",
     "src/code-stubs.h",
     "src/codegen.cc",
@@ -1120,8 +1251,6 @@ v8_source_set("v8_base") {
     "src/compiler/graph-assembler.h",
     "src/compiler/graph-reducer.cc",
     "src/compiler/graph-reducer.h",
-    "src/compiler/graph-replay.cc",
-    "src/compiler/graph-replay.h",
     "src/compiler/graph-trimmer.cc",
     "src/compiler/graph-trimmer.h",
     "src/compiler/graph-visualizer.cc",
@@ -1265,8 +1394,6 @@ v8_source_set("v8_base") {
     "src/compiler/wasm-linkage.cc",
     "src/compiler/zone-stats.cc",
     "src/compiler/zone-stats.h",
-    "src/context-measure.cc",
-    "src/context-measure.h",
     "src/contexts-inl.h",
     "src/contexts.cc",
     "src/contexts.h",
@@ -1393,8 +1520,6 @@ v8_source_set("v8_base") {
     "src/external-reference-table.h",
     "src/factory.cc",
     "src/factory.h",
-    "src/fast-accessor-assembler.cc",
-    "src/fast-accessor-assembler.h",
     "src/fast-dtoa.cc",
     "src/fast-dtoa.h",
     "src/feedback-vector-inl.h",
@@ -1433,6 +1558,8 @@ v8_source_set("v8_base") {
     "src/heap/array-buffer-tracker.h",
     "src/heap/code-stats.cc",
     "src/heap/code-stats.h",
+    "src/heap/concurrent-marking.cc",
+    "src/heap/concurrent-marking.h",
     "src/heap/embedder-tracing.cc",
     "src/heap/embedder-tracing.h",
     "src/heap/gc-idle-time-handler.cc",
@@ -1476,8 +1603,6 @@ v8_source_set("v8_base") {
     "src/ic/access-compiler-data.h",
     "src/ic/access-compiler.cc",
     "src/ic/access-compiler.h",
-    "src/ic/accessor-assembler.cc",
-    "src/ic/accessor-assembler.h",
     "src/ic/call-optimization.cc",
     "src/ic/call-optimization.h",
     "src/ic/handler-compiler.cc",
@@ -1491,8 +1616,6 @@ v8_source_set("v8_base") {
     "src/ic/ic-stats.h",
     "src/ic/ic.cc",
     "src/ic/ic.h",
-    "src/ic/keyed-store-generic.cc",
-    "src/ic/keyed-store-generic.h",
     "src/ic/stub-cache.cc",
     "src/ic/stub-cache.h",
     "src/icu_util.cc",
@@ -1511,8 +1634,6 @@ v8_source_set("v8_base") {
     "src/interpreter/bytecode-array-random-iterator.h",
     "src/interpreter/bytecode-array-writer.cc",
     "src/interpreter/bytecode-array-writer.h",
-    "src/interpreter/bytecode-dead-code-optimizer.cc",
-    "src/interpreter/bytecode-dead-code-optimizer.h",
     "src/interpreter/bytecode-decoder.cc",
     "src/interpreter/bytecode-decoder.h",
     "src/interpreter/bytecode-flags.cc",
@@ -1523,9 +1644,6 @@ v8_source_set("v8_base") {
     "src/interpreter/bytecode-label.h",
     "src/interpreter/bytecode-operands.cc",
     "src/interpreter/bytecode-operands.h",
-    "src/interpreter/bytecode-peephole-optimizer.cc",
-    "src/interpreter/bytecode-peephole-optimizer.h",
-    "src/interpreter/bytecode-peephole-table.h",
     "src/interpreter/bytecode-pipeline.cc",
     "src/interpreter/bytecode-pipeline.h",
     "src/interpreter/bytecode-register-allocator.h",
@@ -1542,8 +1660,7 @@ v8_source_set("v8_base") {
     "src/interpreter/control-flow-builders.h",
     "src/interpreter/handler-table-builder.cc",
     "src/interpreter/handler-table-builder.h",
-    "src/interpreter/interpreter-assembler.cc",
-    "src/interpreter/interpreter-assembler.h",
+    "src/interpreter/interpreter-generator.h",
     "src/interpreter/interpreter-intrinsics.cc",
     "src/interpreter/interpreter-intrinsics.h",
     "src/interpreter/interpreter.cc",
@@ -1577,6 +1694,7 @@ v8_source_set("v8_base") {
     "src/lookup.h",
     "src/machine-type.cc",
     "src/machine-type.h",
+    "src/macro-assembler-inl.h",
     "src/macro-assembler.h",
     "src/managed.h",
     "src/map-updater.cc",
@@ -1591,6 +1709,15 @@ v8_source_set("v8_base") {
     "src/objects-printer.cc",
     "src/objects.cc",
     "src/objects.h",
+    "src/objects/code-cache-inl.h",
+    "src/objects/code-cache.h",
+    "src/objects/compilation-cache-inl.h",
+    "src/objects/compilation-cache.h",
+    "src/objects/descriptor-array.h",
+    "src/objects/dictionary.h",
+    "src/objects/frame-array-inl.h",
+    "src/objects/frame-array.h",
+    "src/objects/hash-table.h",
     "src/objects/literal-objects.cc",
     "src/objects/literal-objects.h",
     "src/objects/module-info.h",
@@ -1599,9 +1726,9 @@ v8_source_set("v8_base") {
     "src/objects/regexp-match-info.h",
     "src/objects/scope-info.cc",
     "src/objects/scope-info.h",
+    "src/objects/string-table.h",
     "src/ostreams.cc",
     "src/ostreams.h",
-    "src/parsing/duplicate-finder.cc",
     "src/parsing/duplicate-finder.h",
     "src/parsing/expression-classifier.h",
     "src/parsing/func-name-inferrer.cc",
@@ -1729,6 +1856,7 @@ v8_source_set("v8_base") {
     "src/runtime/runtime.h",
     "src/safepoint-table.cc",
     "src/safepoint-table.h",
+    "src/setup-isolate.h",
     "src/signature.h",
     "src/simulator.h",
     "src/small-pointer-list.h",
@@ -1776,6 +1904,9 @@ v8_source_set("v8_base") {
     "src/transitions-inl.h",
     "src/transitions.cc",
     "src/transitions.h",
+    "src/trap-handler/handler-outside.cc",
+    "src/trap-handler/handler-shared.cc",
+    "src/trap-handler/trap-handler-internal.h",
     "src/trap-handler/trap-handler.h",
     "src/type-hints.cc",
     "src/type-hints.h",
@@ -1852,7 +1983,6 @@ v8_source_set("v8_base") {
   if (v8_current_cpu == "x86") {
     sources += [  ### gcmole(arch:ia32) ###
-      "src/builtins/ia32/builtins-ia32.cc",
       "src/compiler/ia32/code-generator-ia32.cc",
       "src/compiler/ia32/instruction-codes-ia32.h",
       "src/compiler/ia32/instruction-scheduler-ia32.cc",
@@ -1882,6 +2012,7 @@ v8_source_set("v8_base") {
       "src/ia32/macro-assembler-ia32.h",
       "src/ia32/simulator-ia32.cc",
       "src/ia32/simulator-ia32.h",
+      "src/ia32/sse-instr.h",
      "src/ic/ia32/access-compiler-ia32.cc",
       "src/ic/ia32/handler-compiler-ia32.cc",
       "src/ic/ia32/ic-ia32.cc",
@@ -1890,7 +2021,6 @@ v8_source_set("v8_base") {
     ]
   } else if (v8_current_cpu == "x64") {
     sources += [  ### gcmole(arch:x64) ###
-      "src/builtins/x64/builtins-x64.cc",
       "src/compiler/x64/code-generator-x64.cc",
       "src/compiler/x64/instruction-codes-x64.h",
       "src/compiler/x64/instruction-scheduler-x64.cc",
@@ -1931,6 +2061,9 @@ v8_source_set("v8_base") {
       "src/x64/simulator-x64.h",
       "src/x64/sse-instr.h",
     ]
+    if (is_linux) {
+      sources += [ "src/trap-handler/handler-inside.cc" ]
+    }
   } else if (v8_current_cpu == "arm") {
     sources += [  ### gcmole(arch:arm) ###
       "src/arm/assembler-arm-inl.h",
@@ -1954,7 +2087,6 @@ v8_source_set("v8_base") {
       "src/arm/macro-assembler-arm.h",
       "src/arm/simulator-arm.cc",
       "src/arm/simulator-arm.h",
-      "src/builtins/arm/builtins-arm.cc",
       "src/compiler/arm/code-generator-arm.cc",
       "src/compiler/arm/instruction-codes-arm.h",
       "src/compiler/arm/instruction-scheduler-arm.cc",
@@ -2008,7 +2140,6 @@ v8_source_set("v8_base") {
       "src/arm64/simulator-arm64.h",
      "src/arm64/utils-arm64.cc",
       "src/arm64/utils-arm64.h",
-      "src/builtins/arm64/builtins-arm64.cc",
       "src/compiler/arm64/code-generator-arm64.cc",
       "src/compiler/arm64/instruction-codes-arm64.h",
       "src/compiler/arm64/instruction-scheduler-arm64.cc",
@@ -2034,7 +2165,6 @@ v8_source_set("v8_base") {
     ]
   } else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
     sources += [  ### gcmole(arch:mipsel) ###
-      "src/builtins/mips/builtins-mips.cc",
       "src/compiler/mips/code-generator-mips.cc",
       "src/compiler/mips/instruction-codes-mips.h",
       "src/compiler/mips/instruction-scheduler-mips.cc",
@@ -2074,7 +2204,6 @@ v8_source_set("v8_base") {
     ]
   } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
     sources += [  ### gcmole(arch:mips64el) ###
-      "src/builtins/mips64/builtins-mips64.cc",
       "src/compiler/mips64/code-generator-mips64.cc",
       "src/compiler/mips64/instruction-codes-mips64.h",
       "src/compiler/mips64/instruction-scheduler-mips64.cc",
@@ -2114,7 +2243,6 @@ v8_source_set("v8_base") {
     ]
   } else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
     sources += [  ### gcmole(arch:ppc) ###
-      "src/builtins/ppc/builtins-ppc.cc",
       "src/compiler/ppc/code-generator-ppc.cc",
       "src/compiler/ppc/instruction-codes-ppc.h",
       "src/compiler/ppc/instruction-scheduler-ppc.cc",
@@ -2154,7 +2282,6 @@ v8_source_set("v8_base") {
     ]
   } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
     sources += [  ### gcmole(arch:s390) ###
-      "src/builtins/s390/builtins-s390.cc",
       "src/compiler/s390/code-generator-s390.cc",
       "src/compiler/s390/instruction-codes-s390.h",
       "src/compiler/s390/instruction-scheduler-s390.cc",
@@ -2194,7 +2321,6 @@ v8_source_set("v8_base") {
     ]
   } else if (v8_current_cpu == "x87") {
     sources += [  ### gcmole(arch:x87) ###
-      "src/builtins/x87/builtins-x87.cc",
       "src/compiler/x87/code-generator-x87.cc",
       "src/compiler/x87/instruction-codes-x87.h",
       "src/compiler/x87/instruction-scheduler-x87.cc",
@@ -2239,16 +2365,9 @@ v8_source_set("v8_base") {
     ":v8_libbase",
     ":v8_libsampler",
     ":v8_version",
+    "src/inspector:inspector",
   ]
 
-  sources += [ v8_generated_peephole_source ]
-  deps += [ ":run_mkpeephole" ]
-
-  if (is_win) {
-    # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
-    cflags = [ "/wd4267" ]
-  }
-
   if (v8_enable_i18n_support) {
     deps += [ "//third_party/icu" ]
     if (is_win) {
@@ -2265,10 +2384,6 @@ v8_source_set("v8_base") {
     sources += [ "$target_gen_dir/debug-support.cc" ]
     deps += [ ":postmortem-metadata" ]
   }
-
-  if (v8_enable_inspector) {
-    deps += [ "src/inspector:inspector" ]
-  }
 }
 
 v8_component("v8_libbase") {
@@ -2325,6 +2440,7 @@ v8_component("v8_libbase") {
     "src/base/safe_math_impl.h",
     "src/base/sys-info.cc",
     "src/base/sys-info.h",
+    "src/base/timezone-cache.h",
     "src/base/utils/random-number-generator.cc",
     "src/base/utils/random-number-generator.h",
   ]
@@ -2340,7 +2456,10 @@ v8_component("v8_libbase") {
   }
 
   if (is_posix) {
-    sources += [ "src/base/platform/platform-posix.cc" ]
+    sources += [
+      "src/base/platform/platform-posix.cc",
+      "src/base/platform/platform-posix.h",
+    ]
   }
 
   if (is_linux) {
@@ -2491,6 +2610,7 @@ if (current_toolchain == v8_snapshot_toolchain) {
     deps = [
       ":v8_base",
+      ":v8_builtins_setup",
      ":v8_libbase",
       ":v8_libplatform",
       ":v8_nosnapshot",
@@ -2500,34 +2620,6 @@ if (current_toolchain == v8_snapshot_toolchain) {
   }
 }
 
-v8_executable("mkpeephole") {
-  # mkpeephole needs to be built for the build host so the peephole lookup
-  # table can built during build. The table depends on the properties of
-  # bytecodes that are described in bytecodes.{cc,h}.
-  visibility = [ ":*" ]  # Only targets in this file can depend on this.
-
-  sources = [
-    "src/interpreter/bytecode-operands.cc",
-    "src/interpreter/bytecode-operands.h",
-    "src/interpreter/bytecode-peephole-optimizer.h",
-    "src/interpreter/bytecode-traits.h",
-    "src/interpreter/bytecodes.cc",
-    "src/interpreter/bytecodes.h",
-    "src/interpreter/mkpeephole.cc",
-  ]
-
-  configs = [
-    ":external_config",
-    ":internal_config",
-  ]
-
-  deps = [
-    ":v8_libbase",
-    "//build/config/sanitizers:deps",
-    "//build/win:default_exe_manifest",
-  ]
-}
-
 ###############################################################################
 # Public targets
 #
@@ -2667,9 +2759,6 @@ v8_executable("d8") {
   }
 
   defines = []
-  if (v8_enable_inspector) {
-    defines += [ "V8_INSPECTOR_ENABLED" ]
-  }
 
   if (v8_enable_vtunejit) {
     deps += [ "//src/third_party/vtune:v8_vtune" ]
@@ -2869,17 +2958,6 @@ v8_source_set("wasm_module_runner") {
   ]
 }
 
-v8_source_set("wasm_test_signatures") {
-  sources = [
-    "test/common/wasm/test-signatures.h",
-  ]
-
-  configs = [
-    ":external_config",
-    ":internal_config_base",
-  ]
-}
-
 v8_source_set("wasm_fuzzer") {
   sources = [
     "test/fuzzer/wasm.cc",
@@ -2920,13 +2998,13 @@ v8_fuzzer("wasm_asmjs_fuzzer") {
 v8_source_set("wasm_code_fuzzer") {
   sources = [
+    "test/common/wasm/test-signatures.h",
     "test/fuzzer/wasm-code.cc",
   ]
 
   deps = [
     ":fuzzer_support",
     ":wasm_module_runner",
-    ":wasm_test_signatures",
   ]
 
   configs = [
@@ -2940,13 +3018,13 @@ v8_fuzzer("wasm_code_fuzzer") {
 v8_source_set("wasm_call_fuzzer") {
   sources = [
+    "test/common/wasm/test-signatures.h",
     "test/fuzzer/wasm-call.cc",
   ]
 
   deps = [
     ":fuzzer_support",
     ":wasm_module_runner",
-    ":wasm_test_signatures",
  ]
 
   configs = [
@@ -3112,13 +3190,13 @@ v8_fuzzer("wasm_data_section_fuzzer") {
 v8_source_set("wasm_compile_fuzzer") {
   sources = [
+    "test/common/wasm/test-signatures.h",
     "test/fuzzer/wasm-compile.cc",
   ]
 
   deps = [
     ":fuzzer_support",
     ":wasm_module_runner",
-    ":wasm_test_signatures",
  ]
 
   configs = [
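
A noteworthy piece of new build logic above is the v8_host_byteorder probe: GN can infer endianness from host_cpu for most architectures, but a ppc64 host can run either big- or little-endian, so BUILD.gn now shells out to //tools/get_byteorder.py via exec_script(..., [], "trim string") and branches on the result. The probe script itself is not part of this diff; a minimal sketch of what such a script could look like, assuming it only needs to print "little" or "big" to stdout:

    #!/usr/bin/env python
    # Hypothetical sketch of a probe like tools/get_byteorder.py; the real
    # script is not shown in this diff. GN's exec_script() with the
    # "trim string" result type reads whatever the script prints to stdout.
    import sys

    def main():
        # sys.byteorder is 'little' or 'big' for the interpreter's host,
        # which is exactly the build host GN is asking about here.
        print(sys.byteorder)
        return 0

    if __name__ == '__main__':
        sys.exit(main())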

1145
deps/v8/ChangeLog

File diff suppressed because it is too large

20
deps/v8/DEPS

@@ -8,15 +8,15 @@ vars = {
 deps = {
   "v8/build":
-    Var("chromium_url") + "/chromium/src/build.git" + "@" + "c7c2db69cd571523ce728c4d3dceedbd1896b519",
+    Var("chromium_url") + "/chromium/src/build.git" + "@" + "94c06fe70f3f6429c59e3ec0f6acd4f6710050b2",
   "v8/tools/gyp":
     Var("chromium_url") + "/external/gyp.git" + "@" + "e7079f0e0e14108ab0dba58728ff219637458563",
   "v8/third_party/icu":
     Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "450be73c9ee8ae29d43d4fdc82febb2a5f62bfb5",
   "v8/third_party/instrumented_libraries":
-    Var("chromium_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "5b6f777da671be977f56f0e8fc3469a3ccbb4474",
+    Var("chromium_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "05d5695a73e78b9cae55b8579fd8bf22b85eb283",
   "v8/buildtools":
-    Var("chromium_url") + "/chromium/buildtools.git" + "@" + "94cdccbebc7a634c27145a3d84089e85fbb42e69",
+    Var("chromium_url") + "/chromium/buildtools.git" + "@" + "d3074448541662f242bcee623049c13a231b5648",
   "v8/base/trace_event/common":
     Var("chromium_url") + "/chromium/src/base/trace_event/common.git" + "@" + "06294c8a4a6f744ef284cd63cfe54dbf61eea290",
   "v8/third_party/jinja2":
@@ -34,26 +34,22 @@ deps = {
   "v8/test/mozilla/data":
     Var("chromium_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be",
   "v8/test/test262/data":
-    Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "a72ee6d91275aa6524e84a9b7070103411ef2689",
+    Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "230f9fc5688ce76bfaa99aba5f680a159eaac9e2",
   "v8/test/test262/harness":
     Var("chromium_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "0f2acdd882c84cff43b9d60df7574a1901e2cdcd",
   "v8/tools/clang":
-    Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "9913fb19b687b0c858f697efd7bd2468d789a3d5",
+    Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "49df471350a60efaec6951f321dd65475496ba17",
   "v8/test/wasm-js":
-    Var("chromium_url") + "/external/github.com/WebAssembly/spec.git" + "@" + "b8b919e4a0d52db4d3d762e731e615bc3a38b3b2",
+    Var("chromium_url") + "/external/github.com/WebAssembly/spec.git" + "@" + "07fd6430f879d36928d179a62d9bdeed82286065",
 }
 
 deps_os = {
   "android": {
     "v8/third_party/android_tools":
-      Var("chromium_url") + "/android_tools.git" + "@" + "b43a6a289a7588b1769814f04dd6c7d7176974cc",
+      Var("chromium_url") + "/android_tools.git" + "@" + "b65c4776dac2cf1b80e969b3b2d4e081b9c84f29",
     "v8/third_party/catapult":
-      Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "246a39a82c2213d913a96fff020a263838dc76e6",
+      Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "9a55abab029cb9ae94f5160ded11b09a4638a955",
   },
-  "win": {
-    "v8/third_party/cygwin":
-      Var("chromium_url") + "/chromium/deps/cygwin.git" + "@" + "c89e446b273697fadf3a10ff1007a97c0b7de6df",
-  }
 }
 
 recursedeps = [
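
Each DEPS entry pins a dependency to an exact revision by concatenating a base URL from the vars dict, a repository path, and an "@<sha>" suffix, which is why rolling a dependency shows up above as a one-line hash change. A small stand-alone illustration of the mechanics (gclient normally defines Var() and evaluates the DEPS file itself; the stub below only mimics the lookup):

    # Illustration only: how a DEPS entry composes into a pinned checkout URL.
    # gclient normally supplies Var() and evaluates DEPS; this stub mimics it.
    vars = {"chromium_url": "https://chromium.googlesource.com"}

    def Var(name):
        # Stand-in for gclient's Var(): look up a value in the vars dict.
        return vars[name]

    dep = (Var("chromium_url") + "/chromium/src/build.git" + "@" +
           "94c06fe70f3f6429c59e3ec0f6acd4f6710050b2")
    print(dep)
    # -> https://chromium.googlesource.com/chromium/src/build.git@94c06fe7...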

1
deps/v8/OWNERS

@@ -7,7 +7,6 @@ bradnelson@chromium.org
 cbruni@chromium.org
 clemensh@chromium.org
 danno@chromium.org
-epertoso@chromium.org
 franzih@chromium.org
 gsathya@chromium.org
 hablich@chromium.org

28
deps/v8/PRESUBMIT.py

@@ -31,6 +31,7 @@ See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
 for more details about the presubmit API built into gcl.
 """
 
+import re
 import sys
 
@@ -250,6 +251,7 @@ def _CheckMissingFiles(input_api, output_api):
 def _CommonChecks(input_api, output_api):
   """Checks common to both upload and commit."""
   results = []
+  results.extend(_CheckCommitMessageBugEntry(input_api, output_api))
   results.extend(input_api.canned_checks.CheckOwners(
       input_api, output_api, source_file_filter=None))
   results.extend(input_api.canned_checks.CheckPatchFormatted(
@@ -276,6 +278,32 @@ def _SkipTreeCheck(input_api, output_api):
   return input_api.environ.get('PRESUBMIT_TREE_CHECK') == 'skip'
 
 
+def _CheckCommitMessageBugEntry(input_api, output_api):
+  """Check that bug entries are well-formed in commit message."""
+  bogus_bug_msg = (
+      'Bogus BUG entry: %s. Please specify the issue tracker prefix and the '
+      'issue number, separated by a colon, e.g. v8:123 or chromium:12345.')
+  results = []
+  for bug in (input_api.change.BUG or '').split(','):
+    bug = bug.strip()
+    if 'none'.startswith(bug.lower()):
+      continue
+    if ':' not in bug:
+      try:
+        if int(bug) > 100000:
+          # Rough indicator for current chromium bugs.
+          prefix_guess = 'chromium'
+        else:
+          prefix_guess = 'v8'
+        results.append('BUG entry requires issue tracker prefix, e.g. %s:%s' %
+                       (prefix_guess, bug))
+      except ValueError:
+        results.append(bogus_bug_msg % bug)
+    elif not re.match(r'\w+:\d+', bug):
+      results.append(bogus_bug_msg % bug)
+  return [output_api.PresubmitError(r) for r in results]
+
+
 def CheckChangeOnUpload(input_api, output_api):
   results = []
   results.extend(_CommonChecks(input_api, output_api))
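
The new _CheckCommitMessageBugEntry hook accepts "none" (and any prefix of it), requires entries of the form <tracker>:<number> such as v8:123 or chromium:12345 (validated with re.match(r'\w+:\d+', ...)), and guesses a tracker prefix for bare issue numbers. A stand-alone sketch of the same classification logic outside the presubmit API (in the real check, input_api.change.BUG supplies the raw value):

    # Stand-alone illustration of the BUG-entry validation added above; the
    # real check runs inside the presubmit API and reads input_api.change.BUG.
    import re

    def classify(bug):
        bug = bug.strip()
        if 'none'.startswith(bug.lower()):  # '', 'n', 'no', 'none' pass
            return 'ok (no bug)'
        if ':' not in bug:
            try:
                # Bare number: guess the tracker, then ask for a prefix.
                prefix = 'chromium' if int(bug) > 100000 else 'v8'
                return 'needs prefix, e.g. %s:%s' % (prefix, bug)
            except ValueError:
                return 'bogus'
        return 'ok' if re.match(r'\w+:\d+', bug) else 'bogus'

    for entry in ['none', 'v8:123', 'chromium:12345', '6123', 'v8:']:
        print('%-16s -> %s' % (entry, classify(entry)))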

7
deps/v8/gni/isolate.gni

@@ -101,11 +101,6 @@ template("v8_isolate_run") {
     } else {
       icu_use_data_file_flag = "0"
     }
-    if (v8_enable_inspector) {
-      enable_inspector = "1"
-    } else {
-      enable_inspector = "0"
-    }
     if (v8_use_external_startup_data) {
       use_external_startup_data = "1"
     } else {
@@ -177,8 +172,6 @@ template("v8_isolate_run") {
             "--config-variable",
             "target_arch=$target_arch",
             "--config-variable",
-            "v8_enable_inspector=$enable_inspector",
-            "--config-variable",
             "v8_use_external_startup_data=$use_external_startup_data",
             "--config-variable",
             "v8_use_snapshot=$use_snapshot",

6
deps/v8/gni/v8.gni

@@ -37,9 +37,6 @@ declare_args() {
   # add a dependency on the ICU library.
   v8_enable_i18n_support = true
 
-  # Enable inspector. See include/v8-inspector.h.
-  v8_enable_inspector = true
-
   # Use static libraries instead of source_sets.
   v8_static_library = false
 }
@@ -66,9 +63,8 @@ v8_inspector_js_protocol = v8_path_prefix + "/src/inspector/js_protocol.json"
 #
 # Common configs to remove or add in all v8 targets.
-v8_remove_configs = [ "//build/config/compiler:chromium_code" ]
+v8_remove_configs = []
 v8_add_configs = [
-  "//build/config/compiler:no_chromium_code",
   v8_path_prefix + ":features",
   v8_path_prefix + ":toolchain",
 ]

12
deps/v8/gypfiles/all.gyp

@@ -9,6 +9,7 @@
     'type': 'none',
     'dependencies': [
       '../src/d8.gyp:d8',
+      '../test/inspector/inspector.gyp:*',
     ],
     'conditions': [
       ['component!="shared_library"', {
@@ -25,20 +26,11 @@
         '../test/unittests/unittests.gyp:*',
       ],
     }],
-    ['v8_enable_inspector==1', {
-      'dependencies': [
-        '../test/inspector/inspector.gyp:*',
-      ],
-    }],
-    ['v8_enable_inspector==1 and test_isolation_mode != "noop"', {
-      'dependencies': [
-        '../test/debugger/debugger.gyp:*',
-      ],
-    }],
     ['test_isolation_mode != "noop"', {
       'dependencies': [
         '../test/bot_default.gyp:*',
         '../test/benchmarks/benchmarks.gyp:*',
+        '../test/debugger/debugger.gyp:*',
         '../test/default.gyp:*',
         '../test/intl/intl.gyp:*',
         '../test/message/message.gyp:*',

3
deps/v8/gypfiles/features.gypi

@@ -142,5 +142,8 @@
       ],  # conditions
     },  # Release
   },  # configurations
+  'defines': [
+    'V8_GYP_BUILD',
+  ],  # defines
 },  # target_defaults
 }

1
deps/v8/gypfiles/isolate.gypi

@@ -82,7 +82,6 @@
     '--config-variable', 'sanitizer_coverage=<(sanitizer_coverage)',
     '--config-variable', 'component=<(component)',
     '--config-variable', 'target_arch=<(target_arch)',
-    '--config-variable', 'v8_enable_inspector=<(v8_enable_inspector)',
     '--config-variable', 'v8_use_external_startup_data=<(v8_use_external_startup_data)',
     '--config-variable', 'v8_use_snapshot=<(v8_use_snapshot)',
   ],

25
deps/v8/gypfiles/standalone.gypi

@@ -46,7 +46,6 @@
     'msvs_multi_core_compile%': '1',
     'mac_deployment_target%': '10.7',
     'release_extra_cflags%': '',
-    'v8_enable_inspector%': 0,
     'variables': {
       'variables': {
         'variables': {
@@ -93,16 +92,16 @@
           ['OS=="linux" and use_sysroot==1', {
             'conditions': [
               ['target_arch=="arm"', {
-                'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_wheezy_arm-sysroot',
+                'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_jessie_arm-sysroot',
               }],
               ['target_arch=="x64"', {
-                'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_wheezy_amd64-sysroot',
+                'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_jessie_amd64-sysroot',
               }],
               ['target_arch=="ia32"', {
-                'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_wheezy_i386-sysroot',
+                'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_jessie_i386-sysroot',
               }],
               ['target_arch=="mipsel"', {
-                'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_wheezy_mips-sysroot',
+                'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_jessie_mips-sysroot',
               }],
             ],
           }],  # OS=="linux" and use_sysroot==1
@@ -243,9 +242,6 @@
     # Relative path to icu.gyp from this file.
     'icu_gyp_path': '../third_party/icu/icu.gyp',
 
-    # Relative path to inspector.gyp from this file.
-    'inspector_gyp_path': '../src/v8-inspector/inspector.gyp',
-
     'conditions': [
       ['(v8_target_arch=="arm" and host_arch!="arm") or \
         (v8_target_arch=="arm64" and host_arch!="arm64") or \
@@ -257,18 +253,6 @@
       }, {
         'want_separate_host_toolset': 0,
       }],
-      ['(v8_target_arch=="arm" and host_arch!="arm") or \
-        (v8_target_arch=="arm64" and host_arch!="arm64") or \
-        (v8_target_arch=="mipsel" and host_arch!="mipsel") or \
-        (v8_target_arch=="mips64el" and host_arch!="mips64el") or \
-        (v8_target_arch=="mips" and host_arch!="mips") or \
-        (v8_target_arch=="mips64" and host_arch!="mips64") or \
-        (v8_target_arch=="x64" and host_arch!="x64") or \
-        (OS=="android" or OS=="qnx")', {
-        'want_separate_host_toolset_mkpeephole': 1,
-      }, {
-        'want_separate_host_toolset_mkpeephole': 0,
-      }],
       ['OS == "win"', {
         'os_posix%': 0,
       }, {
@@ -870,6 +854,7 @@
           ],
         }],
       ],
+      'msvs_cygwin_shell': 0,
      'msvs_cygwin_dirs': ['<(DEPTH)/third_party/cygwin'],
       'msvs_disabled_warnings': [
         # C4091: 'typedef ': ignored on left of 'X' when no variable is

1
deps/v8/gypfiles/toolchain.gypi

@@ -74,7 +74,6 @@
     # Chrome needs this definition unconditionally. For standalone V8 builds,
     # it's handled in gypfiles/standalone.gypi.
     'want_separate_host_toolset%': 1,
-    'want_separate_host_toolset_mkpeephole%': 1,
     # Toolset the shell binary should be compiled for. Possible values are
     # 'host' and 'target'.

8
deps/v8/include/libplatform/libplatform.h

@@ -12,6 +12,8 @@
 namespace v8 {
 namespace platform {
+enum class IdleTaskSupport { kDisabled, kEnabled };
 /**
  * Returns a new instance of the default v8::Platform implementation.
  *
@@ -19,9 +21,13 @@ namespace platform {
  * is the number of worker threads to allocate for background jobs. If a value
  * of zero is passed, a suitable default based on the current number of
  * processors online will be chosen.
+ * If |idle_task_support| is enabled then the platform will accept idle
+ * tasks (IdleTasksEnabled will return true) and will rely on the embedder
+ * calling v8::platform::RunIdleTasks to process the idle tasks.
  */
 V8_PLATFORM_EXPORT v8::Platform* CreateDefaultPlatform(
-    int thread_pool_size = 0);
+    int thread_pool_size = 0,
+    IdleTaskSupport idle_task_support = IdleTaskSupport::kDisabled);
 /**
  * Pumps the message loop for the given isolate.
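
A minimal sketch of how an embedder could opt into the new idle-task support (the setup around the call is the usual embedding boilerplate, not part of this diff; RunIdleTasks is the pump the header comment above refers to):

    #include "include/libplatform/libplatform.h"
    #include "include/v8.h"

    int main(int argc, char* argv[]) {
      // Opt in: the platform will now queue idle tasks and report
      // IdleTasksEnabled() == true for its isolates.
      v8::Platform* platform = v8::platform::CreateDefaultPlatform(
          0 /* thread_pool_size: let V8 pick a default */,
          v8::platform::IdleTaskSupport::kEnabled);
      v8::V8::InitializePlatform(platform);
      v8::V8::Initialize();
      // ... create an isolate and run scripts; then, once per event-loop
      // turn, drain idle work with a deadline in seconds, e.g.:
      // v8::platform::RunIdleTasks(platform, isolate, 0.01);
      v8::V8::Dispose();
      v8::V8::ShutdownPlatform();
      delete platform;
      return 0;
    }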

43
deps/v8/include/v8-debug.h

@@ -8,7 +8,9 @@
 #include "v8.h"  // NOLINT(build/include)
 /**
- * Debugger support for the V8 JavaScript engine.
+ * ATTENTION: The debugger API exposed by this file is deprecated and will be
+ * removed by the end of 2017. Please use the V8 inspector declared
+ * in include/v8-inspector.h instead.
  */
 namespace v8 {
@@ -140,21 +142,19 @@ class V8_EXPORT Debug {
   */
  typedef void (*MessageHandler)(const Message& message);
-  /**
-   * This is now a no-op.
-   */
-  typedef void (*DebugMessageDispatchHandler)();
-  static bool SetDebugEventListener(Isolate* isolate, EventCallback that,
-                                    Local<Value> data = Local<Value>());
+  V8_DEPRECATED("No longer supported", static bool SetDebugEventListener(
+                                           Isolate* isolate, EventCallback that,
+                                           Local<Value> data = Local<Value>()));
   // Schedule a debugger break to happen when JavaScript code is run
   // in the given isolate.
-  static void DebugBreak(Isolate* isolate);
+  V8_DEPRECATED("No longer supported",
+                static void DebugBreak(Isolate* isolate));
   // Remove scheduled debugger break in given isolate if it has not
   // happened yet.
-  static void CancelDebugBreak(Isolate* isolate);
+  V8_DEPRECATED("No longer supported",
+                static void CancelDebugBreak(Isolate* isolate));
   // Check if a debugger break is scheduled in the given isolate.
   V8_DEPRECATED("No longer supported",
@@ -189,10 +189,10 @@ class V8_EXPORT Debug {
   *  }
   * \endcode
   */
-  // TODO(dcarney): data arg should be a MaybeLocal
-  static MaybeLocal<Value> Call(Local<Context> context,
-                                v8::Local<v8::Function> fun,
-                                Local<Value> data = Local<Value>());
+  V8_DEPRECATED("No longer supported",
+                static MaybeLocal<Value> Call(
+                    Local<Context> context, v8::Local<v8::Function> fun,
+                    Local<Value> data = Local<Value>()));
   // This is now a no-op.
   V8_DEPRECATED("No longer supported",
@@ -221,23 +221,28 @@ class V8_EXPORT Debug {
   * (default Isolate if not provided). V8 will abort if LiveEdit is
   * unexpectedly used. LiveEdit is enabled by default.
   */
-  static void SetLiveEditEnabled(Isolate* isolate, bool enable);
+  V8_DEPRECATED("No longer supported",
+                static void SetLiveEditEnabled(Isolate* isolate, bool enable));
  /**
   * Returns array of internal properties specific to the value type. Result has
   * the following format: [<name>, <value>,...,<name>, <value>]. Result array
   * will be allocated in the current context.
   */
-  static MaybeLocal<Array> GetInternalProperties(Isolate* isolate,
-                                                 Local<Value> value);
+  V8_DEPRECATED("No longer supported",
+                static MaybeLocal<Array> GetInternalProperties(
+                    Isolate* isolate, Local<Value> value));
  /**
   * Defines if the ES2015 tail call elimination feature is enabled or not.
   * The change of this flag triggers deoptimization of all functions that
   * contain calls at tail position.
   */
-  static bool IsTailCallEliminationEnabled(Isolate* isolate);
-  static void SetTailCallEliminationEnabled(Isolate* isolate, bool enabled);
+  V8_DEPRECATED("No longer supported",
+                static bool IsTailCallEliminationEnabled(Isolate* isolate));
+  V8_DEPRECATED("No longer supported",
+                static void SetTailCallEliminationEnabled(Isolate* isolate,
+                                                          bool enabled));
 };
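
Since every entry point above now funnels users toward include/v8-inspector.h, here is a rough, non-authoritative sketch of the replacement wiring (channel and client specialization elided; names other than the v8_inspector API itself are illustrative):

    #include <memory>
    #include "include/v8-inspector.h"

    // The client's virtual methods all have default implementations, so an
    // empty subclass is enough to get started.
    class InspectorClient : public v8_inspector::V8InspectorClient {};

    void AttachInspector(v8::Isolate* isolate, v8::Local<v8::Context> context) {
      static InspectorClient client;
      static std::unique_ptr<v8_inspector::V8Inspector> inspector =
          v8_inspector::V8Inspector::create(isolate, &client);
      // Register the context under a context group; debugging sessions are
      // then attached per group via inspector->connect(...).
      inspector->contextCreated(v8_inspector::V8ContextInfo(
          context, 1 /* contextGroupId */, v8_inspector::StringView()));
    }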

58
deps/v8/include/v8-experimental.h

@@ -1,58 +0,0 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/**
* This header contains a set of experimental V8 APIs. We hope these will
* become a part of standard V8, but they may also be removed if we deem the
* experiment to not be successful.
*/
#ifndef V8_INCLUDE_V8_EXPERIMENTAL_H_
#define V8_INCLUDE_V8_EXPERIMENTAL_H_
#include "v8.h" // NOLINT(build/include)
namespace v8 {
namespace experimental {
// Allow the embedder to construct accessors that V8 can compile and use
// directly, without jumping into the runtime.
class V8_EXPORT FastAccessorBuilder {
public:
struct ValueId {
size_t value_id;
};
struct LabelId {
size_t label_id;
};
static FastAccessorBuilder* New(Isolate* isolate);
ValueId IntegerConstant(int int_constant);
ValueId GetReceiver();
ValueId LoadInternalField(ValueId value_id, int field_no);
ValueId LoadInternalFieldUnchecked(ValueId value_id, int field_no);
ValueId LoadValue(ValueId value_id, int offset);
ValueId LoadObject(ValueId value_id, int offset);
ValueId ToSmi(ValueId value_id);
void ReturnValue(ValueId value_id);
void CheckFlagSetOrReturnNull(ValueId value_id, int mask);
void CheckNotZeroOrReturnNull(ValueId value_id);
LabelId MakeLabel();
void SetLabel(LabelId label_id);
void Goto(LabelId label_id);
void CheckNotZeroOrJump(ValueId value_id, LabelId label_id);
ValueId Call(v8::FunctionCallback callback, ValueId value_id);
private:
FastAccessorBuilder() = delete;
FastAccessorBuilder(const FastAccessorBuilder&) = delete;
~FastAccessorBuilder() = delete;
void operator=(const FastAccessorBuilder&) = delete;
};
} // namespace experimental
} // namespace v8
#endif // V8_INCLUDE_V8_EXPERIMENTAL_H_

2
deps/v8/include/v8-inspector.h

@@ -224,8 +224,6 @@ class V8_EXPORT V8Inspector {
   virtual void resetContextGroup(int contextGroupId) = 0;
   // Various instrumentation.
-  virtual void willExecuteScript(v8::Local<v8::Context>, int scriptId) = 0;
-  virtual void didExecuteScript(v8::Local<v8::Context>) = 0;
   virtual void idleStarted() = 0;
   virtual void idleFinished() = 0;

8
deps/v8/include/v8-platform.h

@@ -212,6 +212,14 @@ class Platform {
   /** Removes tracing state change observer. */
   virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
+  typedef void (*StackTracePrinter)();
+  /**
+   * Returns a function pointer that print a stack trace of the current stack
+   * on invocation. Disables printing of the stack trace if nullptr.
+   */
+  virtual StackTracePrinter GetStackTracePrinter() { return nullptr; }
 };
 }  // namespace v8
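
For illustration, an embedder's platform could surface its own stack dumper through the new hook roughly like this (a sketch: everything except GetStackTracePrinter is whatever the embedder already implements for v8::Platform, which has further pure-virtual methods not shown):

    class MyPlatform : public v8::Platform {
     public:
      // Handed to V8 so it can print the embedder's native stack on demand.
      static void PrintStack() {
        // Walk and print the current native stack here.
      }
      StackTracePrinter GetStackTracePrinter() override { return &PrintStack; }
      // ... remaining v8::Platform overrides elided ...
    };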

5
deps/v8/include/v8-profiler.h

@@ -812,11 +812,6 @@ class V8_EXPORT HeapProfiler {
   /** Returns memory used for profiler internal data and snapshots. */
   size_t GetProfilerMemorySize();
-  /**
-   * Sets a RetainedObjectInfo for an object group (see V8::SetObjectGroupId).
-   */
-  void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
  private:
   HeapProfiler();
   ~HeapProfiler();

6
deps/v8/include/v8-version.h

@@ -9,9 +9,9 @@
 // NOTE these macros are used by some of the tool scripts and the build
 // system so their names cannot be changed without changing the scripts.
 #define V8_MAJOR_VERSION 5
-#define V8_MINOR_VERSION 8
-#define V8_BUILD_NUMBER 283
-#define V8_PATCH_LEVEL 41
+#define V8_MINOR_VERSION 9
+#define V8_BUILD_NUMBER 211
+#define V8_PATCH_LEVEL 32
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
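
Embedders that track multiple V8 lines can key off the bumped macros; for example:

    #include "include/v8-version.h"

    // True from this commit onwards (5.9.211.32), false on the old 5.8 line.
    #if V8_MAJOR_VERSION > 5 || (V8_MAJOR_VERSION == 5 && V8_MINOR_VERSION >= 9)
    // 5.9+ code paths, e.g. avoiding the newly deprecated v8::Debug entry
    // points shown above.
    #endif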

650
deps/v8/include/v8.h

File diff suppressed because it is too large

6
deps/v8/infra/config/cq.cfg

@@ -32,6 +32,10 @@ verifiers {
   buckets {
     name: "master.tryserver.v8"
     builders { name: "v8_android_arm_compile_rel" }
+    builders {
+      name: "v8_node_linux64_rel"
+      experiment_percentage: 100
+    }
     builders { name: "v8_linux64_asan_rel_ng" }
     builders {
       name: "v8_linux64_asan_rel_ng_triggered"
@@ -119,7 +123,7 @@ verifiers {
     }
     builders {
       name: "v8_linux64_sanitizer_coverage_rel"
-      experiment_percentage: 20
+      experiment_percentage: 100
     }
   }
   buckets {
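
(As elsewhere in cq.cfg, experiment_percentage marks a builder whose results do not block the commit queue; raising it to 100 only means the builder is triggered on every CQ attempt.)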

69
deps/v8/infra/mb/mb_config.pyl

@@ -24,6 +24,18 @@
     'mips64el.debug': 'default_debug_mips64el',
     'mips64el.optdebug': 'default_optdebug_mips64el',
     'mips64el.release': 'default_release_mips64el',
+    'ppc.debug': 'default_debug_ppc',
+    'ppc.optdebug': 'default_optdebug_ppc',
+    'ppc.release': 'default_release_ppc',
+    'ppc64.debug': 'default_debug_ppc64',
+    'ppc64.optdebug': 'default_optdebug_ppc64',
+    'ppc64.release': 'default_release_ppc64',
+    's390.debug': 'default_debug_s390',
+    's390.optdebug': 'default_optdebug_s390',
+    's390.release': 'default_release_s390',
+    's390x.debug': 'default_debug_s390x',
+    's390x.optdebug': 'default_optdebug_s390x',
+    's390x.release': 'default_release_s390x',
     'x64.debug': 'default_debug_x64',
     'x64.optdebug': 'default_optdebug_x64',
     'x64.release': 'default_release_x64',
@@ -92,11 +104,11 @@
       # original config also specified -O1, which we dropped because chromium
       # doesn't have it (anymore).
       'V8 Linux64 - cfi': 'gyp_release_x64_cfi_symbolized',
-      'V8 Linux - vtunejit': 'gyp_debug_x86_vtunejit',
+      'V8 Linux - vtunejit': 'gn_debug_x86_vtunejit',
       'V8 Linux64 - gcov coverage': 'gyp_release_x64_gcc_coverage',
-      'V8 Linux - predictable': 'gyp_release_x86_predictable',
+      'V8 Linux - predictable': 'gn_release_x86_predictable',
       'V8 Linux - full debug': 'gyp_full_debug_x86',
-      'V8 Linux - interpreted regexp': 'gyp_release_x86_interpreted_regexp',
+      'V8 Linux - interpreted regexp': 'gn_release_x86_interpreted_regexp',
       'V8 Random Deopt Fuzzer - debug': 'gyp_debug_x86',
     },
@@ -234,6 +246,34 @@
       'gn', 'debug', 'simulate_mips64el', 'v8_enable_slow_dchecks'],
     'default_release_mips64el': [
       'gn', 'release', 'simulate_mips64el'],
+    'default_debug_ppc': [
+      'gn', 'debug', 'simulate_ppc', 'v8_enable_slow_dchecks',
+      'v8_full_debug'],
+    'default_optdebug_ppc': [
+      'gn', 'debug', 'simulate_ppc', 'v8_enable_slow_dchecks'],
+    'default_release_ppc': [
+      'gn', 'release', 'simulate_ppc'],
+    'default_debug_ppc64': [
+      'gn', 'debug', 'simulate_ppc64', 'v8_enable_slow_dchecks',
+      'v8_full_debug'],
+    'default_optdebug_ppc64': [
+      'gn', 'debug', 'simulate_ppc64', 'v8_enable_slow_dchecks'],
+    'default_release_ppc64': [
+      'gn', 'release', 'simulate_ppc64'],
+    'default_debug_s390': [
+      'gn', 'debug', 'simulate_s390', 'v8_enable_slow_dchecks',
+      'v8_full_debug'],
+    'default_optdebug_s390': [
+      'gn', 'debug', 'simulate_s390', 'v8_enable_slow_dchecks'],
+    'default_release_s390': [
+      'gn', 'release', 'simulate_s390'],
+    'default_debug_s390x': [
+      'gn', 'debug', 'simulate_s390x', 'v8_enable_slow_dchecks',
+      'v8_full_debug'],
+    'default_optdebug_s390x': [
+      'gn', 'debug', 'simulate_s390x', 'v8_enable_slow_dchecks'],
+    'default_release_s390x': [
+      'gn', 'release', 'simulate_s390x'],
     'default_debug_x64': [
       'gn', 'debug', 'x64', 'v8_enable_slow_dchecks', 'v8_full_debug'],
     'default_optdebug_x64': [
@@ -350,14 +390,15 @@
     'gn_debug_x86_minimal_symbols': [
       'gn', 'debug_bot', 'x86', 'minimal_symbols', 'swarming'],
     'gn_debug_x86_no_i18n': [
-      'gn', 'debug_bot', 'x86', 'swarming', 'v8_disable_inspector',
-      'v8_no_i18n'],
+      'gn', 'debug_bot', 'x86', 'swarming', 'v8_no_i18n'],
     'gn_debug_x86_no_snap': [
       'gn', 'debug_bot', 'x86', 'swarming', 'v8_snapshot_none'],
     'gn_debug_x86_no_snap_trybot': [
       'gn', 'debug_trybot', 'x86', 'swarming', 'v8_snapshot_none'],
     'gn_debug_x86_trybot': [
       'gn', 'debug_trybot', 'x86', 'swarming'],
+    'gn_debug_x86_vtunejit': [
+      'gn', 'debug_bot', 'x86', 'v8_enable_vtunejit'],
     # GN release configs for x86.
     'gn_release_x86': [
@@ -370,11 +411,12 @@
       'gn', 'release_bot', 'x86', 'gcmole', 'swarming'],
     'gn_release_x86_gcmole_trybot': [
       'gn', 'release_trybot', 'x86', 'gcmole', 'swarming'],
+    'gn_release_x86_interpreted_regexp': [
+      'gn', 'release_bot', 'x86', 'v8_interpreted_regexp'],
     'gn_release_x86_minimal_symbols': [
       'gn', 'release_bot', 'x86', 'minimal_symbols', 'swarming'],
     'gn_release_x86_no_i18n_trybot': [
-      'gn', 'release_trybot', 'x86', 'swarming', 'v8_disable_inspector',
-      'v8_no_i18n'],
+      'gn', 'release_trybot', 'x86', 'swarming', 'v8_no_i18n'],
     'gn_release_x86_no_snap': [
       'gn', 'release_bot', 'x86', 'swarming', 'v8_snapshot_none'],
     'gn_release_x86_no_snap_shared_minimal_symbols': [
@@ -382,6 +424,8 @@
       'v8_snapshot_none'],
     'gn_release_x86_no_snap_trybot': [
       'gn', 'release_trybot', 'x86', 'swarming', 'v8_snapshot_none'],
+    'gn_release_x86_predictable': [
+      'gn', 'release_bot', 'x86', 'v8_enable_verify_predictable'],
     'gn_release_x86_shared_verify_heap': [
       'gn', 'release', 'x86', 'goma', 'shared', 'swarming', 'v8_verify_heap'],
     'gn_release_x86_trybot': [
@@ -397,8 +441,6 @@
     # Gyp debug configs for x86.
     'gyp_debug_x86': [
       'gyp', 'debug_bot', 'x86', 'swarming'],
-    'gyp_debug_x86_vtunejit': [
-      'gyp', 'debug_bot', 'x86', 'v8_enable_vtunejit'],
     'gyp_full_debug_x86': [
       'gyp', 'debug', 'x86', 'goma', 'static', 'v8_enable_slow_dchecks',
       'v8_full_debug'],
@@ -432,10 +474,6 @@
     # Gyp release configs for x86.
     'gyp_release_x86_disassembler': [
       'gyp', 'release_bot', 'x86', 'v8_enable_disassembler'],
-    'gyp_release_x86_interpreted_regexp': [
-      'gyp', 'release_bot', 'x86', 'v8_interpreted_regexp'],
-    'gyp_release_x86_predictable': [
-      'gyp', 'release_bot', 'x86', 'v8_enable_verify_predictable'],
   },
   'mixins': {
@@ -661,11 +699,6 @@
       'gn_args': 'v8_correctness_fuzzer=true v8_multi_arch_build=true',
     },
-    'v8_disable_inspector': {
-      'gn_args': 'v8_enable_inspector=false',
-      'gyp_defines': 'v8_enable_inspector=0 ',
-    },
     'v8_enable_disassembler': {
       'gn_args': 'v8_enable_disassembler=true',
       'gyp_defines': 'v8_enable_disassembler=1',
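
With the new default entries in place, the standard developer workflow should resolve them through this file; for instance (assuming the usual tools/dev/v8gen.py wrapper around MB), `python tools/dev/v8gen.py ppc64.release` would generate an out.gn/ppc64.release build from the 'gn', 'release', 'simulate_ppc64' mixins above.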

3
deps/v8/src/DEPS

@@ -18,6 +18,9 @@ include_rules = [
   "+src/interpreter/bytecode-register.h",
   "+src/interpreter/bytecodes.h",
   "+src/interpreter/interpreter.h",
+  "+src/interpreter/setup-interpreter.h",
+  "-src/trap-handler",
+  "+src/trap-handler/trap-handler.h",
   "+testing/gtest/include/gtest/gtest_prod.h",
   "-src/libplatform",
   "-include/libplatform"

1
deps/v8/src/OWNERS

@@ -1,4 +1,5 @@
 per-file i18n.*=cira@chromium.org
 per-file i18n.*=mnita@google.com
+per-file i18n.*=jshin@chromium.org
 per-file typing-asm.*=aseemgarg@chromium.org
 per-file typing-asm.*=bradnelson@chromium.org

139
deps/v8/src/api-experimental.cc

@@ -1,139 +0,0 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/**
* Implementation for v8-experimental.h.
*/
#include "src/api-experimental.h"
#include "include/v8-experimental.h"
#include "include/v8.h"
#include "src/api.h"
#include "src/fast-accessor-assembler.h"
#include "src/objects-inl.h"
namespace {
v8::internal::FastAccessorAssembler* FromApi(
v8::experimental::FastAccessorBuilder* builder) {
return reinterpret_cast<v8::internal::FastAccessorAssembler*>(builder);
}
v8::experimental::FastAccessorBuilder* FromInternal(
v8::internal::FastAccessorAssembler* fast_accessor_assembler) {
return reinterpret_cast<v8::experimental::FastAccessorBuilder*>(
fast_accessor_assembler);
}
} // namespace
namespace v8 {
namespace internal {
namespace experimental {
MaybeHandle<Code> BuildCodeFromFastAccessorBuilder(
v8::experimental::FastAccessorBuilder* fast_handler) {
i::MaybeHandle<i::Code> code;
if (fast_handler != nullptr) {
auto faa = FromApi(fast_handler);
code = faa->Build();
CHECK(!code.is_null());
delete faa;
}
return code;
}
} // namespace experimental
} // namespace internal
namespace experimental {
FastAccessorBuilder* FastAccessorBuilder::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
internal::FastAccessorAssembler* faa =
new internal::FastAccessorAssembler(i_isolate);
return FromInternal(faa);
}
FastAccessorBuilder::ValueId FastAccessorBuilder::IntegerConstant(
int const_value) {
return FromApi(this)->IntegerConstant(const_value);
}
FastAccessorBuilder::ValueId FastAccessorBuilder::GetReceiver() {
return FromApi(this)->GetReceiver();
}
FastAccessorBuilder::ValueId FastAccessorBuilder::LoadInternalField(
ValueId value, int field_no) {
return FromApi(this)->LoadInternalField(value, field_no);
}
FastAccessorBuilder::ValueId FastAccessorBuilder::LoadInternalFieldUnchecked(
ValueId value, int field_no) {
return FromApi(this)->LoadInternalFieldUnchecked(value, field_no);
}
FastAccessorBuilder::ValueId FastAccessorBuilder::LoadValue(ValueId value_id,
int offset) {
return FromApi(this)->LoadValue(value_id, offset);
}
FastAccessorBuilder::ValueId FastAccessorBuilder::LoadObject(ValueId value_id,
int offset) {
return FromApi(this)->LoadObject(value_id, offset);
}
FastAccessorBuilder::ValueId FastAccessorBuilder::ToSmi(ValueId value_id) {
return FromApi(this)->ToSmi(value_id);
}
void FastAccessorBuilder::ReturnValue(ValueId value) {
FromApi(this)->ReturnValue(value);
}
void FastAccessorBuilder::CheckFlagSetOrReturnNull(ValueId value_id, int mask) {
FromApi(this)->CheckFlagSetOrReturnNull(value_id, mask);
}
void FastAccessorBuilder::CheckNotZeroOrReturnNull(ValueId value_id) {
FromApi(this)->CheckNotZeroOrReturnNull(value_id);
}
FastAccessorBuilder::LabelId FastAccessorBuilder::MakeLabel() {
return FromApi(this)->MakeLabel();
}
void FastAccessorBuilder::SetLabel(LabelId label_id) {
FromApi(this)->SetLabel(label_id);
}
void FastAccessorBuilder::Goto(LabelId label_id) {
FromApi(this)->Goto(label_id);
}
void FastAccessorBuilder::CheckNotZeroOrJump(ValueId value_id,
LabelId label_id) {
FromApi(this)->CheckNotZeroOrJump(value_id, label_id);
}
FastAccessorBuilder::ValueId FastAccessorBuilder::Call(
v8::FunctionCallback callback, ValueId value_id) {
return FromApi(this)->Call(callback, value_id);
}
} // namespace experimental
} // namespace v8

28
deps/v8/src/api-experimental.h

@@ -1,28 +0,0 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_API_EXPERIMENTAL_H_
#define V8_API_EXPERIMENTAL_H_
namespace v8 {
namespace internal {
class Code;
template <typename T>
class MaybeHandle;
} // internal;
namespace experimental {
class FastAccessorBuilder;
} // experimental
namespace internal {
namespace experimental {
v8::internal::MaybeHandle<v8::internal::Code> BuildCodeFromFastAccessorBuilder(
v8::experimental::FastAccessorBuilder* fast_handler);
} // namespace experimental
} // namespace internal
} // namespace v8
#endif // V8_API_EXPERIMENTAL_H_

52
deps/v8/src/api-natives.cc

@@ -36,7 +36,8 @@ class InvokeScope {
 MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
                                         Handle<ObjectTemplateInfo> data,
                                         Handle<JSReceiver> new_target,
-                                        bool is_hidden_prototype);
+                                        bool is_hidden_prototype,
+                                        bool is_prototype);
 MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
                                             Handle<FunctionTemplateInfo> data,
@@ -49,7 +50,7 @@ MaybeHandle<Object> Instantiate(Isolate* isolate, Handle<Object> data,
         Handle<FunctionTemplateInfo>::cast(data), name);
   } else if (data->IsObjectTemplateInfo()) {
     return InstantiateObject(isolate, Handle<ObjectTemplateInfo>::cast(data),
-                             Handle<JSReceiver>(), false);
+                             Handle<JSReceiver>(), false, false);
   } else {
     return data;
   }
@@ -338,7 +339,8 @@ bool IsSimpleInstantiation(Isolate* isolate, ObjectTemplateInfo* info,
 MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
                                         Handle<ObjectTemplateInfo> info,
                                         Handle<JSReceiver> new_target,
-                                        bool is_hidden_prototype) {
+                                        bool is_hidden_prototype,
+                                        bool is_prototype) {
   Handle<JSFunction> constructor;
   int serial_number = Smi::cast(info->serial_number())->value();
   if (!new_target.is_null()) {
@@ -379,19 +381,26 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
   Handle<JSObject> object;
   ASSIGN_RETURN_ON_EXCEPTION(isolate, object,
                              JSObject::New(constructor, new_target), JSObject);
+  if (is_prototype) JSObject::OptimizeAsPrototype(object, FAST_PROTOTYPE);
   ASSIGN_RETURN_ON_EXCEPTION(
       isolate, result,
       ConfigureInstance(isolate, object, info, is_hidden_prototype), JSObject);
   if (info->immutable_proto()) {
     JSObject::SetImmutableProto(object);
   }
-  // TODO(dcarney): is this necessary?
-  JSObject::MigrateSlowToFast(result, 0, "ApiNatives::InstantiateObject");
-  if (serial_number) {
-    CacheTemplateInstantiation(isolate, serial_number, result);
-    result = isolate->factory()->CopyJSObject(result);
+  if (!is_prototype) {
+    // Keep prototypes in slow-mode. Let them be lazily turned fast later on.
+    // TODO(dcarney): is this necessary?
+    JSObject::MigrateSlowToFast(result, 0, "ApiNatives::InstantiateObject");
+    // Don't cache prototypes.
+    if (serial_number) {
+      CacheTemplateInstantiation(isolate, serial_number, result);
+      result = isolate->factory()->CopyJSObject(result);
+    }
   }
   return result;
 }
@@ -446,7 +455,7 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
         InstantiateObject(
             isolate,
             handle(ObjectTemplateInfo::cast(prototype_templ), isolate),
-            Handle<JSReceiver>(), data->hidden_prototype()),
+            Handle<JSReceiver>(), data->hidden_prototype(), true),
         JSFunction);
   }
   Object* parent = data->parent_template();
@@ -514,7 +523,8 @@ MaybeHandle<JSObject> ApiNatives::InstantiateObject(
     Handle<ObjectTemplateInfo> data, Handle<JSReceiver> new_target) {
   Isolate* isolate = data->GetIsolate();
   InvokeScope invoke_scope(isolate);
-  return ::v8::internal::InstantiateObject(isolate, data, new_target, false);
+  return ::v8::internal::InstantiateObject(isolate, data, new_target, false,
+                                           false);
 }
 MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
@@ -524,22 +534,14 @@ MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
   Handle<FunctionTemplateInfo> constructor(
       FunctionTemplateInfo::cast(data->constructor()));
-  Handle<SharedFunctionInfo> shared =
-      FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate, constructor);
-  Handle<Map> initial_map = isolate->factory()->CreateSloppyFunctionMap(
-      FUNCTION_WITH_WRITEABLE_PROTOTYPE);
-  Handle<JSFunction> object_function =
-      isolate->factory()->NewFunctionFromSharedFunctionInfo(
-          initial_map, shared, isolate->factory()->undefined_value());
   Handle<Map> object_map = isolate->factory()->NewMap(
       JS_SPECIAL_API_OBJECT_TYPE,
-      JSObject::kHeaderSize + data->internal_field_count() * kPointerSize,
+      JSObject::kHeaderSize + data->embedder_field_count() * kPointerSize,
       FAST_HOLEY_SMI_ELEMENTS);
-  JSFunction::SetInitialMap(object_function, object_map,
-                            isolate->factory()->null_value());
+  object_map->SetConstructor(*constructor);
   object_map->set_is_access_check_needed(true);
-  Handle<JSObject> object = isolate->factory()->NewJSObject(object_function);
+  Handle<JSObject> object = isolate->factory()->NewJSObjectFromMap(object_map);
   JSObject::ForceSetPrototype(object, isolate->factory()->null_value());
   return object;
@@ -629,18 +631,18 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
                           DONT_ENUM);
   }
-  int internal_field_count = 0;
+  int embedder_field_count = 0;
   bool immutable_proto = false;
   if (!obj->instance_template()->IsUndefined(isolate)) {
     Handle<ObjectTemplateInfo> instance_template = Handle<ObjectTemplateInfo>(
         ObjectTemplateInfo::cast(obj->instance_template()));
-    internal_field_count = instance_template->internal_field_count();
+    embedder_field_count = instance_template->embedder_field_count();
     immutable_proto = instance_template->immutable_proto();
   }
   // TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing
   // JSObject::GetHeaderSize.
-  int instance_size = kPointerSize * internal_field_count;
+  int instance_size = kPointerSize * embedder_field_count;
   InstanceType type;
   switch (instance_type) {
     case JavaScriptObjectType:
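
To make the new is_prototype path concrete: it is taken for the object built from a FunctionTemplate's prototype template, which is now optimized as a prototype up front, kept out of the instantiation cache, and left to become fast lazily. A hedged sketch of the embedder pattern that exercises it (GreetCallback is a hypothetical v8::FunctionCallback):

    void GreetCallback(const v8::FunctionCallbackInfo<v8::Value>& info);

    v8::Local<v8::Function> MakeCtor(v8::Isolate* isolate,
                                     v8::Local<v8::Context> context) {
      v8::Local<v8::FunctionTemplate> tmpl = v8::FunctionTemplate::New(isolate);
      // Everything set on PrototypeTemplate() ends up on the object that
      // InstantiateObject(..., is_prototype = true) now builds.
      tmpl->PrototypeTemplate()->Set(
          v8::String::NewFromUtf8(isolate, "greet",
                                  v8::NewStringType::kInternalized)
              .ToLocalChecked(),
          v8::FunctionTemplate::New(isolate, GreetCallback));
      return tmpl->GetFunction(context).ToLocalChecked();
    }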

797
deps/v8/src/api.cc

File diff suppressed because it is too large

15
deps/v8/src/api.h

@@ -106,12 +106,13 @@ class RegisteredExtension {
   V(Context, Context)                          \
   V(External, Object)                          \
   V(StackTrace, JSArray)                       \
-  V(StackFrame, JSObject)                      \
+  V(StackFrame, StackFrameInfo)                \
   V(Proxy, JSProxy)                            \
   V(NativeWeakMap, JSWeakMap)                  \
   V(debug::GeneratorObject, JSGeneratorObject) \
   V(debug::Script, Script)                     \
-  V(Promise, JSPromise)
+  V(Promise, JSPromise)                        \
+  V(DynamicImportResult, JSPromise)
@@ -185,10 +186,12 @@ class Utils {
       v8::internal::Handle<v8::internal::Object> obj);
   static inline Local<Promise> PromiseToLocal(
       v8::internal::Handle<v8::internal::JSObject> obj);
+  static inline Local<DynamicImportResult> PromiseToDynamicImportResult(
+      v8::internal::Handle<v8::internal::JSPromise> obj);
   static inline Local<StackTrace> StackTraceToLocal(
       v8::internal::Handle<v8::internal::JSArray> obj);
   static inline Local<StackFrame> StackFrameToLocal(
-      v8::internal::Handle<v8::internal::JSObject> obj);
+      v8::internal::Handle<v8::internal::StackFrameInfo> obj);
   static inline Local<Number> NumberToLocal(
       v8::internal::Handle<v8::internal::Object> obj);
   static inline Local<Integer> IntegerToLocal(
@@ -317,8 +320,9 @@ MAKE_TO_LOCAL(SignatureToLocal, FunctionTemplateInfo, Signature)
 MAKE_TO_LOCAL(AccessorSignatureToLocal, FunctionTemplateInfo, AccessorSignature)
 MAKE_TO_LOCAL(MessageToLocal, Object, Message)
 MAKE_TO_LOCAL(PromiseToLocal, JSObject, Promise)
+MAKE_TO_LOCAL(PromiseToDynamicImportResult, JSPromise, DynamicImportResult)
 MAKE_TO_LOCAL(StackTraceToLocal, JSArray, StackTrace)
-MAKE_TO_LOCAL(StackFrameToLocal, JSObject, StackFrame)
+MAKE_TO_LOCAL(StackFrameToLocal, StackFrameInfo, StackFrame)
 MAKE_TO_LOCAL(NumberToLocal, Object, Number)
 MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
 MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
@@ -347,6 +351,8 @@ OPEN_HANDLE_LIST(MAKE_OPEN_HANDLE)
 #undef MAKE_OPEN_HANDLE
 #undef OPEN_HANDLE_LIST
+extern Isolate* IsolateNewImpl(internal::Isolate* isolate,
+                               const Isolate::CreateParams& params);
 namespace internal {
@@ -645,7 +651,6 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
          (!blocks_.is_empty() && prev_limit != NULL));
 }
 // Interceptor functions called from generated inline caches to notify
 // CPU profiler that external callbacks are invoked.
 void InvokeAccessorGetterCallback(

67
deps/v8/src/arm/assembler-arm-inl.h

@@ -48,7 +48,7 @@ namespace internal {
 bool CpuFeatures::SupportsCrankshaft() { return true; }
-bool CpuFeatures::SupportsSimd128() { return true; }
+bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(NEON); }
 int DoubleRegister::NumRegisters() {
   return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
@@ -98,32 +98,28 @@ int RelocInfo::target_address_size() {
   return kPointerSize;
 }
-Object* RelocInfo::target_object() {
+HeapObject* RelocInfo::target_object() {
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
+  return HeapObject::cast(
+      reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)));
 }
-Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return Handle<Object>(reinterpret_cast<Object**>(
-      Assembler::target_address_at(pc_, host_)));
+  return Handle<HeapObject>(
+      reinterpret_cast<HeapObject**>(Assembler::target_address_at(pc_, host_)));
 }
-void RelocInfo::set_target_object(Object* target,
+void RelocInfo::set_target_object(HeapObject* target,
                                   WriteBarrierMode write_barrier_mode,
                                   ICacheFlushMode icache_flush_mode) {
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  Assembler::set_target_address_at(isolate_, pc_, host_,
+  Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
                                    reinterpret_cast<Address>(target),
                                    icache_flush_mode);
-  if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
-      host() != NULL &&
-      target->IsHeapObject()) {
-    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
-        host(), this, HeapObject::cast(target));
+  if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+                                                                  target);
     host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
   }
 }
@@ -152,13 +148,12 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
   return target_address();
 }
-void RelocInfo::set_target_runtime_entry(Address target,
+void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
                                          WriteBarrierMode write_barrier_mode,
                                          ICacheFlushMode icache_flush_mode) {
   DCHECK(IsRuntimeEntry(rmode_));
   if (target_address() != target)
-    set_target_address(target, write_barrier_mode, icache_flush_mode);
+    set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
 }
@@ -187,13 +182,9 @@ void RelocInfo::set_target_cell(Cell* cell,
   }
 }
-static const int kNoCodeAgeSequenceLength = 3 * Assembler::kInstrSize;
-Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+Handle<Code> RelocInfo::code_age_stub_handle(Assembler* origin) {
   UNREACHABLE();  // This should never be reached on Arm.
-  return Handle<Object>();
+  return Handle<Code>();
 }
@@ -221,27 +212,25 @@ Address RelocInfo::debug_call_address() {
   return Memory::Address_at(pc_ + Assembler::kPatchDebugBreakSlotAddressOffset);
 }
-void RelocInfo::set_debug_call_address(Address target) {
+void RelocInfo::set_debug_call_address(Isolate* isolate, Address target) {
   DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
   Memory::Address_at(pc_ + Assembler::kPatchDebugBreakSlotAddressOffset) =
       target;
   if (host() != NULL) {
-    Object* target_code = Code::GetCodeFromTargetAddress(target);
-    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
-        host(), this, HeapObject::cast(target_code));
+    Code* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+                                                                  target_code);
   }
 }
-void RelocInfo::WipeOut() {
+void RelocInfo::WipeOut(Isolate* isolate) {
   DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
          IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
          IsInternalReference(rmode_));
   if (IsInternalReference(rmode_)) {
     Memory::Address_at(pc_) = NULL;
   } else {
-    Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
+    Assembler::set_target_address_at(isolate, pc_, host_, NULL);
   }
 }
@@ -299,6 +288,7 @@ Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
   rmode_ = rmode;
 }
+Operand Operand::Zero() { return Operand(static_cast<int32_t>(0)); }
 Operand::Operand(const ExternalReference& f) {
   rm_ = no_reg;
@@ -322,14 +312,6 @@ Operand::Operand(Register rm) {
 }
-bool Operand::is_reg() const {
-  return rm_.is_valid() &&
-      rs_.is(no_reg) &&
-      shift_op_ == LSL &&
-      shift_imm_ == 0;
-}
 void Assembler::CheckBuffer() {
   if (buffer_space() <= kGap) {
     GrowBuffer();
@@ -542,6 +524,7 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
 void Assembler::set_target_address_at(Isolate* isolate, Address pc,
                                       Address constant_pool, Address target,
                                       ICacheFlushMode icache_flush_mode) {
+  DCHECK_IMPLIES(isolate == nullptr, icache_flush_mode == SKIP_ICACHE_FLUSH);
   if (is_constant_pool_load(pc)) {
     // This is a constant pool lookup. Update the entry in the constant pool.
     Memory::Address_at(constant_pool_entry_address(pc, constant_pool)) = target;
@@ -602,6 +585,8 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
   set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
 }
+EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
 }  // namespace internal
 }  // namespace v8

576
deps/v8/src/arm/assembler-arm.cc

@@ -39,9 +39,11 @@
 #if V8_TARGET_ARCH_ARM
 #include "src/arm/assembler-arm-inl.h"
+#include "src/assembler-inl.h"
 #include "src/base/bits.h"
 #include "src/base/cpu.h"
 #include "src/macro-assembler.h"
+#include "src/objects-inl.h"
 namespace v8 {
 namespace internal {
@@ -357,13 +359,13 @@ uint32_t RelocInfo::wasm_function_table_size_reference() {
 }
 void RelocInfo::unchecked_update_wasm_memory_reference(
-    Address address, ICacheFlushMode flush_mode) {
-  Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
+    Isolate* isolate, Address address, ICacheFlushMode flush_mode) {
+  Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
 }
-void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+void RelocInfo::unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
                                            ICacheFlushMode flush_mode) {
-  Assembler::set_target_address_at(isolate_, pc_, host_,
+  Assembler::set_target_address_at(isolate, pc_, host_,
                                    reinterpret_cast<Address>(size), flush_mode);
 }
@@ -466,7 +468,6 @@ NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {
   SetAlignment(align);
 }
 void NeonMemOperand::SetAlignment(int align) {
   switch (align) {
     case 0:
@@ -549,8 +550,8 @@
     al | B26 | NegOffset | Register::kCode_fp * B16;
 const Instr kLdrStrInstrTypeMask = 0xffff0000;
-Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
-    : AssemblerBase(isolate, buffer, buffer_size),
+Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
+    : AssemblerBase(isolate_data, buffer, buffer_size),
       recorded_ast_id_(TypeFeedbackId::None()),
       pending_32_bit_constants_(),
       pending_64_bit_constants_(),
@@ -939,25 +940,25 @@ void Assembler::target_at_put(int pos, int target_pos) {
     if (is_uint8(target24)) {
       // If the target fits in a byte then only patch with a mov
       // instruction.
-      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 1,
-                          CodePatcher::DONT_FLUSH);
-      patcher.masm()->mov(dst, Operand(target24));
+      PatchingAssembler patcher(isolate_data(),
+                                reinterpret_cast<byte*>(buffer_ + pos), 1);
+      patcher.mov(dst, Operand(target24));
     } else {
       uint16_t target16_0 = target24 & kImm16Mask;
       uint16_t target16_1 = target24 >> 16;
       if (CpuFeatures::IsSupported(ARMv7)) {
         // Patch with movw/movt.
         if (target16_1 == 0) {
-          CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
-                              1, CodePatcher::DONT_FLUSH);
-          CpuFeatureScope scope(patcher.masm(), ARMv7);
-          patcher.masm()->movw(dst, target16_0);
+          PatchingAssembler patcher(isolate_data(),
+                                    reinterpret_cast<byte*>(buffer_ + pos), 1);
+          CpuFeatureScope scope(&patcher, ARMv7);
+          patcher.movw(dst, target16_0);
         } else {
-          CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
-                              2, CodePatcher::DONT_FLUSH);
-          CpuFeatureScope scope(patcher.masm(), ARMv7);
-          patcher.masm()->movw(dst, target16_0);
-          patcher.masm()->movt(dst, target16_1);
+          PatchingAssembler patcher(isolate_data(),
+                                    reinterpret_cast<byte*>(buffer_ + pos), 2);
+          CpuFeatureScope scope(&patcher, ARMv7);
+          patcher.movw(dst, target16_0);
+          patcher.movt(dst, target16_1);
         }
       } else {
         // Patch with a sequence of mov/orr/orr instructions.
@@ -965,16 +966,16 @@ void Assembler::target_at_put(int pos, int target_pos) {
         uint8_t target8_1 = target16_0 >> 8;
         uint8_t target8_2 = target16_1 & kImm8Mask;
         if (target8_2 == 0) {
-          CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
-                              2, CodePatcher::DONT_FLUSH);
-          patcher.masm()->mov(dst, Operand(target8_0));
-          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
+          PatchingAssembler patcher(isolate_data(),
+                                    reinterpret_cast<byte*>(buffer_ + pos), 2);
+          patcher.mov(dst, Operand(target8_0));
+          patcher.orr(dst, dst, Operand(target8_1 << 8));
         } else {
-          CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
-                              3, CodePatcher::DONT_FLUSH);
-          patcher.masm()->mov(dst, Operand(target8_0));
-          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
-          patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
+          PatchingAssembler patcher(isolate_data(),
+                                    reinterpret_cast<byte*>(buffer_ + pos), 3);
+          patcher.mov(dst, Operand(target8_0));
+          patcher.orr(dst, dst, Operand(target8_1 << 8));
+          patcher.orr(dst, dst, Operand(target8_2 << 16));
         }
       }
     }
@@ -1523,6 +1524,10 @@ void Assembler::sub(Register dst, Register src1, const Operand& src2,
   addrmod1(cond | SUB | s, src1, dst, src2);
 }
+void Assembler::sub(Register dst, Register src1, Register src2, SBit s,
+                    Condition cond) {
+  sub(dst, src1, Operand(src2), s, cond);
+}
 void Assembler::rsb(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
@@ -1535,6 +1540,10 @@ void Assembler::add(Register dst, Register src1, const Operand& src2,
   addrmod1(cond | ADD | s, src1, dst, src2);
 }
+void Assembler::add(Register dst, Register src1, Register src2, SBit s,
+                    Condition cond) {
+  add(dst, src1, Operand(src2), s, cond);
+}
 void Assembler::adc(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
@@ -1558,6 +1567,9 @@ void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
   addrmod1(cond | TST | S, src1, r0, src2);
 }
+void Assembler::tst(Register src1, Register src2, Condition cond) {
+  tst(src1, Operand(src2), cond);
+}
 void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
   addrmod1(cond | TEQ | S, src1, r0, src2);
@@ -1568,6 +1580,9 @@ void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
   addrmod1(cond | CMP | S, src1, r0, src2);
 }
+void Assembler::cmp(Register src1, Register src2, Condition cond) {
+  cmp(src1, Operand(src2), cond);
+}
 void Assembler::cmp_raw_immediate(
     Register src, int raw_immediate, Condition cond) {
@@ -1586,6 +1601,10 @@ void Assembler::orr(Register dst, Register src1, const Operand& src2,
   addrmod1(cond | ORR | s, src1, dst, src2);
 }
+void Assembler::orr(Register dst, Register src1, Register src2, SBit s,
+                    Condition cond) {
+  orr(dst, src1, Operand(src2), s, cond);
+}
 void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
   // Don't allow nop instructions in the form mov rn, rn to be generated using
@@ -1595,6 +1614,9 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
   addrmod1(cond | MOV | s, r0, dst, src);
 }
+void Assembler::mov(Register dst, Register src, SBit s, Condition cond) {
+  mov(dst, Operand(src), s, cond);
+}
 void Assembler::mov_label_offset(Register dst, Label* label) {
   if (label->is_bound()) {
@@ -1657,6 +1679,32 @@ void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
   addrmod1(cond | MVN | s, r0, dst, src);
 }
+void Assembler::asr(Register dst, Register src1, const Operand& src2, SBit s,
+                    Condition cond) {
+  if (src2.is_reg()) {
+    mov(dst, Operand(src1, ASR, src2.rm()), s, cond);
+  } else {
+    mov(dst, Operand(src1, ASR, src2.immediate()), s, cond);
+  }
+}
+void Assembler::lsl(Register dst, Register src1, const Operand& src2, SBit s,
+                    Condition cond) {
+  if (src2.is_reg()) {
+    mov(dst, Operand(src1, LSL, src2.rm()), s, cond);
+  } else {
+    mov(dst, Operand(src1, LSL, src2.immediate()), s, cond);
+  }
+}
+void Assembler::lsr(Register dst, Register src1, const Operand& src2, SBit s,
+                    Condition cond) {
+  if (src2.is_reg()) {
+    mov(dst, Operand(src1, LSR, src2.rm()), s, cond);
+  } else {
+    mov(dst, Operand(src1, LSR, src2.immediate()), s, cond);
+  }
+}
 // Multiply instructions.
 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
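
Taken together, the hunks above add plain-register overloads and asr/lsl/lsr pseudo-instructions; in assembler code they read as follows (a sketch with arbitrary registers, on any Assembler instance 'assm'):

    assm.mov(r0, r1);               // previously: assm.mov(r0, Operand(r1));
    assm.add(r0, r1, r2);           // previously: assm.add(r0, r1, Operand(r2));
    assm.cmp(r0, r1);               // previously: assm.cmp(r0, Operand(r1));
    assm.lsl(r0, r1, Operand(3));   // expands to: mov r0, r1, LSL #3
    assm.asr(r0, r1, Operand(r2));  // expands to: mov r0, r1, ASR r2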
@@ -2233,19 +2281,12 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code) {
 #ifndef __arm__
   DCHECK(code >= kDefaultStopCode);
   {
-    // The Simulator will handle the stop instruction and get the message
-    // address. It expects to find the address just after the svc instruction.
     BlockConstPoolScope block_const_pool(this);
     if (code >= 0) {
       svc(kStopCode + code, cond);
     } else {
       svc(kStopCode + kMaxStopCode, cond);
     }
-    // Do not embed the message string address! We used to do this, but that
-    // made snapshots created from position-independent executable builds
-    // non-deterministic.
-    // TODO(yangguo): remove this field entirely.
-    nop();
   }
 #else  // def __arm__
   if (cond != al) {
@@ -3005,13 +3046,9 @@ static void SplitRegCode(VFPType reg_type,
                          int* m) {
   DCHECK((reg_code >= 0) && (reg_code <= 31));
   if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
-    // 32 bit type.
-    *m = reg_code & 0x1;
-    *vm = reg_code >> 1;
+    SwVfpRegister::split_code(reg_code, vm, m);
   } else {
-    // 64 bit type.
-    *m = (reg_code & 0x10) >> 4;
-    *vm = reg_code & 0x0F;
+    DwVfpRegister::split_code(reg_code, vm, m);
   }
 }
@@ -3854,9 +3891,7 @@ void Assembler::vld1(NeonSize size,
        dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code());
 }
-void Assembler::vst1(NeonSize size,
-                     const NeonListOperand& src,
+void Assembler::vst1(NeonSize size, const NeonListOperand& src,
                      const NeonMemOperand& dst) {
   // Instruction details available in ARM DDI 0406C.b, A8.8.404.
   // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
@@ -3884,6 +3919,21 @@ void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
                 0xA * B8 | m * B5 | B4 | vm);
 }
+void Assembler::vqmovn(NeonDataType dt, DwVfpRegister dst, QwNeonRegister src) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.1004.
+  // vqmovn.<type><size> Dd, Qm. ARM vector narrowing move with saturation.
+  DCHECK(IsEnabled(NEON));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  src.split_code(&vm, &m);
+  int size = NeonSz(dt);
+  int u = NeonU(dt);
+  int op = u != 0 ? 3 : 2;
+  emit(0x1E7U * B23 | d * B22 | 0x3 * B20 | size * B18 | 0x2 * B16 | vd * B12 |
+       0x2 * B8 | op * B6 | m * B5 | vm);
+}
 static int EncodeScalar(NeonDataType dt, int index) {
   int opc1_opc2 = 0;
   DCHECK_LE(0, index);
@@ -3935,51 +3985,13 @@ void Assembler::vmov(NeonDataType dt, Register dst, DwVfpRegister src,
        n * B7 | B4 | opc1_opc2);
 }
 
-void Assembler::vmov(const QwNeonRegister dst, const QwNeonRegister src) {
+void Assembler::vmov(QwNeonRegister dst, QwNeonRegister src) {
   // Instruction details available in ARM DDI 0406C.b, A8-938.
   // vmov is encoded as vorr.
   vorr(dst, src, src);
 }
 
-void Assembler::vmvn(const QwNeonRegister dst, const QwNeonRegister src) {
-  DCHECK(IsEnabled(NEON));
-  // Instruction details available in ARM DDI 0406C.b, A8-966.
-  DCHECK(VfpRegisterIsAvailable(dst));
-  DCHECK(VfpRegisterIsAvailable(src));
-  int vd, d;
-  dst.split_code(&vd, &d);
-  int vm, m;
-  src.split_code(&vm, &m);
-  emit(0x1E7U * B23 | d * B22 | 3 * B20 | vd * B12 | 0x17 * B6 | m * B5 | vm);
-}
-
-void Assembler::vswp(DwVfpRegister dst, DwVfpRegister src) {
-  // Instruction details available in ARM DDI 0406C.b, A8.8.418.
-  // 1111(31-28) | 00111(27-23) | D(22) | 110010(21-16) |
-  // Vd(15-12) | 000000(11-6) | M(5) | 0(4) | Vm(3-0)
-  DCHECK(IsEnabled(NEON));
-  int vd, d;
-  dst.split_code(&vd, &d);
-  int vm, m;
-  src.split_code(&vm, &m);
-  emit(0xFU * B28 | 7 * B23 | d * B22 | 0x32 * B16 | vd * B12 | m * B5 | vm);
-}
-
-void Assembler::vswp(QwNeonRegister dst, QwNeonRegister src) {
-  // Instruction details available in ARM DDI 0406C.b, A8.8.418.
-  // 1111(31-28) | 00111(27-23) | D(22) | 110010(21-16) |
-  // Vd(15-12) | 000000(11-6) | M(5) | 0(4) | Vm(3-0)
-  DCHECK(IsEnabled(NEON));
-  int vd, d;
-  dst.split_code(&vd, &d);
-  int vm, m;
-  src.split_code(&vm, &m);
-  emit(0xFU * B28 | 7 * B23 | d * B22 | 0x32 * B16 | vd * B12 | B6 | m * B5 |
-       vm);
-}
-
-void Assembler::vdup(NeonSize size, const QwNeonRegister dst,
-                     const Register src) {
+void Assembler::vdup(NeonSize size, QwNeonRegister dst, Register src) {
   DCHECK(IsEnabled(NEON));
   // Instruction details available in ARM DDI 0406C.b, A8-886.
   int B = 0, E = 0;
@@ -4003,7 +4015,7 @@ void Assembler::vdup(NeonSize size, const QwNeonRegister dst,
        0xB * B8 | d * B7 | E * B5 | B4);
 }
 
-void Assembler::vdup(const QwNeonRegister dst, const SwVfpRegister src) {
+void Assembler::vdup(QwNeonRegister dst, SwVfpRegister src) {
   DCHECK(IsEnabled(NEON));
   // Instruction details available in ARM DDI 0406C.b, A8-884.
   int index = src.code() & 1;
@@ -4019,8 +4031,8 @@ void Assembler::vdup(const QwNeonRegister dst, const SwVfpRegister src) {
 }
 
 // Encode NEON vcvt.src_type.dst_type instruction.
-static Instr EncodeNeonVCVT(const VFPType dst_type, const QwNeonRegister dst,
-                            const VFPType src_type, const QwNeonRegister src) {
+static Instr EncodeNeonVCVT(VFPType dst_type, QwNeonRegister dst,
+                            VFPType src_type, QwNeonRegister src) {
   DCHECK(src_type != dst_type);
   DCHECK(src_type == F32 || dst_type == F32);
   // Instruction details available in ARM DDI 0406C.b, A8.8.868.
@@ -4042,103 +4054,142 @@ static Instr EncodeNeonVCVT(const VFPType dst_type, const QwNeonRegister dst,
          B6 | m * B5 | vm;
 }
 
-void Assembler::vcvt_f32_s32(const QwNeonRegister dst,
-                             const QwNeonRegister src) {
+void Assembler::vcvt_f32_s32(QwNeonRegister dst, QwNeonRegister src) {
   DCHECK(IsEnabled(NEON));
   DCHECK(VfpRegisterIsAvailable(dst));
   DCHECK(VfpRegisterIsAvailable(src));
   emit(EncodeNeonVCVT(F32, dst, S32, src));
 }
 
-void Assembler::vcvt_f32_u32(const QwNeonRegister dst,
-                             const QwNeonRegister src) {
+void Assembler::vcvt_f32_u32(QwNeonRegister dst, QwNeonRegister src) {
   DCHECK(IsEnabled(NEON));
   DCHECK(VfpRegisterIsAvailable(dst));
   DCHECK(VfpRegisterIsAvailable(src));
   emit(EncodeNeonVCVT(F32, dst, U32, src));
 }
 
-void Assembler::vcvt_s32_f32(const QwNeonRegister dst,
-                             const QwNeonRegister src) {
+void Assembler::vcvt_s32_f32(QwNeonRegister dst, QwNeonRegister src) {
   DCHECK(IsEnabled(NEON));
   DCHECK(VfpRegisterIsAvailable(dst));
   DCHECK(VfpRegisterIsAvailable(src));
   emit(EncodeNeonVCVT(S32, dst, F32, src));
 }
 
-void Assembler::vcvt_u32_f32(const QwNeonRegister dst,
-                             const QwNeonRegister src) {
+void Assembler::vcvt_u32_f32(QwNeonRegister dst, QwNeonRegister src) {
   DCHECK(IsEnabled(NEON));
   DCHECK(VfpRegisterIsAvailable(dst));
   DCHECK(VfpRegisterIsAvailable(src));
   emit(EncodeNeonVCVT(U32, dst, F32, src));
 }
 
-// op is instr->Bits(11, 7).
-static Instr EncodeNeonUnaryOp(int op, bool is_float, NeonSize size,
-                               const QwNeonRegister dst,
-                               const QwNeonRegister src) {
-  DCHECK_IMPLIES(is_float, size == Neon32);
+enum NeonRegType { NEON_D, NEON_Q };
+
+void NeonSplitCode(NeonRegType type, int code, int* vm, int* m, int* encoding) {
+  if (type == NEON_D) {
+    DwVfpRegister::split_code(code, vm, m);
+  } else {
+    DCHECK_EQ(type, NEON_Q);
+    QwNeonRegister::split_code(code, vm, m);
+    *encoding |= B6;
+  }
+}
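[Editor's note — worked example of the new helper, not part of the commit:]

    // For q9 (code 9), QwNeonRegister::split_code doubles the code into
    // D-register numbering: 9 << 1 = 18 = 0b10010, so m = 1 and vm = 0b0010;
    // the NEON_Q path then ORs B6 into the encoding to mark the quad form.
    int vm, m, encoding = 0;
    NeonSplitCode(NEON_Q, 9, &vm, &m, &encoding);  // vm == 2, m == 1, encoding == B6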
+
+enum UnaryOp { VMVN, VSWP, VABS, VABSF, VNEG, VNEGF };
+
+static Instr EncodeNeonUnaryOp(UnaryOp op, NeonRegType reg_type, NeonSize size,
+                               int dst_code, int src_code) {
+  int op_encoding = 0;
+  switch (op) {
+    case VMVN:
+      DCHECK_EQ(Neon8, size);  // size == 0 for vmvn
+      op_encoding = B10 | 0x3 * B7;
+      break;
+    case VSWP:
+      DCHECK_EQ(Neon8, size);  // size == 0 for vswp
+      op_encoding = B17;
+      break;
+    case VABS:
+      op_encoding = B16 | 0x6 * B7;
+      break;
+    case VABSF:
+      DCHECK_EQ(Neon32, size);
+      op_encoding = B16 | B10 | 0x6 * B7;
+      break;
+    case VNEG:
+      op_encoding = B16 | 0x7 * B7;
+      break;
+    case VNEGF:
+      DCHECK_EQ(Neon32, size);
+      op_encoding = B16 | B10 | 0x7 * B7;
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
   int vd, d;
-  dst.split_code(&vd, &d);
+  NeonSplitCode(reg_type, dst_code, &vd, &d, &op_encoding);
   int vm, m;
-  src.split_code(&vm, &m);
-  int F = is_float ? 1 : 0;
-  return 0x1E7U * B23 | d * B22 | 0x3 * B20 | size * B18 | B16 | vd * B12 |
-         F * B10 | B8 | op * B7 | B6 | m * B5 | vm;
+  NeonSplitCode(reg_type, src_code, &vm, &m, &op_encoding);
+
+  return 0x1E7U * B23 | d * B22 | 0x3 * B20 | size * B18 | vd * B12 | m * B5 |
+         vm | op_encoding;
+}
+
+void Assembler::vmvn(QwNeonRegister dst, QwNeonRegister src) {
+  // Qd = vmvn(Qn, Qm) SIMD bitwise negate.
+  // Instruction details available in ARM DDI 0406C.b, A8-966.
+  DCHECK(IsEnabled(NEON));
+  emit(EncodeNeonUnaryOp(VMVN, NEON_Q, Neon8, dst.code(), src.code()));
+}
+
+void Assembler::vswp(DwVfpRegister dst, DwVfpRegister src) {
+  DCHECK(IsEnabled(NEON));
+  // Dd = vswp(Dn, Dm) SIMD d-register swap.
+  // Instruction details available in ARM DDI 0406C.b, A8.8.418.
+  DCHECK(IsEnabled(NEON));
+  emit(EncodeNeonUnaryOp(VSWP, NEON_D, Neon8, dst.code(), src.code()));
 }
 
-void Assembler::vabs(const QwNeonRegister dst, const QwNeonRegister src) {
+void Assembler::vswp(QwNeonRegister dst, QwNeonRegister src) {
+  // Qd = vswp(Qn, Qm) SIMD q-register swap.
+  // Instruction details available in ARM DDI 0406C.b, A8.8.418.
+  DCHECK(IsEnabled(NEON));
+  emit(EncodeNeonUnaryOp(VSWP, NEON_Q, Neon8, dst.code(), src.code()));
+}
+
+void Assembler::vabs(QwNeonRegister dst, QwNeonRegister src) {
   // Qd = vabs.f<size>(Qn, Qm) SIMD floating point absolute value.
   // Instruction details available in ARM DDI 0406C.b, A8.8.824.
   DCHECK(IsEnabled(NEON));
-  emit(EncodeNeonUnaryOp(0x6, true, Neon32, dst, src));
+  emit(EncodeNeonUnaryOp(VABSF, NEON_Q, Neon32, dst.code(), src.code()));
 }
 
-void Assembler::vabs(NeonSize size, const QwNeonRegister dst,
-                     const QwNeonRegister src) {
+void Assembler::vabs(NeonSize size, QwNeonRegister dst, QwNeonRegister src) {
   // Qd = vabs.s<size>(Qn, Qm) SIMD integer absolute value.
   // Instruction details available in ARM DDI 0406C.b, A8.8.824.
   DCHECK(IsEnabled(NEON));
-  emit(EncodeNeonUnaryOp(0x6, false, size, dst, src));
+  emit(EncodeNeonUnaryOp(VABS, NEON_Q, size, dst.code(), src.code()));
 }
 
-void Assembler::vneg(const QwNeonRegister dst, const QwNeonRegister src) {
+void Assembler::vneg(QwNeonRegister dst, QwNeonRegister src) {
   // Qd = vabs.f<size>(Qn, Qm) SIMD floating point negate.
   // Instruction details available in ARM DDI 0406C.b, A8.8.968.
   DCHECK(IsEnabled(NEON));
-  emit(EncodeNeonUnaryOp(0x7, true, Neon32, dst, src));
+  emit(EncodeNeonUnaryOp(VNEGF, NEON_Q, Neon32, dst.code(), src.code()));
 }
 
-void Assembler::vneg(NeonSize size, const QwNeonRegister dst,
-                     const QwNeonRegister src) {
+void Assembler::vneg(NeonSize size, QwNeonRegister dst, QwNeonRegister src) {
   // Qd = vabs.s<size>(Qn, Qm) SIMD integer negate.
   // Instruction details available in ARM DDI 0406C.b, A8.8.968.
   DCHECK(IsEnabled(NEON));
-  emit(EncodeNeonUnaryOp(0x7, false, size, dst, src));
+  emit(EncodeNeonUnaryOp(VNEG, NEON_Q, size, dst.code(), src.code()));
 }
 
-void Assembler::veor(DwVfpRegister dst, DwVfpRegister src1,
-                     DwVfpRegister src2) {
-  // Dd = veor(Dn, Dm) 64 bit integer exclusive OR.
-  // Instruction details available in ARM DDI 0406C.b, A8.8.888.
-  DCHECK(IsEnabled(NEON));
-  int vd, d;
-  dst.split_code(&vd, &d);
-  int vn, n;
-  src1.split_code(&vn, &n);
-  int vm, m;
-  src2.split_code(&vm, &m);
-  emit(0x1E6U * B23 | d * B22 | vn * B16 | vd * B12 | B8 | n * B7 | m * B5 |
-       B4 | vm);
-}
 
 enum BinaryBitwiseOp { VAND, VBIC, VBIF, VBIT, VBSL, VEOR, VORR, VORN };
 
-static Instr EncodeNeonBinaryBitwiseOp(BinaryBitwiseOp op,
-                                       const QwNeonRegister dst,
-                                       const QwNeonRegister src1,
-                                       const QwNeonRegister src2) {
+static Instr EncodeNeonBinaryBitwiseOp(BinaryBitwiseOp op, NeonRegType reg_type,
+                                       int dst_code, int src_code1,
+                                       int src_code2) {
   int op_encoding = 0;
   switch (op) {
     case VBIC:
@ -4170,13 +4221,14 @@ static Instr EncodeNeonBinaryBitwiseOp(BinaryBitwiseOp op,
break; break;
} }
int vd, d; int vd, d;
dst.split_code(&vd, &d); NeonSplitCode(reg_type, dst_code, &vd, &d, &op_encoding);
int vn, n; int vn, n;
src1.split_code(&vn, &n); NeonSplitCode(reg_type, src_code1, &vn, &n, &op_encoding);
int vm, m; int vm, m;
src2.split_code(&vm, &m); NeonSplitCode(reg_type, src_code2, &vm, &m, &op_encoding);
return 0x1E4U * B23 | op_encoding | d * B22 | vn * B16 | vd * B12 | B8 | return 0x1E4U * B23 | op_encoding | d * B22 | vn * B16 | vd * B12 | B8 |
n * B7 | B6 | m * B5 | B4 | vm; n * B7 | m * B5 | B4 | vm;
} }
 
 void Assembler::vand(QwNeonRegister dst, QwNeonRegister src1,
@@ -4184,15 +4236,26 @@ void Assembler::vand(QwNeonRegister dst, QwNeonRegister src1,
   // Qd = vand(Qn, Qm) SIMD AND.
   // Instruction details available in ARM DDI 0406C.b, A8.8.836.
   DCHECK(IsEnabled(NEON));
-  emit(EncodeNeonBinaryBitwiseOp(VAND, dst, src1, src2));
+  emit(EncodeNeonBinaryBitwiseOp(VAND, NEON_Q, dst.code(), src1.code(),
+                                 src2.code()));
 }
 
-void Assembler::vbsl(QwNeonRegister dst, const QwNeonRegister src1,
-                     const QwNeonRegister src2) {
-  DCHECK(IsEnabled(NEON));
+void Assembler::vbsl(QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2) {
   // Qd = vbsl(Qn, Qm) SIMD bitwise select.
   // Instruction details available in ARM DDI 0406C.b, A8-844.
-  emit(EncodeNeonBinaryBitwiseOp(VBSL, dst, src1, src2));
+  DCHECK(IsEnabled(NEON));
+  emit(EncodeNeonBinaryBitwiseOp(VBSL, NEON_Q, dst.code(), src1.code(),
+                                 src2.code()));
+}
+
+void Assembler::veor(DwVfpRegister dst, DwVfpRegister src1,
+                     DwVfpRegister src2) {
+  // Dd = veor(Dn, Dm) SIMD exclusive OR.
+  // Instruction details available in ARM DDI 0406C.b, A8.8.888.
+  DCHECK(IsEnabled(NEON));
+  emit(EncodeNeonBinaryBitwiseOp(VEOR, NEON_D, dst.code(), src1.code(),
+                                 src2.code()));
 }
 
 void Assembler::veor(QwNeonRegister dst, QwNeonRegister src1,
@@ -4200,7 +4263,8 @@ void Assembler::veor(QwNeonRegister dst, QwNeonRegister src1,
   // Qd = veor(Qn, Qm) SIMD exclusive OR.
   // Instruction details available in ARM DDI 0406C.b, A8.8.888.
   DCHECK(IsEnabled(NEON));
-  emit(EncodeNeonBinaryBitwiseOp(VEOR, dst, src1, src2));
+  emit(EncodeNeonBinaryBitwiseOp(VEOR, NEON_Q, dst.code(), src1.code(),
+                                 src2.code()));
 }
 
 void Assembler::vorr(QwNeonRegister dst, QwNeonRegister src1,
@@ -4208,7 +4272,8 @@ void Assembler::vorr(QwNeonRegister dst, QwNeonRegister src1,
   // Qd = vorr(Qn, Qm) SIMD OR.
   // Instruction details available in ARM DDI 0406C.b, A8.8.976.
   DCHECK(IsEnabled(NEON));
-  emit(EncodeNeonBinaryBitwiseOp(VORR, dst, src1, src2));
+  emit(EncodeNeonBinaryBitwiseOp(VORR, NEON_Q, dst.code(), src1.code(),
+                                 src2.code()));
 }
 
 enum FPBinOp {
@@ -4287,9 +4352,8 @@ enum IntegerBinOp {
 };
 
 static Instr EncodeNeonBinOp(IntegerBinOp op, NeonDataType dt,
-                             const QwNeonRegister dst,
-                             const QwNeonRegister src1,
-                             const QwNeonRegister src2) {
+                             QwNeonRegister dst, QwNeonRegister src1,
+                             QwNeonRegister src2) {
   int op_encoding = 0;
   switch (op) {
     case VADD:
@@ -4341,10 +4405,8 @@ static Instr EncodeNeonBinOp(IntegerBinOp op, NeonDataType dt,
          n * B7 | B6 | m * B5 | vm | op_encoding;
 }
 
-static Instr EncodeNeonBinOp(IntegerBinOp op, NeonSize size,
-                             const QwNeonRegister dst,
-                             const QwNeonRegister src1,
-                             const QwNeonRegister src2) {
+static Instr EncodeNeonBinOp(IntegerBinOp op, NeonSize size, QwNeonRegister dst,
+                             QwNeonRegister src1, QwNeonRegister src2) {
   // Map NeonSize values to the signed values in NeonDataType, so the U bit
   // will be 0.
   return EncodeNeonBinOp(op, static_cast<NeonDataType>(size), dst, src1, src2);
@@ -4406,16 +4468,16 @@ void Assembler::vmul(QwNeonRegister dst, QwNeonRegister src1,
   emit(EncodeNeonBinOp(VMULF, dst, src1, src2));
 }
 
-void Assembler::vmul(NeonSize size, QwNeonRegister dst,
-                     const QwNeonRegister src1, const QwNeonRegister src2) {
+void Assembler::vmul(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2) {
   DCHECK(IsEnabled(NEON));
   // Qd = vadd(Qn, Qm) SIMD integer multiply.
   // Instruction details available in ARM DDI 0406C.b, A8-960.
   emit(EncodeNeonBinOp(VMUL, size, dst, src1, src2));
 }
 
-void Assembler::vmin(const QwNeonRegister dst, const QwNeonRegister src1,
-                     const QwNeonRegister src2) {
+void Assembler::vmin(QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2) {
   DCHECK(IsEnabled(NEON));
   // Qd = vmin(Qn, Qm) SIMD floating point MIN.
   // Instruction details available in ARM DDI 0406C.b, A8-928.
@@ -4529,6 +4591,51 @@ void Assembler::vrsqrts(QwNeonRegister dst, QwNeonRegister src1,
   emit(EncodeNeonBinOp(VRSQRTS, dst, src1, src2));
 }
 
+enum NeonPairwiseOp { VPMIN, VPMAX };
+
+static Instr EncodeNeonPairwiseOp(NeonPairwiseOp op, NeonDataType dt,
+                                  DwVfpRegister dst, DwVfpRegister src1,
+                                  DwVfpRegister src2) {
+  int op_encoding = 0;
+  switch (op) {
+    case VPMIN:
+      op_encoding = 0xA * B8 | B4;
+      break;
+    case VPMAX:
+      op_encoding = 0xA * B8;
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vn, n;
+  src1.split_code(&vn, &n);
+  int vm, m;
+  src2.split_code(&vm, &m);
+  int size = NeonSz(dt);
+  int u = NeonU(dt);
+  return 0x1E4U * B23 | u * B24 | d * B22 | size * B20 | vn * B16 | vd * B12 |
+         n * B7 | m * B5 | vm | op_encoding;
+}
+
+void Assembler::vpmin(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
+                      DwVfpRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Dd = vpmin(Dn, Dm) SIMD integer pairwise MIN.
+  // Instruction details available in ARM DDI 0406C.b, A8-986.
+  emit(EncodeNeonPairwiseOp(VPMIN, dt, dst, src1, src2));
+}
+
+void Assembler::vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
+                      DwVfpRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Dd = vpmax(Dn, Dm) SIMD integer pairwise MAX.
+  // Instruction details available in ARM DDI 0406C.b, A8-986.
+  emit(EncodeNeonPairwiseOp(VPMAX, dt, dst, src1, src2));
+}
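[Editor's note — usage sketch, not part of the commit: the pairwise ops operate on D registers and reduce adjacent lane pairs, e.g.:]

    vpmin(NeonU8, d0, d1, d2);  // low half of d0 = minima of adjacent u8 pairs
                                // in d1; high half = the same reduction of d2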
 
 void Assembler::vtst(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
                      QwNeonRegister src2) {
   DCHECK(IsEnabled(NEON));
@@ -4585,8 +4692,8 @@ void Assembler::vcgt(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
   emit(EncodeNeonBinOp(VCGT, dt, dst, src1, src2));
 }
 
-void Assembler::vext(QwNeonRegister dst, const QwNeonRegister src1,
-                     const QwNeonRegister src2, int bytes) {
+void Assembler::vext(QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2, int bytes) {
   DCHECK(IsEnabled(NEON));
   // Qd = vext(Qn, Qm) SIMD byte extract.
   // Instruction details available in ARM DDI 0406C.b, A8-890.
@@ -4601,57 +4708,110 @@ void Assembler::vext(QwNeonRegister dst, const QwNeonRegister src1,
        n * B7 | B6 | m * B5 | vm);
 }
 
-void Assembler::vzip(NeonSize size, QwNeonRegister dst,
-                     const QwNeonRegister src) {
-  DCHECK(IsEnabled(NEON));
-  // Qd = vzip.<size>(Qn, Qm) SIMD zip (interleave).
-  // Instruction details available in ARM DDI 0406C.b, A8-1102.
+enum NeonSizedOp { VZIP, VUZP, VREV16, VREV32, VREV64, VTRN };
+
+static Instr EncodeNeonSizedOp(NeonSizedOp op, NeonRegType reg_type,
+                               NeonSize size, int dst_code, int src_code) {
+  int op_encoding = 0;
+  switch (op) {
+    case VZIP:
+      op_encoding = 0x2 * B16 | 0x3 * B7;
+      break;
+    case VUZP:
+      op_encoding = 0x2 * B16 | 0x2 * B7;
+      break;
+    case VREV16:
+      op_encoding = 0x2 * B7;
+      break;
+    case VREV32:
+      op_encoding = 0x1 * B7;
+      break;
+    case VREV64:
+      // op_encoding is 0;
+      break;
+    case VTRN:
+      op_encoding = 0x2 * B16 | B7;
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
   int vd, d;
-  dst.split_code(&vd, &d);
+  NeonSplitCode(reg_type, dst_code, &vd, &d, &op_encoding);
   int vm, m;
-  src.split_code(&vm, &m);
+  NeonSplitCode(reg_type, src_code, &vm, &m, &op_encoding);
   int sz = static_cast<int>(size);
-  emit(0x1E7U * B23 | d * B22 | 0x3 * B20 | sz * B18 | 2 * B16 | vd * B12 |
-       0x3 * B7 | B6 | m * B5 | vm);
+
+  return 0x1E7U * B23 | d * B22 | 0x3 * B20 | sz * B18 | vd * B12 | m * B5 |
+         vm | op_encoding;
+}
+
+void Assembler::vzip(NeonSize size, DwVfpRegister src1, DwVfpRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // vzip.<size>(Dn, Dm) SIMD zip (interleave).
+  // Instruction details available in ARM DDI 0406C.b, A8-1102.
+  emit(EncodeNeonSizedOp(VZIP, NEON_D, size, src1.code(), src2.code()));
+}
+
+void Assembler::vzip(NeonSize size, QwNeonRegister src1, QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // vzip.<size>(Qn, Qm) SIMD zip (interleave).
+  // Instruction details available in ARM DDI 0406C.b, A8-1102.
+  emit(EncodeNeonSizedOp(VZIP, NEON_Q, size, src1.code(), src2.code()));
 }
 
-static Instr EncodeNeonVREV(NeonSize op_size, NeonSize size,
-                            const QwNeonRegister dst,
-                            const QwNeonRegister src) {
-  // Qd = vrev<op_size>.<size>(Qn, Qm) SIMD scalar reverse.
-  // Instruction details available in ARM DDI 0406C.b, A8-1028.
-  DCHECK_GT(op_size, static_cast<int>(size));
-  int vd, d;
-  dst.split_code(&vd, &d);
-  int vm, m;
-  src.split_code(&vm, &m);
-  int sz = static_cast<int>(size);
-  int op = static_cast<int>(Neon64) - static_cast<int>(op_size);
-  return 0x1E7U * B23 | d * B22 | 0x3 * B20 | sz * B18 | vd * B12 | op * B7 |
-         B6 | m * B5 | vm;
+void Assembler::vuzp(NeonSize size, DwVfpRegister src1, DwVfpRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // vuzp.<size>(Dn, Dm) SIMD un-zip (de-interleave).
+  // Instruction details available in ARM DDI 0406C.b, A8-1100.
+  emit(EncodeNeonSizedOp(VUZP, NEON_D, size, src1.code(), src2.code()));
 }
 
-void Assembler::vrev16(NeonSize size, const QwNeonRegister dst,
-                       const QwNeonRegister src) {
+void Assembler::vuzp(NeonSize size, QwNeonRegister src1, QwNeonRegister src2) {
   DCHECK(IsEnabled(NEON));
-  emit(EncodeNeonVREV(Neon16, size, dst, src));
+  // vuzp.<size>(Qn, Qm) SIMD un-zip (de-interleave).
+  // Instruction details available in ARM DDI 0406C.b, A8-1100.
+  emit(EncodeNeonSizedOp(VUZP, NEON_Q, size, src1.code(), src2.code()));
 }
 
-void Assembler::vrev32(NeonSize size, const QwNeonRegister dst,
-                       const QwNeonRegister src) {
+void Assembler::vrev16(NeonSize size, QwNeonRegister dst, QwNeonRegister src) {
   DCHECK(IsEnabled(NEON));
-  emit(EncodeNeonVREV(Neon32, size, dst, src));
+  // Qd = vrev16.<size>(Qm) SIMD element reverse.
+  // Instruction details available in ARM DDI 0406C.b, A8-1028.
+  emit(EncodeNeonSizedOp(VREV16, NEON_Q, size, dst.code(), src.code()));
 }
 
-void Assembler::vrev64(NeonSize size, const QwNeonRegister dst,
-                       const QwNeonRegister src) {
+void Assembler::vrev32(NeonSize size, QwNeonRegister dst, QwNeonRegister src) {
   DCHECK(IsEnabled(NEON));
-  emit(EncodeNeonVREV(Neon64, size, dst, src));
+  // Qd = vrev32.<size>(Qm) SIMD element reverse.
+  // Instruction details available in ARM DDI 0406C.b, A8-1028.
+  emit(EncodeNeonSizedOp(VREV32, NEON_Q, size, dst.code(), src.code()));
+}
+
+void Assembler::vrev64(NeonSize size, QwNeonRegister dst, QwNeonRegister src) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vrev64.<size>(Qm) SIMD element reverse.
+  // Instruction details available in ARM DDI 0406C.b, A8-1028.
+  emit(EncodeNeonSizedOp(VREV64, NEON_Q, size, dst.code(), src.code()));
+}
+
+void Assembler::vtrn(NeonSize size, DwVfpRegister src1, DwVfpRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // vtrn.<size>(Dn, Dm) SIMD element transpose.
+  // Instruction details available in ARM DDI 0406C.b, A8-1096.
+  emit(EncodeNeonSizedOp(VTRN, NEON_D, size, src1.code(), src2.code()));
+}
+
+void Assembler::vtrn(NeonSize size, QwNeonRegister src1, QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // vtrn.<size>(Qn, Qm) SIMD element transpose.
+  // Instruction details available in ARM DDI 0406C.b, A8-1096.
+  emit(EncodeNeonSizedOp(VTRN, NEON_Q, size, src1.code(), src2.code()));
 }
 
 // Encode NEON vtbl / vtbx instruction.
-static Instr EncodeNeonVTB(const DwVfpRegister dst, const NeonListOperand& list,
-                           const DwVfpRegister index, bool vtbx) {
+static Instr EncodeNeonVTB(DwVfpRegister dst, const NeonListOperand& list,
+                           DwVfpRegister index, bool vtbx) {
   // Dd = vtbl(table, Dm) SIMD vector permute, zero at out of range indices.
   // Instruction details available in ARM DDI 0406C.b, A8-1094.
   // Dd = vtbx(table, Dm) SIMD vector permute, skip out of range indices.
@@ -4667,14 +4827,14 @@ static Instr EncodeNeonVTB(const DwVfpRegister dst, const NeonListOperand& list,
          list.length() * B8 | n * B7 | op * B6 | m * B5 | vm;
 }
 
-void Assembler::vtbl(const DwVfpRegister dst, const NeonListOperand& list,
-                     const DwVfpRegister index) {
+void Assembler::vtbl(DwVfpRegister dst, const NeonListOperand& list,
+                     DwVfpRegister index) {
   DCHECK(IsEnabled(NEON));
   emit(EncodeNeonVTB(dst, list, index, false));
 }
 
-void Assembler::vtbx(const DwVfpRegister dst, const NeonListOperand& list,
-                     const DwVfpRegister index) {
+void Assembler::vtbx(DwVfpRegister dst, const NeonListOperand& list,
+                     DwVfpRegister index) {
   DCHECK(IsEnabled(NEON));
   emit(EncodeNeonVTB(dst, list, index, true));
 }
@@ -4690,6 +4850,7 @@ void Assembler::nop(int type) {
   emit(al | 13*B21 | type*B12 | type);
 }
 
+void Assembler::pop() { add(sp, sp, Operand(kPointerSize)); }
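[Editor's note — not part of the commit: the argument-less pop() discards the top stack word without loading it; with V8's usual `__ masm->` shorthand:]

    __ push(r0);
    // ... the saved value turns out not to be needed ...
    __ pop();  // sp += kPointerSize; no register is written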
 
 bool Assembler::IsMovT(Instr instr) {
   instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask off conditions
@@ -4873,7 +5034,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
     data = RecordedAstId().ToInt();
     ClearRecordedAstId();
   }
-  RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
+  RelocInfo rinfo(pc_, rmode, data, NULL);
   reloc_info_writer.Write(&rinfo);
 }
@@ -5227,6 +5388,29 @@ void Assembler::PatchConstantPoolAccessInstruction(
   }
 }
 
+PatchingAssembler::PatchingAssembler(IsolateData isolate_data, byte* address,
+                                     int instructions)
+    : Assembler(isolate_data, address, instructions * kInstrSize + kGap) {
+  DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
+}
+
+PatchingAssembler::~PatchingAssembler() {
+  // Check that we don't have any pending constant pools.
+  DCHECK(pending_32_bit_constants_.empty());
+  DCHECK(pending_64_bit_constants_.empty());
+
+  // Check that the code was patched as expected.
+  DCHECK_EQ(pc_, buffer_ + buffer_size_ - kGap);
+  DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
+}
+
+void PatchingAssembler::Emit(Address addr) {
+  emit(reinterpret_cast<Instr>(addr));
+}
+
+void PatchingAssembler::FlushICache(Isolate* isolate) {
+  Assembler::FlushICache(isolate, buffer_, buffer_size_ - kGap);
+}
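[Editor's note — hypothetical usage sketch, not part of the commit. A PatchingAssembler wraps a fixed-size region of existing code, and its destructor DCHECKs that exactly `instructions` words were emitted:]

    PatchingAssembler patcher(isolate_data, pc, 1);  // patch one instruction slot
    patcher.Emit(target_address);                    // write the raw word
    patcher.FlushICache(isolate);                    // keep the I-cache coherent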
 
 }  // namespace internal
 }  // namespace v8

503 deps/v8/src/arm/assembler-arm.h

@@ -114,7 +114,7 @@ struct Register {
     kCode_no_reg = -1
   };
 
-  static const int kNumRegisters = Code::kAfterLast;
+  static constexpr int kNumRegisters = Code::kAfterLast;
 
   static Register from_code(int code) {
     DCHECK(code >= 0);
@@ -144,13 +144,13 @@ struct Register {
 // r7: context register
 // r8: constant pool pointer register if FLAG_enable_embedded_constant_pool.
 // r9: lithium scratch
-#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
+#define DECLARE_REGISTER(R) constexpr Register R = {Register::kCode_##R};
 GENERAL_REGISTERS(DECLARE_REGISTER)
 #undef DECLARE_REGISTER
-const Register no_reg = {Register::kCode_no_reg};
+constexpr Register no_reg = {Register::kCode_no_reg};
 
-static const bool kSimpleFPAliasing = false;
-static const bool kSimdMaskRegisters = false;
+constexpr bool kSimpleFPAliasing = false;
+constexpr bool kSimdMaskRegisters = false;
 
 // Single word VFP register.
 struct SwVfpRegister {
@@ -162,9 +162,9 @@ struct SwVfpRegister {
     kCode_no_reg = -1
   };
 
-  static const int kMaxNumRegisters = Code::kAfterLast;
+  static constexpr int kMaxNumRegisters = Code::kAfterLast;
 
-  static const int kSizeInBytes = 4;
+  static constexpr int kSizeInBytes = 4;
 
   bool is_valid() const { return 0 <= reg_code && reg_code < 32; }
   bool is(SwVfpRegister reg) const { return reg_code == reg.reg_code; }
@@ -180,11 +180,14 @@ struct SwVfpRegister {
     SwVfpRegister r = {code};
     return r;
   }
-  void split_code(int* vm, int* m) const {
-    DCHECK(is_valid());
+  static void split_code(int reg_code, int* vm, int* m) {
+    DCHECK(from_code(reg_code).is_valid());
     *m = reg_code & 0x1;
     *vm = reg_code >> 1;
   }
+  void split_code(int* vm, int* m) const {
+    split_code(reg_code, vm, m);
+  }
 
   int reg_code;
 };
@@ -201,7 +204,7 @@ struct DwVfpRegister {
     kCode_no_reg = -1
   };
 
-  static const int kMaxNumRegisters = Code::kAfterLast;
+  static constexpr int kMaxNumRegisters = Code::kAfterLast;
 
   inline static int NumRegisters();
 
@@ -209,7 +212,7 @@ struct DwVfpRegister {
   // hold 0.0, that does not fit in the immediate field of vmov instructions.
   // d14: 0.0
   // d15: scratch register.
-  static const int kSizeInBytes = 8;
+  static constexpr int kSizeInBytes = 8;
 
   bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
   bool is(DwVfpRegister reg) const { return reg_code == reg.reg_code; }
@@ -226,11 +229,14 @@ struct DwVfpRegister {
     DwVfpRegister r = {code};
     return r;
   }
-  void split_code(int* vm, int* m) const {
-    DCHECK(is_valid());
+  static void split_code(int reg_code, int* vm, int* m) {
+    DCHECK(from_code(reg_code).is_valid());
     *m = (reg_code & 0x10) >> 4;
     *vm = reg_code & 0x0F;
   }
+  void split_code(int* vm, int* m) const {
+    split_code(reg_code, vm, m);
+  }
 
   int reg_code;
 };
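[Editor's note — worked example, not part of the commit: the new static split_code overloads make the encoding math callable with a bare register code.]

    int vm, m;
    DwVfpRegister::split_code(17, &vm, &m);  // d17: m = (17 & 0x10) >> 4 = 1, vm = 0x1
    QwNeonRegister::split_code(9, &vm, &m);  // q9: code doubled to 18, so m = 1, vm = 0x2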
@@ -242,10 +248,9 @@ typedef DwVfpRegister DoubleRegister;
 // Double word VFP register d0-15.
 struct LowDwVfpRegister {
  public:
-  static const int kMaxNumLowRegisters = 16;
-  operator DwVfpRegister() const {
-    DwVfpRegister r = { reg_code };
-    return r;
+  static constexpr int kMaxNumLowRegisters = 16;
+  constexpr operator DwVfpRegister() const {
+    return DwVfpRegister { reg_code };
   }
   static LowDwVfpRegister from_code(int code) {
     LowDwVfpRegister r = { code };
@@ -282,7 +287,7 @@ struct LowDwVfpRegister {
 
 // Quad word NEON register.
 struct QwNeonRegister {
-  static const int kMaxNumRegisters = 16;
+  static constexpr int kMaxNumRegisters = 16;
 
   static QwNeonRegister from_code(int code) {
     QwNeonRegister r = { code };
@@ -297,12 +302,15 @@ struct QwNeonRegister {
     DCHECK(is_valid());
     return reg_code;
   }
-  void split_code(int* vm, int* m) const {
-    DCHECK(is_valid());
+  static void split_code(int reg_code, int* vm, int* m) {
+    DCHECK(from_code(reg_code).is_valid());
     int encoded_code = reg_code << 1;
     *m = (encoded_code & 0x10) >> 4;
     *vm = encoded_code & 0x0F;
   }
+  void split_code(int* vm, int* m) const {
+    split_code(reg_code, vm, m);
+  }
   DwVfpRegister low() const {
     DwVfpRegister reg;
     reg.reg_code = reg_code * 2;
@@ -328,101 +336,100 @@ typedef QwNeonRegister Simd128Register;
 
 // Support for the VFP registers s0 to s31 (d0 to d15).
 // Note that "s(N):s(N+1)" is the same as "d(N/2)".
-const SwVfpRegister s0 = { 0 };
-const SwVfpRegister s1 = { 1 };
-const SwVfpRegister s2 = { 2 };
-const SwVfpRegister s3 = { 3 };
-const SwVfpRegister s4 = { 4 };
-const SwVfpRegister s5 = { 5 };
-const SwVfpRegister s6 = { 6 };
-const SwVfpRegister s7 = { 7 };
-const SwVfpRegister s8 = { 8 };
-const SwVfpRegister s9 = { 9 };
-const SwVfpRegister s10 = { 10 };
-const SwVfpRegister s11 = { 11 };
-const SwVfpRegister s12 = { 12 };
-const SwVfpRegister s13 = { 13 };
-const SwVfpRegister s14 = { 14 };
-const SwVfpRegister s15 = { 15 };
-const SwVfpRegister s16 = { 16 };
-const SwVfpRegister s17 = { 17 };
-const SwVfpRegister s18 = { 18 };
-const SwVfpRegister s19 = { 19 };
-const SwVfpRegister s20 = { 20 };
-const SwVfpRegister s21 = { 21 };
-const SwVfpRegister s22 = { 22 };
-const SwVfpRegister s23 = { 23 };
-const SwVfpRegister s24 = { 24 };
-const SwVfpRegister s25 = { 25 };
-const SwVfpRegister s26 = { 26 };
-const SwVfpRegister s27 = { 27 };
-const SwVfpRegister s28 = { 28 };
-const SwVfpRegister s29 = { 29 };
-const SwVfpRegister s30 = { 30 };
-const SwVfpRegister s31 = { 31 };
+constexpr SwVfpRegister s0 = { 0 };
+constexpr SwVfpRegister s1 = { 1 };
+constexpr SwVfpRegister s2 = { 2 };
+constexpr SwVfpRegister s3 = { 3 };
+constexpr SwVfpRegister s4 = { 4 };
+constexpr SwVfpRegister s5 = { 5 };
+constexpr SwVfpRegister s6 = { 6 };
+constexpr SwVfpRegister s7 = { 7 };
+constexpr SwVfpRegister s8 = { 8 };
+constexpr SwVfpRegister s9 = { 9 };
+constexpr SwVfpRegister s10 = { 10 };
+constexpr SwVfpRegister s11 = { 11 };
+constexpr SwVfpRegister s12 = { 12 };
+constexpr SwVfpRegister s13 = { 13 };
+constexpr SwVfpRegister s14 = { 14 };
+constexpr SwVfpRegister s15 = { 15 };
+constexpr SwVfpRegister s16 = { 16 };
+constexpr SwVfpRegister s17 = { 17 };
+constexpr SwVfpRegister s18 = { 18 };
+constexpr SwVfpRegister s19 = { 19 };
+constexpr SwVfpRegister s20 = { 20 };
+constexpr SwVfpRegister s21 = { 21 };
+constexpr SwVfpRegister s22 = { 22 };
+constexpr SwVfpRegister s23 = { 23 };
+constexpr SwVfpRegister s24 = { 24 };
+constexpr SwVfpRegister s25 = { 25 };
+constexpr SwVfpRegister s26 = { 26 };
+constexpr SwVfpRegister s27 = { 27 };
+constexpr SwVfpRegister s28 = { 28 };
+constexpr SwVfpRegister s29 = { 29 };
+constexpr SwVfpRegister s30 = { 30 };
+constexpr SwVfpRegister s31 = { 31 };
 
-const DwVfpRegister no_dreg = { -1 };
-const LowDwVfpRegister d0 = { 0 };
-const LowDwVfpRegister d1 = { 1 };
-const LowDwVfpRegister d2 = { 2 };
-const LowDwVfpRegister d3 = { 3 };
-const LowDwVfpRegister d4 = { 4 };
-const LowDwVfpRegister d5 = { 5 };
-const LowDwVfpRegister d6 = { 6 };
-const LowDwVfpRegister d7 = { 7 };
-const LowDwVfpRegister d8 = { 8 };
-const LowDwVfpRegister d9 = { 9 };
-const LowDwVfpRegister d10 = { 10 };
-const LowDwVfpRegister d11 = { 11 };
-const LowDwVfpRegister d12 = { 12 };
-const LowDwVfpRegister d13 = { 13 };
-const LowDwVfpRegister d14 = { 14 };
-const LowDwVfpRegister d15 = { 15 };
-const DwVfpRegister d16 = { 16 };
-const DwVfpRegister d17 = { 17 };
-const DwVfpRegister d18 = { 18 };
-const DwVfpRegister d19 = { 19 };
-const DwVfpRegister d20 = { 20 };
-const DwVfpRegister d21 = { 21 };
-const DwVfpRegister d22 = { 22 };
-const DwVfpRegister d23 = { 23 };
-const DwVfpRegister d24 = { 24 };
-const DwVfpRegister d25 = { 25 };
-const DwVfpRegister d26 = { 26 };
-const DwVfpRegister d27 = { 27 };
-const DwVfpRegister d28 = { 28 };
-const DwVfpRegister d29 = { 29 };
-const DwVfpRegister d30 = { 30 };
-const DwVfpRegister d31 = { 31 };
+constexpr DwVfpRegister no_dreg = { -1 };
+constexpr LowDwVfpRegister d0 = { 0 };
+constexpr LowDwVfpRegister d1 = { 1 };
+constexpr LowDwVfpRegister d2 = { 2 };
+constexpr LowDwVfpRegister d3 = { 3 };
+constexpr LowDwVfpRegister d4 = { 4 };
+constexpr LowDwVfpRegister d5 = { 5 };
+constexpr LowDwVfpRegister d6 = { 6 };
+constexpr LowDwVfpRegister d7 = { 7 };
+constexpr LowDwVfpRegister d8 = { 8 };
+constexpr LowDwVfpRegister d9 = { 9 };
+constexpr LowDwVfpRegister d10 = { 10 };
+constexpr LowDwVfpRegister d11 = { 11 };
+constexpr LowDwVfpRegister d12 = { 12 };
+constexpr LowDwVfpRegister d13 = { 13 };
+constexpr LowDwVfpRegister d14 = { 14 };
+constexpr LowDwVfpRegister d15 = { 15 };
+constexpr DwVfpRegister d16 = { 16 };
+constexpr DwVfpRegister d17 = { 17 };
+constexpr DwVfpRegister d18 = { 18 };
+constexpr DwVfpRegister d19 = { 19 };
+constexpr DwVfpRegister d20 = { 20 };
+constexpr DwVfpRegister d21 = { 21 };
+constexpr DwVfpRegister d22 = { 22 };
+constexpr DwVfpRegister d23 = { 23 };
+constexpr DwVfpRegister d24 = { 24 };
+constexpr DwVfpRegister d25 = { 25 };
+constexpr DwVfpRegister d26 = { 26 };
+constexpr DwVfpRegister d27 = { 27 };
+constexpr DwVfpRegister d28 = { 28 };
+constexpr DwVfpRegister d29 = { 29 };
+constexpr DwVfpRegister d30 = { 30 };
+constexpr DwVfpRegister d31 = { 31 };
 
-const QwNeonRegister q0 = { 0 };
-const QwNeonRegister q1 = { 1 };
-const QwNeonRegister q2 = { 2 };
-const QwNeonRegister q3 = { 3 };
-const QwNeonRegister q4 = { 4 };
-const QwNeonRegister q5 = { 5 };
-const QwNeonRegister q6 = { 6 };
-const QwNeonRegister q7 = { 7 };
-const QwNeonRegister q8 = { 8 };
-const QwNeonRegister q9 = { 9 };
-const QwNeonRegister q10 = { 10 };
-const QwNeonRegister q11 = { 11 };
-const QwNeonRegister q12 = { 12 };
-const QwNeonRegister q13 = { 13 };
-const QwNeonRegister q14 = { 14 };
-const QwNeonRegister q15 = { 15 };
+constexpr QwNeonRegister q0 = { 0 };
+constexpr QwNeonRegister q1 = { 1 };
+constexpr QwNeonRegister q2 = { 2 };
+constexpr QwNeonRegister q3 = { 3 };
+constexpr QwNeonRegister q4 = { 4 };
+constexpr QwNeonRegister q5 = { 5 };
+constexpr QwNeonRegister q6 = { 6 };
+constexpr QwNeonRegister q7 = { 7 };
+constexpr QwNeonRegister q8 = { 8 };
+constexpr QwNeonRegister q9 = { 9 };
+constexpr QwNeonRegister q10 = { 10 };
+constexpr QwNeonRegister q11 = { 11 };
+constexpr QwNeonRegister q12 = { 12 };
+constexpr QwNeonRegister q13 = { 13 };
+constexpr QwNeonRegister q14 = { 14 };
+constexpr QwNeonRegister q15 = { 15 };
 
-// Aliases for double registers.  Defined using #define instead of
-// "static const DwVfpRegister&" because Clang complains otherwise when a
-// compilation unit that includes this header doesn't use the variables.
-#define kFirstCalleeSavedDoubleReg d8
-#define kLastCalleeSavedDoubleReg d15
-// kDoubleRegZero and kScratchDoubleReg must pair to form kScratchQuadReg.
-#define kDoubleRegZero d14
-#define kScratchDoubleReg d15
+// Aliases for double registers.
+constexpr LowDwVfpRegister kFirstCalleeSavedDoubleReg = d8;
+constexpr LowDwVfpRegister kLastCalleeSavedDoubleReg = d15;
+// kDoubleRegZero and kScratchDoubleReg must pair to form kScratchQuadReg. SIMD
+// code depends on kDoubleRegZero before kScratchDoubleReg.
+constexpr LowDwVfpRegister kDoubleRegZero = d14;
+constexpr LowDwVfpRegister kScratchDoubleReg = d15;
 // After using kScratchQuadReg, kDoubleRegZero must be reset to 0.
-#define kScratchQuadReg q7
+constexpr QwNeonRegister kScratchQuadReg = q7;
 
 // Coprocessor register
 struct CRegister {
@@ -442,24 +449,24 @@ struct CRegister {
 };
 
-const CRegister no_creg = { -1 };
-const CRegister cr0 = { 0 };
-const CRegister cr1 = { 1 };
-const CRegister cr2 = { 2 };
-const CRegister cr3 = { 3 };
-const CRegister cr4 = { 4 };
-const CRegister cr5 = { 5 };
-const CRegister cr6 = { 6 };
-const CRegister cr7 = { 7 };
-const CRegister cr8 = { 8 };
-const CRegister cr9 = { 9 };
-const CRegister cr10 = { 10 };
-const CRegister cr11 = { 11 };
-const CRegister cr12 = { 12 };
-const CRegister cr13 = { 13 };
-const CRegister cr14 = { 14 };
-const CRegister cr15 = { 15 };
+constexpr CRegister no_creg = { -1 };
+constexpr CRegister cr0 = { 0 };
+constexpr CRegister cr1 = { 1 };
+constexpr CRegister cr2 = { 2 };
+constexpr CRegister cr3 = { 3 };
+constexpr CRegister cr4 = { 4 };
+constexpr CRegister cr5 = { 5 };
+constexpr CRegister cr6 = { 6 };
+constexpr CRegister cr7 = { 7 };
+constexpr CRegister cr8 = { 8 };
+constexpr CRegister cr9 = { 9 };
+constexpr CRegister cr10 = { 10 };
+constexpr CRegister cr11 = { 11 };
+constexpr CRegister cr12 = { 12 };
+constexpr CRegister cr13 = { 13 };
+constexpr CRegister cr14 = { 14 };
+constexpr CRegister cr15 = { 15 };
 
 // Coprocessor number
@@ -492,9 +499,7 @@ class Operand BASE_EMBEDDED {
   // immediate
   INLINE(explicit Operand(int32_t immediate,
                           RelocInfo::Mode rmode = RelocInfo::NONE32));
-  INLINE(static Operand Zero()) {
-    return Operand(static_cast<int32_t>(0));
-  }
+  INLINE(static Operand Zero());
   INLINE(explicit Operand(const ExternalReference& f));
   explicit Operand(Handle<Object> handle);
   INLINE(explicit Operand(Smi* value));
@@ -520,7 +525,12 @@ class Operand BASE_EMBEDDED {
   explicit Operand(Register rm, ShiftOp shift_op, Register rs);
 
   // Return true if this is a register operand.
-  INLINE(bool is_reg() const);
+  INLINE(bool is_reg() const) {
+    return rm_.is_valid() &&
+           rs_.is(no_reg) &&
+           shift_op_ == LSL &&
+           shift_imm_ == 0;
+  }
 
   // Return the number of actual instructions required to implement the given
   // instruction for this particular operand. This can be a single instruction,
@@ -667,8 +677,8 @@ class NeonListOperand BASE_EMBEDDED {
 struct VmovIndex {
   unsigned char index;
 };
-const VmovIndex VmovIndexLo = { 0 };
-const VmovIndex VmovIndexHi = { 1 };
+constexpr VmovIndex VmovIndexLo = { 0 };
+constexpr VmovIndex VmovIndexHi = { 1 };
 
 class Assembler : public AssemblerBase {
  public:
@@ -685,7 +695,9 @@ class Assembler : public AssemblerBase {
   // for code generation and assumes its size to be buffer_size. If the buffer
   // is too small, a fatal error occurs. No deallocation of the buffer is done
   // upon destruction of the assembler.
-  Assembler(Isolate* isolate, void* buffer, int buffer_size);
+  Assembler(Isolate* isolate, void* buffer, int buffer_size)
+      : Assembler(IsolateData(isolate), buffer, buffer_size) {}
+  Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
   virtual ~Assembler();
 
   // GetCode emits any pending (non-emitted) code and fills the descriptor
@@ -725,6 +737,7 @@ class Assembler : public AssemblerBase {
                                              Address constant_pool));
 
   // Read/Modify the code target address in the branch/call instruction at pc.
+  // The isolate argument is unused (and may be nullptr) when skipping flushing.
   INLINE(static Address target_address_at(Address pc, Address constant_pool));
   INLINE(static void set_target_address_at(
       Isolate* isolate, Address pc, Address constant_pool, Address target,
@@ -756,24 +769,24 @@ class Assembler : public AssemblerBase {
   // Here we are patching the address in the constant pool, not the actual call
   // instruction. The address in the constant pool is the same size as a
   // pointer.
-  static const int kSpecialTargetSize = kPointerSize;
+  static constexpr int kSpecialTargetSize = kPointerSize;
 
   // Size of an instruction.
-  static const int kInstrSize = sizeof(Instr);
+  static constexpr int kInstrSize = sizeof(Instr);
 
   // Distance between start of patched debug break slot and the emitted address
   // to jump to.
   // Patched debug break slot code is:
   //  ldr ip, [pc, #0]   @ emited address and start
   //  blx ip
-  static const int kPatchDebugBreakSlotAddressOffset = 2 * kInstrSize;
+  static constexpr int kPatchDebugBreakSlotAddressOffset = 2 * kInstrSize;
 
   // Difference between address of current opcode and value read from pc
   // register.
-  static const int kPcLoadDelta = 8;
+  static constexpr int kPcLoadDelta = 8;
 
-  static const int kDebugBreakSlotInstructions = 4;
-  static const int kDebugBreakSlotLength =
+  static constexpr int kDebugBreakSlotInstructions = 4;
+  static constexpr int kDebugBreakSlotLength =
       kDebugBreakSlotInstructions * kInstrSize;
 
   // ---------------------------------------------------------------------------
@@ -814,9 +827,7 @@ class Assembler : public AssemblerBase {
   void sub(Register dst, Register src1, const Operand& src2,
            SBit s = LeaveCC, Condition cond = al);
   void sub(Register dst, Register src1, Register src2,
-           SBit s = LeaveCC, Condition cond = al) {
-    sub(dst, src1, Operand(src2), s, cond);
-  }
+           SBit s = LeaveCC, Condition cond = al);
 
   void rsb(Register dst, Register src1, const Operand& src2,
            SBit s = LeaveCC, Condition cond = al);
@@ -824,9 +835,7 @@ class Assembler : public AssemblerBase {
   void add(Register dst, Register src1, const Operand& src2,
            SBit s = LeaveCC, Condition cond = al);
   void add(Register dst, Register src1, Register src2,
-           SBit s = LeaveCC, Condition cond = al) {
-    add(dst, src1, Operand(src2), s, cond);
-  }
+           SBit s = LeaveCC, Condition cond = al);
 
   void adc(Register dst, Register src1, const Operand& src2,
            SBit s = LeaveCC, Condition cond = al);
@@ -838,16 +847,13 @@ class Assembler : public AssemblerBase {
            SBit s = LeaveCC, Condition cond = al);
 
   void tst(Register src1, const Operand& src2, Condition cond = al);
-  void tst(Register src1, Register src2, Condition cond = al) {
-    tst(src1, Operand(src2), cond);
-  }
+  void tst(Register src1, Register src2, Condition cond = al);
 
   void teq(Register src1, const Operand& src2, Condition cond = al);
 
   void cmp(Register src1, const Operand& src2, Condition cond = al);
-  void cmp(Register src1, Register src2, Condition cond = al) {
-    cmp(src1, Operand(src2), cond);
-  }
+  void cmp(Register src1, Register src2, Condition cond = al);
 
   void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond = al);
   void cmn(Register src1, const Operand& src2, Condition cond = al);
@@ -855,15 +861,11 @@ class Assembler : public AssemblerBase {
   void orr(Register dst, Register src1, const Operand& src2,
            SBit s = LeaveCC, Condition cond = al);
   void orr(Register dst, Register src1, Register src2,
-           SBit s = LeaveCC, Condition cond = al) {
-    orr(dst, src1, Operand(src2), s, cond);
-  }
+           SBit s = LeaveCC, Condition cond = al);
 
   void mov(Register dst, const Operand& src,
            SBit s = LeaveCC, Condition cond = al);
-  void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al) {
-    mov(dst, Operand(src), s, cond);
-  }
+  void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al);
 
   // Load the position of the label relative to the generated code object
   // pointer in a register.
@@ -883,31 +885,13 @@ class Assembler : public AssemblerBase {
   // Shift instructions
 
   void asr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
-           Condition cond = al) {
-    if (src2.is_reg()) {
-      mov(dst, Operand(src1, ASR, src2.rm()), s, cond);
-    } else {
-      mov(dst, Operand(src1, ASR, src2.immediate()), s, cond);
-    }
-  }
+           Condition cond = al);
 
   void lsl(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
-           Condition cond = al) {
-    if (src2.is_reg()) {
-      mov(dst, Operand(src1, LSL, src2.rm()), s, cond);
-    } else {
-      mov(dst, Operand(src1, LSL, src2.immediate()), s, cond);
-    }
-  }
+           Condition cond = al);
 
   void lsr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
-           Condition cond = al) {
-    if (src2.is_reg()) {
-      mov(dst, Operand(src1, LSR, src2.rm()), s, cond);
-    } else {
-      mov(dst, Operand(src1, LSR, src2.immediate()), s, cond);
-    }
-  }
+           Condition cond = al);
 
   // Multiply instructions
@@ -1337,33 +1321,36 @@ class Assembler : public AssemblerBase {
   void vst1(NeonSize size,
             const NeonListOperand& src,
             const NeonMemOperand& dst);
-  // dt represents the narrower type
   void vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src);
+  // dt represents the narrower type.
+  void vqmovn(NeonDataType dt, DwVfpRegister dst, QwNeonRegister src);
 
   // Only unconditional core <-> scalar moves are currently supported.
   void vmov(NeonDataType dt, DwVfpRegister dst, int index, Register src);
   void vmov(NeonDataType dt, Register dst, DwVfpRegister src, int index);
 
-  void vmov(const QwNeonRegister dst, const QwNeonRegister src);
-  void vmvn(const QwNeonRegister dst, const QwNeonRegister src);
+  void vmov(QwNeonRegister dst, QwNeonRegister src);
+  void vdup(NeonSize size, QwNeonRegister dst, Register src);
+  void vdup(QwNeonRegister dst, SwVfpRegister src);
+
+  void vcvt_f32_s32(QwNeonRegister dst, QwNeonRegister src);
+  void vcvt_f32_u32(QwNeonRegister dst, QwNeonRegister src);
+  void vcvt_s32_f32(QwNeonRegister dst, QwNeonRegister src);
+  void vcvt_u32_f32(QwNeonRegister dst, QwNeonRegister src);
+
+  void vmvn(QwNeonRegister dst, QwNeonRegister src);
   void vswp(DwVfpRegister dst, DwVfpRegister src);
   void vswp(QwNeonRegister dst, QwNeonRegister src);
-  // vdup conditional execution isn't supported.
-  void vdup(NeonSize size, const QwNeonRegister dst, const Register src);
-  void vdup(const QwNeonRegister dst, const SwVfpRegister src);
-
-  void vcvt_f32_s32(const QwNeonRegister dst, const QwNeonRegister src);
-  void vcvt_f32_u32(const QwNeonRegister dst, const QwNeonRegister src);
-  void vcvt_s32_f32(const QwNeonRegister dst, const QwNeonRegister src);
-  void vcvt_u32_f32(const QwNeonRegister dst, const QwNeonRegister src);
-
-  void vabs(const QwNeonRegister dst, const QwNeonRegister src);
-  void vabs(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src);
-  void vneg(const QwNeonRegister dst, const QwNeonRegister src);
-  void vneg(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src);
-
-  void veor(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2);
+  void vabs(QwNeonRegister dst, QwNeonRegister src);
+  void vabs(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
+  void vneg(QwNeonRegister dst, QwNeonRegister src);
+  void vneg(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
+
   void vand(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
-  void vbsl(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+  void veor(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2);
   void veor(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+  void vbsl(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
   void vorr(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
   void vadd(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
   void vadd(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
@@ -1385,6 +1372,10 @@ class Assembler : public AssemblerBase {
   void vmax(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
   void vmax(NeonDataType dt, QwNeonRegister dst,
             QwNeonRegister src1, QwNeonRegister src2);
+  void vpmin(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
+             DwVfpRegister src2);
+  void vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
+             DwVfpRegister src2);
   void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
   void vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
   // vrecpe and vrsqrte only support floating point lanes.
@@ -1398,24 +1389,26 @@ class Assembler : public AssemblerBase {
  void vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vcge(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
- void vcge(NeonDataType dt, QwNeonRegister dst,
-           QwNeonRegister src1, QwNeonRegister src2);
+ void vcge(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+           QwNeonRegister src2);
  void vcgt(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
- void vcgt(NeonDataType dt, QwNeonRegister dst,
-           QwNeonRegister src1, QwNeonRegister src2);
- void vext(const QwNeonRegister dst, const QwNeonRegister src1,
-           const QwNeonRegister src2, int bytes);
- void vzip(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src);
- void vrev16(NeonSize size, const QwNeonRegister dst,
-             const QwNeonRegister src);
- void vrev32(NeonSize size, const QwNeonRegister dst,
-             const QwNeonRegister src);
- void vrev64(NeonSize size, const QwNeonRegister dst,
-             const QwNeonRegister src);
- void vtbl(const DwVfpRegister dst, const NeonListOperand& list,
-           const DwVfpRegister index);
- void vtbx(const DwVfpRegister dst, const NeonListOperand& list,
-           const DwVfpRegister index);
+ void vcgt(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+           QwNeonRegister src2);
+ void vext(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2,
+           int bytes);
+ void vzip(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
+ void vzip(NeonSize size, QwNeonRegister src1, QwNeonRegister src2);
+ void vuzp(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
+ void vuzp(NeonSize size, QwNeonRegister src1, QwNeonRegister src2);
+ void vrev16(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
+ void vrev32(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
+ void vrev64(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
+ void vtrn(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
+ void vtrn(NeonSize size, QwNeonRegister src1, QwNeonRegister src2);
+ void vtbl(DwVfpRegister dst, const NeonListOperand& list,
+           DwVfpRegister index);
+ void vtbx(DwVfpRegister dst, const NeonListOperand& list,
+           DwVfpRegister index);
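For readers unfamiliar with the pairwise NEON operations declared above, here is a scalar model of vpmin (a sketch, not V8 code; four lanes stand in for the 16-bit lanes of a 64-bit D register, and the lane ordering is the usual NEON convention):

#include <algorithm>
#include <array>
#include <cassert>

// NEON "pairwise" ops reduce adjacent element pairs: the first source's
// pairs fill the low half of the result, the second source's pairs fill
// the high half.
std::array<int, 4> pairwise_min(const std::array<int, 4>& a,
                                const std::array<int, 4>& b) {
  return {std::min(a[0], a[1]), std::min(a[2], a[3]),
          std::min(b[0], b[1]), std::min(b[2], b[3])};
}

int main() {
  std::array<int, 4> a{3, 1, 4, 1}, b{5, 9, 2, 6};
  std::array<int, 4> r = pairwise_min(a, b);
  assert(r == (std::array<int, 4>{1, 1, 5, 2}));
  return 0;
}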
  // Pseudo instructions

@@ -1443,9 +1436,7 @@ class Assembler : public AssemblerBase {
    ldr(dst, MemOperand(sp, 4, PostIndex), cond);
  }

- void pop() {
-   add(sp, sp, Operand(kPointerSize));
- }
+ void pop();
  void vpush(DwVfpRegister src, Condition cond = al) {
    vstm(db_w, sp, src, src, cond);
@@ -1615,12 +1606,14 @@ class Assembler : public AssemblerBase {
  // reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
  // PC-relative loads, thereby defining a maximum distance between the
  // instruction and the accessed constant.
- static const int kMaxDistToIntPool = 4*KB;
- static const int kMaxDistToFPPool = 1*KB;
+ static constexpr int kMaxDistToIntPool = 4 * KB;
+ static constexpr int kMaxDistToFPPool = 1 * KB;
  // All relocations could be integer, it therefore acts as the limit.
- static const int kMinNumPendingConstants = 4;
- static const int kMaxNumPending32Constants = kMaxDistToIntPool / kInstrSize;
- static const int kMaxNumPending64Constants = kMaxDistToFPPool / kInstrSize;
+ static constexpr int kMinNumPendingConstants = 4;
+ static constexpr int kMaxNumPending32Constants =
+     kMaxDistToIntPool / kInstrSize;
+ static constexpr int kMaxNumPending64Constants =
+     kMaxDistToFPPool / kInstrSize;

  // Postpone the generation of the constant pool for the specified number of
  // instructions.
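The buffer-sizing arithmetic above can be checked in isolation. A standalone sketch, assuming ARM's 4-byte kInstrSize and KB = 1024 (both restated here as assumptions, mirroring V8's values):

#include <cassert>

constexpr int KB = 1024;       // assumed, as in V8
constexpr int kInstrSize = 4;  // assumed: ARM instructions are 4 bytes

constexpr int kMaxDistToIntPool = 4 * KB;
constexpr int kMaxDistToFPPool = 1 * KB;
constexpr int kMaxNumPending32Constants = kMaxDistToIntPool / kInstrSize;
constexpr int kMaxNumPending64Constants = kMaxDistToFPPool / kInstrSize;

int main() {
  // Every instruction in the reachable window could reference the pool,
  // so the pending buffers hold one entry per instruction in that window.
  assert(kMaxNumPending32Constants == 1024);
  assert(kMaxNumPending64Constants == 256);
  return 0;
}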
@@ -1715,15 +1708,33 @@ class Assembler : public AssemblerBase {
           (reg.reg_code < LowDwVfpRegister::kMaxNumLowRegisters / 2);
  }

- private:
-  int next_buffer_check_;  // pc offset of next buffer check
+  inline void emit(Instr x);

  // Code generation
  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences do
  // not have to check for overflow. The same is true for writes of large
  // relocation info entries.
- static const int kGap = 32;
+ static constexpr int kGap = 32;

+  // Relocation info generation
+  // Each relocation is encoded as a variable size value
+  static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+  RelocInfoWriter reloc_info_writer;
+
+  // ConstantPoolEntry records are used during code generation as temporary
+  // containers for constants and code target addresses until they are emitted
+  // to the constant pool. These records are temporarily stored in a separate
+  // buffer until a constant pool is emitted.
+  // If every instruction in a long sequence is accessing the pool, we need one
+  // pending relocation entry per instruction.
+  // The buffers of pending constant pool entries.
+  std::vector<ConstantPoolEntry> pending_32_bit_constants_;
+  std::vector<ConstantPoolEntry> pending_64_bit_constants_;
+
+ private:
+  int next_buffer_check_;  // pc offset of next buffer check
  // Constant pool generation
  // Pools are emitted in the instruction stream, preferably after unconditional

@@ -1739,8 +1750,8 @@
  // expensive. By default we only check again once a number of instructions
  // has been generated. That also means that the sizing of the buffers is not
  // an exact science, and that we rely on some slop to not overrun buffers.
- static const int kCheckPoolIntervalInst = 32;
- static const int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
+ static constexpr int kCheckPoolIntervalInst = 32;
+ static constexpr int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;

  // Emission of the constant pool may be blocked in some code sequences.

@@ -1752,31 +1763,13 @@
  int first_const_pool_32_use_;
  int first_const_pool_64_use_;

- // Relocation info generation
- // Each relocation is encoded as a variable size value
- static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
- RelocInfoWriter reloc_info_writer;
-
- // ConstantPoolEntry records are used during code generation as temporary
- // containers for constants and code target addresses until they are emitted
- // to the constant pool. These records are temporarily stored in a separate
- // buffer until a constant pool is emitted.
- // If every instruction in a long sequence is accessing the pool, we need one
- // pending relocation entry per instruction.
- // The buffers of pending constant pool entries.
- std::vector<ConstantPoolEntry> pending_32_bit_constants_;
- std::vector<ConstantPoolEntry> pending_64_bit_constants_;

  ConstantPoolBuilder constant_pool_builder_;

  // The bound position, before this we cannot do instruction elimination.
  int last_bound_pos_;

- // Code emission
  inline void CheckBuffer();
  void GrowBuffer();
- inline void emit(Instr x);

  // 32-bit immediate values
  void move_32_bit_immediate(Register rd,
@@ -1808,12 +1801,20 @@ class Assembler : public AssemblerBase {
  friend class EnsureSpace;
};

+constexpr int kNoCodeAgeSequenceLength = 3 * Assembler::kInstrSize;

class EnsureSpace BASE_EMBEDDED {
 public:
- explicit EnsureSpace(Assembler* assembler) {
-   assembler->CheckBuffer();
- }
+ INLINE(explicit EnsureSpace(Assembler* assembler));
};

+class PatchingAssembler : public Assembler {
+ public:
+  PatchingAssembler(IsolateData isolate_data, byte* address, int instructions);
+  ~PatchingAssembler();
+
+  void Emit(Address addr);
+  void FlushICache(Isolate* isolate);
+};
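The intended usage pattern for the new PatchingAssembler, as it appears at the call sites later in this diff (deoptimizer-arm.cc and codegen-arm.cc); this is V8-internal code, not a standalone program:

// Overwrite exactly `instructions` existing instructions in place, then
// flush the icache for the patched range before it can be executed again.
{
  PatchingAssembler patcher(Assembler::IsolateData(isolate), pointer, 1);
  patcher.bkpt(0);               // replace one instruction with a breakpoint
  patcher.FlushICache(isolate);  // make the patch visible to execution
}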

406
deps/v8/src/arm/code-stubs-arm.cc

@@ -5,14 +5,19 @@
#if V8_TARGET_ARCH_ARM

#include "src/code-stubs.h"
#include "src/api-arguments.h"
+#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
+#include "src/counters.h"
+#include "src/heap/heap-inl.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
+#include "src/objects/regexp-match-info.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
@@ -1148,173 +1153,10 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
}

void RegExpExecStub::Generate(MacroAssembler* masm) {
- // Just jump directly to runtime if native RegExp is not selected at compile
- // time or if regexp entry in generated code is turned off runtime switch or
- // at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec);
+ // This case is handled prior to the RegExpExecStub call.
+ __ Abort(kUnexpectedRegExpExecCall);
#else  // V8_INTERPRETED_REGEXP
// Stack frame on entry.
// sp[0]: last_match_info (expected JSArray)
// sp[4]: previous index
// sp[8]: subject string
// sp[12]: JSRegExp object
const int kLastMatchInfoOffset = 0 * kPointerSize;
const int kPreviousIndexOffset = 1 * kPointerSize;
const int kSubjectOffset = 2 * kPointerSize;
const int kJSRegExpOffset = 3 * kPointerSize;
Label runtime;
// Allocation of registers for this function. These are in callee save
// registers and will be preserved by the call to the native RegExp code, as
// this code is called using the normal C calling convention. When calling
// directly from generated code the native RegExp code will not do a GC and
// therefore the content of these registers are safe to use after the call.
Register subject = r4;
Register regexp_data = r5;
Register last_match_info_elements = no_reg; // will be r6;
// Ensure that a RegExp stack is allocated.
ExternalReference address_of_regexp_stack_memory_address =
ExternalReference::address_of_regexp_stack_memory_address(isolate());
ExternalReference address_of_regexp_stack_memory_size =
ExternalReference::address_of_regexp_stack_memory_size(isolate());
__ mov(r0, Operand(address_of_regexp_stack_memory_size));
__ ldr(r0, MemOperand(r0, 0));
__ cmp(r0, Operand::Zero());
__ b(eq, &runtime);
// Check that the first argument is a JSRegExp object.
__ ldr(r0, MemOperand(sp, kJSRegExpOffset));
__ JumpIfSmi(r0, &runtime);
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
__ b(ne, &runtime);
// Check that the RegExp has been compiled (data contains a fixed array).
__ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
__ SmiTst(regexp_data);
__ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
__ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
__ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
}
// regexp_data: RegExp data (FixedArray)
// Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
__ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
__ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
__ b(ne, &runtime);
// regexp_data: RegExp data (FixedArray)
// Check that the number of captures fit in the static offsets vector buffer.
__ ldr(r2,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
// Check (number_of_captures + 1) * 2 <= offsets vector size
// Or number_of_captures * 2 <= offsets vector size - 2
// Multiplying by 2 comes for free since r2 is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
__ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
__ b(hi, &runtime);
// Reset offset for possibly sliced string.
__ mov(r9, Operand::Zero());
__ ldr(subject, MemOperand(sp, kSubjectOffset));
__ JumpIfSmi(subject, &runtime);
__ mov(r3, subject); // Make a copy of the original subject string.
// subject: subject string
// r3: subject string
// regexp_data: RegExp data (FixedArray)
// Handle subject string according to its encoding and representation:
// (1) Sequential string? If yes, go to (4).
// (2) Sequential or cons? If not, go to (5).
// (3) Cons string. If the string is flat, replace subject with first string
// and go to (1). Otherwise bail out to runtime.
// (4) Sequential string. Load regexp code according to encoding.
// (E) Carry on.
/// [...]
// Deferred code at the end of the stub:
// (5) Long external string? If not, go to (7).
// (6) External string. Make it, offset-wise, look like a sequential string.
// Go to (4).
// (7) Short external string or not a string? If yes, bail out to runtime.
// (8) Sliced or thin string. Replace subject with parent. Go to (1).
Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
__ bind(&check_underlying);
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
// (1) Sequential string? If yes, go to (4).
__ and_(r1,
r0,
Operand(kIsNotStringMask |
kStringRepresentationMask |
kShortExternalStringMask),
SetCC);
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
__ b(eq, &seq_string); // Go to (4).
// (2) Sequential or cons? If not, go to (5).
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kThinStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmp(r1, Operand(kExternalStringTag));
__ b(ge, &not_seq_nor_cons); // Go to (5).
// (3) Cons string. Check that it's flat.
// Replace subject with first string and reload instance type.
__ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
__ CompareRoot(r0, Heap::kempty_stringRootIndex);
__ b(ne, &runtime);
__ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
__ jmp(&check_underlying);
// (4) Sequential string. Load regexp code according to encoding.
__ bind(&seq_string);
// subject: sequential subject string (or look-alike, external string)
// r3: original subject string
// Load previous index and check range before r3 is overwritten. We have to
// use r3 instead of subject here because subject might have been only made
// to look like a sequential string when it actually is an external string.
__ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
__ JumpIfNotSmi(r1, &runtime);
__ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
__ cmp(r3, Operand(r1));
__ b(ls, &runtime);
__ SmiUntag(r1);
STATIC_ASSERT(8 == kOneByteStringTag);
STATIC_ASSERT(kTwoByteStringTag == 0);
__ and_(r0, r0, Operand(kStringEncodingMask));
__ mov(r3, Operand(r0, ASR, 3), SetCC);
__ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset),
ne);
__ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
// (E) Carry on. String handling is done.
// r6: irregexp code
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object otherwise it contains
// a smi (code flushing support).
__ JumpIfSmi(r6, &runtime);
// r1: previous index
// r3: encoding of subject string (1 if one_byte, 0 if two_byte);
// r6: code
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// All checks done. Now push arguments for native regexp code.
__ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r0, r2);
  // Isolates: note we add an additional parameter here (isolate pointer).
  const int kRegExpExecuteArguments = 9;
  const int kParameterRegisters = 4;

@@ -1324,228 +1166,61 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
  // Arguments are before that on the stack or in registers.

  // Argument 9 (sp[20]): Pass current isolate address.
- __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
- __ str(r0, MemOperand(sp, 5 * kPointerSize));
+ __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
+ __ str(r5, MemOperand(sp, 5 * kPointerSize));

  // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
- __ mov(r0, Operand(1));
- __ str(r0, MemOperand(sp, 4 * kPointerSize));
+ __ mov(r5, Operand(1));
+ __ str(r5, MemOperand(sp, 4 * kPointerSize));

  // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
- __ mov(r0, Operand(address_of_regexp_stack_memory_address));
- __ ldr(r0, MemOperand(r0, 0));
- __ mov(r2, Operand(address_of_regexp_stack_memory_size));
- __ ldr(r2, MemOperand(r2, 0));
- __ add(r0, r0, Operand(r2));
- __ str(r0, MemOperand(sp, 3 * kPointerSize));
+ ExternalReference address_of_regexp_stack_memory_address =
+     ExternalReference::address_of_regexp_stack_memory_address(isolate());
+ ExternalReference address_of_regexp_stack_memory_size =
+     ExternalReference::address_of_regexp_stack_memory_size(isolate());
+ __ mov(r5, Operand(address_of_regexp_stack_memory_address));
+ __ ldr(r5, MemOperand(r5, 0));
+ __ mov(r6, Operand(address_of_regexp_stack_memory_size));
+ __ ldr(r6, MemOperand(r6, 0));
+ __ add(r5, r5, Operand(r6));
+ __ str(r5, MemOperand(sp, 3 * kPointerSize));

  // Argument 6: Set the number of capture registers to zero to force global
  // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(r0, Operand::Zero());
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
+ __ mov(r5, Operand::Zero());
+ __ str(r5, MemOperand(sp, 2 * kPointerSize));

  // Argument 5 (sp[4]): static offsets vector buffer.
- __ mov(r0,
-        Operand(ExternalReference::address_of_static_offsets_vector(
-            isolate())));
- __ str(r0, MemOperand(sp, 1 * kPointerSize));
+ __ mov(
+     r5,
+     Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
+ __ str(r5, MemOperand(sp, 1 * kPointerSize));

- // For arguments 4 and 3 get string length, calculate start of string data and
- // calculate the shift of the index (0 for one-byte and 1 for two-byte).
- __ add(r7, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
- __ eor(r3, r3, Operand(1));
- // Load the length from the original subject string from the previous stack
- // frame. Therefore we have to use fp, which points exactly to two pointer
- // sizes below the previous sp. (Because creating a new stack frame pushes
- // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
- __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
- // If slice offset is not 0, load the length from the original sliced string.
- // Argument 4, r3: End of string data
- // Argument 3, r2: Start of string data
- // Prepare start and end index of the input.
- __ add(r9, r7, Operand(r9, LSL, r3));
- __ add(r2, r9, Operand(r1, LSL, r3));
- __ ldr(r7, FieldMemOperand(subject, String::kLengthOffset));
- __ SmiUntag(r7);
- __ add(r3, r9, Operand(r7, LSL, r3));
+ // Argument 4: End of string data
+ // Argument 3: Start of string data
+ CHECK(r3.is(RegExpExecDescriptor::StringEndRegister()));
+ CHECK(r2.is(RegExpExecDescriptor::StringStartRegister()));

  // Argument 2 (r1): Previous index.
- // Already there
+ CHECK(r1.is(RegExpExecDescriptor::LastIndexRegister()));

  // Argument 1 (r0): Subject string.
- __ mov(r0, subject);
+ CHECK(r0.is(RegExpExecDescriptor::StringRegister()));

  // Locate the code entry and call it.
- __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Register code_reg = RegExpExecDescriptor::CodeRegister();
+ __ add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));

  DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm, r6);
+ stub.GenerateCall(masm, code_reg);

  __ LeaveExitFrame(false, no_reg, true);

- last_match_info_elements = r6;
+ __ SmiTag(r0);
// r0: result
// subject: subject string (callee saved)
// regexp_data: RegExp data (callee saved)
// last_match_info_elements: Last match info elements (callee saved)
// Check the result.
Label success;
__ cmp(r0, Operand(1));
// We expect exactly one result since we force the called regexp to behave
// as non-global.
__ b(eq, &success);
Label failure;
__ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
__ b(eq, &failure);
__ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
// If not exception it can only be retry. Handle that in the runtime system.
__ b(ne, &runtime);
// Result must now be exception. If there is no pending exception already a
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
__ mov(r1, Operand(isolate()->factory()->the_hole_value()));
__ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate())));
__ ldr(r0, MemOperand(r2, 0));
__ cmp(r0, r1);
__ b(eq, &runtime);
// For exception, throw the exception again.
__ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ bind(&failure);
// For failure and exception return null.
__ mov(r0, Operand(isolate()->factory()->null_value()));
__ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
// Process the result from the native regexp code.
__ bind(&success);
__ ldr(r1,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
// Multiplying by 2 comes for free since r1 is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(r1, r1, Operand(2)); // r1 was a smi.
// Check that the last match info is a FixedArray.
__ ldr(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
__ JumpIfSmi(last_match_info_elements, &runtime);
// Check that the object has fast elements.
__ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
__ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
__ b(ne, &runtime);
// Check that the last match info has space for the capture registers and the
// additional information.
__ ldr(r0,
FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
__ add(r2, r1, Operand(RegExpMatchInfo::kLastMatchOverhead));
__ cmp(r2, Operand::SmiUntag(r0));
__ b(gt, &runtime);
// r1: number of capture registers
// r4: subject string
// Store the capture count.
__ SmiTag(r2, r1);
__ str(r2, FieldMemOperand(last_match_info_elements,
RegExpMatchInfo::kNumberOfCapturesOffset));
// Store last subject and last input.
__ str(subject, FieldMemOperand(last_match_info_elements,
RegExpMatchInfo::kLastSubjectOffset));
__ mov(r2, subject);
__ RecordWriteField(last_match_info_elements,
RegExpMatchInfo::kLastSubjectOffset, subject, r3,
kLRHasNotBeenSaved, kDontSaveFPRegs);
__ mov(subject, r2);
__ str(subject, FieldMemOperand(last_match_info_elements,
RegExpMatchInfo::kLastInputOffset));
__ RecordWriteField(last_match_info_elements,
RegExpMatchInfo::kLastInputOffset, subject, r3,
kLRHasNotBeenSaved, kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
ExternalReference::address_of_static_offsets_vector(isolate());
__ mov(r2, Operand(address_of_static_offsets_vector));
// r1: number of capture registers
// r2: offsets vector
Label next_capture, done;
// Capture register counter starts from number of capture registers and
// counts down until wrapping after zero.
__ add(r0, last_match_info_elements,
Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag));
__ bind(&next_capture);
__ sub(r1, r1, Operand(1), SetCC);
__ b(mi, &done);
// Read the value from the static offsets vector buffer.
__ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
// Store the smi value in the last match info.
__ SmiTag(r3);
__ str(r3, MemOperand(r0, kPointerSize, PostIndex));
__ jmp(&next_capture);
__ bind(&done);
// Return last match info.
__ mov(r0, last_match_info_elements);
__ add(sp, sp, Operand(4 * kPointerSize));
  __ Ret();
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (5) Long external string? If not, go to (7).
__ bind(&not_seq_nor_cons);
// Compare flags are still set.
__ b(gt, &not_long_external); // Go to (7).
// (6) External string. Make it, offset-wise, look like a sequential string.
__ bind(&external_string);
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ tst(r0, Operand(kIsIndirectStringMask));
__ Assert(eq, kExternalStringExpectedButNotFound);
}
__ ldr(subject,
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ sub(subject,
subject,
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ jmp(&seq_string); // Go to (4).
// (7) Short external string or not a string? If yes, bail out to runtime.
__ bind(&not_long_external);
STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
__ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
__ b(ne, &runtime);
// (8) Sliced or thin string. Replace subject with parent. Go to (4).
Label thin_string;
__ cmp(r1, Operand(kThinStringTag));
__ b(eq, &thin_string);
// Load offset into r9 and replace subject string with parent.
__ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
__ SmiUntag(r9);
__ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
__ jmp(&check_underlying); // Go to (4).
__ bind(&thin_string);
__ ldr(subject, FieldMemOperand(subject, ThinString::kActualOffset));
__ jmp(&check_underlying); // Go to (4).
#endif  // V8_INTERPRETED_REGEXP
}
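A quick sanity check of the sp offsets used for arguments 5 through 9 in the hunk above. On ARM (AAPCS) the first four C arguments travel in r0-r3, which is exactly why the new code can simply CHECK that the descriptor registers already hold arguments 1-4; the remaining arguments go on the stack, with sp[0] left as the cell where the native call's return address is written (an assumption inferred from the "Arguments are before that" comment). A standalone sketch of that arithmetic:

#include <cassert>

constexpr int kPointerSize = 4;         // ARM, as in the code above
constexpr int kParameterRegisters = 4;  // AAPCS: r0-r3

// Stack offset for 1-based C argument n (n > 4); slot 0 is the return
// address cell, so argument 5 lands at sp[4] and argument 9 at sp[20].
int StackSlotOffset(int n) {
  assert(n > kParameterRegisters);
  return (n - kParameterRegisters) * kPointerSize;
}

int main() {
  assert(StackSlotOffset(5) == 1 * kPointerSize);  // static offsets vector
  assert(StackSlotOffset(9) == 5 * kPointerSize);  // isolate address
  return 0;
}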
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
  // r0 : number of arguments to the construct function
  // r1 : the function to call
@@ -2566,6 +2241,9 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}

+void RecordWriteStub::Activate(Code* code) {
+  code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+}

void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm,

4
deps/v8/src/arm/code-stubs-arm.h

@@ -197,9 +197,7 @@ class RecordWriteStub: public PlatformCodeStub {
            Mode mode);

  void InformIncrementalMarker(MacroAssembler* masm);

- void Activate(Code* code) override {
-   code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
- }
+ void Activate(Code* code) override;

  Register object() const {
    return Register::from_code(ObjectBits::decode(minor_key_));

16
deps/v8/src/arm/codegen-arm.cc

@@ -8,6 +8,7 @@
#include <memory>

+#include "src/arm/assembler-arm-inl.h"
#include "src/arm/simulator-arm.h"
#include "src/codegen.h"
#include "src/macro-assembler.h"
@@ -167,7 +168,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
  CodeDesc desc;
  masm.GetCode(&desc);
- DCHECK(!RelocInfo::RequiresRelocation(desc));
+ DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
@@ -284,7 +285,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
  CodeDesc desc;
  masm.GetCode(&desc);
- DCHECK(!RelocInfo::RequiresRelocation(desc));
+ DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
@@ -464,11 +465,12 @@ void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age);
-   CodePatcher patcher(isolate, sequence,
-                       young_length / Assembler::kInstrSize);
-   patcher.masm()->add(r0, pc, Operand(-8));
-   patcher.masm()->ldr(pc, MemOperand(pc, -4));
-   patcher.masm()->emit_code_stub_address(stub);
+   PatchingAssembler patcher(Assembler::IsolateData(isolate), sequence,
+                             young_length / Assembler::kInstrSize);
+   patcher.add(r0, pc, Operand(-8));
+   patcher.ldr(pc, MemOperand(pc, -4));
+   patcher.emit_code_stub_address(stub);
+   patcher.FlushICache(isolate);
  }
}
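Why the three-instruction patch above works: a sketch of the address arithmetic, under the ARM-mode rule that reading pc yields the current instruction's address plus 8. The sequence length matches the kNoCodeAgeSequenceLength = 3 * kInstrSize constant introduced in assembler-arm.h.

#include <cassert>
#include <cstdint>

int main() {
  uint32_t seq = 0x1000;  // hypothetical address of the patched sequence
  // "add r0, pc, #-8" executes at seq; pc reads as seq + 8.
  uint32_t r0 = (seq + 8) - 8;
  // "ldr pc, [pc, #-4]" executes at seq + 4; pc reads as seq + 12.
  uint32_t load_address = (seq + 4 + 8) - 4;
  assert(r0 == seq);                // r0 marks the start of the sequence
  assert(load_address == seq + 8);  // the word emit_code_stub_address() wrote
  return 0;
}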

40
deps/v8/src/arm/deoptimizer-arm.cc

@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

+#include "src/assembler-inl.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
@@ -40,16 +42,21 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
  } else {
    pointer = code->instruction_start();
  }

- CodePatcher patcher(isolate, pointer, 1);
- patcher.masm()->bkpt(0);
+ {
+   PatchingAssembler patcher(Assembler::IsolateData(isolate), pointer, 1);
+   patcher.bkpt(0);
+   patcher.FlushICache(isolate);
+ }

  DeoptimizationInputData* data =
      DeoptimizationInputData::cast(code->deoptimization_data());
  int osr_offset = data->OsrPcOffset()->value();
  if (osr_offset > 0) {
-   CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
-                           1);
-   osr_patcher.masm()->bkpt(0);
+   PatchingAssembler patcher(Assembler::IsolateData(isolate),
+                             code->instruction_start() + osr_offset, 1);
+   patcher.bkpt(0);
+   patcher.FlushICache(isolate);
  }
}
@@ -114,6 +121,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
  RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();

  const int kDoubleRegsSize = kDoubleSize * DwVfpRegister::kMaxNumRegisters;
+ const int kFloatRegsSize = kFloatSize * SwVfpRegister::kMaxNumRegisters;

  // Save all allocatable VFP registers before messing with them.
  DCHECK(kDoubleRegZero.code() == 14);

@@ -132,6 +140,12 @@
    __ vstm(db_w, sp, d16, d31, ne);
    __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
    __ vstm(db_w, sp, d0, d15);
+
+   // Push registers s0-s15, and possibly s16-s31, on the stack.
+   // If s16-s31 are not pushed, decrease the stack pointer instead.
+   __ vstm(db_w, sp, s16, s31, ne);
+   __ sub(sp, sp, Operand(16 * kFloatSize), LeaveCC, eq);
+   __ vstm(db_w, sp, s0, s15);
  }
  // Push all 16 registers (needed to populate FrameDescription::registers_).

@@ -143,7 +157,7 @@
  __ str(fp, MemOperand(ip));

  const int kSavedRegistersAreaSize =
-     (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+     (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;

  // Get the bailout id from the stack.
  __ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize));
@@ -196,11 +210,23 @@ void Deoptimizer::TableEntryGenerator::Generate() {
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    int dst_offset = code * kDoubleSize + double_regs_offset;
-   int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
+   int src_offset =
+       code * kDoubleSize + kNumberOfRegisters * kPointerSize + kFloatRegsSize;
    __ vldr(d0, sp, src_offset);
    __ vstr(d0, r1, dst_offset);
  }

+ // Copy VFP registers to
+ // float_registers_[FloatRegister::kMaxNumAllocatableRegisters]
+ int float_regs_offset = FrameDescription::float_registers_offset();
+ for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
+   int code = config->GetAllocatableFloatCode(i);
+   int dst_offset = code * kFloatSize + float_regs_offset;
+   int src_offset = code * kFloatSize + kNumberOfRegisters * kPointerSize;
+   __ ldr(r2, MemOperand(sp, src_offset));
+   __ str(r2, MemOperand(r1, dst_offset));
+ }
+
  // Remove the bailout id and the saved registers from the stack.
  __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
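The offset arithmetic in the two copy loops follows from the push order earlier in Generate(): doubles first, then single-precision floats, then the 16 general-purpose registers, so the frame reads [GP | floats | doubles] from sp upward. A standalone sketch with the assumed ARM sizes spelled out:

#include <cassert>

// Assumed ARM constants (mirroring the code above, not defined in this diff):
constexpr int kNumberOfRegisters = 16;  // r0-r15
constexpr int kPointerSize = 4;
constexpr int kFloatSize = 4;
constexpr int kDoubleSize = 8;
constexpr int kNumDoubles = 32;  // d0-d31 (DwVfpRegister::kMaxNumRegisters)
constexpr int kNumFloats = 32;   // s0-s31 (SwVfpRegister::kMaxNumRegisters)

int main() {
  const int kDoubleRegsSize = kDoubleSize * kNumDoubles;  // 256
  const int kFloatRegsSize = kFloatSize * kNumFloats;     // 128
  const int gp = kNumberOfRegisters * kPointerSize;       // 64

  // A float's saved slot sits just above the GP block ...
  int s5_offset = 5 * kFloatSize + gp;
  // ... while a double's slot must also skip the float block, which is
  // exactly the kFloatRegsSize term added to src_offset in this patch.
  int d5_offset = 5 * kDoubleSize + gp + kFloatRegsSize;

  assert(s5_offset == 84);
  assert(d5_offset == 232);
  assert(gp + kDoubleRegsSize + kFloatRegsSize == 448);  // area size
  return 0;
}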

230
deps/v8/src/arm/disasm-arm.cc

@@ -1364,12 +1364,6 @@ int Decoder::DecodeType7(Instruction* instr) {
  if (instr->Bit(24) == 1) {
    if (instr->SvcValue() >= kStopCode) {
      Format(instr, "stop'cond 'svc");
-     out_buffer_pos_ += SNPrintF(
-         out_buffer_ + out_buffer_pos_, "\n  %p  %08x",
-         reinterpret_cast<void*>(instr + Instruction::kInstrSize),
-         *reinterpret_cast<uint32_t*>(instr + Instruction::kInstrSize));
-     // We have decoded 2 * Instruction::kInstrSize bytes.
-     return 2 * Instruction::kInstrSize;
    } else {
      Format(instr, "svc'cond 'svc");
    }
@@ -1582,19 +1576,19 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
        Format(instr, "vmov'cond.32 'rt, 'Dd[1]");
      }
    } else {
-     const char* sign = instr->Bit(23) != 0 ? "u" : "s";
+     char sign = instr->Bit(23) != 0 ? 'u' : 's';
      int rt = instr->RtValue();
      int vn = instr->VFPNRegValue(kDoublePrecision);
      if ((opc1_opc2 & 0x8) != 0) {
        // NeonS8 / NeonU8
        int i = opc1_opc2 & 0x7;
        out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
-                                   "vmov.%s8 r%d, d%d[%d]", sign, rt, vn, i);
+                                   "vmov.%c8 r%d, d%d[%d]", sign, rt, vn, i);
      } else if ((opc1_opc2 & 0x1) != 0) {
        // NeonS16 / NeonU16
        int i = (opc1_opc2 >> 1) & 0x3;
        out_buffer_pos_ +=
-           SNPrintF(out_buffer_ + out_buffer_pos_, "vmov.%s16 r%d, d%d[%d]",
+           SNPrintF(out_buffer_ + out_buffer_pos_, "vmov.%c16 r%d, d%d[%d]",
                     sign, rt, vn, i);
      } else {
        Unknown(instr);
@@ -1867,10 +1861,10 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
        Vm = instr->VFPMRegValue(kSimd128Precision);
        Vn = instr->VFPNRegValue(kSimd128Precision);
      }
+     int size = kBitsPerByte * (1 << instr->Bits(21, 20));
      switch (instr->Bits(11, 8)) {
        case 0x0: {
          if (instr->Bit(4) == 1) {
-           int size = kBitsPerByte * (1 << instr->Bits(21, 20));
            // vqadd.s<size> Qd, Qm, Qn.
            out_buffer_pos_ +=
                SNPrintF(out_buffer_ + out_buffer_pos_,
@@ -1904,7 +1898,6 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
        }
        case 0x2: {
          if (instr->Bit(4) == 1) {
-           int size = kBitsPerByte * (1 << instr->Bits(21, 20));
            // vqsub.s<size> Qd, Qm, Qn.
            out_buffer_pos_ +=
                SNPrintF(out_buffer_ + out_buffer_pos_,

@@ -1915,7 +1908,6 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
          break;
        }
        case 0x3: {
-         int size = kBitsPerByte * (1 << instr->Bits(21, 20));
          const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
          // vcge/vcgt.s<size> Qd, Qm, Qn.
          out_buffer_pos_ +=

@@ -1924,7 +1916,6 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
          break;
        }
        case 0x6: {
-         int size = kBitsPerByte * (1 << instr->Bits(21, 20));
          // vmin/vmax.s<size> Qd, Qm, Qn.
          const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
          out_buffer_pos_ +=

@@ -1934,7 +1925,6 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
        }
        case 0x8: {
          const char* op = (instr->Bit(4) == 0) ? "vadd" : "vtst";
-         int size = kBitsPerByte * (1 << instr->Bits(21, 20));
          // vadd/vtst.i<size> Qd, Qm, Qn.
          out_buffer_pos_ +=
              SNPrintF(out_buffer_ + out_buffer_pos_, "%s.i%d q%d, q%d, q%d",

@@ -1943,7 +1933,6 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
        }
        case 0x9: {
          if (instr->Bit(6) == 1 && instr->Bit(4) == 1) {
-           int size = kBitsPerByte * (1 << instr->Bits(21, 20));
            // vmul.i<size> Qd, Qm, Qn.
            out_buffer_pos_ +=
                SNPrintF(out_buffer_ + out_buffer_pos_,

@@ -1953,6 +1942,14 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
          }
          break;
        }
+       case 0xa: {
+         // vpmin/vpmax.s<size> Dd, Dm, Dn.
+         const char* op = instr->Bit(4) == 1 ? "vpmin" : "vpmax";
+         out_buffer_pos_ +=
+             SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d d%d, d%d, d%d",
+                      op, size, Vd, Vn, Vm);
+         break;
+       }
        case 0xd: {
          if (instr->Bit(4) == 0) {
            const char* op = (instr->Bits(21, 20) == 0) ? "vadd" : "vsub";
@@ -2052,10 +2049,10 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
        Vm = instr->VFPMRegValue(kSimd128Precision);
        Vn = instr->VFPNRegValue(kSimd128Precision);
      }
+     int size = kBitsPerByte * (1 << instr->Bits(21, 20));
      switch (instr->Bits(11, 8)) {
        case 0x0: {
          if (instr->Bit(4) == 1) {
-           int size = kBitsPerByte * (1 << instr->Bits(21, 20));
            // vqadd.u<size> Qd, Qm, Qn.
            out_buffer_pos_ +=
                SNPrintF(out_buffer_ + out_buffer_pos_,
@@ -2087,7 +2084,6 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
        }
        case 0x2: {
          if (instr->Bit(4) == 1) {
-           int size = kBitsPerByte * (1 << instr->Bits(21, 20));
            // vqsub.u<size> Qd, Qm, Qn.
            out_buffer_pos_ +=
                SNPrintF(out_buffer_ + out_buffer_pos_,

@@ -2098,7 +2094,6 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
          break;
        }
        case 0x3: {
-         int size = kBitsPerByte * (1 << instr->Bits(21, 20));
          const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
          // vcge/vcgt.u<size> Qd, Qm, Qn.
          out_buffer_pos_ +=

@@ -2107,7 +2102,6 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
          break;
        }
        case 0x6: {
-         int size = kBitsPerByte * (1 << instr->Bits(21, 20));
          // vmin/vmax.u<size> Qd, Qm, Qn.
          const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
          out_buffer_pos_ +=

@@ -2116,7 +2110,6 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
          break;
        }
        case 0x8: {
-         int size = kBitsPerByte * (1 << instr->Bits(21, 20));
          if (instr->Bit(4) == 0) {
            out_buffer_pos_ +=
                SNPrintF(out_buffer_ + out_buffer_pos_,

@@ -2128,6 +2121,14 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
          }
          break;
        }
+       case 0xa: {
+         // vpmin/vpmax.u<size> Dd, Dm, Dn.
+         const char* op = instr->Bit(4) == 1 ? "vpmin" : "vpmax";
+         out_buffer_pos_ +=
+             SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d d%d, d%d, d%d",
+                      op, size, Vd, Vn, Vm);
+         break;
+       }
        case 0xd: {
          if (instr->Bit(21) == 0 && instr->Bit(6) == 1 && instr->Bit(4) == 1) {
            // vmul.f32 Qd, Qn, Qm

@@ -2165,53 +2166,13 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
          int imm3 = instr->Bits(21, 19);
          out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
                                      "vmovl.u%d q%d, d%d", imm3 * 8, Vd, Vm);
-       } else if (instr->Opc1Value() == 7 && instr->Bits(21, 20) == 0x3 &&
-                  instr->Bit(4) == 0) {
-         if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0) {
-           if (instr->Bit(6) == 0) {
-             int Vd = instr->VFPDRegValue(kDoublePrecision);
-             int Vm = instr->VFPMRegValue(kDoublePrecision);
-             out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
-                                         "vswp d%d, d%d", Vd, Vm);
-           } else {
-             int Vd = instr->VFPDRegValue(kSimd128Precision);
-             int Vm = instr->VFPMRegValue(kSimd128Precision);
-             out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
-                                         "vswp q%d, q%d", Vd, Vm);
-           }
-         } else if (instr->Bits(11, 7) == 0x18) {
+       } else if (instr->Opc1Value() == 7 && instr->Bit(4) == 0) {
+         if (instr->Bits(11, 7) == 0x18) {
            int Vd = instr->VFPDRegValue(kSimd128Precision);
            int Vm = instr->VFPMRegValue(kDoublePrecision);
            int index = instr->Bit(19);
            out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
                                        "vdup q%d, d%d[%d]", Vd, Vm, index);
-         } else if (instr->Bits(19, 16) == 0 && instr->Bits(11, 6) == 0x17) {
-           int Vd = instr->VFPDRegValue(kSimd128Precision);
-           int Vm = instr->VFPMRegValue(kSimd128Precision);
-           out_buffer_pos_ +=
-               SNPrintF(out_buffer_ + out_buffer_pos_, "vmvn q%d, q%d", Vd, Vm);
-         } else if (instr->Bits(19, 16) == 0xB && instr->Bits(11, 9) == 0x3 &&
-                    instr->Bit(6) == 1) {
-           int Vd = instr->VFPDRegValue(kSimd128Precision);
-           int Vm = instr->VFPMRegValue(kSimd128Precision);
-           const char* suffix = nullptr;
-           int op = instr->Bits(8, 7);
-           switch (op) {
-             case 0:
-               suffix = "f32.s32";
-               break;
-             case 1:
-               suffix = "f32.u32";
-               break;
-             case 2:
-               suffix = "s32.f32";
-               break;
-             case 3:
-               suffix = "u32.f32";
-               break;
-           }
-           out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
-                                       "vcvt.%s q%d, q%d", suffix, Vd, Vm);
        } else if (instr->Bits(11, 10) == 0x2) {
          int Vd = instr->VFPDRegValue(kDoublePrecision);
          int Vn = instr->VFPNRegValue(kDoublePrecision);
@@ -2224,51 +2185,116 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
          FormatNeonList(Vn, list.type());
          Print(", ");
          PrintDRegister(Vm);
-       } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 6) == 0x7) {
-         int Vd = instr->VFPDRegValue(kSimd128Precision);
-         int Vm = instr->VFPMRegValue(kSimd128Precision);
-         int size = kBitsPerByte * (1 << instr->Bits(19, 18));
-         // vzip.<size> Qd, Qm.
-         out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
-                                     "vzip.%d q%d, q%d", size, Vd, Vm);
-       } else if (instr->Bits(17, 16) == 0 && instr->Bits(11, 9) == 0) {
-         int Vd = instr->VFPDRegValue(kSimd128Precision);
-         int Vm = instr->VFPMRegValue(kSimd128Precision);
-         int size = kBitsPerByte * (1 << instr->Bits(19, 18));
-         int op = kBitsPerByte
-                  << (static_cast<int>(Neon64) - instr->Bits(8, 7));
-         // vrev<op>.<size> Qd, Qm.
-         out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
-                                     "vrev%d.%d q%d, q%d", op, size, Vd, Vm);
-       } else if (instr->Bits(17, 16) == 0x1 && instr->Bit(11) == 0) {
-         int Vd = instr->VFPDRegValue(kSimd128Precision);
-         int Vm = instr->VFPMRegValue(kSimd128Precision);
-         int size = kBitsPerByte * (1 << instr->Bits(19, 18));
-         const char* type = instr->Bit(10) != 0 ? "f" : "s";
-         if (instr->Bits(9, 6) == 0xd) {
-           // vabs<type>.<size> Qd, Qm.
-           out_buffer_pos_ +=
-               SNPrintF(out_buffer_ + out_buffer_pos_, "vabs.%s%d q%d, q%d",
-                        type, size, Vd, Vm);
-         } else if (instr->Bits(9, 6) == 0xf) {
-           // vneg<type>.<size> Qd, Qm.
-           out_buffer_pos_ +=
-               SNPrintF(out_buffer_ + out_buffer_pos_, "vneg.%s%d q%d, q%d",
-                        type, size, Vd, Vm);
+       } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 8) == 0x2 &&
+                  instr->Bits(7, 6) != 0) {
+         // vqmovn.<type><size> Dd, Qm.
+         int Vd = instr->VFPDRegValue(kDoublePrecision);
+         int Vm = instr->VFPMRegValue(kSimd128Precision);
+         char type = instr->Bit(6) != 0 ? 'u' : 's';
+         int size = 2 * kBitsPerByte * (1 << instr->Bits(19, 18));
+         out_buffer_pos_ +=
+             SNPrintF(out_buffer_ + out_buffer_pos_, "vqmovn.%c%i d%d, q%d",
+                      type, size, Vd, Vm);
+       } else {
+         int Vd, Vm;
+         if (instr->Bit(6) == 0) {
+           Vd = instr->VFPDRegValue(kDoublePrecision);
+           Vm = instr->VFPMRegValue(kDoublePrecision);
+         } else {
+           Vd = instr->VFPDRegValue(kSimd128Precision);
+           Vm = instr->VFPMRegValue(kSimd128Precision);
+         }
+         if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0) {
+           if (instr->Bit(6) == 0) {
+             out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                         "vswp d%d, d%d", Vd, Vm);
+           } else {
+             out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                         "vswp q%d, q%d", Vd, Vm);
+           }
+         } else if (instr->Bits(19, 16) == 0 && instr->Bits(11, 6) == 0x17) {
+           out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                       "vmvn q%d, q%d", Vd, Vm);
+         } else if (instr->Bits(19, 16) == 0xB && instr->Bits(11, 9) == 0x3 &&
+                    instr->Bit(6) == 1) {
+           const char* suffix = nullptr;
+           int op = instr->Bits(8, 7);
+           switch (op) {
+             case 0:
+               suffix = "f32.s32";
+               break;
+             case 1:
+               suffix = "f32.u32";
+               break;
+             case 2:
+               suffix = "s32.f32";
+               break;
+             case 3:
+               suffix = "u32.f32";
+               break;
+           }
+           out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                       "vcvt.%s q%d, q%d", suffix, Vd, Vm);
+         } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 8) == 0x1) {
+           int size = kBitsPerByte * (1 << instr->Bits(19, 18));
+           const char* op = instr->Bit(7) != 0 ? "vzip" : "vuzp";
+           if (instr->Bit(6) == 0) {
+             // vzip/vuzp.<size> Dd, Dm.
+             out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                         "%s.%d d%d, d%d", op, size, Vd, Vm);
+           } else {
+             // vzip/vuzp.<size> Qd, Qm.
+             out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                         "%s.%d q%d, q%d", op, size, Vd, Vm);
+           }
+         } else if (instr->Bits(17, 16) == 0 && instr->Bits(11, 9) == 0 &&
+                    instr->Bit(6) == 1) {
+           int size = kBitsPerByte * (1 << instr->Bits(19, 18));
+           int op = kBitsPerByte
+                    << (static_cast<int>(Neon64) - instr->Bits(8, 7));
+           // vrev<op>.<size> Qd, Qm.
+           out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                       "vrev%d.%d q%d, q%d", op, size, Vd, Vm);
+         } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0x1) {
+           int size = kBitsPerByte * (1 << instr->Bits(19, 18));
+           if (instr->Bit(6) == 0) {
+             // vtrn.<size> Dd, Dm.
+             out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                         "vtrn.%d d%d, d%d", size, Vd, Vm);
+           } else {
+             // vtrn.<size> Qd, Qm.
+             out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                         "vtrn.%d q%d, q%d", size, Vd, Vm);
+           }
+         } else if (instr->Bits(17, 16) == 0x1 && instr->Bit(11) == 0 &&
+                    instr->Bit(6) == 1) {
+           int size = kBitsPerByte * (1 << instr->Bits(19, 18));
+           char type = instr->Bit(10) != 0 ? 'f' : 's';
+           if (instr->Bits(9, 6) == 0xd) {
+             // vabs<type>.<size> Qd, Qm.
+             out_buffer_pos_ +=
+                 SNPrintF(out_buffer_ + out_buffer_pos_, "vabs.%c%d q%d, q%d",
+                          type, size, Vd, Vm);
+           } else if (instr->Bits(9, 6) == 0xf) {
+             // vneg<type>.<size> Qd, Qm.
+             out_buffer_pos_ +=
+                 SNPrintF(out_buffer_ + out_buffer_pos_, "vneg.%c%d q%d, q%d",
+                          type, size, Vd, Vm);
+           } else {
+             Unknown(instr);
+           }
+         } else if (instr->Bits(19, 18) == 0x2 && instr->Bits(11, 8) == 0x5 &&
+                    instr->Bit(6) == 1) {
+           // vrecpe/vrsqrte.f32 Qd, Qm.
+           const char* op = instr->Bit(7) == 0 ? "vrecpe" : "vrsqrte";
+           out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                       "%s.f32 q%d, q%d", op, Vd, Vm);
          } else {
            Unknown(instr);
          }
        }
-     } else if (instr->Bits(19, 18) == 0x2 && instr->Bits(11, 8) == 0x5) {
-       // vrecpe/vrsqrte.f32 Qd, Qm.
-       int Vd = instr->VFPDRegValue(kSimd128Precision);
-       int Vm = instr->VFPMRegValue(kSimd128Precision);
-       const char* op = instr->Bit(7) == 0 ? "vrecpe" : "vrsqrte";
-       out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
-                                   "%s.f32 q%d, q%d", op, Vd, Vm);
-     } else {
-       Unknown(instr);
-     }
-   } else if (instr->Bits(11, 7) == 0 && instr->Bit(4) == 1) {
+   } else if (instr->Bits(11, 7) == 0 && instr->Bit(4) == 1 &&
+              instr->Bit(6) == 1) {
      // vshr.u<size> Qd, Qm, shift
      int size = base::bits::RoundDownToPowerOfTwo32(instr->Bits(21, 16));
      int shift = 2 * size - instr->Bits(21, 16);
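Hoisting `int size` out of the individual cases in both switches above is safe because the decode depends only on bits 21:20 of the instruction, not on the case being handled. A standalone sketch of the mapping:

#include <cassert>

constexpr int kBitsPerByte = 8;

// Bits 21:20 select the NEON element size as a power-of-two multiple of
// one byte: 0 -> 8-bit, 1 -> 16-bit, 2 -> 32-bit lanes.
int NeonElementBits(int bits_21_20) {
  return kBitsPerByte * (1 << bits_21_20);
}

int main() {
  assert(NeonElementBits(0) == 8);
  assert(NeonElementBits(1) == 16);
  assert(NeonElementBits(2) == 32);
  return 0;
}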

57
deps/v8/src/arm/interface-descriptors-arm.cc

@@ -54,11 +54,15 @@ const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
const Register MathPowTaggedDescriptor::exponent() { return r2; }

const Register MathPowIntegerDescriptor::exponent() {
  return MathPowTaggedDescriptor::exponent();
}

+const Register RegExpExecDescriptor::StringRegister() { return r0; }
+const Register RegExpExecDescriptor::LastIndexRegister() { return r1; }
+const Register RegExpExecDescriptor::StringStartRegister() { return r2; }
+const Register RegExpExecDescriptor::StringEndRegister() { return r3; }
+const Register RegExpExecDescriptor::CodeRegister() { return r4; }

const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
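These five getters pin RegExpExecStub's inputs to fixed registers; r0-r3 double as the AAPCS argument registers, which is what lets the stub pass subject, index, and the string bounds straight through to the native regexp code. A trivial mirror of the table (names invented here) for cross-checking against the CHECKs in code-stubs-arm.cc:

enum ArmReg { r0, r1, r2, r3, r4 };
constexpr ArmReg kString = r0;       // StringRegister()
constexpr ArmReg kLastIndex = r1;    // LastIndexRegister()
constexpr ArmReg kStringStart = r2;  // StringStartRegister()
constexpr ArmReg kStringEnd = r3;    // StringEndRegister()
constexpr ArmReg kCode = r4;         // CodeRegister()
int main() { return (kString == r0 && kCode == r4) ? 0 : 1; }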
@@ -282,46 +286,6 @@ void StringAddDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}
void KeyedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
r2, // key
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&noInlineDescriptor);
}
void NamedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
r2, // name
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&noInlineDescriptor);
}
void CallHandlerDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
r0, // receiver
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  static PlatformInterfaceDescriptor default_descriptor =
@@ -360,7 +324,7 @@ void InterpreterDispatchDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

-void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
+void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {
      r0,  // argument count (not including receiver)

@@ -370,7 +334,7 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
+void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {
      r0,  // argument count (not including receiver)

@@ -382,8 +346,8 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

-void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
+void InterpreterPushArgsThenConstructArrayDescriptor::
+    InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
  Register registers[] = {
      r0,  // argument count (not including receiver)
      r1,  // target to call checked to be Array function
@@ -408,7 +372,8 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
  Register registers[] = {
      r0,  // the value to pass to the generator
      r1,  // the JSGeneratorObject to resume
-     r2   // the resume mode (tagged)
+     r2,  // the resume mode (tagged)
+     r3,  // SuspendFlags (tagged)
  };
  data->InitializePlatformSpecific(arraysize(registers), registers);
}

225
deps/v8/src/arm/macro-assembler-arm.cc

@@ -6,11 +6,15 @@
#if V8_TARGET_ARCH_ARM

+#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
+#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
+#include "src/counters.h"
#include "src/debug/debug.h"
+#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@ -19,14 +23,19 @@
namespace v8 { namespace v8 {
namespace internal { namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size, MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object) CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size), : Assembler(isolate, buffer, size),
generating_stub_(false), generating_stub_(false),
has_frame_(false) { has_frame_(false),
isolate_(isolate),
jit_cookie_(0) {
if (FLAG_mask_constants_with_cookie) {
jit_cookie_ = isolate->random_number_generator()->NextInt();
}
if (create_code_object == CodeObjectRequired::kYes) { if (create_code_object == CodeObjectRequired::kYes) {
code_object_ = code_object_ =
Handle<Object>::New(isolate()->heap()->undefined_value(), isolate()); Handle<Object>::New(isolate_->heap()->undefined_value(), isolate_);
} }
} }
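The new jit_cookie_ plumbing exists for constant blinding: when FLAG_mask_constants_with_cookie is set, immediates that an attacker could control can be stored XOR-ed against a per-assembler random cookie and unmasked in generated code, so predictable gadget bytes never land verbatim in the JIT heap. A minimal sketch of the masking arithmetic (hedged illustration; MaskedValue/Unmask are invented names, not V8 API):

#include <cstdint>

// Embed v ^ cookie in the code stream; recover v at run time, since
// (v ^ cookie) ^ cookie == v. The cookie comes from the isolate's RNG,
// as in the constructor above.
uint32_t MaskedValue(uint32_t v, uint32_t cookie) { return v ^ cookie; }
uint32_t Unmask(uint32_t masked, uint32_t cookie) { return masked ^ cookie; }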
@@ -236,6 +245,9 @@ void MacroAssembler::Push(Handle<Object> handle) {
   push(ip);
 }

+void MacroAssembler::Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+
+void MacroAssembler::Move(Register dst, Smi* smi) { mov(dst, Operand(smi)); }

 void MacroAssembler::Move(Register dst, Handle<Object> value) {
   mov(dst, Operand(value));
@@ -1154,6 +1166,15 @@ void MacroAssembler::ExtractLane(Register dst, QwNeonRegister src,
   vmov(dt, dst, double_source, double_lane);
 }

+void MacroAssembler::ExtractLane(Register dst, DwVfpRegister src,
+                                 NeonDataType dt, int lane) {
+  int size = NeonSz(dt);  // 0, 1, 2
+  int byte = lane << size;
+  int double_byte = byte & (kDoubleSize - 1);
+  int double_lane = double_byte >> size;
+  vmov(dt, dst, src, double_lane);
+}
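The index arithmetic in the new overload is easier to see with concrete numbers: the requested lane is first converted to a byte offset, then wrapped into the 8-byte D register, then converted back to a lane index. A worked example in plain C++, mirroring the lines above (sample values are mine, only the math comes from the diff):

#include <cassert>

int main() {
  const int kDoubleSize = 8;  // bytes in a D register
  int size = 1;               // NeonSz(NeonS16): 1 -> 16-bit lanes
  int lane = 3;               // caller asks for lane 3
  int byte = lane << size;                     // byte offset 6
  int double_byte = byte & (kDoubleSize - 1);  // wrap into one D register: 6
  int double_lane = double_byte >> size;       // back to a lane index: 3
  assert(double_lane == 3);
  // vmov(dt, dst, src, 3) then moves that 16-bit lane into a core register.
  return 0;
}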
 void MacroAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
                                  Register scratch, int lane) {
   int s_code = src.code() * 4 + lane;
@@ -1892,14 +1913,13 @@ void MacroAssembler::IsObjectJSStringType(Register object,
   b(ne, fail);
 }

-void MacroAssembler::IsObjectNameType(Register object,
-                                      Register scratch,
-                                      Label* fail) {
-  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
-  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-  cmp(scratch, Operand(LAST_NAME_TYPE));
-  b(hi, fail);
+Condition MacroAssembler::IsObjectStringType(Register obj, Register type,
+                                             Condition cond) {
+  ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
+  ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
+  tst(type, Operand(kIsNotStringMask), cond);
+  DCHECK_EQ(0u, kStringTag);
+  return eq;
 }

 void MacroAssembler::MaybeDropFrames() {
@@ -2362,29 +2382,6 @@ void MacroAssembler::CheckMap(Register obj,
 }

-void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
-                                     Register scratch2, Handle<WeakCell> cell,
-                                     Handle<Code> success,
-                                     SmiCheckType smi_check_type) {
-  Label fail;
-  if (smi_check_type == DO_SMI_CHECK) {
-    JumpIfSmi(obj, &fail);
-  }
-  ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
-  CmpWeakValue(scratch1, cell, scratch2);
-  Jump(success, RelocInfo::CODE_TARGET, eq);
-  bind(&fail);
-}
-
-void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
-                                  Register scratch) {
-  mov(scratch, Operand(cell));
-  ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
-  cmp(value, scratch);
-}
-
 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
   mov(value, Operand(cell));
   ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
@@ -2397,7 +2394,6 @@ void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
   JumpIfSmi(value, miss);
 }

 void MacroAssembler::GetMapConstructor(Register result, Register map,
                                        Register temp, Register temp2) {
   Label done, loop;
@@ -2700,27 +2696,6 @@ void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
 }

-void MacroAssembler::AssertFastElements(Register elements) {
-  if (emit_debug_code()) {
-    DCHECK(!elements.is(ip));
-    Label ok;
-    push(elements);
-    ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
-    LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
-    cmp(elements, ip);
-    b(eq, &ok);
-    LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
-    cmp(elements, ip);
-    b(eq, &ok);
-    LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
-    cmp(elements, ip);
-    b(eq, &ok);
-    Abort(kJSObjectWithFastElementsMapHasSlowElements);
-    bind(&ok);
-    pop(elements);
-  }
-}
-
 void MacroAssembler::Check(Condition cond, BailoutReason reason) {
   Label L;
@@ -2812,6 +2787,11 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
   }
 }

+void MacroAssembler::InitializeRootRegister() {
+  ExternalReference roots_array_start =
+      ExternalReference::roots_array_start(isolate());
+  mov(kRootRegister, Operand(roots_array_start));
+}

 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
     Register reg,
@@ -2835,6 +2815,13 @@ void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
   b(ne, not_power_of_two);
 }
+void MacroAssembler::SmiTag(Register reg, SBit s) {
+  add(reg, reg, Operand(reg), s);
+}
+
+void MacroAssembler::SmiTag(Register dst, Register src, SBit s) {
+  add(dst, src, Operand(src), s);
+}
 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                       Register reg2,
@@ -2853,6 +2840,24 @@ void MacroAssembler::UntagAndJumpIfSmi(
   b(cc, smi_case);  // Shifter carry is not set for a smi.
 }

+void MacroAssembler::SmiTst(Register value) {
+  tst(value, Operand(kSmiTagMask));
+}
+
+void MacroAssembler::NonNegativeSmiTst(Register value) {
+  tst(value, Operand(kSmiTagMask | kSmiSignMask));
+}
+
+void MacroAssembler::JumpIfSmi(Register value, Label* smi_label) {
+  tst(value, Operand(kSmiTagMask));
+  b(eq, smi_label);
+}
+
+void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
+  tst(value, Operand(kSmiTagMask));
+  b(ne, not_smi_label);
+}
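All of these helpers lean on the 32-bit smi encoding: a smi is the integer shifted left one bit with tag bit 0 clear, so SmiTag is a self-add and SmiTst a one-bit AND. A standalone sketch of the same arithmetic (plain C++; assumes the 32-bit layout with kSmiTagSize == 1 and kSmiTag == 0, and values that fit in 31 bits):

#include <cassert>
#include <cstdint>

uint32_t SmiTag(int32_t v) { return static_cast<uint32_t>(v) << 1; }  // add r, r, r
int32_t SmiUntag(uint32_t s) { return static_cast<int32_t>(s) >> 1; }
bool IsSmi(uint32_t w) { return (w & 1) == 0; }  // kSmiTagMask == 1

int main() {
  uint32_t s = SmiTag(-21);
  assert(IsSmi(s));
  assert(SmiUntag(s) == -21);  // arithmetic shift restores the sign
  return 0;
}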
 void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                      Register reg2,
                                      Label* on_either_smi) {
@@ -2862,18 +2867,6 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1,
   b(eq, on_either_smi);
 }

-void MacroAssembler::AssertNotNumber(Register object) {
-  if (emit_debug_code()) {
-    STATIC_ASSERT(kSmiTag == 0);
-    tst(object, Operand(kSmiTagMask));
-    Check(ne, kOperandIsANumber);
-    push(object);
-    CompareObjectType(object, object, object, HEAP_NUMBER_TYPE);
-    pop(object);
-    Check(ne, kOperandIsANumber);
-  }
-}
-
 void MacroAssembler::AssertNotSmi(Register object) {
   if (emit_debug_code()) {
     STATIC_ASSERT(kSmiTag == 0);
@@ -2892,34 +2885,6 @@ void MacroAssembler::AssertSmi(Register object) {
 }

-void MacroAssembler::AssertString(Register object) {
-  if (emit_debug_code()) {
-    STATIC_ASSERT(kSmiTag == 0);
-    tst(object, Operand(kSmiTagMask));
-    Check(ne, kOperandIsASmiAndNotAString);
-    push(object);
-    ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
-    CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
-    pop(object);
-    Check(lo, kOperandIsNotAString);
-  }
-}
-
-void MacroAssembler::AssertName(Register object) {
-  if (emit_debug_code()) {
-    STATIC_ASSERT(kSmiTag == 0);
-    tst(object, Operand(kSmiTagMask));
-    Check(ne, kOperandIsASmiAndNotAName);
-    push(object);
-    ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
-    CompareInstanceType(object, object, LAST_NAME_TYPE);
-    pop(object);
-    Check(le, kOperandIsNotAName);
-  }
-}
-
 void MacroAssembler::AssertFunction(Register object) {
   if (emit_debug_code()) {
     STATIC_ASSERT(kSmiTag == 0);
@@ -2945,31 +2910,34 @@ void MacroAssembler::AssertBoundFunction(Register object) {
   }
 }
-void MacroAssembler::AssertGeneratorObject(Register object) {
-  if (emit_debug_code()) {
-    STATIC_ASSERT(kSmiTag == 0);
-    tst(object, Operand(kSmiTagMask));
-    Check(ne, kOperandIsASmiAndNotAGeneratorObject);
-    push(object);
-    CompareObjectType(object, object, object, JS_GENERATOR_OBJECT_TYPE);
-    pop(object);
-    Check(eq, kOperandIsNotAGeneratorObject);
-  }
-}
-
-void MacroAssembler::AssertReceiver(Register object) {
-  if (emit_debug_code()) {
-    STATIC_ASSERT(kSmiTag == 0);
-    tst(object, Operand(kSmiTagMask));
-    Check(ne, kOperandIsASmiAndNotAReceiver);
-    push(object);
-    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-    CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE);
-    pop(object);
-    Check(hs, kOperandIsNotAReceiver);
-  }
-}
+void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
+  // `flags` should be an untagged integer. See `SuspendFlags` in src/globals.h
+  if (!emit_debug_code()) return;
+  tst(object, Operand(kSmiTagMask));
+  Check(ne, kOperandIsASmiAndNotAGeneratorObject);
+
+  // Load map
+  Register map = object;
+  push(object);
+  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+
+  Label async, do_check;
+  tst(flags, Operand(static_cast<int>(SuspendFlags::kGeneratorTypeMask)));
+  b(ne, &async);
+
+  // Check if JSGeneratorObject
+  CompareInstanceType(map, object, JS_GENERATOR_OBJECT_TYPE);
+  jmp(&do_check);
+
+  bind(&async);
+  // Check if JSAsyncGeneratorObject
+  CompareInstanceType(map, object, JS_ASYNC_GENERATOR_OBJECT_TYPE);
+
+  bind(&do_check);
+  // Restore generator object to register and perform assertion
+  pop(object);
+  Check(eq, kOperandIsNotAGeneratorObject);
+}
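The rewritten assertion branches on a single bit of the untagged SuspendFlags word: if the generator-type bit is clear the object must be a JSGeneratorObject, otherwise a JSAsyncGeneratorObject. A small sketch of that dispatch in plain C++ (the bit assignment is an assumption; the real enum lives in src/globals.h and is not shown in this diff):

#include <cstdint>

enum class SuspendFlags : uint32_t {
  kGeneratorTypeMask = 1u << 2,  // assumed bit position, for illustration
};

// Mirrors the tst/b(ne) pair above: one masked bit picks the expected map.
bool ExpectAsyncGeneratorMap(uint32_t flags) {
  return (flags & static_cast<uint32_t>(SuspendFlags::kGeneratorTypeMask)) != 0;
}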
 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                      Register scratch) {
@@ -3614,6 +3582,22 @@ void MacroAssembler::LoadAccessor(Register dst, Register holder,
   ldr(dst, FieldMemOperand(dst, offset));
 }

+template <typename Field>
+void MacroAssembler::DecodeFieldToSmi(Register dst, Register src) {
+  static const int shift = Field::kShift;
+  static const int mask = Field::kMask >> shift << kSmiTagSize;
+  STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  if (shift < kSmiTagSize) {
+    mov(dst, Operand(src, LSL, kSmiTagSize - shift));
+    and_(dst, dst, Operand(mask));
+  } else if (shift > kSmiTagSize) {
+    mov(dst, Operand(src, LSR, shift - kSmiTagSize));
+    and_(dst, dst, Operand(mask));
+  } else {
+    and_(dst, src, Operand(mask));
+  }
+}
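DecodeFieldToSmi folds the untag shift and the smi-tag shift into one operation: rather than shifting the field down by kShift and back up by kSmiTagSize, it shifts once by the difference and masks with the field mask pre-shifted into smi position. A worked example with a hypothetical field (kShift == 3, 4 bits wide, kSmiTagSize == 1; field parameters are mine, the math is from the code above):

#include <cassert>
#include <cstdint>

int main() {
  const int kShift = 3, kSmiTagSize = 1;          // hypothetical Field::kShift
  const uint32_t kMask = 0xFu << kShift;          // hypothetical Field::kMask
  const uint32_t smi_mask = kMask >> kShift << kSmiTagSize;  // mask, pre-tagged

  uint32_t src = (0x5u << kShift) | 0x7u;         // field holds 5, plus noise
  // kShift > kSmiTagSize, so shift right by the difference, then mask:
  uint32_t dst = (src >> (kShift - kSmiTagSize)) & smi_mask;
  assert(dst == (0x5u << kSmiTagSize));           // the value 5 as a tagged smi
  return 0;
}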
 void MacroAssembler::CheckEnumCache(Label* call_runtime) {
   Register null_value = r5;
@@ -3758,7 +3742,6 @@ bool AreAliased(Register reg1,
 }
 #endif

 CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
                          FlushICache flush_cache)
     : address_(address),

104
deps/v8/src/arm/macro-assembler-arm.h

@@ -5,6 +5,7 @@
 #ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
 #define V8_ARM_MACRO_ASSEMBLER_ARM_H_

+#include "src/arm/assembler-arm.h"
 #include "src/assembler.h"
 #include "src/bailout-reason.h"
 #include "src/frames.h"
@@ -92,6 +93,9 @@ class MacroAssembler: public Assembler {
   MacroAssembler(Isolate* isolate, void* buffer, int size,
                  CodeObjectRequired create_code_object);

+  int jit_cookie() const { return jit_cookie_; }
+  Isolate* isolate() const { return isolate_; }

   // Returns the size of a call in instructions. Note, the value returned is
   // only valid as long as no entries are added to the constant pool between
@@ -174,7 +178,7 @@ class MacroAssembler: public Assembler {
   void Pop(Register dst) { pop(dst); }

   // Register move. May do nothing if the registers are identical.
-  void Move(Register dst, Smi* smi) { mov(dst, Operand(smi)); }
+  void Move(Register dst, Smi* smi);
   void Move(Register dst, Handle<Object> value);
   void Move(Register dst, Register src, Condition cond = al);
   void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
@@ -332,7 +336,7 @@ class MacroAssembler: public Assembler {
   // Push a handle.
   void Push(Handle<Object> handle);
-  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+  void Push(Smi* smi);

   // Push two registers. Pushes leftmost register first (to highest address).
   void Push(Register src1, Register src2, Condition cond = al) {
@@ -563,6 +567,7 @@ class MacroAssembler: public Assembler {
   void VmovExtended(const MemOperand& dst, int src_code, Register scratch);

   void ExtractLane(Register dst, QwNeonRegister src, NeonDataType dt, int lane);
+  void ExtractLane(Register dst, DwVfpRegister src, NeonDataType dt, int lane);
   void ExtractLane(SwVfpRegister dst, QwNeonRegister src, Register scratch,
                    int lane);
   void ReplaceLane(QwNeonRegister dst, QwNeonRegister src, Register src_lane,
@@ -658,11 +663,7 @@ class MacroAssembler: public Assembler {
                           Register map,
                           Register scratch);

-  void InitializeRootRegister() {
-    ExternalReference roots_array_start =
-        ExternalReference::roots_array_start(isolate());
-    mov(kRootRegister, Operand(roots_array_start));
-  }
+  void InitializeRootRegister();

   // ---------------------------------------------------------------------------
   // JavaScript invokes
@@ -711,10 +712,6 @@ class MacroAssembler: public Assembler {
                             Register scratch,
                             Label* fail);

-  void IsObjectNameType(Register object,
-                        Register scratch,
-                        Label* fail);
-
   // Frame restart support
   void MaybeDropFrames();
@@ -884,17 +881,6 @@ class MacroAssembler: public Assembler {
                 Label* fail,
                 SmiCheckType smi_check_type);

-  // Check if the map of an object is equal to a specified weak map and branch
-  // to a specified target if equal. Skip the smi check if not required
-  // (object is known to be a heap object)
-  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
-                       Handle<WeakCell> cell, Handle<Code> success,
-                       SmiCheckType smi_check_type);
-
-  // Compare the given value and the value of weak cell.
-  void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
-
   void GetWeakValue(Register value, Handle<WeakCell> cell);

   // Load the value of the weak cell in the value register. Branch to the given
@@ -927,16 +913,8 @@ class MacroAssembler: public Assembler {
   // Returns a condition that will be enabled if the object was a string
   // and the passed-in condition passed. If the passed-in condition failed
   // then flags remain unchanged.
-  Condition IsObjectStringType(Register obj,
-                               Register type,
-                               Condition cond = al) {
-    ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
-    ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
-    tst(type, Operand(kIsNotStringMask), cond);
-    DCHECK_EQ(0u, kStringTag);
-    return eq;
-  }
+  Condition IsObjectStringType(Register obj, Register type,
+                               Condition cond = al);

   // Get the number of least significant bits from a register
   void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
@@ -1151,7 +1129,6 @@ class MacroAssembler: public Assembler {
   // Calls Abort(msg) if the condition cond is not satisfied.
   // Use --debug_code to enable.
   void Assert(Condition cond, BailoutReason reason);
-  void AssertFastElements(Register elements);

   // Like Assert(), but always enabled.
   void Check(Condition cond, BailoutReason reason);
@@ -1201,12 +1178,8 @@ class MacroAssembler: public Assembler {
   // ---------------------------------------------------------------------------
   // Smi utilities

-  void SmiTag(Register reg, SBit s = LeaveCC) {
-    add(reg, reg, Operand(reg), s);
-  }
-  void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
-    add(dst, src, Operand(src), s);
-  }
+  void SmiTag(Register reg, SBit s = LeaveCC);
+  void SmiTag(Register dst, Register src, SBit s = LeaveCC);

   // Try to convert int32 to smi. If the value is to large, preserve
   // the original value and jump to not_a_smi. Destroys scratch and
@@ -1233,40 +1206,21 @@ class MacroAssembler: public Assembler {
   void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

   // Test if the register contains a smi (Z == 0 (eq) if true).
-  inline void SmiTst(Register value) {
-    tst(value, Operand(kSmiTagMask));
-  }
-  inline void NonNegativeSmiTst(Register value) {
-    tst(value, Operand(kSmiTagMask | kSmiSignMask));
-  }
+  void SmiTst(Register value);
+  void NonNegativeSmiTst(Register value);
   // Jump if the register contains a smi.
-  inline void JumpIfSmi(Register value, Label* smi_label) {
-    tst(value, Operand(kSmiTagMask));
-    b(eq, smi_label);
-  }
+  void JumpIfSmi(Register value, Label* smi_label);
   // Jump if either of the registers contain a non-smi.
-  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
-    tst(value, Operand(kSmiTagMask));
-    b(ne, not_smi_label);
-  }
+  void JumpIfNotSmi(Register value, Label* not_smi_label);
   // Jump if either of the registers contain a non-smi.
   void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
   // Jump if either of the registers contain a smi.
   void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

-  // Abort execution if argument is a number, enabled via --debug-code.
-  void AssertNotNumber(Register object);
-
   // Abort execution if argument is a smi, enabled via --debug-code.
   void AssertNotSmi(Register object);
   void AssertSmi(Register object);

-  // Abort execution if argument is not a string, enabled via --debug-code.
-  void AssertString(Register object);
-
-  // Abort execution if argument is not a name, enabled via --debug-code.
-  void AssertName(Register object);
-
   // Abort execution if argument is not a JSFunction, enabled via --debug-code.
   void AssertFunction(Register object);
@@ -1276,10 +1230,7 @@ class MacroAssembler: public Assembler {
   // Abort execution if argument is not a JSGeneratorObject,
   // enabled via --debug-code.
-  void AssertGeneratorObject(Register object);
+  void AssertGeneratorObject(Register object, Register suspend_flags);

-  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
-  void AssertReceiver(Register object);
-
   // Abort execution if argument is not undefined or an AllocationSite, enabled
   // via --debug-code.
@@ -1352,22 +1303,8 @@ class MacroAssembler: public Assembler {
     DecodeField<Field>(reg, reg);
   }

-  template<typename Field>
-  void DecodeFieldToSmi(Register dst, Register src) {
-    static const int shift = Field::kShift;
-    static const int mask = Field::kMask >> shift << kSmiTagSize;
-    STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
-    STATIC_ASSERT(kSmiTag == 0);
-    if (shift < kSmiTagSize) {
-      mov(dst, Operand(src, LSL, kSmiTagSize - shift));
-      and_(dst, dst, Operand(mask));
-    } else if (shift > kSmiTagSize) {
-      mov(dst, Operand(src, LSR, shift - kSmiTagSize));
-      and_(dst, dst, Operand(mask));
-    } else {
-      and_(dst, src, Operand(mask));
-    }
-  }
+  template <typename Field>
+  void DecodeFieldToSmi(Register dst, Register src);

   template<typename Field>
   void DecodeFieldToSmi(Register reg) {
@@ -1450,15 +1387,16 @@ class MacroAssembler: public Assembler {
   bool generating_stub_;
   bool has_frame_;
+  Isolate* isolate_;
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
+  int jit_cookie_;

   // Needs access to SafepointRegisterStackIndex for compiled frame
   // traversal.
   friend class StandardFrame;
 };

 // The code patcher is used to patch (typically) small parts of code e.g. for
 // debugging and other types of instrumentation. When using the code patcher
 // the exact number of bytes specified must be emitted. It is not legal to emit

1202
deps/v8/src/arm/simulator-arm.cc

File diff suppressed because it is too large

8
deps/v8/src/arm/simulator-arm.h

@@ -154,10 +154,10 @@ class Simulator {
   void get_d_register(int dreg, uint32_t* value);
   void set_d_register(int dreg, const uint32_t* value);
   // Support for NEON.
-  template <typename T>
-  void get_q_register(int qreg, T* value);
-  template <typename T>
-  void set_q_register(int qreg, const T* value);
+  template <typename T, int SIZE = kSimd128Size>
+  void get_neon_register(int reg, T (&value)[SIZE / sizeof(T)]);
+  template <typename T, int SIZE = kSimd128Size>
+  void set_neon_register(int reg, const T (&value)[SIZE / sizeof(T)]);
   void set_s_register(int reg, unsigned int value);
   unsigned int get_s_register(int reg) const;
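The new accessors take a reference to a fixed-size array instead of a bare pointer, so the element count is checked at compile time: a uint32_t read of a 128-bit NEON register must supply exactly four words. A self-contained sketch of the same signature shape (FakeSimulator and its register file are stand-ins, not the simulator's implementation; kSimd128Size of 16 bytes assumed):

#include <cstdint>
#include <cstring>

constexpr int kSimd128Size = 16;  // bytes in a Q register

struct FakeSimulator {
  uint8_t registers_[16][kSimd128Size];  // pretend NEON register file

  template <typename T, int SIZE = kSimd128Size>
  void get_neon_register(int reg, T (&value)[SIZE / sizeof(T)]) {
    std::memcpy(value, registers_[reg], SIZE);
  }
};

int main() {
  FakeSimulator sim{};
  uint32_t words[4];                // exactly kSimd128Size / sizeof(uint32_t)
  sim.get_neon_register(0, words);  // a uint32_t[3] here would not compile
  return 0;
}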

55
deps/v8/src/arm64/assembler-arm64-inl.h

@@ -16,7 +16,7 @@ namespace internal {

 bool CpuFeatures::SupportsCrankshaft() { return true; }

-bool CpuFeatures::SupportsSimd128() { return false; }
+bool CpuFeatures::SupportsWasmSimd128() { return false; }

 void RelocInfo::apply(intptr_t delta) {
   // On arm64 only internal references need extra work.
@@ -691,32 +691,28 @@ Address RelocInfo::constant_pool_entry_address() {
   return Assembler::target_pointer_address_at(pc_);
 }

-Object* RelocInfo::target_object() {
+HeapObject* RelocInfo::target_object() {
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
+  return HeapObject::cast(
+      reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)));
 }

-Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return Handle<Object>(reinterpret_cast<Object**>(
-      Assembler::target_address_at(pc_, host_)));
+  return Handle<HeapObject>(
+      reinterpret_cast<HeapObject**>(Assembler::target_address_at(pc_, host_)));
 }

-void RelocInfo::set_target_object(Object* target,
+void RelocInfo::set_target_object(HeapObject* target,
                                   WriteBarrierMode write_barrier_mode,
                                   ICacheFlushMode icache_flush_mode) {
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  Assembler::set_target_address_at(isolate_, pc_, host_,
+  Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
                                    reinterpret_cast<Address>(target),
                                    icache_flush_mode);
-  if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
-      host() != NULL &&
-      target->IsHeapObject()) {
-    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
-        host(), this, HeapObject::cast(target));
+  if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+                                                                  target);
     host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
   }
 }
@@ -745,13 +741,12 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
   return target_address();
 }

-void RelocInfo::set_target_runtime_entry(Address target,
+void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
                                          WriteBarrierMode write_barrier_mode,
                                          ICacheFlushMode icache_flush_mode) {
   DCHECK(IsRuntimeEntry(rmode_));
   if (target_address() != target) {
-    set_target_address(target, write_barrier_mode, icache_flush_mode);
+    set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
   }
 }
@@ -776,13 +771,11 @@ void RelocInfo::set_target_cell(Cell* cell,
 }

-static const int kNoCodeAgeSequenceLength = 5 * kInstructionSize;
 static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize;

-Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+Handle<Code> RelocInfo::code_age_stub_handle(Assembler* origin) {
   UNREACHABLE();  // This should never be reached on ARM64.
-  return Handle<Object>();
+  return Handle<Code>();
 }
@@ -813,27 +806,25 @@ Address RelocInfo::debug_call_address() {
   return Assembler::target_address_at(pc_, host_);
 }

-void RelocInfo::set_debug_call_address(Address target) {
+void RelocInfo::set_debug_call_address(Isolate* isolate, Address target) {
   DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
   STATIC_ASSERT(Assembler::kPatchDebugBreakSlotAddressOffset == 0);
-  Assembler::set_target_address_at(isolate_, pc_, host_, target);
+  Assembler::set_target_address_at(isolate, pc_, host_, target);
   if (host() != NULL) {
-    Object* target_code = Code::GetCodeFromTargetAddress(target);
-    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
-        host(), this, HeapObject::cast(target_code));
+    Code* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+                                                                  target_code);
   }
 }

-void RelocInfo::WipeOut() {
+void RelocInfo::WipeOut(Isolate* isolate) {
   DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
          IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
          IsInternalReference(rmode_));
   if (IsInternalReference(rmode_)) {
     Memory::Address_at(pc_) = NULL;
   } else {
-    Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
+    Assembler::set_target_address_at(isolate, pc_, host_, NULL);
   }
 }
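A recurring change in this file narrows Object* to HeapObject* in the embedded-target accessors, which is why the write-barrier path above could drop its target->IsHeapObject() test. A toy analog of the design choice (stand-in types, not V8's):

#include <cassert>

struct Object { virtual ~Object() = default; };
struct HeapObject : Object {
  static HeapObject* cast(Object* o) {
    assert(o != nullptr);  // V8's cast DCHECKs the heap-object tag instead
    return static_cast<HeapObject*>(o);
  }
};

// Returning the stronger type pushes the invariant into the signature:
// callers used to re-check IsHeapObject(); now none of them can forget.
HeapObject* target_object(Object* raw) { return HeapObject::cast(raw); }

int main() {
  HeapObject h;
  assert(target_object(&h) == &h);
  return 0;
}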

63
deps/v8/src/arm64/assembler-arm64.cc

@@ -28,7 +28,6 @@

 #if V8_TARGET_ARCH_ARM64

-#define ARM64_DEFINE_REG_STATICS
 #include "src/arm64/assembler-arm64.h"

 #include "src/arm64/assembler-arm64-inl.h"
@@ -200,13 +199,14 @@ uint32_t RelocInfo::wasm_function_table_size_reference() {
 }

 void RelocInfo::unchecked_update_wasm_memory_reference(
-    Address address, ICacheFlushMode flush_mode) {
-  Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
+    Isolate* isolate, Address address, ICacheFlushMode flush_mode) {
+  Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
 }

-void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+void RelocInfo::unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
                                            ICacheFlushMode flush_mode) {
   Memory::uint32_at(Assembler::target_pointer_address_at(pc_)) = size;
+  // No icache flushing needed, see comment in set_target_address_at.
 }

 Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
@@ -528,7 +528,7 @@ void ConstPool::EmitEntries() {
       // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
       DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
-      instr->SetImmPCOffsetTarget(assm_->isolate(), assm_->pc());
+      instr->SetImmPCOffsetTarget(assm_->isolate_data(), assm_->pc());
     }
     assm_->dc64(data);
   }
@@ -544,7 +544,7 @@ void ConstPool::EmitEntries() {
       // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
       DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
-      instr->SetImmPCOffsetTarget(assm_->isolate(), assm_->pc());
+      instr->SetImmPCOffsetTarget(assm_->isolate_data(), assm_->pc());
       assm_->dc64(unique_it->first);
     }
     unique_entries_.clear();
@@ -553,8 +553,8 @@ void ConstPool::EmitEntries() {

 // Assembler

-Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
-    : AssemblerBase(isolate, buffer, buffer_size),
+Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
+    : AssemblerBase(isolate_data, buffer, buffer_size),
       constpool_(this),
       recorded_ast_id_(TypeFeedbackId::None()),
       unresolved_branches_() {
@@ -675,22 +675,22 @@ void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
   } else if (branch == next_link) {
     // The branch is the last (but not also the first) instruction in the chain.
-    prev_link->SetImmPCOffsetTarget(isolate(), prev_link);
+    prev_link->SetImmPCOffsetTarget(isolate_data(), prev_link);

   } else {
     // The branch is in the middle of the chain.
     if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
-      prev_link->SetImmPCOffsetTarget(isolate(), next_link);
+      prev_link->SetImmPCOffsetTarget(isolate_data(), next_link);
     } else if (label_veneer != NULL) {
       // Use the veneer for all previous links in the chain.
-      prev_link->SetImmPCOffsetTarget(isolate(), prev_link);
+      prev_link->SetImmPCOffsetTarget(isolate_data(), prev_link);

       end_of_chain = false;
       link = next_link;
       while (!end_of_chain) {
         next_link = link->ImmPCOffsetTarget();
         end_of_chain = (link == next_link);
-        link->SetImmPCOffsetTarget(isolate(), label_veneer);
+        link->SetImmPCOffsetTarget(isolate_data(), label_veneer);
         link = next_link;
       }
     } else {
@@ -761,10 +761,11 @@ void Assembler::bind(Label* label) {
       // Internal references do not get patched to an instruction but directly
       // to an address.
       internal_reference_positions_.push_back(linkoffset);
-      PatchingAssembler patcher(isolate(), link, 2);
+      PatchingAssembler patcher(isolate_data(), reinterpret_cast<byte*>(link),
+                                2);
       patcher.dc64(reinterpret_cast<uintptr_t>(pc_));
     } else {
-      link->SetImmPCOffsetTarget(isolate(),
+      link->SetImmPCOffsetTarget(isolate_data(),
                                  reinterpret_cast<Instruction*>(pc_));
     }
@@ -1697,19 +1698,19 @@ void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
 void Assembler::ldar(const Register& rt, const Register& rn) {
   DCHECK(rn.Is64Bits());
   LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAR_w : LDAR_x;
-  Emit(op | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+  Emit(op | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
 }

 void Assembler::ldaxr(const Register& rt, const Register& rn) {
   DCHECK(rn.Is64Bits());
   LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAXR_w : LDAXR_x;
-  Emit(op | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+  Emit(op | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
 }

 void Assembler::stlr(const Register& rt, const Register& rn) {
   DCHECK(rn.Is64Bits());
   LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLR_w : STLR_x;
-  Emit(op | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+  Emit(op | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
 }

 void Assembler::stlxr(const Register& rs, const Register& rt,
@@ -1717,25 +1718,25 @@ void Assembler::stlxr(const Register& rs, const Register& rt,
   DCHECK(rs.Is32Bits());
   DCHECK(rn.Is64Bits());
   LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLXR_w : STLXR_x;
-  Emit(op | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt));
+  Emit(op | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
 }

 void Assembler::ldarb(const Register& rt, const Register& rn) {
   DCHECK(rt.Is32Bits());
   DCHECK(rn.Is64Bits());
-  Emit(LDAR_b | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+  Emit(LDAR_b | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
 }

 void Assembler::ldaxrb(const Register& rt, const Register& rn) {
   DCHECK(rt.Is32Bits());
   DCHECK(rn.Is64Bits());
-  Emit(LDAXR_b | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+  Emit(LDAXR_b | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
 }

 void Assembler::stlrb(const Register& rt, const Register& rn) {
   DCHECK(rt.Is32Bits());
   DCHECK(rn.Is64Bits());
-  Emit(STLR_b | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+  Emit(STLR_b | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
 }

 void Assembler::stlxrb(const Register& rs, const Register& rt,
@@ -1743,25 +1744,25 @@ void Assembler::stlxrb(const Register& rs, const Register& rt,
   DCHECK(rs.Is32Bits());
   DCHECK(rt.Is32Bits());
   DCHECK(rn.Is64Bits());
-  Emit(STLXR_b | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt));
+  Emit(STLXR_b | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
 }

 void Assembler::ldarh(const Register& rt, const Register& rn) {
   DCHECK(rt.Is32Bits());
   DCHECK(rn.Is64Bits());
-  Emit(LDAR_h | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+  Emit(LDAR_h | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
 }

 void Assembler::ldaxrh(const Register& rt, const Register& rn) {
   DCHECK(rt.Is32Bits());
   DCHECK(rn.Is64Bits());
-  Emit(LDAXR_h | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+  Emit(LDAXR_h | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
 }

 void Assembler::stlrh(const Register& rt, const Register& rn) {
   DCHECK(rt.Is32Bits());
   DCHECK(rn.Is64Bits());
-  Emit(STLR_h | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+  Emit(STLR_h | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
 }

 void Assembler::stlxrh(const Register& rs, const Register& rt,
@@ -1769,7 +1770,7 @@ void Assembler::stlxrh(const Register& rs, const Register& rt,
   DCHECK(rs.Is32Bits());
   DCHECK(rt.Is32Bits());
   DCHECK(rn.Is64Bits());
-  Emit(STLXR_h | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt));
+  Emit(STLXR_h | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
 }
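The Rn to RnSP swap in every acquire/release emitter is about AArch64's dual use of register number 31: in most operand fields it encodes the zero register, but in a load/store base field it encodes the stack pointer, and these instructions address memory through rn. A simplified sketch of the two field encoders (bit positions per the ARMv8 encoding; the internal csp code of 63 is an assumption borrowed from kSPRegInternalCode in the header diff below):

#include <cassert>
#include <cstdint>

constexpr int kZeroRegCode = 31;
constexpr int kSPRegInternalCode = 63;  // assumed; csp's internal code in V8

// Rn occupies bits 9..5. Plain Rn() treats code 31 as xzr and must reject
// csp; RnSP() re-encodes csp as 31 because the base field means "sp" there.
uint32_t Rn(int code) {
  assert(code != kSPRegInternalCode);  // csp is not a valid plain operand
  return static_cast<uint32_t>(code & 0x1F) << 5;
}
uint32_t RnSP(int code) {
  if (code == kSPRegInternalCode) code = kZeroRegCode;  // 31 == sp here
  return static_cast<uint32_t>(code & 0x1F) << 5;
}

int main() {
  assert(RnSP(kSPRegInternalCode) == (31u << 5));
  return 0;
}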
 void Assembler::mov(const Register& rd, const Register& rm) {
@@ -2948,7 +2949,7 @@ void Assembler::GrowBuffer() {

 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   // We do not try to reuse pool constants.
-  RelocInfo rinfo(isolate(), reinterpret_cast<byte*>(pc_), rmode, data, NULL);
+  RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
   if (((rmode >= RelocInfo::COMMENT) &&
        (rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL)) ||
       (rmode == RelocInfo::INTERNAL_REFERENCE) ||
@@ -2978,8 +2979,8 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   }
   DCHECK(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
   if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
-    RelocInfo reloc_info_with_ast_id(isolate(), reinterpret_cast<byte*>(pc_),
-                                     rmode, RecordedAstId().ToInt(), NULL);
+    RelocInfo reloc_info_with_ast_id(reinterpret_cast<byte*>(pc_), rmode,
+                                     RecordedAstId().ToInt(), NULL);
     ClearRecordedAstId();
     reloc_info_writer.Write(&reloc_info_with_ast_id);
   } else {
@@ -3068,7 +3069,7 @@ bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {

 void Assembler::RecordVeneerPool(int location_offset, int size) {
-  RelocInfo rinfo(isolate(), buffer_ + location_offset, RelocInfo::VENEER_POOL,
+  RelocInfo rinfo(buffer_ + location_offset, RelocInfo::VENEER_POOL,
                   static_cast<intptr_t>(size), NULL);
   reloc_info_writer.Write(&rinfo);
 }
@@ -3111,7 +3112,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
     // to the label.
     Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
     RemoveBranchFromLabelLinkChain(branch, label, veneer);
-    branch->SetImmPCOffsetTarget(isolate(), veneer);
+    branch->SetImmPCOffsetTarget(isolate_data(), veneer);
     b(label);
 #ifdef DEBUG
     DCHECK(SizeOfCodeGeneratedSince(&veneer_size_check) <=

182
deps/v8/src/arm64/assembler-arm64.h

@@ -10,6 +10,7 @@
 #include <map>
 #include <vector>

+#include "src/arm64/constants-arm64.h"
 #include "src/arm64/instructions-arm64.h"
 #include "src/assembler.h"
 #include "src/globals.h"
@@ -63,8 +64,8 @@ namespace internal {
   R(d25) R(d26) R(d27) R(d28)
 // clang-format on

-static const int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
+constexpr int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
+static const int kNoCodeAgeSequenceLength = 5 * kInstructionSize;

 // Some CPURegister methods can return Register and FPRegister types, so we
 // need to declare them in advance.
@@ -90,6 +91,11 @@ struct CPURegister {
     kNoRegister
   };

+  constexpr CPURegister() : CPURegister(0, 0, CPURegister::kNoRegister) {}
+
+  constexpr CPURegister(int reg_code, int reg_size, RegisterType reg_type)
+      : reg_code(reg_code), reg_size(reg_size), reg_type(reg_type) {}

   static CPURegister Create(int code, int size, RegisterType type) {
     CPURegister r = {code, size, type};
     return r;
@@ -138,25 +144,9 @@ struct Register : public CPURegister {
     return Register(CPURegister::Create(code, size, CPURegister::kRegister));
   }

-  Register() {
-    reg_code = 0;
-    reg_size = 0;
-    reg_type = CPURegister::kNoRegister;
-  }
-
-  explicit Register(const CPURegister& r) {
-    reg_code = r.reg_code;
-    reg_size = r.reg_size;
-    reg_type = r.reg_type;
-    DCHECK(IsValidOrNone());
-  }
-
-  Register(const Register& r) {  // NOLINT(runtime/explicit)
-    reg_code = r.reg_code;
-    reg_size = r.reg_size;
-    reg_type = r.reg_type;
-    DCHECK(IsValidOrNone());
-  }
+  constexpr Register() : CPURegister() {}
+
+  constexpr explicit Register(const CPURegister& r) : CPURegister(r) {}

   bool IsValid() const {
     DCHECK(IsRegister() || IsNone());
@@ -170,7 +160,7 @@ struct Register : public CPURegister {
   // These memebers are necessary for compilation.
   // A few of them may be unused for now.

-  static const int kNumRegisters = kNumberOfRegisters;
+  static constexpr int kNumRegisters = kNumberOfRegisters;
   STATIC_ASSERT(kNumRegisters == Code::kAfterLast);
   static int NumRegisters() { return kNumRegisters; }
@@ -197,8 +187,8 @@ struct Register : public CPURegister {
   // End of V8 compatibility section -----------------------
 };

-static const bool kSimpleFPAliasing = true;
-static const bool kSimdMaskRegisters = false;
+constexpr bool kSimpleFPAliasing = true;
+constexpr bool kSimdMaskRegisters = false;

 struct FPRegister : public CPURegister {
   enum Code {
@@ -214,25 +204,9 @@ struct FPRegister : public CPURegister {
         CPURegister::Create(code, size, CPURegister::kFPRegister));
   }

-  FPRegister() {
-    reg_code = 0;
-    reg_size = 0;
-    reg_type = CPURegister::kNoRegister;
-  }
-
-  explicit FPRegister(const CPURegister& r) {
-    reg_code = r.reg_code;
-    reg_size = r.reg_size;
-    reg_type = r.reg_type;
-    DCHECK(IsValidOrNone());
-  }
-
-  FPRegister(const FPRegister& r) {  // NOLINT(runtime/explicit)
-    reg_code = r.reg_code;
-    reg_size = r.reg_size;
-    reg_type = r.reg_type;
-    DCHECK(IsValidOrNone());
-  }
+  constexpr FPRegister() : CPURegister() {}
+
+  constexpr explicit FPRegister(const CPURegister& r) : CPURegister(r) {}

   bool IsValid() const {
     DCHECK(IsFPRegister() || IsNone());
@@ -243,7 +217,7 @@ struct FPRegister : public CPURegister {
   static FPRegister DRegFromCode(unsigned code);

   // Start of V8 compatibility section ---------------------
-  static const int kMaxNumRegisters = kNumberOfFPRegisters;
+  static constexpr int kMaxNumRegisters = kNumberOfFPRegisters;
   STATIC_ASSERT(kMaxNumRegisters == Code::kAfterLast);

   // Crankshaft can use all the FP registers except:
@@ -261,54 +235,41 @@ struct FPRegister : public CPURegister {
 STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
 STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));

-#if defined(ARM64_DEFINE_REG_STATICS)
-#define INITIALIZE_REGISTER(register_class, name, code, size, type)       \
-  const CPURegister init_##register_class##_##name = {code, size, type};  \
-  const register_class& name = *reinterpret_cast<const register_class*>(  \
-      &init_##register_class##_##name)
-#define ALIAS_REGISTER(register_class, alias, name)                       \
-  const register_class& alias = *reinterpret_cast<const register_class*>( \
-      &init_##register_class##_##name)
-#else
-#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
-  extern const register_class& name
-#define ALIAS_REGISTER(register_class, alias, name) \
-  extern const register_class& alias
-#endif  // defined(ARM64_DEFINE_REG_STATICS)
+#define DEFINE_REGISTER(register_class, name, code, size, type) \
+  constexpr register_class name { CPURegister(code, size, type) }
+#define ALIAS_REGISTER(register_class, alias, name) \
+  constexpr register_class alias = name

 // No*Reg is used to indicate an unused argument, or an error case. Note that
 // these all compare equal (using the Is() method). The Register and FPRegister
 // variants are provided for convenience.
-INITIALIZE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
-INITIALIZE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
-INITIALIZE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);
+DEFINE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
+DEFINE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
+DEFINE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);

 // v8 compatibility.
-INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);
+DEFINE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);

-#define DEFINE_REGISTERS(N)                                     \
-  INITIALIZE_REGISTER(Register, w##N, N,                        \
-                      kWRegSizeInBits, CPURegister::kRegister); \
-  INITIALIZE_REGISTER(Register, x##N, N,                        \
-                      kXRegSizeInBits, CPURegister::kRegister);
+#define DEFINE_REGISTERS(N)                                                    \
+  DEFINE_REGISTER(Register, w##N, N, kWRegSizeInBits, CPURegister::kRegister); \
+  DEFINE_REGISTER(Register, x##N, N, kXRegSizeInBits, CPURegister::kRegister);
 GENERAL_REGISTER_CODE_LIST(DEFINE_REGISTERS)
 #undef DEFINE_REGISTERS

-INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
-                    CPURegister::kRegister);
-INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
-                    CPURegister::kRegister);
+DEFINE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
+                CPURegister::kRegister);
+DEFINE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
+                CPURegister::kRegister);

-#define DEFINE_FPREGISTERS(N)                                      \
-  INITIALIZE_REGISTER(FPRegister, s##N, N,                         \
-                      kSRegSizeInBits, CPURegister::kFPRegister);  \
-  INITIALIZE_REGISTER(FPRegister, d##N, N,                         \
-                      kDRegSizeInBits, CPURegister::kFPRegister);
+#define DEFINE_FPREGISTERS(N)                           \
+  DEFINE_REGISTER(FPRegister, s##N, N, kSRegSizeInBits, \
+                  CPURegister::kFPRegister);            \
+  DEFINE_REGISTER(FPRegister, d##N, N, kDRegSizeInBits, \
+                  CPURegister::kFPRegister);
 GENERAL_REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
 #undef DEFINE_FPREGISTERS

-#undef INITIALIZE_REGISTER
+#undef DEFINE_REGISTER
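With the constexpr constructors in place, each register constant becomes a genuine compile-time value; for one register the new macro expands to roughly constexpr Register x0 { CPURegister(0, kXRegSizeInBits, CPURegister::kRegister) };. A standalone analog of the before/after (simplified stand-in types, compiles on its own):

// After: a constexpr aggregate, usable in constant expressions, with no
// dynamic-initialization order to worry about.
struct Reg { int code, size, type; };
#define DEFINE_REG(name, code, size, type) \
  constexpr Reg name { code, size, type }
DEFINE_REG(x0, 0, 64, 1);  // cf. DEFINE_REGISTER(Register, x0, ...) above
static_assert(x0.code == 0 && x0.size == 64, "true compile-time constant");
// Before, each name was an extern const Reg& bound via reinterpret_cast in a
// single translation unit guarded by ARM64_DEFINE_REG_STATICS.
#undef DEFINE_REG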
 // Registers aliases.
 ALIAS_REGISTER(Register, ip0, x16);
@@ -566,8 +527,8 @@ class Immediate {
 // -----------------------------------------------------------------------------
 // Operands.
-const int kSmiShift = kSmiTagSize + kSmiShiftSize;
-const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
+constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize;
+constexpr uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
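These two constants are easiest to sanity-check against the 64-bit smi layout, where the 32-bit payload lives in the upper word; the component values below are assumptions (this hunk does not show them), but the arithmetic matches the definitions above:

#include <cstdint>

constexpr int kSmiTagSize = 1, kSmiShiftSize = 31;          // assumed layout
constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize;      // == 32
constexpr uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;  // 0x00000000FFFFFFFF

static_assert(kSmiShift == 32, "payload sits in the upper word");
static_assert(((UINT64_C(7) << kSmiShift) & kSmiShiftMask) == 0,
              "the tag half of a smi is all zero bits");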
// Represents an operand in a machine instruction. // Represents an operand in a machine instruction.
class Operand { class Operand {
@ -756,7 +717,9 @@ class Assembler : public AssemblerBase {
// for code generation and assumes its size to be buffer_size. If the buffer // for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done // is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler. // upon destruction of the assembler.
Assembler(Isolate* arg_isolate, void* buffer, int buffer_size); Assembler(Isolate* isolate, void* buffer, int buffer_size)
: Assembler(IsolateData(isolate), buffer, buffer_size) {}
Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
virtual ~Assembler(); virtual ~Assembler();
@ -807,6 +770,7 @@ class Assembler : public AssemblerBase {
inline static Address target_pointer_address_at(Address pc); inline static Address target_pointer_address_at(Address pc);
// Read/Modify the code target address in the branch/call instruction at pc. // Read/Modify the code target address in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
inline static Address target_address_at(Address pc, Address constant_pool); inline static Address target_address_at(Address pc, Address constant_pool);
inline static void set_target_address_at( inline static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target, Isolate* isolate, Address pc, Address constant_pool, Address target,
@ -836,7 +800,7 @@ class Assembler : public AssemblerBase {
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE); RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// All addresses in the constant pool are the same size as pointers. // All addresses in the constant pool are the same size as pointers.
static const int kSpecialTargetSize = kPointerSize; static constexpr int kSpecialTargetSize = kPointerSize;
// The sizes of the call sequences emitted by MacroAssembler::Call. // The sizes of the call sequences emitted by MacroAssembler::Call.
// Wherever possible, use MacroAssembler::CallSize instead of these constants, // Wherever possible, use MacroAssembler::CallSize instead of these constants,
@ -851,8 +815,8 @@ class Assembler : public AssemblerBase {
// With relocation: // With relocation:
// ldr temp, =target // ldr temp, =target
// blr temp // blr temp
static const int kCallSizeWithoutRelocation = 4 * kInstructionSize; static constexpr int kCallSizeWithoutRelocation = 4 * kInstructionSize;
static const int kCallSizeWithRelocation = 2 * kInstructionSize; static constexpr int kCallSizeWithRelocation = 2 * kInstructionSize;
// Size of the generated code in bytes // Size of the generated code in bytes
uint64_t SizeOfGeneratedCode() const { uint64_t SizeOfGeneratedCode() const {
@ -884,12 +848,12 @@ class Assembler : public AssemblerBase {
return SizeOfCodeGeneratedSince(label) / kInstructionSize; return SizeOfCodeGeneratedSince(label) / kInstructionSize;
} }
static const int kPatchDebugBreakSlotAddressOffset = 0; static constexpr int kPatchDebugBreakSlotAddressOffset = 0;
// Number of instructions necessary to be able to later patch it to a call. // Number of instructions necessary to be able to later patch it to a call.
static const int kDebugBreakSlotInstructions = 5; static constexpr int kDebugBreakSlotInstructions = 5;
static const int kDebugBreakSlotLength = static constexpr int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstructionSize; kDebugBreakSlotInstructions * kInstructionSize;
// Prevent contant pool emission until EndBlockConstPool is called. // Prevent contant pool emission until EndBlockConstPool is called.
// Call to this function can be nested but must be followed by an equal // Call to this function can be nested but must be followed by an equal
@ -1847,7 +1811,7 @@ class Assembler : public AssemblerBase {
// The maximum code size generated for a veneer. Currently one branch // The maximum code size generated for a veneer. Currently one branch
// instruction. This is for code size checking purposes, and can be extended // instruction. This is for code size checking purposes, and can be extended
// in the future for example if we decide to add nops between the veneers. // in the future for example if we decide to add nops between the veneers.
static const int kMaxVeneerCodeSize = 1 * kInstructionSize; static constexpr int kMaxVeneerCodeSize = 1 * kInstructionSize;
void RecordVeneerPool(int location_offset, int size); void RecordVeneerPool(int location_offset, int size);
// Emits veneers for branches that are approaching their maximum range. // Emits veneers for branches that are approaching their maximum range.
@ -2000,7 +1964,7 @@ class Assembler : public AssemblerBase {
// suitable for fields that take instruction offsets. // suitable for fields that take instruction offsets.
inline int LinkAndGetInstructionOffsetTo(Label* label); inline int LinkAndGetInstructionOffsetTo(Label* label);
static const int kStartOfLabelLinkChain = 0; static constexpr int kStartOfLabelLinkChain = 0;
// Verify that a label's link chain is intact. // Verify that a label's link chain is intact.
void CheckLabelLinkChain(Label const * label); void CheckLabelLinkChain(Label const * label);
@ -2061,17 +2025,17 @@ class Assembler : public AssemblerBase {
// expensive. By default we only check again once a number of instructions // expensive. By default we only check again once a number of instructions
// has been generated. That also means that the sizing of the buffers is not // has been generated. That also means that the sizing of the buffers is not
// an exact science, and that we rely on some slop to not overrun buffers. // an exact science, and that we rely on some slop to not overrun buffers.
static const int kCheckConstPoolInterval = 128; static constexpr int kCheckConstPoolInterval = 128;
// Distance to first use after which a pool will be emitted. Pool entries
// are accessed with pc-relative loads, therefore this cannot be more than
// 1 * MB. Since constant pool emission checks are interval based, this value
// is an approximation.
static const int kApproxMaxDistToConstPool = 64 * KB; static constexpr int kApproxMaxDistToConstPool = 64 * KB;
// Number of pool entries after which a pool will be emitted. Since constant // Number of pool entries after which a pool will be emitted. Since constant
// pool emission checks are interval based this value is an approximation. // pool emission checks are interval based this value is an approximation.
static const int kApproxMaxPoolEntryCount = 512; static constexpr int kApproxMaxPoolEntryCount = 512;
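To make the interval-based policy concrete, here is a minimal sketch of the check these three constants describe (names and the exact policy are assumptions for illustration, not the V8 implementation):

#include <cstdint>

struct PoolState {
  int instrs_since_check = 0;    // reset after every poll
  int entry_count = 0;           // pending constant pool entries
  int64_t first_use_offset = 0;  // pc offset of the oldest pending entry
};

// Poll only every kCheckConstPoolInterval instructions; when polled, emit if
// the oldest entry is drifting out of pc-relative range or the pool is large.
bool ShouldEmitConstPool(const PoolState& s, int64_t pc_offset) {
  if (s.instrs_since_check < 128) return false;        // kCheckConstPoolInterval
  if (s.entry_count == 0) return false;
  return s.entry_count >= 512 ||                       // kApproxMaxPoolEntryCount
         pc_offset - s.first_use_offset >= 64 * 1024;  // kApproxMaxDistToConstPool
}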
// Emission of the constant pool may be blocked in some code sequences. // Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero. int const_pool_blocked_nesting_; // Block emission if this is not zero.
@ -2082,8 +2046,9 @@ class Assembler : public AssemblerBase {
// Relocation info generation // Relocation info generation
// Each relocation is encoded as a variable size value // Each relocation is encoded as a variable size value
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize; static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer; RelocInfoWriter reloc_info_writer;
// Internal reference positions, required for (potential) patching in // Internal reference positions, required for (potential) patching in
// GrowBuffer(); contains only those internal references whose labels // GrowBuffer(); contains only those internal references whose labels
// are already bound. // are already bound.
@ -2121,7 +2086,7 @@ class Assembler : public AssemblerBase {
// not have to check for overflow. The same is true for writes of large // not have to check for overflow. The same is true for writes of large
// relocation info entries, and debug strings encoded in the instruction // relocation info entries, and debug strings encoded in the instruction
// stream. // stream.
static const int kGap = 128; static constexpr int kGap = 128;
public: public:
class FarBranchInfo { class FarBranchInfo {
@ -2151,13 +2116,13 @@ class Assembler : public AssemblerBase {
// We generate a veneer for a branch if we reach within this distance of the // We generate a veneer for a branch if we reach within this distance of the
// limit of the range. // limit of the range.
static const int kVeneerDistanceMargin = 1 * KB; static constexpr int kVeneerDistanceMargin = 1 * KB;
// The factor of 2 is a finger in the air guess. With a default margin of
// 1KB, that leaves us an additional 256 instructions to avoid generating a
// protective branch.
static const int kVeneerNoProtectionFactor = 2; static constexpr int kVeneerNoProtectionFactor = 2;
static const int kVeneerDistanceCheckMargin = static constexpr int kVeneerDistanceCheckMargin =
kVeneerNoProtectionFactor * kVeneerDistanceMargin; kVeneerNoProtectionFactor * kVeneerDistanceMargin;
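The 256-instruction figure follows directly from the fixed 4-byte instruction width; a quick sketch of the arithmetic:

constexpr int kInstrSize = 4;  // arm64 instruction width
constexpr int kMargin = 1024;  // kVeneerDistanceMargin = 1 * KB
constexpr int kFactor = 2;     // kVeneerNoProtectionFactor
static_assert(kMargin / kInstrSize == 256, "the 256 instructions of slack");
static_assert(kFactor * kMargin == 2048, "kVeneerDistanceCheckMargin = 2 * KB");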
int unresolved_branches_first_limit() const { int unresolved_branches_first_limit() const {
DCHECK(!unresolved_branches_.empty()); DCHECK(!unresolved_branches_.empty());
return unresolved_branches_.begin()->first; return unresolved_branches_.begin()->first;
@ -2195,14 +2160,18 @@ class PatchingAssembler : public Assembler {
// If more or fewer instructions than expected are generated or if some // If more or fewer instructions than expected are generated or if some
// relocation information takes space in the buffer, the PatchingAssembler // relocation information takes space in the buffer, the PatchingAssembler
// will crash trying to grow the buffer. // will crash trying to grow the buffer.
PatchingAssembler(Isolate* isolate, Instruction* start, unsigned count)
: Assembler(isolate, reinterpret_cast<byte*>(start),
count * kInstructionSize + kGap) {
StartBlockPools();
}
// This version will flush at destruction.
PatchingAssembler(Isolate* isolate, byte* start, unsigned count) PatchingAssembler(Isolate* isolate, byte* start, unsigned count)
: Assembler(isolate, start, count * kInstructionSize + kGap) { : PatchingAssembler(IsolateData(isolate), start, count) {
CHECK_NOT_NULL(isolate);
isolate_ = isolate;
}
// This version will not flush.
PatchingAssembler(IsolateData isolate_data, byte* start, unsigned count)
: Assembler(isolate_data, start, count * kInstructionSize + kGap),
isolate_(nullptr) {
// Block constant pool emission. // Block constant pool emission.
StartBlockPools(); StartBlockPools();
} }
@ -2217,13 +2186,16 @@ class PatchingAssembler : public Assembler {
DCHECK(IsConstPoolEmpty()); DCHECK(IsConstPoolEmpty());
// Flush the Instruction cache. // Flush the Instruction cache.
size_t length = buffer_size_ - kGap; size_t length = buffer_size_ - kGap;
Assembler::FlushICache(isolate(), buffer_, length); if (isolate_ != nullptr) Assembler::FlushICache(isolate_, buffer_, length);
} }
// See definition of PatchAdrFar() for details. // See definition of PatchAdrFar() for details.
static const int kAdrFarPatchableNNops = 2; static constexpr int kAdrFarPatchableNNops = 2;
static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2; static constexpr int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
void PatchAdrFar(int64_t target_offset); void PatchAdrFar(int64_t target_offset);
private:
Isolate* isolate_;
}; };
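A hedged usage sketch of the constructor split above (it assumes the surrounding V8 headers; brk stands in for whatever is being patched): the Isolate* overload flushes the instruction cache when it goes out of scope, while the IsolateData overload leaves flushing to the caller.

void PatchTwoInstructions(Isolate* isolate, byte* pc) {
  // Room for exactly two instructions; emitting more or fewer would trip the
  // destructor's checks, per the comment above the constructors.
  PatchingAssembler patcher(isolate, pc, 2);
  patcher.brk(0);
  patcher.brk(1);
}  // Destructor asserts the count and flushes the icache for the patched range.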

487
deps/v8/src/arm64/code-stubs-arm64.cc

@ -4,20 +4,25 @@
#if V8_TARGET_ARCH_ARM64 #if V8_TARGET_ARCH_ARM64
#include "src/code-stubs.h"
#include "src/api-arguments.h" #include "src/api-arguments.h"
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/frames-arm64.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/bootstrapper.h" #include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h" #include "src/codegen.h"
#include "src/counters.h"
#include "src/heap/heap-inl.h"
#include "src/ic/handler-compiler.h" #include "src/ic/handler-compiler.h"
#include "src/ic/ic.h" #include "src/ic/ic.h"
#include "src/ic/stub-cache.h" #include "src/ic/stub-cache.h"
#include "src/isolate.h" #include "src/isolate.h"
#include "src/objects/regexp-match-info.h"
#include "src/regexp/jsregexp.h" #include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h" #include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h" #include "src/runtime/runtime.h"
#include "src/arm64/code-stubs-arm64.h" #include "src/arm64/code-stubs-arm64.h" // Cannot be the first include.
#include "src/arm64/frames-arm64.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -1264,223 +1269,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
void RegExpExecStub::Generate(MacroAssembler* masm) { void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP #ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec); // This case is handled prior to the RegExpExecStub call.
__ Abort(kUnexpectedRegExpExecCall);
#else // V8_INTERPRETED_REGEXP #else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
// jssp[0]: last_match_info (expected JSArray)
// jssp[8]: previous index
// jssp[16]: subject string
// jssp[24]: JSRegExp object
Label runtime;
// Use of registers for this function.
// Variable registers:
// x10-x13 used as scratch registers
// w0 string_type type of subject string
// x2 jsstring_length subject string length
// x3 jsregexp_object JSRegExp object
// w4 string_encoding Latin1 or UC16
// w5 sliced_string_offset if the string is a SlicedString
// offset to the underlying string
// w6 string_representation groups attributes of the string:
// - is a string
// - type of the string
// - is a short external string
Register string_type = w0;
Register jsstring_length = x2;
Register jsregexp_object = x3;
Register string_encoding = w4;
Register sliced_string_offset = w5;
Register string_representation = w6;
// These are in callee save registers and will be preserved by the call
// to the native RegExp code, as this code is called using the normal
// C calling convention. When calling directly from generated code the
// native RegExp code will not do a GC and therefore the content of
// these registers are safe to use after the call.
// x19 subject subject string
// x20 regexp_data RegExp data (FixedArray)
// x21 last_match_info_elements info relative to the last match
// (FixedArray)
// x22 code_object generated regexp code
Register subject = x19;
Register regexp_data = x20;
Register last_match_info_elements = x21;
Register code_object = x22;
// Stack frame.
// jssp[00]: last_match_info (JSArray)
// jssp[08]: previous index
// jssp[16]: subject string
// jssp[24]: JSRegExp object
const int kLastMatchInfoOffset = 0 * kPointerSize;
const int kPreviousIndexOffset = 1 * kPointerSize;
const int kSubjectOffset = 2 * kPointerSize;
const int kJSRegExpOffset = 3 * kPointerSize;
// Ensure that a RegExp stack is allocated.
ExternalReference address_of_regexp_stack_memory_address =
ExternalReference::address_of_regexp_stack_memory_address(isolate());
ExternalReference address_of_regexp_stack_memory_size =
ExternalReference::address_of_regexp_stack_memory_size(isolate());
__ Mov(x10, address_of_regexp_stack_memory_size);
__ Ldr(x10, MemOperand(x10));
__ Cbz(x10, &runtime);
// Check that the first argument is a JSRegExp object.
DCHECK(jssp.Is(__ StackPointer()));
__ Peek(jsregexp_object, kJSRegExpOffset);
__ JumpIfSmi(jsregexp_object, &runtime);
__ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
// Check that the RegExp has been compiled (data contains a fixed array).
__ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
__ Tst(regexp_data, kSmiTagMask);
__ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
__ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE);
__ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
}
// Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
__ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
__ Cmp(x10, Smi::FromInt(JSRegExp::IRREGEXP));
__ B(ne, &runtime);
// Check that the number of captures fits in the static offsets vector buffer.
// We always have at least one capture for the whole match, plus additional
// ones due to capturing parentheses. A capture takes 2 registers.
// The number of capture registers then is (number_of_captures + 1) * 2.
__ Ldrsw(x10,
UntagSmiFieldMemOperand(regexp_data,
JSRegExp::kIrregexpCaptureCountOffset));
// Check (number_of_captures + 1) * 2 <= offsets vector size
// number_of_captures * 2 <= offsets vector size - 2
STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
__ Add(x10, x10, x10);
__ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
__ B(hi, &runtime);
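The two comparisons in the comment are the same inequality rearranged; a small sketch with an illustrative vector size:

// (number_of_captures + 1) * 2 <= vector_size  <=>
//  number_of_captures * 2      <= vector_size - 2
constexpr int kStaticOffsetsVectorSize = 32;  // illustrative, not V8's value
constexpr bool FitsInOffsetsVector(int captures) {
  return captures * 2 <= kStaticOffsetsVectorSize - 2;
}
static_assert(FitsInOffsetsVector(15) && !FitsInOffsetsVector(16), "boundary");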
// Initialize offset for possibly sliced string.
__ Mov(sliced_string_offset, 0);
DCHECK(jssp.Is(__ StackPointer()));
__ Peek(subject, kSubjectOffset);
__ JumpIfSmi(subject, &runtime);
__ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset));
// Handle subject string according to its encoding and representation:
// (1) Sequential string? If yes, go to (4).
// (2) Sequential or cons? If not, go to (5).
// (3) Cons string. If the string is flat, replace subject with first string
// and go to (1). Otherwise bail out to runtime.
// (4) Sequential string. Load regexp code according to encoding.
// (E) Carry on.
/// [...]
// Deferred code at the end of the stub:
// (5) Long external string? If not, go to (7).
// (6) External string. Make it, offset-wise, look like a sequential string.
// Go to (4).
// (7) Short external string or not a string? If yes, bail out to runtime.
// (8) Sliced or thin string. Replace subject with parent. Go to (1).
Label check_underlying; // (1)
Label seq_string; // (4)
Label not_seq_nor_cons; // (5)
Label external_string; // (6)
Label not_long_external; // (7)
__ Bind(&check_underlying);
__ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
__ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
// (1) Sequential string? If yes, go to (4).
__ And(string_representation,
string_type,
kIsNotStringMask |
kStringRepresentationMask |
kShortExternalStringMask);
// We depend on the fact that Strings of type
// SeqString and not ShortExternalString are defined
// by the following pattern:
// string_type: 0XX0 XX00
// ^ ^ ^^
// | | ||
// | | is a SeqString
// | is not a short external String
// is a String
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
STATIC_ASSERT(kShortExternalStringTag != 0);
__ Cbz(string_representation, &seq_string); // Go to (4).
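A sketch of the combined mask test that the bit pattern above justifies (the mask values are illustrative placeholders for the constants in src/objects.h):

#include <cstdint>

constexpr uint32_t kIsNotStringMask = 0x80;  // illustrative values
constexpr uint32_t kStringRepresentationMask = 0x07;
constexpr uint32_t kShortExternalStringMask = 0x10;

// A sequential, non-short-external string leaves every tested bit clear, so a
// single AND plus a compare-against-zero (the Cbz above) classifies it.
bool IsSeqString(uint32_t instance_type) {
  return (instance_type & (kIsNotStringMask | kStringRepresentationMask |
                           kShortExternalStringMask)) == 0;
}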
// (2) Sequential or cons? If not, go to (5).
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kThinStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ Cmp(string_representation, kExternalStringTag);
__ B(ge, &not_seq_nor_cons); // Go to (5).
// (3) Cons string. Check that it's flat.
__ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset));
__ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime);
// Replace subject with first string.
__ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
__ B(&check_underlying);
// (4) Sequential string. Load regexp code according to encoding.
__ Bind(&seq_string);
// Check that the third argument is a positive smi less than the subject
// string length. A negative value will be greater (unsigned comparison).
DCHECK(jssp.Is(__ StackPointer()));
__ Peek(x10, kPreviousIndexOffset);
__ JumpIfNotSmi(x10, &runtime);
__ Cmp(jsstring_length, x10);
__ B(ls, &runtime);
// Argument 2 (x1): We need to load argument 2 (the previous index) into x1
// before entering the exit frame.
__ SmiUntag(x1, x10);
// The fourth bit determines the string encoding in string_type.
STATIC_ASSERT(kOneByteStringTag == 0x08);
STATIC_ASSERT(kTwoByteStringTag == 0x00);
STATIC_ASSERT(kStringEncodingMask == 0x08);
// Find the code object based on the assumptions above.
// kDataOneByteCodeOffset and kDataUC16CodeOffset are adjacent; add an offset
// of kPointerSize to reach the latter.
STATIC_ASSERT(JSRegExp::kDataOneByteCodeOffset + kPointerSize ==
JSRegExp::kDataUC16CodeOffset);
__ Mov(x10, kPointerSize);
// We will need the encoding later: Latin1 = 0x08
// UC16 = 0x00
__ Ands(string_encoding, string_type, kStringEncodingMask);
__ CzeroX(x10, ne);
__ Add(x10, regexp_data, x10);
__ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataOneByteCodeOffset));
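In C, the branchless slot selection above reads roughly as follows (offsets and mask are illustrative):

#include <cstdint>

constexpr int kPointerSize = 8;
constexpr uint32_t kStringEncodingMask = 0x08;  // set => one-byte (Latin1)

// The one-byte and UC16 code slots are adjacent fields, so the encoding bit
// picks between +0 and +kPointerSize without a branch (the CzeroX above).
int CodeSlotOffset(uint32_t instance_type, int one_byte_slot) {
  int extra = (instance_type & kStringEncodingMask) ? 0 : kPointerSize;
  return one_byte_slot + extra;  // +kPointerSize reaches the UC16 slot
}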
// (E) Carry on. String handling is done.
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object otherwise it contains
// a smi (code flushing support).
__ JumpIfSmi(code_object, &runtime);
// All checks done. Now push arguments for native regexp code.
__ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1,
x10,
x11);
// Isolates: note we add an additional parameter here (isolate pointer). // Isolates: note we add an additional parameter here (isolate pointer).
__ EnterExitFrame(false, x10, 1); __ EnterExitFrame(false, x10, 1);
DCHECK(csp.Is(__ StackPointer())); DCHECK(csp.Is(__ StackPointer()));
@ -1496,50 +1287,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Mov(x10, ExternalReference::isolate_address(isolate())); __ Mov(x10, ExternalReference::isolate_address(isolate()));
__ Poke(x10, kPointerSize); __ Poke(x10, kPointerSize);
Register length = w11;
Register previous_index_in_bytes = w12;
Register start = x13;
// Load start of the subject string.
__ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag);
// Load the length from the original subject string from the previous stack
// frame. Therefore we have to use fp, which points exactly to two pointer
// sizes below the previous sp. (Because creating a new stack frame pushes
// the previous fp onto the stack and decrements sp by 2 * kPointerSize.)
__ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
__ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
// Handle UC16 encoding, two bytes make one character.
// string_encoding: if Latin1: 0x08
// if UC16: 0x00
STATIC_ASSERT(kStringEncodingMask == 0x08);
__ Ubfx(string_encoding, string_encoding, 3, 1);
__ Eor(string_encoding, string_encoding, 1);
// string_encoding: if Latin1: 0
// if UC16: 1
// Convert string positions from characters to bytes.
// Previous index is in x1.
__ Lsl(previous_index_in_bytes, w1, string_encoding);
__ Lsl(length, length, string_encoding);
__ Lsl(sliced_string_offset, sliced_string_offset, string_encoding);
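The shift-based conversion above, sketched in C (a sketch of the Ubfx/Eor/Lsl sequence, not the V8 code itself):

#include <cstdint>

// After Ubfx/Eor, the shift amount is 0 for Latin1 and 1 for UC16, so the
// left shift doubles positions exactly when characters are two bytes wide.
uint32_t CharsToBytes(uint32_t chars, uint32_t instance_type) {
  uint32_t one_byte = (instance_type >> 3) & 1;  // Ubfx of the encoding bit
  return chars << (one_byte ^ 1);                // Eor with 1, then Lsl
}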
// Argument 1 (x0): Subject string. // Argument 1 (x0): Subject string.
__ Mov(x0, subject); CHECK(x0.is(RegExpExecDescriptor::StringRegister()));
// Argument 2 (x1): Previous index, already there. // Argument 2 (x1): Previous index, already there.
CHECK(x1.is(RegExpExecDescriptor::LastIndexRegister()));
// Argument 3 (x2): Get the start of input.
// Start of input = start of string + previous index + substring offset
// (0 if the string is not sliced).
__ Add(w10, previous_index_in_bytes, sliced_string_offset);
__ Add(x2, start, Operand(w10, UXTW));
// Argument 4 (x3):
// End of input = start of input + (length of input - previous index)
__ Sub(w10, length, previous_index_in_bytes);
__ Add(x3, x2, Operand(w10, UXTW));
// Argument 3 (x2): Input start.
// Argument 4 (x3): Input end.
CHECK(x2.is(RegExpExecDescriptor::StringStartRegister()));
CHECK(x3.is(RegExpExecDescriptor::StringEndRegister()));
// Argument 5 (x4): static offsets vector buffer. // Argument 5 (x4): static offsets vector buffer.
__ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate())); __ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate()));
@ -1550,6 +1307,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Mov(x5, 0); __ Mov(x5, 0);
// Argument 7 (x6): Start (high end) of backtracking stack memory area. // Argument 7 (x6): Start (high end) of backtracking stack memory area.
ExternalReference address_of_regexp_stack_memory_address =
ExternalReference::address_of_regexp_stack_memory_address(isolate());
ExternalReference address_of_regexp_stack_memory_size =
ExternalReference::address_of_regexp_stack_memory_size(isolate());
__ Mov(x10, address_of_regexp_stack_memory_address); __ Mov(x10, address_of_regexp_stack_memory_address);
__ Ldr(x10, MemOperand(x10)); __ Ldr(x10, MemOperand(x10));
__ Mov(x11, address_of_regexp_stack_memory_size); __ Mov(x11, address_of_regexp_stack_memory_size);
@ -1560,184 +1321,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Mov(x7, 1); __ Mov(x7, 1);
// Locate the code entry and call it. // Locate the code entry and call it.
Register code_object = RegExpExecDescriptor::CodeRegister();
__ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag); __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
DirectCEntryStub stub(isolate()); DirectCEntryStub stub(isolate());
stub.GenerateCall(masm, code_object); stub.GenerateCall(masm, code_object);
__ LeaveExitFrame(false, x10, true); __ LeaveExitFrame(false, x10, true);
// The generated regexp code returns an int32 in w0.
Label failure, exception;
__ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure);
__ CompareAndBranch(w0, NativeRegExpMacroAssembler::EXCEPTION, eq, &exception);
__ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime);
// Return the smi-tagged result.
__ SmiTag(x0);
// Success: process the result from the native regexp code.
Register number_of_capture_registers = x12;
// Calculate number of capture registers (number_of_captures + 1) * 2
// and store it in the last match info.
__ Ldrsw(x10,
UntagSmiFieldMemOperand(regexp_data,
JSRegExp::kIrregexpCaptureCountOffset));
__ Add(x10, x10, x10);
__ Add(number_of_capture_registers, x10, 2);
// Check that the last match info is a FixedArray.
DCHECK(jssp.Is(__ StackPointer()));
__ Peek(last_match_info_elements, kLastMatchInfoOffset);
__ JumpIfSmi(last_match_info_elements, &runtime);
// Check that the object has fast elements.
__ Ldr(x10,
FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
__ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
// Check that the last match info has space for the capture registers and the
// additional information (overhead).
// (number_of_captures + 1) * 2 + overhead <= last match info size
// (number_of_captures * 2) + 2 + overhead <= last match info size
// number_of_capture_registers + overhead <= last match info size
__ Ldrsw(x10,
UntagSmiFieldMemOperand(last_match_info_elements,
FixedArray::kLengthOffset));
__ Add(x11, number_of_capture_registers, RegExpMatchInfo::kLastMatchOverhead);
__ Cmp(x11, x10);
__ B(gt, &runtime);
// Store the capture count.
__ SmiTag(x10, number_of_capture_registers);
__ Str(x10, FieldMemOperand(last_match_info_elements,
RegExpMatchInfo::kNumberOfCapturesOffset));
// Store last subject and last input.
__ Str(subject, FieldMemOperand(last_match_info_elements,
RegExpMatchInfo::kLastSubjectOffset));
// Use x10 as the subject string in order to only need
// one RecordWriteStub.
__ Mov(x10, subject);
__ RecordWriteField(last_match_info_elements,
RegExpMatchInfo::kLastSubjectOffset, x10, x11,
kLRHasNotBeenSaved, kDontSaveFPRegs);
__ Str(subject, FieldMemOperand(last_match_info_elements,
RegExpMatchInfo::kLastInputOffset));
__ Mov(x10, subject);
__ RecordWriteField(last_match_info_elements,
RegExpMatchInfo::kLastInputOffset, x10, x11,
kLRHasNotBeenSaved, kDontSaveFPRegs);
Register last_match_offsets = x13;
Register offsets_vector_index = x14;
Register current_offset = x15;
// Get the static offsets vector filled by the native regexp code
// and fill the last match info.
ExternalReference address_of_static_offsets_vector =
ExternalReference::address_of_static_offsets_vector(isolate());
__ Mov(offsets_vector_index, address_of_static_offsets_vector);
Label next_capture, done;
// Capture register counter starts from number of capture registers and
// iterates down to zero (inclusive).
__ Add(last_match_offsets, last_match_info_elements,
RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag);
__ Bind(&next_capture);
__ Subs(number_of_capture_registers, number_of_capture_registers, 2);
__ B(mi, &done);
// Read two 32 bit values from the static offsets vector buffer into
// an X register
__ Ldr(current_offset,
MemOperand(offsets_vector_index, kWRegSize * 2, PostIndex));
// Store the smi values in the last match info.
__ SmiTag(x10, current_offset);
// Clearing the 32 bottom bits gives us a Smi.
STATIC_ASSERT(kSmiTag == 0);
__ Bic(x11, current_offset, kSmiShiftMask);
__ Stp(x10,
x11,
MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
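The Stp/Bic trick above leans on V8's 64-bit Smi layout; a minimal sketch, assuming the 32-bit Smi shift used on 64-bit targets:

#include <cstdint>

// A 64-bit Smi keeps its payload in the upper 32 bits with zero tag bits
// below, so shifting left by 32 tags a value, and clearing the low 32 bits of
// a register whose upper half already holds an int32 (the Bic above) yields a
// tagged Smi for free.
constexpr int64_t SmiTag(int32_t value) {
  return static_cast<int64_t>(value) << 32;
}
static_assert(SmiTag(1) == (int64_t{1} << 32), "payload lives in the top half");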
__ B(&next_capture);
__ Bind(&done);
// Return last match info.
__ Mov(x0, last_match_info_elements);
// Drop the 4 arguments of the stub from the stack.
__ Drop(4);
__ Ret();
__ Bind(&exception);
Register exception_value = x0;
// A stack overflow (on the backtrack stack) may have occurred
// in the RegExp code but no exception has been created yet.
// If there is no pending exception, handle that in the runtime system.
__ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
__ Mov(x11,
Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate())));
__ Ldr(exception_value, MemOperand(x11));
__ Cmp(x10, exception_value);
__ B(eq, &runtime);
// For exception, throw the exception again.
__ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ Bind(&failure);
__ Mov(x0, Operand(isolate()->factory()->null_value()));
// Drop the 4 arguments of the stub from the stack.
__ Drop(4);
__ Ret(); __ Ret();
__ Bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (5) Long external string? If not, go to (7).
__ Bind(&not_seq_nor_cons);
// Compare flags are still set.
__ B(ne, &not_long_external); // Go to (7).
// (6) External string. Make it, offset-wise, look like a sequential string.
__ Bind(&external_string);
if (masm->emit_debug_code()) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
__ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
__ Tst(x10, kIsIndirectStringMask);
__ Check(eq, kExternalStringExpectedButNotFound);
__ And(x10, x10, kStringRepresentationMask);
__ Cmp(x10, 0);
__ Check(ne, kExternalStringExpectedButNotFound);
}
__ Ldr(subject,
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
__ B(&seq_string); // Go to (4).
// (7) If this is a short external string or not a string, bail out to
// runtime.
__ Bind(&not_long_external);
STATIC_ASSERT(kShortExternalStringTag != 0);
__ TestAndBranchIfAnySet(string_representation,
kShortExternalStringMask | kIsNotStringMask,
&runtime);
// (8) Sliced or thin string. Replace subject with parent.
Label thin_string;
__ Cmp(string_representation, kThinStringTag);
__ B(eq, &thin_string);
__ Ldr(sliced_string_offset,
UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
__ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
__ B(&check_underlying); // Go to (1).
__ bind(&thin_string);
__ Ldr(subject, FieldMemOperand(subject, ThinString::kActualOffset));
__ B(&check_underlying); // Go to (1).
#endif #endif
} }
@ -2509,6 +2102,37 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
__ TailCallStub(&stub); __ TailCallStub(&stub);
} }
RecordWriteStub::RegisterAllocation::RegisterAllocation(Register object,
Register address,
Register scratch)
: object_(object),
address_(address),
scratch0_(scratch),
saved_regs_(kCallerSaved),
saved_fp_regs_(kCallerSavedFP) {
DCHECK(!AreAliased(scratch, object, address));
// The SaveCallerSaveRegisters method needs to save caller-saved
// registers, but we don't bother saving MacroAssembler scratch registers.
saved_regs_.Remove(MacroAssembler::DefaultTmpList());
saved_fp_regs_.Remove(MacroAssembler::DefaultFPTmpList());
// We would like to require more scratch registers for this stub,
// but the number of registers comes down to the ones used in
// FullCodeGen::SetVar(), which is architecture independent.
// We allocate 2 extra scratch registers that we'll save on the stack.
CPURegList pool_available = GetValidRegistersForAllocation();
CPURegList used_regs(object, address, scratch);
pool_available.Remove(used_regs);
scratch1_ = Register(pool_available.PopLowestIndex());
scratch2_ = Register(pool_available.PopLowestIndex());
// The scratch registers will be restored by other means so we don't need
// to save them with the other caller saved registers.
saved_regs_.Remove(scratch0_);
saved_regs_.Remove(scratch1_);
saved_regs_.Remove(scratch2_);
}
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// We need some extra registers for this stub, they have been allocated // We need some extra registers for this stub, they have been allocated
@ -2566,6 +2190,9 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode()); regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
} }
void RecordWriteStub::Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
void RecordWriteStub::CheckNeedsToInformIncrementalMarker( void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm, MacroAssembler* masm,

40
deps/v8/src/arm64/code-stubs-arm64.h

@ -130,9 +130,7 @@ class RecordWriteStub: public PlatformCodeStub {
// so effectively a nop. // so effectively a nop.
static void Patch(Code* stub, Mode mode) { static void Patch(Code* stub, Mode mode) {
// We are going to patch the two first instructions of the stub. // We are going to patch the two first instructions of the stub.
PatchingAssembler patcher( PatchingAssembler patcher(stub->GetIsolate(), stub->instruction_start(), 2);
stub->GetIsolate(),
reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
Instruction* instr1 = patcher.InstructionAt(0); Instruction* instr1 = patcher.InstructionAt(0);
Instruction* instr2 = patcher.InstructionAt(kInstructionSize); Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
// Instructions must be either 'adr' or 'b'. // Instructions must be either 'adr' or 'b'.
@ -172,37 +170,7 @@ class RecordWriteStub: public PlatformCodeStub {
// The 'object' and 'address' registers must be preserved. // The 'object' and 'address' registers must be preserved.
class RegisterAllocation { class RegisterAllocation {
public: public:
RegisterAllocation(Register object, RegisterAllocation(Register object, Register address, Register scratch);
Register address,
Register scratch)
: object_(object),
address_(address),
scratch0_(scratch),
saved_regs_(kCallerSaved),
saved_fp_regs_(kCallerSavedFP) {
DCHECK(!AreAliased(scratch, object, address));
// The SaveCallerSaveRegisters method needs to save caller-saved
// registers, but we don't bother saving MacroAssembler scratch registers.
saved_regs_.Remove(MacroAssembler::DefaultTmpList());
saved_fp_regs_.Remove(MacroAssembler::DefaultFPTmpList());
// We would like to require more scratch registers for this stub,
// but the number of registers comes down to the ones used in
// FullCodeGen::SetVar(), which is architecture independent.
// We allocate 2 extra scratch registers that we'll save on the stack.
CPURegList pool_available = GetValidRegistersForAllocation();
CPURegList used_regs(object, address, scratch);
pool_available.Remove(used_regs);
scratch1_ = Register(pool_available.PopLowestIndex());
scratch2_ = Register(pool_available.PopLowestIndex());
// The scratch registers will be restored by other means so we don't need
// to save them with the other caller saved registers.
saved_regs_.Remove(scratch0_);
saved_regs_.Remove(scratch1_);
saved_regs_.Remove(scratch2_);
}
void Save(MacroAssembler* masm) { void Save(MacroAssembler* masm) {
// We don't have to save scratch0_ because it was given to us as // We don't have to save scratch0_ because it was given to us as
@ -288,9 +256,7 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode); Mode mode);
void InformIncrementalMarker(MacroAssembler* masm); void InformIncrementalMarker(MacroAssembler* masm);
void Activate(Code* code) override { void Activate(Code* code) override;
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
Register object() const { Register object() const {
return Register::from_code(ObjectBits::decode(minor_key_)); return Register::from_code(ObjectBits::decode(minor_key_));

2
deps/v8/src/arm64/codegen-arm64.cc

@ -6,6 +6,8 @@
#if V8_TARGET_ARCH_ARM64 #if V8_TARGET_ARCH_ARM64
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/arm64/simulator-arm64.h" #include "src/arm64/simulator-arm64.h"
#include "src/codegen.h" #include "src/codegen.h"
#include "src/macro-assembler.h" #include "src/macro-assembler.h"

11
deps/v8/src/arm64/constants-arm64.h

@ -199,7 +199,14 @@ const unsigned kFloatExponentBits = 8;
V_(SysOp1, 18, 16, Bits) \ V_(SysOp1, 18, 16, Bits) \
V_(SysOp2, 7, 5, Bits) \ V_(SysOp2, 7, 5, Bits) \
V_(CRn, 15, 12, Bits) \ V_(CRn, 15, 12, Bits) \
V_(CRm, 11, 8, Bits) V_(CRm, 11, 8, Bits) \
\
/* Load-/store-exclusive */ \
V_(LoadStoreXLoad, 22, 22, Bits) \
V_(LoadStoreXNotExclusive, 23, 23, Bits) \
V_(LoadStoreXAcquireRelease, 15, 15, Bits) \
V_(LoadStoreXSizeLog2, 31, 30, Bits) \
V_(LoadStoreXPair, 21, 21, Bits)
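For illustration, a hand-written version of the accessors these V_ entries generate (a sketch; the real accessors come out of V8's macro machinery):

#include <cstdint>

constexpr uint32_t Bits(uint32_t instr, int hi, int lo) {
  return (instr >> lo) & ((1u << (hi - lo + 1)) - 1);
}
// Mirrors V_(LoadStoreXLoad, 22, 22, Bits) and V_(LoadStoreXSizeLog2, 31, 30, Bits).
constexpr uint32_t LoadStoreXLoad(uint32_t instr) { return Bits(instr, 22, 22); }
constexpr uint32_t LoadStoreXSizeLog2(uint32_t instr) { return Bits(instr, 31, 30); }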
#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \ #define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
/* NZCV */ \ /* NZCV */ \
@ -857,7 +864,7 @@ enum LoadStoreRegisterOffset {
#undef LOAD_STORE_REGISTER_OFFSET #undef LOAD_STORE_REGISTER_OFFSET
}; };
// Load/store acquire/release // Load/store acquire/release.
enum LoadStoreAcquireReleaseOp { enum LoadStoreAcquireReleaseOp {
LoadStoreAcquireReleaseFixed = 0x08000000, LoadStoreAcquireReleaseFixed = 0x08000000,
LoadStoreAcquireReleaseFMask = 0x3F000000, LoadStoreAcquireReleaseFMask = 0x3F000000,

53
deps/v8/src/arm64/deoptimizer-arm64.cc

@ -2,7 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
#include "src/api.h"
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/frames-arm64.h" #include "src/arm64/frames-arm64.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/codegen.h" #include "src/codegen.h"
#include "src/deoptimizer.h" #include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h" #include "src/full-codegen/full-codegen.h"
@ -94,11 +97,17 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// caller-saved registers here. Callee-saved registers can be stored directly // caller-saved registers here. Callee-saved registers can be stored directly
// in the input frame. // in the input frame.
// Save all allocatable floating point registers. // Save all allocatable double registers.
CPURegList saved_fp_registers( CPURegList saved_double_registers(
CPURegister::kFPRegister, kDRegSizeInBits, CPURegister::kFPRegister, kDRegSizeInBits,
RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask()); RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask());
__ PushCPURegList(saved_fp_registers); __ PushCPURegList(saved_double_registers);
// Save all allocatable float registers.
CPURegList saved_float_registers(
CPURegister::kFPRegister, kSRegSizeInBits,
RegisterConfiguration::Crankshaft()->allocatable_float_codes_mask());
__ PushCPURegList(saved_float_registers);
// We save all the registers except jssp, sp and lr.
CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 27); CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 27);
@ -110,10 +119,13 @@ void Deoptimizer::TableEntryGenerator::Generate() {
const int kSavedRegistersAreaSize = const int kSavedRegistersAreaSize =
(saved_registers.Count() * kXRegSize) + (saved_registers.Count() * kXRegSize) +
(saved_fp_registers.Count() * kDRegSize); (saved_double_registers.Count() * kDRegSize) +
(saved_float_registers.Count() * kSRegSize);
// Floating point registers are saved on the stack above core registers. // Floating point registers are saved on the stack above core registers.
const int kFPRegistersOffset = saved_registers.Count() * kXRegSize; const int kFloatRegistersOffset = saved_registers.Count() * kXRegSize;
const int kDoubleRegistersOffset =
kFloatRegistersOffset + saved_float_registers.Count() * kSRegSize;
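A layout sketch of the offsets just computed (the x-register count follows from the 0..27 list above; the float and double counts are assumptions for illustration):

// Stack grows down; x-registers are pushed last, so they sit closest to sp,
// with the float block above them and the double block on top.
constexpr int kXRegSize = 8, kSRegSize = 4, kDRegSize = 8;
constexpr int kNumX = 28, kNumD = 25, kNumS = 25;  // kNumD/kNumS illustrative
constexpr int kFloatRegistersOffset = kNumX * kXRegSize;
constexpr int kDoubleRegistersOffset =
    kFloatRegistersOffset + kNumS * kSRegSize;
constexpr int kSavedRegistersAreaSize =
    kNumX * kXRegSize + kNumD * kDRegSize + kNumS * kSRegSize;
static_assert(kDoubleRegistersOffset == 324, "28 * 8 + 25 * 4");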
// Get the bailout id from the stack. // Get the bailout id from the stack.
Register bailout_id = x2; Register bailout_id = x2;
@ -165,17 +177,28 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Str(x2, MemOperand(x1, offset)); __ Str(x2, MemOperand(x1, offset));
} }
// Copy FP registers to the input frame. // Copy double registers to the input frame.
CPURegList copy_fp_to_input = saved_fp_registers; CPURegList copy_double_to_input = saved_double_registers;
for (int i = 0; i < saved_fp_registers.Count(); i++) { for (int i = 0; i < saved_double_registers.Count(); i++) {
int src_offset = kFPRegistersOffset + (i * kDoubleSize); int src_offset = kDoubleRegistersOffset + (i * kDoubleSize);
__ Peek(x2, src_offset); __ Peek(x2, src_offset);
CPURegister reg = copy_fp_to_input.PopLowestIndex(); CPURegister reg = copy_double_to_input.PopLowestIndex();
int dst_offset = FrameDescription::double_registers_offset() + int dst_offset = FrameDescription::double_registers_offset() +
(reg.code() * kDoubleSize); (reg.code() * kDoubleSize);
__ Str(x2, MemOperand(x1, dst_offset)); __ Str(x2, MemOperand(x1, dst_offset));
} }
// Copy float registers to the input frame.
CPURegList copy_float_to_input = saved_float_registers;
for (int i = 0; i < saved_float_registers.Count(); i++) {
int src_offset = kFloatRegistersOffset + (i * kFloatSize);
__ Peek(w2, src_offset);
CPURegister reg = copy_float_to_input.PopLowestIndex();
int dst_offset =
FrameDescription::float_registers_offset() + (reg.code() * kFloatSize);
__ Str(w2, MemOperand(x1, dst_offset));
}
// Remove the bailout id and the saved registers from the stack. // Remove the bailout id and the saved registers from the stack.
__ Drop(1 + (kSavedRegistersAreaSize / kXRegSize)); __ Drop(1 + (kSavedRegistersAreaSize / kXRegSize));
@ -241,11 +264,11 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ B(lt, &outer_push_loop); __ B(lt, &outer_push_loop);
__ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset())); __ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
DCHECK(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) && DCHECK(!saved_double_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
!saved_fp_registers.IncludesAliasOf(fp_zero) && !saved_double_registers.IncludesAliasOf(fp_zero) &&
!saved_fp_registers.IncludesAliasOf(fp_scratch)); !saved_double_registers.IncludesAliasOf(fp_scratch));
while (!saved_fp_registers.IsEmpty()) { while (!saved_double_registers.IsEmpty()) {
const CPURegister reg = saved_fp_registers.PopLowestIndex(); const CPURegister reg = saved_double_registers.PopLowestIndex();
int src_offset = FrameDescription::double_registers_offset() + int src_offset = FrameDescription::double_registers_offset() +
(reg.code() * kDoubleSize); (reg.code() * kDoubleSize);
__ Ldr(reg, MemOperand(x1, src_offset)); __ Ldr(reg, MemOperand(x1, src_offset));

11
deps/v8/src/arm64/disasm-arm64.cc

@ -916,10 +916,10 @@ void DisassemblingDecoder::VisitLoadStorePairOffset(Instruction* instr) {
void DisassemblingDecoder::VisitLoadStoreAcquireRelease(Instruction *instr) { void DisassemblingDecoder::VisitLoadStoreAcquireRelease(Instruction *instr) {
const char *mnemonic = "unimplemented"; const char *mnemonic = "unimplemented";
const char *form = "'Wt, ['Xn]"; const char* form = "'Wt, ['Xns]";
const char *form_x = "'Xt, ['Xn]"; const char* form_x = "'Xt, ['Xns]";
const char *form_stlx = "'Ws, 'Wt, ['Xn]"; const char* form_stlx = "'Ws, 'Wt, ['Xns]";
const char *form_stlx_x = "'Ws, 'Xt, ['Xn]"; const char* form_stlx_x = "'Ws, 'Xt, ['Xns]";
switch (instr->Mask(LoadStoreAcquireReleaseMask)) { switch (instr->Mask(LoadStoreAcquireReleaseMask)) {
case LDAXR_b: mnemonic = "ldaxrb"; break; case LDAXR_b: mnemonic = "ldaxrb"; break;
@ -938,7 +938,8 @@ void DisassemblingDecoder::VisitLoadStoreAcquireRelease(Instruction *instr) {
case STLXR_b: mnemonic = "stlxrb"; form = form_stlx; break; case STLXR_b: mnemonic = "stlxrb"; form = form_stlx; break;
case STLXR_w: mnemonic = "stlxr"; form = form_stlx; break; case STLXR_w: mnemonic = "stlxr"; form = form_stlx; break;
case STLXR_x: mnemonic = "stlxr"; form = form_stlx_x; break; case STLXR_x: mnemonic = "stlxr"; form = form_stlx_x; break;
default: form = "(LoadStoreAcquireReleaseMask)"; default:
form = "(LoadStoreAcquireRelease)";
} }
Format(instr, mnemonic, form); Format(instr, mnemonic, form);
} }

1
deps/v8/src/arm64/eh-frame-arm64.cc

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
#include "src/arm64/assembler-arm64-inl.h"
#include "src/eh-frame.h" #include "src/eh-frame.h"
namespace v8 { namespace v8 {

21
deps/v8/src/arm64/instructions-arm64.cc

@ -218,22 +218,22 @@ bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
return IsValidImmPCOffset(BranchType(), DistanceTo(target)); return IsValidImmPCOffset(BranchType(), DistanceTo(target));
} }
void Instruction::SetImmPCOffsetTarget(Assembler::IsolateData isolate_data,
void Instruction::SetImmPCOffsetTarget(Isolate* isolate, Instruction* target) { Instruction* target) {
if (IsPCRelAddressing()) { if (IsPCRelAddressing()) {
SetPCRelImmTarget(isolate, target); SetPCRelImmTarget(isolate_data, target);
} else if (BranchType() != UnknownBranchType) { } else if (BranchType() != UnknownBranchType) {
SetBranchImmTarget(target); SetBranchImmTarget(target);
} else if (IsUnresolvedInternalReference()) { } else if (IsUnresolvedInternalReference()) {
SetUnresolvedInternalReferenceImmTarget(isolate, target); SetUnresolvedInternalReferenceImmTarget(isolate_data, target);
} else { } else {
// Load literal (offset from PC). // Load literal (offset from PC).
SetImmLLiteral(target); SetImmLLiteral(target);
} }
} }
void Instruction::SetPCRelImmTarget(Assembler::IsolateData isolate_data,
void Instruction::SetPCRelImmTarget(Isolate* isolate, Instruction* target) { Instruction* target) {
// ADRP is not supported, so 'this' must point to an ADR instruction. // ADRP is not supported, so 'this' must point to an ADR instruction.
DCHECK(IsAdr()); DCHECK(IsAdr());
@ -243,7 +243,7 @@ void Instruction::SetPCRelImmTarget(Isolate* isolate, Instruction* target) {
imm = Assembler::ImmPCRelAddress(static_cast<int>(target_offset)); imm = Assembler::ImmPCRelAddress(static_cast<int>(target_offset));
SetInstructionBits(Mask(~ImmPCRel_mask) | imm); SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
} else { } else {
PatchingAssembler patcher(isolate, this, PatchingAssembler patcher(isolate_data, reinterpret_cast<byte*>(this),
PatchingAssembler::kAdrFarPatchableNInstrs); PatchingAssembler::kAdrFarPatchableNInstrs);
patcher.PatchAdrFar(target_offset); patcher.PatchAdrFar(target_offset);
} }
@ -283,9 +283,8 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
SetInstructionBits(Mask(~imm_mask) | branch_imm); SetInstructionBits(Mask(~imm_mask) | branch_imm);
} }
void Instruction::SetUnresolvedInternalReferenceImmTarget(
void Instruction::SetUnresolvedInternalReferenceImmTarget(Isolate* isolate, Assembler::IsolateData isolate_data, Instruction* target) {
Instruction* target) {
DCHECK(IsUnresolvedInternalReference()); DCHECK(IsUnresolvedInternalReference());
DCHECK(IsAligned(DistanceTo(target), kInstructionSize)); DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
DCHECK(is_int32(DistanceTo(target) >> kInstructionSizeLog2)); DCHECK(is_int32(DistanceTo(target) >> kInstructionSizeLog2));
@ -294,7 +293,7 @@ void Instruction::SetUnresolvedInternalReferenceImmTarget(Isolate* isolate,
uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset); uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset); uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);
PatchingAssembler patcher(isolate, this, 2); PatchingAssembler patcher(isolate_data, reinterpret_cast<byte*>(this), 2);
patcher.brk(high16); patcher.brk(high16);
patcher.brk(low16); patcher.brk(low16);
} }

10
deps/v8/src/arm64/instructions-arm64.h

@ -7,13 +7,13 @@
#include "src/arm64/constants-arm64.h" #include "src/arm64/constants-arm64.h"
#include "src/arm64/utils-arm64.h" #include "src/arm64/utils-arm64.h"
#include "src/assembler.h"
#include "src/globals.h" #include "src/globals.h"
#include "src/utils.h" #include "src/utils.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
// ISA constants. -------------------------------------------------------------- // ISA constants. --------------------------------------------------------------
typedef uint32_t Instr; typedef uint32_t Instr;
@ -373,8 +373,9 @@ class Instruction {
bool IsTargetInImmPCOffsetRange(Instruction* target); bool IsTargetInImmPCOffsetRange(Instruction* target);
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction. // a PC-relative addressing instruction.
void SetImmPCOffsetTarget(Isolate* isolate, Instruction* target); void SetImmPCOffsetTarget(AssemblerBase::IsolateData isolate_data,
void SetUnresolvedInternalReferenceImmTarget(Isolate* isolate, Instruction* target);
void SetUnresolvedInternalReferenceImmTarget(AssemblerBase::IsolateData,
Instruction* target); Instruction* target);
// Patch a literal load instruction to load from 'source'. // Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(Instruction* source); void SetImmLLiteral(Instruction* source);
@ -411,7 +412,8 @@ class Instruction {
static const int ImmPCRelRangeBitwidth = 21; static const int ImmPCRelRangeBitwidth = 21;
static bool IsValidPCRelOffset(ptrdiff_t offset) { return is_int21(offset); } static bool IsValidPCRelOffset(ptrdiff_t offset) { return is_int21(offset); }
void SetPCRelImmTarget(Isolate* isolate, Instruction* target); void SetPCRelImmTarget(AssemblerBase::IsolateData isolate_data,
Instruction* target);
void SetBranchImmTarget(Instruction* target); void SetBranchImmTarget(Instruction* target);
}; };

60
deps/v8/src/arm64/instrument-arm64.cc

@ -61,39 +61,39 @@ typedef struct {
CounterType type; CounterType type;
} CounterDescriptor; } CounterDescriptor;
static const CounterDescriptor kCounterList[] = { static const CounterDescriptor kCounterList[] = {
{"Instruction", Cumulative}, {"Instruction", Cumulative},
{"Move Immediate", Gauge}, {"Move Immediate", Gauge},
{"Add/Sub DP", Gauge}, {"Add/Sub DP", Gauge},
{"Logical DP", Gauge}, {"Logical DP", Gauge},
{"Other Int DP", Gauge}, {"Other Int DP", Gauge},
{"FP DP", Gauge}, {"FP DP", Gauge},
{"Conditional Select", Gauge}, {"Conditional Select", Gauge},
{"Conditional Compare", Gauge}, {"Conditional Compare", Gauge},
{"Unconditional Branch", Gauge}, {"Unconditional Branch", Gauge},
{"Compare and Branch", Gauge}, {"Compare and Branch", Gauge},
{"Test and Branch", Gauge}, {"Test and Branch", Gauge},
{"Conditional Branch", Gauge}, {"Conditional Branch", Gauge},
{"Load Integer", Gauge}, {"Load Integer", Gauge},
{"Load FP", Gauge}, {"Load FP", Gauge},
{"Load Pair", Gauge}, {"Load Pair", Gauge},
{"Load Literal", Gauge}, {"Load Literal", Gauge},
{"Load Acquire", Gauge},
{"Store Integer", Gauge},
{"Store FP", Gauge}, {"Store Integer", Gauge},
{"Store Pair", Gauge}, {"Store FP", Gauge},
{"Store Pair", Gauge},
{"PC Addressing", Gauge}, {"Store Release", Gauge},
{"Other", Gauge},
{"SP Adjust", Gauge}, {"PC Addressing", Gauge},
{"Other", Gauge},
{"SP Adjust", Gauge},
}; };
Instrument::Instrument(const char* datafile, uint64_t sample_period) Instrument::Instrument(const char* datafile, uint64_t sample_period)
: output_stream_(stderr), sample_period_(sample_period) { : output_stream_(stderr), sample_period_(sample_period) {

56
deps/v8/src/arm64/interface-descriptors-arm64.cc

@ -57,6 +57,11 @@ const Register MathPowTaggedDescriptor::exponent() { return x11; }
const Register MathPowIntegerDescriptor::exponent() { return x12; } const Register MathPowIntegerDescriptor::exponent() { return x12; }
const Register RegExpExecDescriptor::StringRegister() { return x0; }
const Register RegExpExecDescriptor::LastIndexRegister() { return x1; }
const Register RegExpExecDescriptor::StringStartRegister() { return x2; }
const Register RegExpExecDescriptor::StringEndRegister() { return x3; }
const Register RegExpExecDescriptor::CodeRegister() { return x8; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; } const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; } const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
@ -310,46 +315,6 @@ void StringAddDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers); data->InitializePlatformSpecific(arraysize(registers), registers);
} }
void KeyedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
x2, // key
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&noInlineDescriptor);
}
void NamedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
x2, // name
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&noInlineDescriptor);
}
void CallHandlerDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
x0, // receiver
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void ArgumentAdaptorDescriptor::InitializePlatformSpecific( void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) { CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor = static PlatformInterfaceDescriptor default_descriptor =
@ -388,7 +353,7 @@ void InterpreterDispatchDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers); data->InitializePlatformSpecific(arraysize(registers), registers);
} }
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific( void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) { CallInterfaceDescriptorData* data) {
Register registers[] = { Register registers[] = {
x0, // argument count (not including receiver) x0, // argument count (not including receiver)
@ -398,7 +363,7 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers); data->InitializePlatformSpecific(arraysize(registers), registers);
} }
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific( void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) { CallInterfaceDescriptorData* data) {
Register registers[] = { Register registers[] = {
x0, // argument count (not including receiver) x0, // argument count (not including receiver)
@ -410,8 +375,8 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers); data->InitializePlatformSpecific(arraysize(registers), registers);
} }
void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific( void InterpreterPushArgsThenConstructArrayDescriptor::
CallInterfaceDescriptorData* data) { InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = { Register registers[] = {
x0, // argument count (not including receiver) x0, // argument count (not including receiver)
x1, // target to call checked to be Array function x1, // target to call checked to be Array function
@ -436,7 +401,8 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
Register registers[] = { Register registers[] = {
x0, // the value to pass to the generator x0, // the value to pass to the generator
x1, // the JSGeneratorObject to resume x1, // the JSGeneratorObject to resume
x2 // the resume mode (tagged) x2, // the resume mode (tagged)
x3 // SuspendFlags (tagged)
}; };
data->InitializePlatformSpecific(arraysize(registers), registers); data->InitializePlatformSpecific(arraysize(registers), registers);
} }

27
deps/v8/src/arm64/macro-assembler-arm64-inl.h

@ -12,9 +12,8 @@
#include "src/arm64/assembler-arm64-inl.h" #include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/assembler-arm64.h" #include "src/arm64/assembler-arm64.h"
#include "src/arm64/instrument-arm64.h" #include "src/arm64/instrument-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"
#include "src/base/bits.h" #include "src/base/bits.h"
#include "src/macro-assembler.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -37,12 +36,6 @@ MemOperand UntagSmiMemOperand(Register object, int offset) {
} }
Handle<Object> MacroAssembler::CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
}
void MacroAssembler::And(const Register& rd, void MacroAssembler::And(const Register& rd,
const Register& rn, const Register& rn,
const Operand& operand) { const Operand& operand) {
@ -1239,6 +1232,14 @@ void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
uxtw(rd, rn); uxtw(rd, rn);
} }
void MacroAssembler::AlignAndSetCSPForFrame() {
int sp_alignment = ActivationFrameAlignment();
// AAPCS64 mandates at least 16-byte alignment.
DCHECK(sp_alignment >= 16);
DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
Bic(csp, StackPointer(), sp_alignment - 1);
SetStackPointer(csp);
}
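The Bic is a standard align-down; sketched in C:

#include <cstdint>

// Clearing the low bits rounds sp down to the frame alignment; this is only
// correct because the DCHECKs above guarantee a power-of-two alignment.
constexpr uint64_t AlignDown(uint64_t sp, uint64_t alignment) {
  return sp & ~(alignment - 1);  // Bic(csp, StackPointer(), alignment - 1)
}
static_assert(AlignDown(0x1007, 16) == 0x1000, "16-byte alignment");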
void MacroAssembler::BumpSystemStackPointer(const Operand& space) { void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
DCHECK(!csp.Is(sp_)); DCHECK(!csp.Is(sp_));
@ -1441,14 +1442,7 @@ void MacroAssembler::ObjectUntag(Register untagged_obj, Register obj) {
Bic(untagged_obj, obj, kHeapObjectTag); Bic(untagged_obj, obj, kHeapObjectTag);
} }
void MacroAssembler::jmp(Label* L) { B(L); }
void MacroAssembler::IsObjectNameType(Register object,
Register type,
Label* fail) {
CompareObjectType(object, type, type, LAST_NAME_TYPE);
B(hi, fail);
}
void MacroAssembler::IsObjectJSStringType(Register object, void MacroAssembler::IsObjectJSStringType(Register object,
Register type, Register type,
@ -1477,6 +1471,7 @@ void MacroAssembler::Push(Handle<Object> handle) {
Push(tmp); Push(tmp);
} }
void MacroAssembler::Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
void MacroAssembler::Claim(int64_t count, uint64_t unit_size) { void MacroAssembler::Claim(int64_t count, uint64_t unit_size) {
DCHECK(count >= 0); DCHECK(count >= 0);

182
deps/v8/src/arm64/macro-assembler-arm64.cc

@ -4,16 +4,19 @@
#if V8_TARGET_ARCH_ARM64 #if V8_TARGET_ARCH_ARM64
#include "src/arm64/frames-arm64.h"
#include "src/assembler.h"
#include "src/base/bits.h" #include "src/base/bits.h"
#include "src/base/division-by-constant.h" #include "src/base/division-by-constant.h"
#include "src/bootstrapper.h" #include "src/bootstrapper.h"
#include "src/codegen.h" #include "src/codegen.h"
#include "src/debug/debug.h" #include "src/debug/debug.h"
#include "src/heap/heap-inl.h"
#include "src/register-configuration.h" #include "src/register-configuration.h"
#include "src/runtime/runtime.h" #include "src/runtime/runtime.h"
#include "src/arm64/frames-arm64.h" #include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/arm64/macro-assembler-arm64.h" #include "src/arm64/macro-assembler-arm64.h" // Cannot be the first include
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -21,23 +24,23 @@ namespace internal {
// Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros. // Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
#define __ #define __
MacroAssembler::MacroAssembler(Isolate* arg_isolate, byte* buffer, MacroAssembler::MacroAssembler(Isolate* isolate, byte* buffer,
unsigned buffer_size, unsigned buffer_size,
CodeObjectRequired create_code_object) CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, buffer_size), : Assembler(isolate, buffer, buffer_size),
generating_stub_(false), generating_stub_(false),
#if DEBUG #if DEBUG
allow_macro_instructions_(true), allow_macro_instructions_(true),
#endif #endif
has_frame_(false), has_frame_(false),
isolate_(isolate),
use_real_aborts_(true), use_real_aborts_(true),
sp_(jssp), sp_(jssp),
tmp_list_(DefaultTmpList()), tmp_list_(DefaultTmpList()),
fptmp_list_(DefaultFPTmpList()) { fptmp_list_(DefaultFPTmpList()) {
if (create_code_object == CodeObjectRequired::kYes) { if (create_code_object == CodeObjectRequired::kYes) {
code_object_ = code_object_ =
Handle<Object>::New(isolate()->heap()->undefined_value(), isolate()); Handle<Object>::New(isolate_->heap()->undefined_value(), isolate_);
} }
} }
@ -1232,6 +1235,12 @@ void MacroAssembler::PopPostamble(Operand total_size) {
} }
} }
void MacroAssembler::PushPreamble(int count, int size) {
PushPreamble(count * size);
}
void MacroAssembler::PopPostamble(int count, int size) {
PopPostamble(count * size);
}
void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) { void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
if (offset.IsImmediate()) { if (offset.IsImmediate()) {
@ -1428,6 +1437,21 @@ void MacroAssembler::LoadHeapObject(Register result,
Mov(result, Operand(object)); Mov(result, Operand(object));
} }
void MacroAssembler::LoadObject(Register result, Handle<Object> object) {
AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
DCHECK(object->IsSmi());
Mov(result, Operand(object));
}
}
void MacroAssembler::Move(Register dst, Register src) { Mov(dst, src); }
void MacroAssembler::Move(Register dst, Handle<Object> x) {
LoadObject(dst, x);
}
void MacroAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
void MacroAssembler::LoadInstanceDescriptors(Register map, void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) { Register descriptors) {
@ -1595,20 +1619,6 @@ void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
} }
void MacroAssembler::AssertName(Register object) {
if (emit_debug_code()) {
AssertNotSmi(object, kOperandIsASmiAndNotAName);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(temp, temp, LAST_NAME_TYPE);
Check(ls, kOperandIsNotAName);
}
}
void MacroAssembler::AssertFunction(Register object) { void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) { if (emit_debug_code()) {
AssertNotSmi(object, kOperandIsASmiAndNotAFunction); AssertNotSmi(object, kOperandIsASmiAndNotAFunction);
@ -1634,31 +1644,36 @@ void MacroAssembler::AssertBoundFunction(Register object) {
} }
} }
void MacroAssembler::AssertGeneratorObject(Register object) { void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
if (emit_debug_code()) { // `flags` should be an untagged integer. See `SuspendFlags` in src/globals.h
AssertNotSmi(object, kOperandIsASmiAndNotAGeneratorObject); if (!emit_debug_code()) return;
AssertNotSmi(object, kOperandIsASmiAndNotAGeneratorObject);
UseScratchRegisterScope temps(this); // Load map
Register temp = temps.AcquireX(); UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
CompareObjectType(object, temp, temp, JS_GENERATOR_OBJECT_TYPE); // Load instance type
Check(eq, kOperandIsNotAGeneratorObject); Ldrb(temp, FieldMemOperand(temp, Map::kInstanceTypeOffset));
}
}
void MacroAssembler::AssertReceiver(Register object) { Label async, do_check;
if (emit_debug_code()) { STATIC_ASSERT(static_cast<int>(SuspendFlags::kGeneratorTypeMask) == 4);
AssertNotSmi(object, kOperandIsASmiAndNotAReceiver); DCHECK(!temp.is(flags));
B(&async, reg_bit_set, flags, 2);
UseScratchRegisterScope temps(this); // Check if JSGeneratorObject
Register temp = temps.AcquireX(); Cmp(temp, JS_GENERATOR_OBJECT_TYPE);
jmp(&do_check);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); bind(&async);
CompareObjectType(object, temp, temp, FIRST_JS_RECEIVER_TYPE); // Check if JSAsyncGeneratorObject
Check(hs, kOperandIsNotAReceiver); Cmp(temp, JS_ASYNC_GENERATOR_OBJECT_TYPE);
}
}
bind(&do_check);
// Restore generator object to register and perform assertion
Check(eq, kOperandIsNotAGeneratorObject);
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) { Register scratch) {
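
The rewritten assertion dispatches on one bit of the untagged `flags` value: bit 2 (mask 4, pinned by the STATIC_ASSERT) selects the async-generator instance type. A standalone C++ sketch of that dispatch; the JS_* enumerator values here are illustrative stand-ins, the real constants come from V8's InstanceType enum:

#include <cassert>

// Illustrative stand-ins; the real constants live in V8's InstanceType enum.
enum InstanceType {
  JS_GENERATOR_OBJECT_TYPE = 100,
  JS_ASYNC_GENERATOR_OBJECT_TYPE = 101,
};

// Mirrors the assembly above: SuspendFlags::kGeneratorTypeMask == 4, so bit 2
// of the flags chooses which instance type the object must have.
bool IsExpectedGeneratorType(int instance_type, int flags) {
  int expected = (flags & 4) ? JS_ASYNC_GENERATOR_OBJECT_TYPE
                             : JS_GENERATOR_OBJECT_TYPE;
  return instance_type == expected;
}

int main() {
  assert(IsExpectedGeneratorType(JS_GENERATOR_OBJECT_TYPE, 0));
  assert(IsExpectedGeneratorType(JS_ASYNC_GENERATOR_OBJECT_TYPE, 4));
  assert(!IsExpectedGeneratorType(JS_GENERATOR_OBJECT_TYPE, 4));
}
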
@@ -1674,20 +1689,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
 }

-void MacroAssembler::AssertString(Register object) {
-  if (emit_debug_code()) {
-    UseScratchRegisterScope temps(this);
-    Register temp = temps.AcquireX();
-    STATIC_ASSERT(kSmiTag == 0);
-    Tst(object, kSmiTagMask);
-    Check(ne, kOperandIsASmiAndNotAString);
-    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
-    CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
-    Check(lo, kOperandIsNotAString);
-  }
-}
-
 void MacroAssembler::AssertPositiveOrZero(Register value) {
   if (emit_debug_code()) {
     Label done;

@@ -1698,28 +1699,6 @@ void MacroAssembler::AssertPositiveOrZero(Register value) {
   }
 }

-void MacroAssembler::AssertNotNumber(Register value) {
-  if (emit_debug_code()) {
-    STATIC_ASSERT(kSmiTag == 0);
-    Tst(value, kSmiTagMask);
-    Check(ne, kOperandIsANumber);
-    Label done;
-    JumpIfNotHeapNumber(value, &done);
-    Abort(kOperandIsANumber);
-    Bind(&done);
-  }
-}
-
-void MacroAssembler::AssertNumber(Register value) {
-  if (emit_debug_code()) {
-    Label done;
-    JumpIfSmi(value, &done);
-    JumpIfHeapNumber(value, &done);
-    Abort(kOperandIsNotANumber);
-    Bind(&done);
-  }
-}
-
 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
   DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
   Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);

@@ -3331,30 +3310,6 @@ void MacroAssembler::CheckMap(Register obj_map,
 }

-void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
-                                     Register scratch2, Handle<WeakCell> cell,
-                                     Handle<Code> success,
-                                     SmiCheckType smi_check_type) {
-  Label fail;
-  if (smi_check_type == DO_SMI_CHECK) {
-    JumpIfSmi(obj, &fail);
-  }
-  Ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
-  CmpWeakValue(scratch1, cell, scratch2);
-  B(ne, &fail);
-  Jump(success, RelocInfo::CODE_TARGET);
-  Bind(&fail);
-}
-
-void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
-                                  Register scratch) {
-  Mov(scratch, Operand(cell));
-  Ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
-  Cmp(value, scratch);
-}
-
 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
   Mov(value, Operand(cell));
   Ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));

@@ -3384,7 +3339,6 @@ void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
   DecodeField<Map::ElementsKindBits>(result);
 }

-
 void MacroAssembler::GetMapConstructor(Register result, Register map,
                                        Register temp, Register temp2) {
   Label done, loop;

@@ -3683,6 +3637,13 @@ void MacroAssembler::PopSafepointRegistersAndDoubles() {
   PopSafepointRegisters();
 }

+void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
+  Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
+}
+
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
+  Peek(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
+}

 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
   // Make sure the safepoint registers list is what we expect.

@@ -4082,20 +4043,6 @@ void MacroAssembler::AssertRegisterIsRoot(Register reg,
 }

-void MacroAssembler::AssertFastElements(Register elements) {
-  if (emit_debug_code()) {
-    UseScratchRegisterScope temps(this);
-    Register temp = temps.AcquireX();
-    Label ok;
-    Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
-    JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
-    JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
-    JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
-    Abort(kJSObjectWithFastElementsMapHasSlowElements);
-    Bind(&ok);
-  }
-}
-
 void MacroAssembler::AssertIsString(const Register& object) {
   if (emit_debug_code()) {

@@ -4584,6 +4531,13 @@ CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
   return reg;
 }

+MemOperand ContextMemOperand(Register context, int index) {
+  return MemOperand(context, Context::SlotOffset(index));
+}
+
+MemOperand NativeContextMemOperand() {
+  return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
+}

 #define __ masm->
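
`Context::SlotOffset` folds the heap-object tag into the displacement, so the resulting MemOperand addresses the raw slot even though the context register holds a tagged pointer. A self-contained sketch of that arithmetic; the constants are the usual 64-bit V8 values, stated here as assumptions rather than taken from this diff:

#include <cassert>

const int kHeapObjectTag = 1;                        // assumed tag size
const int kPointerSize = 8;                          // assumed 64-bit slots
const int kFixedArrayHeaderSize = 2 * kPointerSize;  // assumed map + length

// Mirrors Context::SlotOffset(index): skip the header, add the slots, and
// subtract the tag carried by the context pointer.
int SlotOffset(int index) {
  return kFixedArrayHeaderSize + index * kPointerSize - kHeapObjectTag;
}

int main() {
  assert(SlotOffset(0) == 15);  // first slot sits right after the header
  assert(SlotOffset(4) == 47);  // each further slot adds kPointerSize
}
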

93
deps/v8/src/arm64/macro-assembler-arm64.h

@@ -167,7 +167,12 @@ class MacroAssembler : public Assembler {
   MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size,
                  CodeObjectRequired create_code_object);

-  inline Handle<Object> CodeObject();
+  Isolate* isolate() const { return isolate_; }
+
+  Handle<Object> CodeObject() {
+    DCHECK(!code_object_.is_null());
+    return code_object_;
+  }

   // Instruction set functions ------------------------------------------------
   // Logical macros.

@@ -672,7 +677,7 @@ class MacroAssembler : public Assembler {
   // This is a convenience method for pushing a single Handle<Object>.
   inline void Push(Handle<Object> handle);
-  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+  inline void Push(Smi* smi);

   // Aliases of Push and Pop, required for V8 compatibility.
   inline void push(Register src) {

@@ -872,14 +877,7 @@ class MacroAssembler : public Assembler {
   // Align csp for a frame, as per ActivationFrameAlignment, and make it the
   // current stack pointer.
-  inline void AlignAndSetCSPForFrame() {
-    int sp_alignment = ActivationFrameAlignment();
-    // AAPCS64 mandates at least 16-byte alignment.
-    DCHECK(sp_alignment >= 16);
-    DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
-    Bic(csp, StackPointer(), sp_alignment - 1);
-    SetStackPointer(csp);
-  }
+  inline void AlignAndSetCSPForFrame();

   // Push the system stack pointer (csp) down to allow the same to be done to
   // the current stack pointer (according to StackPointer()). This must be

@@ -923,23 +921,15 @@ class MacroAssembler : public Assembler {
   void LoadHeapObject(Register dst, Handle<HeapObject> object);

-  void LoadObject(Register result, Handle<Object> object) {
-    AllowDeferredHandleDereference heap_object_check;
-    if (object->IsHeapObject()) {
-      LoadHeapObject(result, Handle<HeapObject>::cast(object));
-    } else {
-      DCHECK(object->IsSmi());
-      Mov(result, Operand(object));
-    }
-  }
+  void LoadObject(Register result, Handle<Object> object);

   static int SafepointRegisterStackIndex(int reg_code);

   // This is required for compatibility with architecture independant code.
   // Remove if not needed.
-  inline void Move(Register dst, Register src) { Mov(dst, src); }
-  inline void Move(Register dst, Handle<Object> x) { LoadObject(dst, x); }
-  inline void Move(Register dst, Smi* src) { Mov(dst, src); }
+  void Move(Register dst, Register src);
+  void Move(Register dst, Handle<Object> x);
+  void Move(Register dst, Smi* src);

   void LoadInstanceDescriptors(Register map,
                                Register descriptors);

@@ -1004,38 +994,25 @@ class MacroAssembler : public Assembler {
   inline void ObjectTag(Register tagged_obj, Register obj);
   inline void ObjectUntag(Register untagged_obj, Register obj);

-  // Abort execution if argument is not a name, enabled via --debug-code.
-  void AssertName(Register object);
-
   // Abort execution if argument is not a JSFunction, enabled via --debug-code.
   void AssertFunction(Register object);

   // Abort execution if argument is not a JSGeneratorObject,
   // enabled via --debug-code.
-  void AssertGeneratorObject(Register object);
+  void AssertGeneratorObject(Register object, Register suspend_flags);

   // Abort execution if argument is not a JSBoundFunction,
   // enabled via --debug-code.
   void AssertBoundFunction(Register object);

-  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
-  void AssertReceiver(Register object);
-
   // Abort execution if argument is not undefined or an AllocationSite, enabled
   // via --debug-code.
   void AssertUndefinedOrAllocationSite(Register object, Register scratch);

-  // Abort execution if argument is not a string, enabled via --debug-code.
-  void AssertString(Register object);
-
   // Abort execution if argument is not a positive or zero integer, enabled via
   // --debug-code.
   void AssertPositiveOrZero(Register value);

-  // Abort execution if argument is not a number (heap number or smi).
-  void AssertNumber(Register value);
-  void AssertNotNumber(Register value);
-
   void JumpIfHeapNumber(Register object, Label* on_heap_number,
                         SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
   void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,

@@ -1112,7 +1089,7 @@ class MacroAssembler : public Assembler {
   // ---- Calling / Jumping helpers ----

   // This is required for compatibility in architecture indepenedant code.
-  inline void jmp(Label* L) { B(L); }
+  inline void jmp(Label* L);

   void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
   void TailCallStub(CodeStub* stub);

@@ -1445,16 +1422,6 @@
                 Label* fail,
                 SmiCheckType smi_check_type);

-  // Check if the map of an object is equal to a specified weak map and branch
-  // to a specified target if equal. Skip the smi check if not required
-  // (object is known to be a heap object)
-  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
-                       Handle<WeakCell> cell, Handle<Code> success,
-                       SmiCheckType smi_check_type);
-
-  // Compare the given value and the value of weak cell.
-  void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
-
   void GetWeakValue(Register value, Handle<WeakCell> cell);

   // Load the value of the weak cell in the value register. Branch to the given

@@ -1485,13 +1452,6 @@
                           Heap::RootListIndex index,
                           Label* if_not_equal);

-  // Load and check the instance type of an object for being a unique name.
-  // Loads the type into the second argument register.
-  // The object and type arguments can be the same register; in that case it
-  // will be overwritten with the type.
-  // Fall-through if the object was a string and jump on fail otherwise.
-  inline void IsObjectNameType(Register object, Register type, Label* fail);
-
   // Load and check the instance type of an object for being a string.
   // Loads the type into the second argument register.
   // The object and type arguments can be the same register; in that case it

@@ -1665,15 +1625,11 @@ class MacroAssembler : public Assembler {
   void PopSafepointRegistersAndDoubles();

   // Store value in register src in the safepoint stack slot for register dst.
-  void StoreToSafepointRegisterSlot(Register src, Register dst) {
-    Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
-  }
+  void StoreToSafepointRegisterSlot(Register src, Register dst);

   // Load the value of the src register from its safepoint stack slot
   // into register dst.
-  void LoadFromSafepointRegisterSlot(Register dst, Register src) {
-    Peek(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
-  }
+  void LoadFromSafepointRegisterSlot(Register dst, Register src);

   void CheckPageFlag(const Register& object, const Register& scratch, int mask,
                      Condition cc, Label* condition_met);

@@ -1808,7 +1764,6 @@
                            Register reg,
                            Heap::RootListIndex index,
                            BailoutReason reason = kRegisterDidNotMatchExpectedRoot);
-  void AssertFastElements(Register elements);

   // Abort if the specified register contains the invalid color bit pattern.
   // The pattern must be in bits [1:0] of 'reg' register.

@@ -1922,8 +1877,8 @@
   void PushPreamble(Operand total_size);
   void PopPostamble(Operand total_size);

-  void PushPreamble(int count, int size) { PushPreamble(count * size); }
-  void PopPostamble(int count, int size) { PopPostamble(count * size); }
+  void PushPreamble(int count, int size);
+  void PopPostamble(int count, int size);

  private:
   // The actual Push and Pop implementations. These don't generate any code

@@ -1977,6 +1932,7 @@
   bool allow_macro_instructions_;
 #endif
   bool has_frame_;
+  Isolate* isolate_;

   // The Abort method should call a V8 runtime function, but the CallRuntime
   // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will

@@ -2118,15 +2074,8 @@ class UseScratchRegisterScope {
   RegList old_availablefp_;  // kFPRegister
 };

-inline MemOperand ContextMemOperand(Register context, int index = 0) {
-  return MemOperand(context, Context::SlotOffset(index));
-}
-
-inline MemOperand NativeContextMemOperand() {
-  return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
-}
+MemOperand ContextMemOperand(Register context, int index = 0);
+MemOperand NativeContextMemOperand();

 // Encode and decode information about patchable inline SMI checks.
 class InlineSmiCheckInfo {

314
deps/v8/src/arm64/simulator-arm64.cc

@@ -10,10 +10,11 @@

 #include "src/arm64/decoder-arm64-inl.h"
 #include "src/arm64/simulator-arm64.h"
-#include "src/assembler.h"
+#include "src/assembler-inl.h"
 #include "src/codegen.h"
 #include "src/disasm.h"
 #include "src/macro-assembler.h"
+#include "src/objects-inl.h"
 #include "src/ostreams.h"
 #include "src/runtime/runtime-utils.h"

@@ -55,6 +56,9 @@ TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR_BOLD(YELLOW) : "";
 TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(YELLOW) : "";
 TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : "";

+// static
+base::LazyInstance<Simulator::GlobalMonitor>::type Simulator::global_monitor_ =
+    LAZY_INSTANCE_INITIALIZER;
+
 // This is basically the same as PrintF, with a guard for FLAG_trace_sim.
 void Simulator::TraceSim(const char* format, ...) {

@@ -429,6 +433,7 @@ void Simulator::ResetState() {

 Simulator::~Simulator() {
+  global_monitor_.Pointer()->RemoveProcessor(&global_monitor_processor_);
   delete[] reinterpret_cast<byte*>(stack_);
   if (FLAG_log_instruction_stats) {
     delete instrument_;

@@ -1628,6 +1633,15 @@ void Simulator::LoadStoreHelper(Instruction* instr,
   uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode);
   uintptr_t stack = 0;

+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  if (instr->IsLoad()) {
+    local_monitor_.NotifyLoad(address);
+  } else {
+    local_monitor_.NotifyStore(address);
+    global_monitor_.Pointer()->NotifyStore_Locked(address,
+                                                  &global_monitor_processor_);
+  }
+
   // Handle the writeback for stores before the store. On a CPU the writeback
   // and the store are atomic, but when running on the simulator it is possible
   // to be interrupted in between. The simulator is not thread safe and V8 does

@@ -1730,6 +1744,19 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
   uintptr_t address2 = address + access_size;
   uintptr_t stack = 0;

+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  if (instr->IsLoad()) {
+    local_monitor_.NotifyLoad(address);
+    local_monitor_.NotifyLoad(address2);
+  } else {
+    local_monitor_.NotifyStore(address);
+    local_monitor_.NotifyStore(address2);
+    global_monitor_.Pointer()->NotifyStore_Locked(address,
+                                                  &global_monitor_processor_);
+    global_monitor_.Pointer()->NotifyStore_Locked(address2,
+                                                  &global_monitor_processor_);
+  }
+
   // Handle the writeback for stores before the store. On a CPU the writeback
   // and the store are atomic, but when running on the simulator it is possible
   // to be interrupted in between. The simulator is not thread safe and V8 does

@@ -1853,6 +1880,9 @@ void Simulator::VisitLoadLiteral(Instruction* instr) {
   uintptr_t address = instr->LiteralAddress();
   unsigned rt = instr->Rt();

+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyLoad(address);
+
   switch (instr->Mask(LoadLiteralMask)) {
     // Use _no_log variants to suppress the register trace (LOG_REGS,
     // LOG_FP_REGS), then print a more detailed log.

@@ -1906,8 +1936,108 @@ void Simulator::LoadStoreWriteBack(unsigned addr_reg,
   }
 }

+Simulator::TransactionSize Simulator::get_transaction_size(unsigned size) {
+  switch (size) {
+    case 0:
+      return TransactionSize::None;
+    case 1:
+      return TransactionSize::Byte;
+    case 2:
+      return TransactionSize::HalfWord;
+    case 4:
+      return TransactionSize::Word;
+    default:
+      UNREACHABLE();
+  }
+  return TransactionSize::None;
+}
+
 void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
-  // TODO(binji)
+  unsigned rt = instr->Rt();
+  unsigned rn = instr->Rn();
+  LoadStoreAcquireReleaseOp op = static_cast<LoadStoreAcquireReleaseOp>(
+      instr->Mask(LoadStoreAcquireReleaseMask));
+  int32_t is_acquire_release = instr->LoadStoreXAcquireRelease();
+  int32_t is_exclusive = (instr->LoadStoreXNotExclusive() == 0);
+  int32_t is_load = instr->LoadStoreXLoad();
+  int32_t is_pair = instr->LoadStoreXPair();
+  USE(is_acquire_release);
+  USE(is_pair);
+  DCHECK_NE(is_acquire_release, 0);  // Non-acquire/release unimplemented.
+  DCHECK_EQ(is_pair, 0);             // Pair unimplemented.
+  unsigned access_size = 1 << instr->LoadStoreXSizeLog2();
+  uintptr_t address = LoadStoreAddress(rn, 0, AddrMode::Offset);
+  DCHECK_EQ(address % access_size, 0);
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  if (is_load != 0) {
+    if (is_exclusive) {
+      local_monitor_.NotifyLoadExcl(address, get_transaction_size(access_size));
+      global_monitor_.Pointer()->NotifyLoadExcl_Locked(
+          address, &global_monitor_processor_);
+    } else {
+      local_monitor_.NotifyLoad(address);
+    }
+    switch (op) {
+      case LDAR_b:
+      case LDAXR_b:
+        set_wreg_no_log(rt, MemoryRead<uint8_t>(address));
+        break;
+      case LDAR_h:
+      case LDAXR_h:
+        set_wreg_no_log(rt, MemoryRead<uint16_t>(address));
+        break;
+      case LDAR_w:
+      case LDAXR_w:
+        set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
+        break;
+      default:
+        UNIMPLEMENTED();
+    }
+    LogRead(address, access_size, rt);
+  } else {
+    if (is_exclusive) {
+      unsigned rs = instr->Rs();
+      if (local_monitor_.NotifyStoreExcl(address,
+                                         get_transaction_size(access_size)) &&
+          global_monitor_.Pointer()->NotifyStoreExcl_Locked(
+              address, &global_monitor_processor_)) {
+        switch (op) {
+          case STLXR_b:
+            MemoryWrite<uint8_t>(address, wreg(rt));
+            break;
+          case STLXR_h:
+            MemoryWrite<uint16_t>(address, wreg(rt));
+            break;
+          case STLXR_w:
+            MemoryWrite<uint32_t>(address, wreg(rt));
+            break;
+          default:
+            UNIMPLEMENTED();
+        }
+        LogWrite(address, access_size, rt);
+        set_wreg(rs, 0);
+      } else {
+        set_wreg(rs, 1);
+      }
+    } else {
+      local_monitor_.NotifyStore(address);
+      global_monitor_.Pointer()->NotifyStore_Locked(address,
+                                                    &global_monitor_processor_);
+      switch (op) {
+        case STLR_b:
+          MemoryWrite<uint8_t>(address, wreg(rt));
+          break;
+        case STLR_h:
+          MemoryWrite<uint16_t>(address, wreg(rt));
+          break;
+        case STLR_w:
+          MemoryWrite<uint32_t>(address, wreg(rt));
+          break;
+        default:
+          UNIMPLEMENTED();
+      }
+    }
+  }
 }

 void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) {
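
The monitor pair threaded through these handlers models AArch64 load-exclusive/store-exclusive (LDAXR/STLXR), where the store writes a status value (the `set_wreg(rs, 0/1)` above) and callers retry on failure. In portable C++ this is the contract of `compare_exchange_weak`, which compilers typically lower to exactly such a pair on ARM64, and which may therefore fail spuriously, just like the simulator's deliberate occasional failure. A minimal runnable illustration with no V8 dependencies:

#include <atomic>
#include <cassert>

int main() {
  std::atomic<int> counter{0};
  int expected = counter.load();
  // The loop absorbs both real contention and spurious failures, the
  // software analogue of retrying when STLXR reports 1 in the status reg.
  while (!counter.compare_exchange_weak(expected, expected + 1)) {
  }
  assert(counter.load() == 1);
}
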
@@ -3877,6 +4007,186 @@ void Simulator::DoPrintf(Instruction* instr) {
   delete[] format;
 }

+Simulator::LocalMonitor::LocalMonitor()
+    : access_state_(MonitorAccess::Open),
+      tagged_addr_(0),
+      size_(TransactionSize::None) {}
+
+void Simulator::LocalMonitor::Clear() {
+  access_state_ = MonitorAccess::Open;
+  tagged_addr_ = 0;
+  size_ = TransactionSize::None;
+}
+
+void Simulator::LocalMonitor::NotifyLoad(uintptr_t addr) {
+  if (access_state_ == MonitorAccess::Exclusive) {
+    // A non exclusive load could clear the local monitor. As a result, it's
+    // most strict to unconditionally clear the local monitor on load.
+    Clear();
+  }
+}
+
+void Simulator::LocalMonitor::NotifyLoadExcl(uintptr_t addr,
+                                             TransactionSize size) {
+  access_state_ = MonitorAccess::Exclusive;
+  tagged_addr_ = addr;
+  size_ = size;
+}
+
+void Simulator::LocalMonitor::NotifyStore(uintptr_t addr) {
+  if (access_state_ == MonitorAccess::Exclusive) {
+    // A non exclusive store could clear the local monitor. As a result, it's
+    // most strict to unconditionally clear the local monitor on store.
+    Clear();
+  }
+}
+
+bool Simulator::LocalMonitor::NotifyStoreExcl(uintptr_t addr,
+                                              TransactionSize size) {
+  if (access_state_ == MonitorAccess::Exclusive) {
+    // It is allowed for a processor to require that the address matches
+    // exactly (B2.10.1), so this comparison does not mask addr.
+    if (addr == tagged_addr_ && size_ == size) {
+      Clear();
+      return true;
+    } else {
+      // It is implementation-defined whether an exclusive store to a
+      // non-tagged address will update memory. As a result, it's most strict
+      // to unconditionally clear the local monitor.
+      Clear();
+      return false;
+    }
+  } else {
+    DCHECK(access_state_ == MonitorAccess::Open);
+    return false;
+  }
+}
+
+Simulator::GlobalMonitor::Processor::Processor()
+    : access_state_(MonitorAccess::Open),
+      tagged_addr_(0),
+      next_(nullptr),
+      prev_(nullptr),
+      failure_counter_(0) {}
+
+void Simulator::GlobalMonitor::Processor::Clear_Locked() {
+  access_state_ = MonitorAccess::Open;
+  tagged_addr_ = 0;
+}
+
+void Simulator::GlobalMonitor::Processor::NotifyLoadExcl_Locked(
+    uintptr_t addr) {
+  access_state_ = MonitorAccess::Exclusive;
+  tagged_addr_ = addr;
+}
+
+void Simulator::GlobalMonitor::Processor::NotifyStore_Locked(
+    uintptr_t addr, bool is_requesting_processor) {
+  if (access_state_ == MonitorAccess::Exclusive) {
+    // A non exclusive store could clear the global monitor. As a result, it's
+    // most strict to unconditionally clear global monitors on store.
+    Clear_Locked();
+  }
+}
+
+bool Simulator::GlobalMonitor::Processor::NotifyStoreExcl_Locked(
+    uintptr_t addr, bool is_requesting_processor) {
+  if (access_state_ == MonitorAccess::Exclusive) {
+    if (is_requesting_processor) {
+      // It is allowed for a processor to require that the address matches
+      // exactly (B2.10.2), so this comparison does not mask addr.
+      if (addr == tagged_addr_) {
+        Clear_Locked();
+        // Introduce occasional stxr failures. This is to simulate the
+        // behavior of hardware, which can randomly fail due to background
+        // cache evictions.
+        if (failure_counter_++ >= kMaxFailureCounter) {
+          failure_counter_ = 0;
+          return false;
+        } else {
+          return true;
+        }
+      }
+    } else if ((addr & kExclusiveTaggedAddrMask) ==
+               (tagged_addr_ & kExclusiveTaggedAddrMask)) {
+      // Check the masked addresses when responding to a successful lock by
+      // another processor so the implementation is more conservative (i.e. the
+      // granularity of locking is as large as possible.)
+      Clear_Locked();
+      return false;
+    }
+  }
+  return false;
+}
+
+Simulator::GlobalMonitor::GlobalMonitor() : head_(nullptr) {}
+
+void Simulator::GlobalMonitor::NotifyLoadExcl_Locked(uintptr_t addr,
+                                                     Processor* processor) {
+  processor->NotifyLoadExcl_Locked(addr);
+  PrependProcessor_Locked(processor);
+}
+
+void Simulator::GlobalMonitor::NotifyStore_Locked(uintptr_t addr,
+                                                  Processor* processor) {
+  // Notify each processor of the store operation.
+  for (Processor* iter = head_; iter; iter = iter->next_) {
+    bool is_requesting_processor = iter == processor;
+    iter->NotifyStore_Locked(addr, is_requesting_processor);
+  }
+}
+
+bool Simulator::GlobalMonitor::NotifyStoreExcl_Locked(uintptr_t addr,
+                                                      Processor* processor) {
+  DCHECK(IsProcessorInLinkedList_Locked(processor));
+  if (processor->NotifyStoreExcl_Locked(addr, true)) {
+    // Notify the other processors that this StoreExcl succeeded.
+    for (Processor* iter = head_; iter; iter = iter->next_) {
+      if (iter != processor) {
+        iter->NotifyStoreExcl_Locked(addr, false);
+      }
+    }
+    return true;
+  } else {
+    return false;
+  }
+}
+
+bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
+    Processor* processor) const {
+  return head_ == processor || processor->next_ || processor->prev_;
+}
+
+void Simulator::GlobalMonitor::PrependProcessor_Locked(Processor* processor) {
+  if (IsProcessorInLinkedList_Locked(processor)) {
+    return;
+  }
+  if (head_) {
+    head_->prev_ = processor;
+  }
+  processor->prev_ = nullptr;
+  processor->next_ = head_;
+  head_ = processor;
+}
+
+void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
+  base::LockGuard<base::Mutex> lock_guard(&mutex);
+  if (!IsProcessorInLinkedList_Locked(processor)) {
+    return;
+  }
+  if (processor->prev_) {
+    processor->prev_->next_ = processor->next_;
+  } else {
+    head_ = processor->next_;
+  }
+  if (processor->next_) {
+    processor->next_->prev_ = processor->prev_;
+  }
+  processor->prev_ = nullptr;
+  processor->next_ = nullptr;
+}
+
 #endif  // USE_SIMULATOR

91
deps/v8/src/arm64/simulator-arm64.h

@@ -865,6 +865,97 @@ class Simulator : public DecoderVisitor {
   char* last_debugger_input() { return last_debugger_input_; }
   char* last_debugger_input_;

+  // Synchronization primitives. See ARM DDI 0487A.a, B2.10. Pair types not
+  // implemented.
+  enum class MonitorAccess {
+    Open,
+    Exclusive,
+  };
+
+  enum class TransactionSize {
+    None = 0,
+    Byte = 1,
+    HalfWord = 2,
+    Word = 4,
+  };
+
+  TransactionSize get_transaction_size(unsigned size);
+
+  // The least-significant bits of the address are ignored. The number of bits
+  // is implementation-defined, between 3 and 11. See ARM DDI 0487A.a, B2.10.3.
+  static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 11) - 1);
+
+  class LocalMonitor {
+   public:
+    LocalMonitor();
+
+    // These functions manage the state machine for the local monitor, but do
+    // not actually perform loads and stores. NotifyStoreExcl only returns
+    // true if the exclusive store is allowed; the global monitor will still
+    // have to be checked to see whether the memory should be updated.
+    void NotifyLoad(uintptr_t addr);
+    void NotifyLoadExcl(uintptr_t addr, TransactionSize size);
+    void NotifyStore(uintptr_t addr);
+    bool NotifyStoreExcl(uintptr_t addr, TransactionSize size);
+
+   private:
+    void Clear();
+
+    MonitorAccess access_state_;
+    uintptr_t tagged_addr_;
+    TransactionSize size_;
+  };
+
+  class GlobalMonitor {
+   public:
+    GlobalMonitor();
+
+    class Processor {
+     public:
+      Processor();
+
+     private:
+      friend class GlobalMonitor;
+      // These functions manage the state machine for the global monitor, but
+      // do not actually perform loads and stores.
+      void Clear_Locked();
+      void NotifyLoadExcl_Locked(uintptr_t addr);
+      void NotifyStore_Locked(uintptr_t addr, bool is_requesting_processor);
+      bool NotifyStoreExcl_Locked(uintptr_t addr, bool is_requesting_processor);
+
+      MonitorAccess access_state_;
+      uintptr_t tagged_addr_;
+      Processor* next_;
+      Processor* prev_;
+      // A stxr can fail due to background cache evictions. Rather than
+      // simulating this, we'll just occasionally introduce cases where an
+      // exclusive store fails. This will happen once after every
+      // kMaxFailureCounter exclusive stores.
+      static const int kMaxFailureCounter = 5;
+      int failure_counter_;
+    };
+
+    // Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
+    base::Mutex mutex;
+
+    void NotifyLoadExcl_Locked(uintptr_t addr, Processor* processor);
+    void NotifyStore_Locked(uintptr_t addr, Processor* processor);
+    bool NotifyStoreExcl_Locked(uintptr_t addr, Processor* processor);
+
+    // Called when the simulator is destroyed.
+    void RemoveProcessor(Processor* processor);
+
+   private:
+    bool IsProcessorInLinkedList_Locked(Processor* processor) const;
+    void PrependProcessor_Locked(Processor* processor);
+
+    Processor* head_;
+  };
+
+  LocalMonitor local_monitor_;
+  GlobalMonitor::Processor global_monitor_processor_;
+  static base::LazyInstance<GlobalMonitor>::type global_monitor_;
+
  private:
  void Init(FILE* stream);
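
With this mask the global monitor compares addresses at a 2 KiB granule: the low 11 bits are ignored, the largest reservation granule the comment above allows (real hardware may reserve anywhere from 3 to 11 bits). A standalone sketch of the masked comparison used in `NotifyStoreExcl_Locked`:

#include <cassert>
#include <cstdint>

// Same mask as above: ignore the low 11 address bits, i.e. a 2 KiB granule.
const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 11) - 1);

bool SameGranule(uintptr_t a, uintptr_t b) {
  return (a & kExclusiveTaggedAddrMask) == (b & kExclusiveTaggedAddrMask);
}

int main() {
  assert(SameGranule(0x1000, 0x17f8));   // both inside [0x1000, 0x1800)
  assert(!SameGranule(0x1000, 0x1800));  // the next granule starts at 0x1800
}
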

138
deps/v8/src/asmjs/asm-js.cc

@@ -6,6 +6,7 @@

 #include "src/api-natives.h"
 #include "src/api.h"
+#include "src/asmjs/asm-parser.h"
 #include "src/asmjs/asm-typer.h"
 #include "src/asmjs/asm-wasm-builder.h"
 #include "src/assert-scope.h"

@@ -164,47 +165,90 @@ bool IsStdlibMemberValid(i::Isolate* isolate, Handle<JSReceiver> stdlib,
 }  // namespace

 MaybeHandle<FixedArray> AsmJs::CompileAsmViaWasm(CompilationInfo* info) {
-  ErrorThrower thrower(info->isolate(), "Asm.js -> WebAssembly conversion");
+  wasm::ZoneBuffer* module = nullptr;
+  wasm::ZoneBuffer* asm_offsets = nullptr;
+  Handle<FixedArray> uses_array;
+  Handle<FixedArray> foreign_globals;
   base::ElapsedTimer asm_wasm_timer;
   asm_wasm_timer.Start();
   wasm::AsmWasmBuilder builder(info);
-  Handle<FixedArray> foreign_globals;
-  auto asm_wasm_result = builder.Run(&foreign_globals);
-  if (!asm_wasm_result.success) {
-    DCHECK(!info->isolate()->has_pending_exception());
-    if (!FLAG_suppress_asm_messages) {
-      MessageHandler::ReportMessage(info->isolate(),
-                                    builder.typer()->message_location(),
-                                    builder.typer()->error_message());
+  if (FLAG_fast_validate_asm) {
+    wasm::AsmJsParser parser(info->isolate(), info->zone(), info->script(),
+                             info->literal()->start_position(),
+                             info->literal()->end_position());
+    if (!parser.Run()) {
+      DCHECK(!info->isolate()->has_pending_exception());
+      if (!FLAG_suppress_asm_messages) {
+        MessageLocation location(info->script(), parser.failure_location(),
+                                 parser.failure_location());
+        Handle<String> message =
+            info->isolate()
+                ->factory()
+                ->NewStringFromUtf8(CStrVector(parser.failure_message()))
+                .ToHandleChecked();
+        Handle<JSMessageObject> error_message =
+            MessageHandler::MakeMessageObject(
+                info->isolate(), MessageTemplate::kAsmJsInvalid, &location,
+                message, Handle<JSArray>::null());
+        error_message->set_error_level(v8::Isolate::kMessageWarning);
+        MessageHandler::ReportMessage(info->isolate(), &location,
+                                      error_message);
+      }
+      return MaybeHandle<FixedArray>();
+    }
+    Zone* zone = info->zone();
+    module = new (zone) wasm::ZoneBuffer(zone);
+    parser.module_builder()->WriteTo(*module);
+    asm_offsets = new (zone) wasm::ZoneBuffer(zone);
+    parser.module_builder()->WriteAsmJsOffsetTable(*asm_offsets);
+    // TODO(bradnelson): Remove foreign_globals plumbing (as we don't need it
+    // for the new parser).
+    foreign_globals = info->isolate()->factory()->NewFixedArray(0);
+    uses_array = info->isolate()->factory()->NewFixedArray(
+        static_cast<int>(parser.stdlib_uses()->size()));
+    int count = 0;
+    for (auto i : *parser.stdlib_uses()) {
+      uses_array->set(count++, Smi::FromInt(i));
+    }
+  } else {
+    auto asm_wasm_result = builder.Run(&foreign_globals);
+    if (!asm_wasm_result.success) {
+      DCHECK(!info->isolate()->has_pending_exception());
+      if (!FLAG_suppress_asm_messages) {
+        MessageHandler::ReportMessage(info->isolate(),
+                                      builder.typer()->message_location(),
+                                      builder.typer()->error_message());
+      }
+      return MaybeHandle<FixedArray>();
+    }
+    module = asm_wasm_result.module_bytes;
+    asm_offsets = asm_wasm_result.asm_offset_table;
+    wasm::AsmTyper::StdlibSet uses = builder.typer()->StdlibUses();
+    uses_array = info->isolate()->factory()->NewFixedArray(
+        static_cast<int>(uses.size()));
+    int count = 0;
+    for (auto i : uses) {
+      uses_array->set(count++, Smi::FromInt(i));
     }
-    return MaybeHandle<FixedArray>();
   }
-  double asm_wasm_time = asm_wasm_timer.Elapsed().InMillisecondsF();

-  wasm::ZoneBuffer* module = asm_wasm_result.module_bytes;
-  wasm::ZoneBuffer* asm_offsets = asm_wasm_result.asm_offset_table;
+  double asm_wasm_time = asm_wasm_timer.Elapsed().InMillisecondsF();
   Vector<const byte> asm_offsets_vec(asm_offsets->begin(),
                                      static_cast<int>(asm_offsets->size()));

   base::ElapsedTimer compile_timer;
   compile_timer.Start();
+  ErrorThrower thrower(info->isolate(), "Asm.js -> WebAssembly conversion");
   MaybeHandle<JSObject> compiled = SyncCompileTranslatedAsmJs(
       info->isolate(), &thrower,
       wasm::ModuleWireBytes(module->begin(), module->end()), info->script(),
       asm_offsets_vec);
   DCHECK(!compiled.is_null());
+  DCHECK(!thrower.error());
   double compile_time = compile_timer.Elapsed().InMillisecondsF();
   DCHECK_GE(module->end(), module->begin());
   uintptr_t wasm_size = module->end() - module->begin();

-  wasm::AsmTyper::StdlibSet uses = builder.typer()->StdlibUses();
-  Handle<FixedArray> uses_array =
-      info->isolate()->factory()->NewFixedArray(static_cast<int>(uses.size()));
-  int count = 0;
-  for (auto i : uses) {
-    uses_array->set(count++, Smi::FromInt(i));
-  }
-
   Handle<FixedArray> result =
       info->isolate()->factory()->NewFixedArray(kWasmDataEntryCount);
   result->set(kWasmDataCompiledModule, *compiled.ToHandleChecked());

@@ -264,8 +308,6 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(i::Isolate* isolate,
   i::Handle<i::FixedArray> foreign_globals(
       i::FixedArray::cast(wasm_data->get(kWasmDataForeignGlobals)));

-  ErrorThrower thrower(isolate, "Asm.js -> WebAssembly instantiation");
-
   // Create the ffi object for foreign functions {"": foreign}.
   Handle<JSObject> ffi_object;
   if (!foreign.is_null()) {

@@ -276,40 +318,46 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(i::Isolate* isolate,
                           foreign, NONE);
   }

+  ErrorThrower thrower(isolate, "Asm.js -> WebAssembly instantiation");
   i::MaybeHandle<i::Object> maybe_module_object =
       i::wasm::SyncInstantiate(isolate, &thrower, module, ffi_object, memory);
   if (maybe_module_object.is_null()) {
+    thrower.Reify();  // Ensure exceptions do not propagate.
     return MaybeHandle<Object>();
   }
+  DCHECK(!thrower.error());
   i::Handle<i::Object> module_object = maybe_module_object.ToHandleChecked();

-  i::Handle<i::Name> init_name(isolate->factory()->InternalizeUtf8String(
-      wasm::AsmWasmBuilder::foreign_init_name));
-  i::Handle<i::Object> init =
-      i::Object::GetProperty(module_object, init_name).ToHandleChecked();
-
-  i::Handle<i::Object> undefined(isolate->heap()->undefined_value(), isolate);
-  i::Handle<i::Object>* foreign_args_array =
-      new i::Handle<i::Object>[foreign_globals->length()];
-  for (int j = 0; j < foreign_globals->length(); j++) {
-    if (!foreign.is_null()) {
-      i::MaybeHandle<i::Name> name = i::Object::ToName(
-          isolate, i::Handle<i::Object>(foreign_globals->get(j), isolate));
-      if (!name.is_null()) {
-        i::MaybeHandle<i::Object> val =
-            i::Object::GetProperty(foreign, name.ToHandleChecked());
-        if (!val.is_null()) {
-          foreign_args_array[j] = val.ToHandleChecked();
-          continue;
-        }
-      }
-    }
-    foreign_args_array[j] = undefined;
-  }
-  i::MaybeHandle<i::Object> retval = i::Execution::Call(
-      isolate, init, undefined, foreign_globals->length(), foreign_args_array);
-  delete[] foreign_args_array;
-  DCHECK(!retval.is_null());
+  if (!FLAG_fast_validate_asm) {
+    i::Handle<i::Name> init_name(isolate->factory()->InternalizeUtf8String(
+        wasm::AsmWasmBuilder::foreign_init_name));
+    i::Handle<i::Object> init =
+        i::Object::GetProperty(module_object, init_name).ToHandleChecked();
+
+    i::Handle<i::Object> undefined(isolate->heap()->undefined_value(),
+                                   isolate);
+    i::Handle<i::Object>* foreign_args_array =
+        new i::Handle<i::Object>[foreign_globals->length()];
+    for (int j = 0; j < foreign_globals->length(); j++) {
+      if (!foreign.is_null()) {
+        i::MaybeHandle<i::Name> name = i::Object::ToName(
+            isolate, i::Handle<i::Object>(foreign_globals->get(j), isolate));
+        if (!name.is_null()) {
+          i::MaybeHandle<i::Object> val =
+              i::Object::GetProperty(foreign, name.ToHandleChecked());
+          if (!val.is_null()) {
+            foreign_args_array[j] = val.ToHandleChecked();
+            continue;
+          }
+        }
+      }
+      foreign_args_array[j] = undefined;
+    }
+    i::MaybeHandle<i::Object> retval =
+        i::Execution::Call(isolate, init, undefined, foreign_globals->length(),
+                           foreign_args_array);
+    delete[] foreign_args_array;
+    DCHECK(!retval.is_null());
+  }

   i::Handle<i::Name> single_function_name(
       isolate->factory()->InternalizeUtf8String(

110
deps/v8/src/asmjs/asm-names.h

@@ -0,0 +1,110 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ASMJS_ASM_NAMES_H_
#define V8_ASMJS_ASM_NAMES_H_
#define STDLIB_MATH_VALUE_LIST(V) \
V(E) \
V(LN10) \
V(LN2) \
V(LOG2E) \
V(LOG10E) \
V(PI) \
V(SQRT1_2) \
V(SQRT2)
// V(stdlib.Math.<name>, Name, wasm-opcode, asm-js-type)
#define STDLIB_MATH_FUNCTION_MONOMORPHIC_LIST(V) \
V(acos, Acos, kExprF64Acos, dq2d) \
V(asin, Asin, kExprF64Asin, dq2d) \
V(atan, Atan, kExprF64Atan, dq2d) \
V(cos, Cos, kExprF64Cos, dq2d) \
V(sin, Sin, kExprF64Sin, dq2d) \
V(tan, Tan, kExprF64Tan, dq2d) \
V(exp, Exp, kExprF64Exp, dq2d) \
V(log, Log, kExprF64Log, dq2d) \
V(atan2, Atan2, kExprF64Atan2, dqdq2d) \
V(pow, Pow, kExprF64Pow, dqdq2d) \
V(imul, Imul, kExprI32Mul, ii2s) \
V(clz32, Clz32, kExprI32Clz, i2s)
// V(stdlib.Math.<name>, Name, unused, asm-js-type)
#define STDLIB_MATH_FUNCTION_CEIL_LIKE_LIST(V) \
V(ceil, Ceil, x, ceil_like) \
V(floor, Floor, x, ceil_like) \
V(sqrt, Sqrt, x, ceil_like)
// V(stdlib.Math.<name>, Name, unused, asm-js-type)
#define STDLIB_MATH_FUNCTION_LIST(V) \
V(min, Min, x, minmax) \
V(max, Max, x, minmax) \
V(abs, Abs, x, abs) \
V(fround, Fround, x, fround) \
STDLIB_MATH_FUNCTION_MONOMORPHIC_LIST(V) \
STDLIB_MATH_FUNCTION_CEIL_LIKE_LIST(V)
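
These are X-macro lists: each use site defines `V` to stamp out one construct per entry and then expands the list, keeping the enum, the string table, and so on in lockstep (the scanner constructor further down does exactly this). A self-contained sketch of the pattern with a toy two-entry list, not one of the lists above:

#include <cassert>
#include <cstring>

#define TOY_VALUE_LIST(V) \
  V(E)                    \
  V(PI)

// Expansion 1: an enumerator per entry.
enum ToyValue {
#define V(name) kToy_##name,
  TOY_VALUE_LIST(V)
#undef V
};

// Expansion 2: a parallel table of the entry names.
const char* kToyNames[] = {
#define V(name) #name,
    TOY_VALUE_LIST(V)
#undef V
};

int main() {
  assert(kToy_PI == 1);                         // enumerators count from 0
  assert(strcmp(kToyNames[kToy_E], "E") == 0);  // tables stay in sync
}
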
// V(stdlib.<name>, wasm-load-type, wasm-store-type, wasm-type)
#define STDLIB_ARRAY_TYPE_LIST(V) \
V(Int8Array, Mem8S, Mem8, I32) \
V(Uint8Array, Mem8U, Mem8, I32) \
V(Int16Array, Mem16S, Mem16, I32) \
V(Uint16Array, Mem16U, Mem16, I32) \
V(Int32Array, Mem, Mem, I32) \
V(Uint32Array, Mem, Mem, I32) \
V(Float32Array, Mem, Mem, F32) \
V(Float64Array, Mem, Mem, F64)
#define STDLIB_OTHER_LIST(V) \
V(Infinity) \
V(NaN) \
V(Math)
// clang-format off (for return)
#define KEYWORD_NAME_LIST(V) \
V(arguments) \
V(break) \
V(case) \
V(const) \
V(continue) \
V(default) \
V(do) \
V(else) \
V(eval) \
V(for) \
V(function) \
V(if) \
V(new) \
V(return ) \
V(switch) \
V(var) \
V(while)
// clang-format on
// V(token-string, token-name)
#define LONG_SYMBOL_NAME_LIST(V) \
V("<=", LE) \
V(">=", GE) \
V("==", EQ) \
V("!=", NE) \
V("<<", SHL) \
V(">>", SAR) \
V(">>>", SHR) \
V("'use asm'", UseAsm)
// clang-format off
#define SIMPLE_SINGLE_TOKEN_LIST(V) \
V('+') V('-') V('*') V('%') V('~') V('^') V('&') V('|') V('(') V(')') \
V('[') V(']') V('{') V('}') V(':') V(';') V(',') V('?')
// clang-format on
// V(name, value, string-name)
#define SPECIAL_TOKEN_LIST(V) \
V(kUninitialized, 0, "{uninitalized}") \
V(kEndOfInput, -1, "{end of input}") \
V(kParseError, -2, "{parse error}") \
V(kUnsigned, -3, "{unsigned value}") \
V(kDouble, -4, "{double value}")
#endif

2449
deps/v8/src/asmjs/asm-parser.cc

File diff suppressed because it is too large

316
deps/v8/src/asmjs/asm-parser.h

@@ -0,0 +1,316 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ASMJS_ASM_PARSER_H_
#define V8_ASMJS_ASM_PARSER_H_
#include <list>
#include <string>
#include <vector>
#include "src/asmjs/asm-scanner.h"
#include "src/asmjs/asm-typer.h"
#include "src/asmjs/asm-types.h"
#include "src/wasm/signature-map.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
namespace wasm {
// A custom parser + validator + wasm converter for asm.js:
// http://asmjs.org/spec/latest/
// This parser intentionally avoids the portion of JavaScript parsing
// that are not required to determine if code is valid asm.js code.
// * It is mostly one pass.
// * It bails out on unexpected input.
// * It assumes strict ordering insofar as permitted by asm.js validation rules.
// * It relies on a custom scanner that provides de-duped identifiers in two
// scopes (local + module wide).
class AsmJsParser {
public:
explicit AsmJsParser(Isolate* isolate, Zone* zone, Handle<Script> script,
int start, int end);
bool Run();
const char* failure_message() const { return failure_message_.c_str(); }
int failure_location() const { return failure_location_; }
WasmModuleBuilder* module_builder() { return module_builder_; }
const AsmTyper::StdlibSet* stdlib_uses() const { return &stdlib_uses_; }
private:
// clang-format off
enum class VarKind {
kUnused,
kLocal,
kGlobal,
kSpecial,
kFunction,
kTable,
kImportedFunction,
#define V(_unused0, Name, _unused1, _unused2) kMath##Name,
STDLIB_MATH_FUNCTION_LIST(V)
#undef V
#define V(Name) kMath##Name,
STDLIB_MATH_VALUE_LIST(V)
#undef V
};
// clang-format on
struct FunctionImportInfo {
char* function_name;
size_t function_name_size;
SignatureMap cache;
std::vector<uint32_t> cache_index;
};
struct VarInfo {
AsmType* type;
WasmFunctionBuilder* function_builder;
FunctionImportInfo* import;
int32_t mask;
uint32_t index;
VarKind kind;
bool mutable_variable;
bool function_defined;
VarInfo();
void DeclareGlobalImport(AsmType* type, uint32_t index);
void DeclareStdlibFunc(VarKind kind, AsmType* type);
};
struct GlobalImport {
char* import_name;
size_t import_name_size;
uint32_t import_index;
uint32_t global_index;
bool needs_init;
};
enum class BlockKind { kRegular, kLoop, kOther };
struct BlockInfo {
BlockKind kind;
AsmJsScanner::token_t label;
};
// Helper class to make {TempVariable} safe for nesting.
class TemporaryVariableScope;
Zone* zone_;
AsmJsScanner scanner_;
WasmModuleBuilder* module_builder_;
WasmFunctionBuilder* current_function_builder_;
AsmType* return_type_;
std::uintptr_t stack_limit_;
AsmTyper::StdlibSet stdlib_uses_;
std::list<FunctionImportInfo> function_import_info_;
ZoneVector<VarInfo> global_var_info_;
ZoneVector<VarInfo> local_var_info_;
int function_temp_locals_offset_;
int function_temp_locals_used_;
int function_temp_locals_depth_;
// Error Handling related
bool failed_;
std::string failure_message_;
int failure_location_;
// Module Related.
AsmJsScanner::token_t stdlib_name_;
AsmJsScanner::token_t foreign_name_;
AsmJsScanner::token_t heap_name_;
static const AsmJsScanner::token_t kTokenNone = 0;
// Track if parsing a heap assignment.
bool inside_heap_assignment_;
AsmType* heap_access_type_;
ZoneVector<BlockInfo> block_stack_;
// Types used for stdlib function and their set up.
AsmType* stdlib_dq2d_;
AsmType* stdlib_dqdq2d_;
AsmType* stdlib_fq2f_;
AsmType* stdlib_i2s_;
AsmType* stdlib_ii2s_;
AsmType* stdlib_minmax_;
AsmType* stdlib_abs_;
AsmType* stdlib_ceil_like_;
AsmType* stdlib_fround_;
// When making calls, the return type is needed to lookup signatures.
// For +callsite(..) or fround(callsite(..)) use this value to pass
// along the coercion.
AsmType* call_coercion_;
// The source position associated with the above {call_coercion}.
size_t call_coercion_position_;
// Used to track the last label we've seen so it can be matched to later
// statements it's attached to.
AsmJsScanner::token_t pending_label_;
// Global imports.
// NOTE: Holds the strings referenced in wasm-module-builder for imports.
ZoneLinkedList<GlobalImport> global_imports_;
Zone* zone() { return zone_; }
inline bool Peek(AsmJsScanner::token_t token) {
return scanner_.Token() == token;
}
inline bool Check(AsmJsScanner::token_t token) {
if (scanner_.Token() == token) {
scanner_.Next();
return true;
} else {
return false;
}
}
inline bool CheckForZero() {
if (scanner_.IsUnsigned() && scanner_.AsUnsigned() == 0) {
scanner_.Next();
return true;
} else {
return false;
}
}
inline bool CheckForDouble(double* value) {
if (scanner_.IsDouble()) {
*value = scanner_.AsDouble();
scanner_.Next();
return true;
} else {
return false;
}
}
inline bool CheckForUnsigned(uint64_t* value) {
if (scanner_.IsUnsigned()) {
*value = scanner_.AsUnsigned();
scanner_.Next();
return true;
} else {
return false;
}
}
inline bool CheckForUnsignedBelow(uint64_t limit, uint64_t* value) {
if (scanner_.IsUnsigned() && scanner_.AsUnsigned() < limit) {
*value = scanner_.AsUnsigned();
scanner_.Next();
return true;
} else {
return false;
}
}
inline AsmJsScanner::token_t Consume() {
AsmJsScanner::token_t ret = scanner_.Token();
scanner_.Next();
return ret;
}
void SkipSemicolon();
VarInfo* GetVarInfo(AsmJsScanner::token_t token);
uint32_t VarIndex(VarInfo* info);
void DeclareGlobal(VarInfo* info, bool mutable_variable, AsmType* type,
ValueType vtype,
const WasmInitExpr& init = WasmInitExpr());
// Allocates a temporary local variable. The given {index} is absolute within
// the function body, consider using {TemporaryVariableScope} when nesting.
uint32_t TempVariable(int index);
void AddGlobalImport(std::string name, AsmType* type, ValueType vtype,
bool mutable_variable, VarInfo* info);
// Use to set up block stack layers (including synthetic ones for if-else).
// Begin/Loop/End below are implemented with these plus code generation.
void BareBegin(BlockKind kind = BlockKind::kOther,
AsmJsScanner::token_t label = 0);
void BareEnd();
int FindContinueLabelDepth(AsmJsScanner::token_t label);
int FindBreakLabelDepth(AsmJsScanner::token_t label);
// Use to set up actual wasm blocks/loops.
void Begin(AsmJsScanner::token_t label = 0);
void Loop(AsmJsScanner::token_t label = 0);
void End();
void InitializeStdlibTypes();
FunctionSig* ConvertSignature(AsmType* return_type,
const std::vector<AsmType*>& params);
// 6.1 ValidateModule
void ValidateModule();
void ValidateModuleParameters();
void ValidateModuleVars();
void ValidateModuleVar(bool mutable_variable);
bool ValidateModuleVarImport(VarInfo* info, bool mutable_variable);
void ValidateModuleVarStdlib(VarInfo* info);
void ValidateModuleVarNewStdlib(VarInfo* info);
void ValidateModuleVarFromGlobal(VarInfo* info, bool mutable_variable);
void ValidateExport(); // 6.2 ValidateExport
void ValidateFunctionTable(); // 6.3 ValidateFunctionTable
void ValidateFunction(); // 6.4 ValidateFunction
void ValidateFunctionParams(std::vector<AsmType*>* params);
void ValidateFunctionLocals(size_t param_count,
std::vector<ValueType>* locals);
void ValidateStatement(); // ValidateStatement
void Block(); // 6.5.1 Block
void ExpressionStatement(); // 6.5.2 ExpressionStatement
void EmptyStatement(); // 6.5.3 EmptyStatement
void IfStatement(); // 6.5.4 IfStatement
void ReturnStatement(); // 6.5.5 ReturnStatement
bool IterationStatement(); // 6.5.6 IterationStatement
void WhileStatement(); // 6.5.6 IterationStatement - while
void DoStatement(); // 6.5.6 IterationStatement - do
void ForStatement(); // 6.5.6 IterationStatement - for
void BreakStatement(); // 6.5.7 BreakStatement
void ContinueStatement(); // 6.5.8 ContinueStatement
void LabelledStatement(); // 6.5.9 LabelledStatement
void SwitchStatement(); // 6.5.10 SwitchStatement
void ValidateCase(); // 6.6. ValidateCase
void ValidateDefault(); // 6.7 ValidateDefault
AsmType* ValidateExpression(); // 6.8 ValidateExpression
AsmType* Expression(AsmType* expect); // 6.8.1 Expression
AsmType* NumericLiteral(); // 6.8.2 NumericLiteral
AsmType* Identifier(); // 6.8.3 Identifier
AsmType* CallExpression(); // 6.8.4 CallExpression
AsmType* MemberExpression(); // 6.8.5 MemberExpression
AsmType* AssignmentExpression(); // 6.8.6 AssignmentExpression
AsmType* UnaryExpression(); // 6.8.7 UnaryExpression
AsmType* MultiplicativeExpression(); // 6.8.8 MultiplicativeExpression
AsmType* AdditiveExpression(); // 6.8.9 AdditiveExpression
AsmType* ShiftExpression(); // 6.8.10 ShiftExpression
AsmType* RelationalExpression(); // 6.8.11 RelationalExpression
AsmType* EqualityExpression(); // 6.8.12 EqualityExpression
AsmType* BitwiseANDExpression(); // 6.8.13 BitwiseANDExpression
AsmType* BitwiseXORExpression(); // 6.8.14 BitwiseXORExpression
AsmType* BitwiseORExpression(); // 6.8.15 BitwiseORExpression
AsmType* ConditionalExpression(); // 6.8.16 ConditionalExpression
AsmType* ParenthesizedExpression(); // 6.8.17 ParenthesizedExpression
AsmType* ValidateCall(); // 6.9 ValidateCall
bool PeekCall(); // 6.9 ValidateCall - helper
void ValidateHeapAccess(); // 6.10 ValidateHeapAccess
void ValidateFloatCoercion(); // 6.11 ValidateFloatCoercion
void GatherCases(std::vector<int32_t>* cases);
};
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_ASMJS_ASM_PARSER_H_
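The CheckFor*/Consume helpers above are the parser's entire token-consumption interface: each grammar production peeks at the scanner and either consumes a matching token or leaves the stream untouched, so alternatives can be tried without explicit backtracking. A minimal standalone sketch of that pattern, assuming nothing from V8 (ToyScanner and the driver below are hypothetical stand-ins):

#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

class ToyScanner {
 public:
  explicit ToyScanner(std::vector<int64_t> tokens)
      : tokens_(std::move(tokens)) {}
  bool IsUnsigned() const {
    return pos_ < tokens_.size() && tokens_[pos_] >= 0;
  }
  uint64_t AsUnsigned() const { return static_cast<uint64_t>(tokens_[pos_]); }
  void Next() { ++pos_; }

 private:
  std::vector<int64_t> tokens_;
  size_t pos_ = 0;
};

// Mirrors CheckForUnsignedBelow(): consume the token only when it matches.
bool CheckForUnsignedBelow(ToyScanner* scanner, uint64_t limit,
                           uint64_t* value) {
  if (scanner->IsUnsigned() && scanner->AsUnsigned() < limit) {
    *value = scanner->AsUnsigned();
    scanner->Next();
    return true;
  }
  return false;
}

int main() {
  ToyScanner scanner({31, 99});
  uint64_t shift;
  // Consumes 31 (below the limit), then stops at 99 without consuming it.
  while (CheckForUnsignedBelow(&scanner, 32, &shift)) {
    std::cout << "shift amount: " << shift << "\n";
  }
}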

431
deps/v8/src/asmjs/asm-scanner.cc

@@ -0,0 +1,431 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/asmjs/asm-scanner.h"
#include "src/conversions.h"
#include "src/flags.h"
#include "src/parsing/scanner.h"
#include "src/unicode-cache.h"
namespace v8 {
namespace internal {
namespace {
// Cap number of identifiers to ensure we can assign both global and
// local ones a token id in the range of an int32_t.
static const int kMaxIdentifierCount = 0xf000000;
}  // namespace
AsmJsScanner::AsmJsScanner()
: token_(kUninitialized),
preceding_token_(kUninitialized),
next_token_(kUninitialized),
position_(0),
preceding_position_(0),
next_position_(0),
rewind_(false),
in_local_scope_(false),
global_count_(0),
double_value_(0.0),
unsigned_value_(0),
preceded_by_newline_(false) {
#define V(name, _junk1, _junk2, _junk3) property_names_[#name] = kToken_##name;
STDLIB_MATH_FUNCTION_LIST(V)
STDLIB_ARRAY_TYPE_LIST(V)
#undef V
#define V(name) property_names_[#name] = kToken_##name;
STDLIB_MATH_VALUE_LIST(V)
STDLIB_OTHER_LIST(V)
#undef V
#define V(name) global_names_[#name] = kToken_##name;
KEYWORD_NAME_LIST(V)
#undef V
}
void AsmJsScanner::SetStream(std::unique_ptr<Utf16CharacterStream> stream) {
stream_ = std::move(stream);
Next();
}
void AsmJsScanner::Next() {
if (rewind_) {
preceding_token_ = token_;
preceding_position_ = position_;
token_ = next_token_;
position_ = next_position_;
next_token_ = kUninitialized;
next_position_ = 0;
rewind_ = false;
return;
}
if (token_ == kEndOfInput || token_ == kParseError) {
return;
}
#if DEBUG
if (FLAG_trace_asm_scanner) {
if (Token() == kDouble) {
PrintF("%lf ", AsDouble());
} else if (Token() == kUnsigned) {
PrintF("%" PRIu64 " ", AsUnsigned());
} else {
std::string name = Name(Token());
PrintF("%s ", name.c_str());
}
}
#endif
preceded_by_newline_ = false;
preceding_token_ = token_;
preceding_position_ = position_;
for (;;) {
position_ = stream_->pos();
uc32 ch = stream_->Advance();
switch (ch) {
case ' ':
case '\t':
case '\r':
// Ignore whitespace.
break;
case '\n':
// Track when we've passed a newline for optional semicolon support,
// but keep scanning.
preceded_by_newline_ = true;
break;
case kEndOfInput:
token_ = kEndOfInput;
return;
case '\'':
case '"':
ConsumeString(ch);
return;
case '/':
ch = stream_->Advance();
if (ch == '/') {
ConsumeCPPComment();
} else if (ch == '*') {
if (!ConsumeCComment()) {
token_ = kParseError;
return;
}
} else {
stream_->Back();
token_ = '/';
return;
}
// Breaks out of switch, but loops again (i.e. the case when we parsed
// a comment, but need to continue to look for the next token).
break;
case '<':
case '>':
case '=':
case '!':
ConsumeCompareOrShift(ch);
return;
#define V(single_char_token) case single_char_token:
SIMPLE_SINGLE_TOKEN_LIST(V)
#undef V
// Use fixed token IDs for ASCII.
token_ = ch;
return;
default:
if (IsIdentifierStart(ch)) {
ConsumeIdentifier(ch);
} else if (IsNumberStart(ch)) {
ConsumeNumber(ch);
} else {
// TODO(bradnelson): Support unicode (probably via UnicodeCache).
token_ = kParseError;
}
return;
}
}
}
void AsmJsScanner::Rewind() {
DCHECK_NE(kUninitialized, preceding_token_);
// TODO(bradnelson): Currently rewinding needs to leave in place the
// preceding newline state (in case a |0 ends a line).
// This is weird and stateful, fix me.
DCHECK(!rewind_);
next_token_ = token_;
next_position_ = position_;
token_ = preceding_token_;
position_ = preceding_position_;
preceding_token_ = kUninitialized;
preceding_position_ = 0;
rewind_ = true;
identifier_string_.clear();
}
void AsmJsScanner::ResetLocals() { local_names_.clear(); }
#if DEBUG
// Only used for debugging.
std::string AsmJsScanner::Name(token_t token) const {
if (token >= 32 && token < 127) {
return std::string(1, static_cast<char>(token));
}
for (auto& i : local_names_) {
if (i.second == token) {
return i.first;
}
}
for (auto& i : global_names_) {
if (i.second == token) {
return i.first;
}
}
for (auto& i : property_names_) {
if (i.second == token) {
return i.first;
}
}
switch (token) {
#define V(rawname, name) \
case kToken_##name: \
return rawname;
LONG_SYMBOL_NAME_LIST(V)
#undef V
#define V(name, value, string_name) \
case name: \
return string_name;
SPECIAL_TOKEN_LIST(V)
#undef V
default:
break;
}
UNREACHABLE();
return "{unreachable}";
}
#endif
int AsmJsScanner::GetPosition() const {
DCHECK(!rewind_);
return static_cast<int>(stream_->pos());
}
void AsmJsScanner::Seek(int pos) {
stream_->Seek(pos);
preceding_token_ = kUninitialized;
token_ = kUninitialized;
next_token_ = kUninitialized;
preceding_position_ = 0;
position_ = 0;
next_position_ = 0;
rewind_ = false;
Next();
}
void AsmJsScanner::ConsumeIdentifier(uc32 ch) {
// Consume characters while still part of the identifier.
identifier_string_.clear();
while (IsIdentifierPart(ch)) {
identifier_string_ += ch;
ch = stream_->Advance();
}
// Go back one for next time.
stream_->Back();
// Decode what the identifier means.
if (preceding_token_ == '.') {
auto i = property_names_.find(identifier_string_);
if (i != property_names_.end()) {
token_ = i->second;
return;
}
} else {
{
auto i = local_names_.find(identifier_string_);
if (i != local_names_.end()) {
token_ = i->second;
return;
}
}
if (!in_local_scope_) {
auto i = global_names_.find(identifier_string_);
if (i != global_names_.end()) {
token_ = i->second;
return;
}
}
}
if (preceding_token_ == '.') {
CHECK(global_count_ < kMaxIdentifierCount);
token_ = kGlobalsStart + global_count_++;
property_names_[identifier_string_] = token_;
} else if (in_local_scope_) {
CHECK(local_names_.size() < kMaxIdentifierCount);
token_ = kLocalsStart - static_cast<token_t>(local_names_.size());
local_names_[identifier_string_] = token_;
} else {
CHECK(global_count_ < kMaxIdentifierCount);
token_ = kGlobalsStart + global_count_++;
global_names_[identifier_string_] = token_;
}
}
void AsmJsScanner::ConsumeNumber(uc32 ch) {
std::string number;
number = ch;
bool has_dot = ch == '.';
for (;;) {
ch = stream_->Advance();
if ((ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f') ||
(ch >= 'A' && ch <= 'F') || ch == '.' || ch == 'b' || ch == 'o' ||
ch == 'x' ||
((ch == '-' || ch == '+') && (number[number.size() - 1] == 'e' ||
number[number.size() - 1] == 'E'))) {
// TODO(bradnelson): Test weird cases ending in -.
if (ch == '.') {
has_dot = true;
}
number.push_back(ch);
} else {
break;
}
}
stream_->Back();
// Special case the most common number.
if (number.size() == 1 && number[0] == '0') {
unsigned_value_ = 0;
token_ = kUnsigned;
return;
}
// Pick out dot.
if (number.size() == 1 && number[0] == '.') {
token_ = '.';
return;
}
// Decode numbers.
UnicodeCache cache;
double_value_ = StringToDouble(
&cache,
Vector<uint8_t>(
const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(number.data())),
static_cast<int>(number.size())),
ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY | ALLOW_IMPLICIT_OCTAL);
if (std::isnan(double_value_)) {
// Check if the string-to-number conversion didn't consume all the
// characters. This happens if the character filter let through something
// invalid, e.g. 0123ef.
// TODO(bradnelson): Check if this happens often enough to be a perf
// problem.
if (number[0] == '.') {
for (size_t k = 1; k < number.size(); ++k) {
stream_->Back();
}
token_ = '.';
return;
}
// Anything else that doesn't parse is an error.
token_ = kParseError;
return;
}
if (has_dot) {
token_ = kDouble;
} else {
unsigned_value_ = static_cast<uint32_t>(double_value_);
token_ = kUnsigned;
}
}
bool AsmJsScanner::ConsumeCComment() {
for (;;) {
uc32 ch = stream_->Advance();
while (ch == '*') {
ch = stream_->Advance();
if (ch == '/') {
return true;
}
}
if (ch == kEndOfInput) {
return false;
}
}
}
void AsmJsScanner::ConsumeCPPComment() {
for (;;) {
uc32 ch = stream_->Advance();
if (ch == '\n' || ch == kEndOfInput) {
return;
}
}
}
void AsmJsScanner::ConsumeString(uc32 quote) {
// The only string allowed is 'use asm' / "use asm".
const char* expected = "use asm";
for (; *expected != '\0'; ++expected) {
if (stream_->Advance() != *expected) {
token_ = kParseError;
return;
}
}
if (stream_->Advance() != quote) {
token_ = kParseError;
return;
}
token_ = kToken_UseAsm;
}
void AsmJsScanner::ConsumeCompareOrShift(uc32 ch) {
uc32 next_ch = stream_->Advance();
if (next_ch == '=') {
switch (ch) {
case '<':
token_ = kToken_LE;
break;
case '>':
token_ = kToken_GE;
break;
case '=':
token_ = kToken_EQ;
break;
case '!':
token_ = kToken_NE;
break;
default:
UNREACHABLE();
}
} else if (ch == '<' && next_ch == '<') {
token_ = kToken_SHL;
} else if (ch == '>' && next_ch == '>') {
if (stream_->Advance() == '>') {
token_ = kToken_SHR;
} else {
token_ = kToken_SAR;
stream_->Back();
}
} else {
stream_->Back();
token_ = ch;
}
}
bool AsmJsScanner::IsIdentifierStart(uc32 ch) {
return (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z') || ch == '_' ||
ch == '$';
}
bool AsmJsScanner::IsIdentifierPart(uc32 ch) {
return IsIdentifierStart(ch) || (ch >= '0' && ch <= '9');
}
bool AsmJsScanner::IsNumberStart(uc32 ch) {
return ch == '.' || (ch >= '0' && ch <= '9');
}
} // namespace internal
} // namespace v8
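Next() and Rewind() above implement exactly one token of lookback: Next() records the preceding token before advancing, and Rewind() swaps it back while stashing the current token so the following Next() can restore it. A standalone sketch of just that mechanism, with hypothetical names (not V8 code):

#include <cassert>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

class OneTokenRewinder {
 public:
  explicit OneTokenRewinder(std::vector<std::string> tokens)
      : tokens_(std::move(tokens)) {}
  const std::string& Token() const { return token_; }
  void Next() {
    if (rewind_) {  // Restore the token stashed by Rewind().
      token_ = next_token_;
      rewind_ = false;
      return;
    }
    preceding_token_ = token_;
    token_ = pos_ < tokens_.size() ? tokens_[pos_++] : "<eof>";
  }
  void Rewind() {
    assert(!rewind_);  // Only a single token of lookback is supported.
    next_token_ = token_;
    token_ = preceding_token_;
    rewind_ = true;
  }

 private:
  std::vector<std::string> tokens_;
  size_t pos_ = 0;
  std::string token_, preceding_token_, next_token_;
  bool rewind_ = false;
};

int main() {
  OneTokenRewinder s({"a", "|", "0"});
  s.Next();
  s.Next();                        // current token is "|"
  s.Rewind();                      // back to "a"
  std::cout << s.Token() << "\n";  // prints "a"
  s.Next();                        // forward to "|" again
  std::cout << s.Token() << "\n";  // prints "|"
}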

165
deps/v8/src/asmjs/asm-scanner.h

@@ -0,0 +1,165 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ASMJS_ASM_SCANNER_H_
#define V8_ASMJS_ASM_SCANNER_H_
#include <memory>
#include <string>
#include <unordered_map>
#include "src/asmjs/asm-names.h"
#include "src/base/logging.h"
#include "src/globals.h"
namespace v8 {
namespace internal {
class Utf16CharacterStream;
// A custom scanner to extract the token stream needed to parse valid
// asm.js: http://asmjs.org/spec/latest/
// This scanner intentionally avoids the portions of JavaScript lexing
// that are not required to determine whether code is valid asm.js code.
// * Strings are disallowed except for 'use asm'.
// * Only the subset of keywords needed to check asm.js invariants are
// included.
// * Identifiers are accumulated into local + global string tables
// (for performance).
class V8_EXPORT_PRIVATE AsmJsScanner {
public:
typedef int32_t token_t;
AsmJsScanner();
// Pick the stream to parse (must be called before anything else).
void SetStream(std::unique_ptr<Utf16CharacterStream> stream);
// Get current token.
token_t Token() const { return token_; }
// Get position of current token.
size_t Position() const { return position_; }
// Advance to the next token.
void Next();
// Back up by one token.
void Rewind();
// Get raw string for current identifier.
const std::string& GetIdentifierString() const {
// Identifier strings don't work after a rewind.
DCHECK(!rewind_);
return identifier_string_;
}
// Check if we just passed a newline.
bool IsPrecededByNewline() const {
// Newline tracking doesn't work if you back up.
DCHECK(!rewind_);
return preceded_by_newline_;
}
#if DEBUG
// Debug only method to go from a token back to its name.
// Slow, only use for debugging.
std::string Name(token_t token) const;
#endif
// Get current position (to use with Seek).
int GetPosition() const;
// Restores old position (token after that position).
void Seek(int pos);
// Select whether identifiers are resolved in global or local scope,
// and which scope new identifiers are added to.
void EnterLocalScope() { in_local_scope_ = true; }
void EnterGlobalScope() { in_local_scope_ = false; }
// Drop all current local identifiers.
void ResetLocals();
// Methods to check if a token is an identifier and which scope.
bool IsLocal() const { return IsLocal(Token()); }
bool IsGlobal() const { return IsGlobal(Token()); }
static bool IsLocal(token_t token) { return token <= kLocalsStart; }
static bool IsGlobal(token_t token) { return token >= kGlobalsStart; }
// Methods to find the index position of an identifier (count starting from
// 0 for each scope separately).
static size_t LocalIndex(token_t token) {
DCHECK(IsLocal(token));
return -(token - kLocalsStart);
}
static size_t GlobalIndex(token_t token) {
DCHECK(IsGlobal(token));
return token - kGlobalsStart;
}
// Methods to check if the current token is an asm.js "number" (contains a
// dot) or an "unsigned" (a number without a dot).
bool IsUnsigned() const { return Token() == kUnsigned; }
uint64_t AsUnsigned() const { return unsigned_value_; }
bool IsDouble() const { return Token() == kDouble; }
double AsDouble() const { return double_value_; }
// clang-format off
enum {
// (-10000-kMaxIdentifierCount, -10000] :: Local identifiers (counting
// backwards from kLocalsStart)
// [-10000 .. -1) :: Builtin tokens like keywords
// (also includes some special
// ones like end of input)
// 0 .. 255 :: Single char tokens
// 256 .. 256+kMaxIdentifierCount :: Global identifiers
kLocalsStart = -10000,
#define V(name, _junk1, _junk2, _junk3) kToken_##name,
STDLIB_MATH_FUNCTION_LIST(V)
STDLIB_ARRAY_TYPE_LIST(V)
#undef V
#define V(name) kToken_##name,
STDLIB_OTHER_LIST(V)
STDLIB_MATH_VALUE_LIST(V)
KEYWORD_NAME_LIST(V)
#undef V
#define V(rawname, name) kToken_##name,
LONG_SYMBOL_NAME_LIST(V)
#undef V
#define V(name, value, string_name) name = value,
SPECIAL_TOKEN_LIST(V)
#undef V
kGlobalsStart = 256,
};
// clang-format on
private:
std::unique_ptr<Utf16CharacterStream> stream_;
token_t token_;
token_t preceding_token_;
token_t next_token_; // Only set when in {rewind} state.
size_t position_; // Corresponds to {token} position.
size_t preceding_position_; // Corresponds to {preceding_token} position.
size_t next_position_; // Only set when in {rewind} state.
bool rewind_;
std::string identifier_string_;
bool in_local_scope_;
std::unordered_map<std::string, token_t> local_names_;
std::unordered_map<std::string, token_t> global_names_;
std::unordered_map<std::string, token_t> property_names_;
int global_count_;
double double_value_;
uint64_t unsigned_value_;
bool preceded_by_newline_;
// Consume multiple characters.
void ConsumeIdentifier(uc32 ch);
void ConsumeNumber(uc32 ch);
bool ConsumeCComment();
void ConsumeCPPComment();
void ConsumeString(uc32 quote);
void ConsumeCompareOrShift(uc32 ch);
// Classify character categories.
bool IsIdentifierStart(uc32 ch);
bool IsIdentifierPart(uc32 ch);
bool IsNumberStart(uc32 ch);
};
} // namespace internal
} // namespace v8
#endif // V8_ASMJS_ASM_SCANNER_H_
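The enum above packs four token classes into a single int32_t space: locals count downward from kLocalsStart, builtin tokens sit between kLocalsStart and zero, single-char tokens keep their ASCII value, and globals count upward from kGlobalsStart. A standalone check of the index arithmetic, assuming only the constants and helpers visible above (not compiled against V8):

#include <cstddef>
#include <cstdint>
#include <iostream>

using token_t = int32_t;
constexpr token_t kLocalsStart = -10000;
constexpr token_t kGlobalsStart = 256;

bool IsLocal(token_t t) { return t <= kLocalsStart; }
bool IsGlobal(token_t t) { return t >= kGlobalsStart; }
size_t LocalIndex(token_t t) { return -(t - kLocalsStart); }
size_t GlobalIndex(token_t t) { return t - kGlobalsStart; }

int main() {
  // The scanner assigns kLocalsStart - local_names_.size() to new locals
  // and kGlobalsStart + global_count_++ to new globals.
  token_t first_local = kLocalsStart;
  token_t second_local = kLocalsStart - 1;
  token_t first_global = kGlobalsStart;
  std::cout << IsLocal(first_local)        // 1
            << IsGlobal(first_global)      // 1
            << LocalIndex(second_local)    // 1
            << GlobalIndex(first_global)   // 0
            << "\n";
}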

12
deps/v8/src/asmjs/asm-wasm-builder.cc

@@ -91,6 +91,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
     FunctionSig::Builder b(zone(), 0, 0);
     init_function_ = builder_->AddFunction(b.Build());
     builder_->MarkStartFunction(init_function_);
+    // Record start of the function, used as position for the stack check.
+    init_function_->SetAsmFunctionStartPosition(literal_->start_position());
   }
 
   void BuildForeignInitFunction() {
@@ -170,7 +172,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
       new_func_scope = new (info->zone()) DeclarationScope(
           info->zone(), decl->fun()->scope()->outer_scope(), FUNCTION_SCOPE);
       info->set_asm_function_scope(new_func_scope);
-      if (!Compiler::ParseAndAnalyze(info.get())) {
+      if (!Compiler::ParseAndAnalyze(info.get(), info_->isolate())) {
         decl->fun()->scope()->outer_scope()->RemoveInnerScope(new_func_scope);
         if (isolate_->has_pending_exception()) {
           isolate_->clear_pending_exception();
@@ -224,6 +226,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
       }
       RECURSE(Visit(stmt));
       if (typer_failed_) break;
+      // Not stopping when a jump statement is found.
     }
   }
@@ -300,6 +303,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
   void VisitGetIterator(GetIterator* expr) { UNREACHABLE(); }
 
+  void VisitImportCallExpression(ImportCallExpression* expr) { UNREACHABLE(); }
+
   void VisitIfStatement(IfStatement* stmt) {
     DCHECK_EQ(kFuncScope, scope_);
     RECURSE(Visit(stmt->condition()));
@@ -1066,7 +1071,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
     if (as_init) UnLoadInitFunction();
   }
 
-  void VisitYield(Yield* expr) { UNREACHABLE(); }
+  void VisitSuspend(Suspend* expr) { UNREACHABLE(); }
 
   void VisitThrow(Throw* expr) { UNREACHABLE(); }
@@ -2001,6 +2006,9 @@ AsmWasmBuilder::Result AsmWasmBuilder::Run(Handle<FixedArray>* foreign_args) {
       info_->parse_info()->ast_value_factory(),
       info_->script(), info_->literal(), &typer_);
   bool success = impl.Build();
+  if (!success) {
+    return {nullptr, nullptr, success};
+  }
   *foreign_args = impl.GetForeignArgs();
   ZoneBuffer* module_buffer = new (zone) ZoneBuffer(zone);
   impl.builder_->WriteTo(*module_buffer);
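Note the early return added to AsmWasmBuilder::Run: on failure it now hands back null buffers instead of writing a module, so callers must test the success flag before touching the result. A standalone sketch of that contract, with hypothetical stand-ins for the V8 types:

#include <iostream>
#include <vector>

struct Result {
  std::vector<char>* module_bytes;  // null when the build failed
  std::vector<char>* asm_offsets;   // null when the build failed
  bool success;
};

Result Run(bool build_ok) {
  if (!build_ok) {
    return {nullptr, nullptr, build_ok};  // bail out before writing buffers
  }
  static std::vector<char> module{'\0', 'a', 's', 'm'};
  static std::vector<char> offsets;
  return {&module, &offsets, build_ok};
}

int main() {
  Result r = Run(false);
  std::cout << (r.success ? "ok" : "failed, buffers are null") << "\n";
}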

130
deps/v8/src/assembler.cc

@@ -138,35 +138,40 @@ const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
 // -----------------------------------------------------------------------------
 // Implementation of AssemblerBase
 
-AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
-    : isolate_(isolate),
-      jit_cookie_(0),
+AssemblerBase::IsolateData::IsolateData(Isolate* isolate)
+    : serializer_enabled_(isolate->serializer_enabled())
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+      ,
+      max_old_generation_size_(isolate->heap()->MaxOldGenerationSize())
+#endif
+#if V8_TARGET_ARCH_X64
+      ,
+      code_range_start_(
+          isolate->heap()->memory_allocator()->code_range()->start())
+#endif
+{
+}
+
+AssemblerBase::AssemblerBase(IsolateData isolate_data, void* buffer,
+                             int buffer_size)
+    : isolate_data_(isolate_data),
       enabled_cpu_features_(0),
       emit_debug_code_(FLAG_debug_code),
       predictable_code_size_(false),
-      // We may use the assembler without an isolate.
-      serializer_enabled_(isolate && isolate->serializer_enabled()),
       constant_pool_available_(false) {
-  DCHECK_NOT_NULL(isolate);
-  if (FLAG_mask_constants_with_cookie) {
-    jit_cookie_ = isolate->random_number_generator()->NextInt();
-  }
   own_buffer_ = buffer == NULL;
   if (buffer_size == 0) buffer_size = kMinimalBufferSize;
   DCHECK(buffer_size > 0);
   if (own_buffer_) buffer = NewArray<byte>(buffer_size);
   buffer_ = static_cast<byte*>(buffer);
   buffer_size_ = buffer_size;
   pc_ = buffer_;
 }
 
 AssemblerBase::~AssemblerBase() {
   if (own_buffer_) DeleteArray(buffer_);
 }
 
 void AssemblerBase::FlushICache(Isolate* isolate, void* start, size_t size) {
   if (size == 0) return;
@@ -178,10 +183,9 @@ void AssemblerBase::FlushICache(Isolate* isolate, void* start, size_t size) {
 #endif  // USE_SIMULATOR
 }
 
-void AssemblerBase::Print() {
+void AssemblerBase::Print(Isolate* isolate) {
   OFStream os(stdout);
-  v8::internal::Disassembler::Decode(isolate(), &os, buffer_, pc_, nullptr);
+  v8::internal::Disassembler::Decode(isolate, &os, buffer_, pc_, nullptr);
 }
@@ -308,68 +312,62 @@ const int kCodeWithIdTag = 0;
 const int kDeoptReasonTag = 1;
 
 void RelocInfo::update_wasm_memory_reference(
-    Address old_base, Address new_base, ICacheFlushMode icache_flush_mode) {
+    Isolate* isolate, Address old_base, Address new_base,
+    ICacheFlushMode icache_flush_mode) {
   DCHECK(IsWasmMemoryReference(rmode_));
   DCHECK_GE(wasm_memory_reference(), old_base);
   Address updated_reference = new_base + (wasm_memory_reference() - old_base);
   // The reference is not checked here but at runtime. Validity of references
   // may change over time.
-  unchecked_update_wasm_memory_reference(updated_reference, icache_flush_mode);
-  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
-  }
+  unchecked_update_wasm_memory_reference(isolate, updated_reference,
+                                         icache_flush_mode);
 }
 
-void RelocInfo::update_wasm_memory_size(uint32_t old_size, uint32_t new_size,
+void RelocInfo::update_wasm_memory_size(Isolate* isolate, uint32_t old_size,
+                                        uint32_t new_size,
                                         ICacheFlushMode icache_flush_mode) {
   DCHECK(IsWasmMemorySizeReference(rmode_));
   uint32_t current_size_reference = wasm_memory_size_reference();
   uint32_t updated_size_reference =
       new_size + (current_size_reference - old_size);
-  unchecked_update_wasm_size(updated_size_reference, icache_flush_mode);
-  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
-  }
+  unchecked_update_wasm_size(isolate, updated_size_reference,
+                             icache_flush_mode);
 }
 
 void RelocInfo::update_wasm_global_reference(
-    Address old_base, Address new_base, ICacheFlushMode icache_flush_mode) {
+    Isolate* isolate, Address old_base, Address new_base,
+    ICacheFlushMode icache_flush_mode) {
   DCHECK(IsWasmGlobalReference(rmode_));
   Address updated_reference;
-  DCHECK(reinterpret_cast<uintptr_t>(old_base) <=
-         reinterpret_cast<uintptr_t>(wasm_global_reference()));
+  DCHECK_LE(old_base, wasm_global_reference());
   updated_reference = new_base + (wasm_global_reference() - old_base);
-  DCHECK(reinterpret_cast<uintptr_t>(new_base) <=
-         reinterpret_cast<uintptr_t>(updated_reference));
-  unchecked_update_wasm_memory_reference(updated_reference, icache_flush_mode);
-  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    Assembler::FlushICache(isolate_, pc_, sizeof(int32_t));
-  }
+  DCHECK_LE(new_base, updated_reference);
+  unchecked_update_wasm_memory_reference(isolate, updated_reference,
+                                         icache_flush_mode);
 }
 
 void RelocInfo::update_wasm_function_table_size_reference(
-    uint32_t old_size, uint32_t new_size, ICacheFlushMode icache_flush_mode) {
+    Isolate* isolate, uint32_t old_size, uint32_t new_size,
+    ICacheFlushMode icache_flush_mode) {
   DCHECK(IsWasmFunctionTableSizeReference(rmode_));
   uint32_t current_size_reference = wasm_function_table_size_reference();
   uint32_t updated_size_reference =
       new_size + (current_size_reference - old_size);
-  unchecked_update_wasm_size(updated_size_reference, icache_flush_mode);
-  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
-  }
+  unchecked_update_wasm_size(isolate, updated_size_reference,
+                             icache_flush_mode);
 }
 
-void RelocInfo::set_target_address(Address target,
+void RelocInfo::set_target_address(Isolate* isolate, Address target,
                                    WriteBarrierMode write_barrier_mode,
                                    ICacheFlushMode icache_flush_mode) {
   DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
-  Assembler::set_target_address_at(isolate_, pc_, host_, target,
+  Assembler::set_target_address_at(isolate, pc_, host_, target,
                                    icache_flush_mode);
   if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
       IsCodeTarget(rmode_)) {
-    Object* target_code = Code::GetCodeFromTargetAddress(target);
-    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
-        host(), this, HeapObject::cast(target_code));
+    Code* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
                                                                   target_code);
   }
 }
@@ -652,9 +650,7 @@ void RelocIterator::next() {
   done_ = true;
 }
 
-RelocIterator::RelocIterator(Code* code, int mode_mask)
-    : rinfo_(code->map()->GetIsolate()) {
+RelocIterator::RelocIterator(Code* code, int mode_mask) {
   rinfo_.host_ = code;
   rinfo_.pc_ = code->instruction_start();
   rinfo_.data_ = 0;
@@ -677,9 +673,7 @@ RelocIterator::RelocIterator(Code* code, int mode_mask)
   next();
 }
 
-RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
-    : rinfo_(desc.origin->isolate()) {
+RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
   rinfo_.pc_ = desc.buffer;
   rinfo_.data_ = 0;
   // Relocation info is read backwards.
@@ -702,7 +696,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
 }
 
 #ifdef DEBUG
-bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
+bool RelocInfo::RequiresRelocation(Isolate* isolate, const CodeDesc& desc) {
   // Ensure there are no code targets or embedded objects present in the
   // deoptimization entries, they would require relocation after code
   // generation.
@@ -1234,6 +1228,11 @@ ExternalReference ExternalReference::address_of_regexp_stack_limit(
   return ExternalReference(isolate->regexp_stack()->limit_address());
 }
 
+ExternalReference ExternalReference::address_of_regexp_dotall_flag(
+    Isolate* isolate) {
+  return ExternalReference(&FLAG_harmony_regexp_dotall);
+}
+
 ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
   return ExternalReference(isolate->heap()->store_buffer_top_address());
 }
@@ -1546,6 +1545,23 @@ ExternalReference ExternalReference::libc_memchr_function(Isolate* isolate) {
   return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memchr)));
 }
 
+void* libc_memcpy(void* dest, const void* src, size_t n) {
+  return memcpy(dest, src, n);
+}
+
+ExternalReference ExternalReference::libc_memcpy_function(Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memcpy)));
+}
+
+void* libc_memset(void* dest, int byte, size_t n) {
+  DCHECK_EQ(static_cast<char>(byte), byte);
+  return memset(dest, byte, n);
+}
+
+ExternalReference ExternalReference::libc_memset_function(Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memset)));
+}
+
 ExternalReference ExternalReference::page_flags(Page* page) {
   return ExternalReference(reinterpret_cast<Address>(page) +
                            MemoryChunk::kFlagsOffset);
@@ -1902,13 +1918,11 @@ int ConstantPoolBuilder::Emit(Assembler* assm) {
 
 void Assembler::RecordDeoptReason(DeoptimizeReason reason,
                                   SourcePosition position, int id) {
-  if (FLAG_trace_deopt || isolate()->is_profiling()) {
-    EnsureSpace ensure_space(this);
-    RecordRelocInfo(RelocInfo::DEOPT_SCRIPT_OFFSET, position.ScriptOffset());
-    RecordRelocInfo(RelocInfo::DEOPT_INLINING_ID, position.InliningId());
-    RecordRelocInfo(RelocInfo::DEOPT_REASON, static_cast<int>(reason));
-    RecordRelocInfo(RelocInfo::DEOPT_ID, id);
-  }
+  EnsureSpace ensure_space(this);
+  RecordRelocInfo(RelocInfo::DEOPT_SCRIPT_OFFSET, position.ScriptOffset());
+  RecordRelocInfo(RelocInfo::DEOPT_INLINING_ID, position.InliningId());
+  RecordRelocInfo(RelocInfo::DEOPT_REASON, static_cast<int>(reason));
+  RecordRelocInfo(RelocInfo::DEOPT_ID, id);
 }
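libc_memcpy and libc_memset above follow the same recipe as the existing libc_memchr: wrap the libc call in a plain function and expose its address as an external reference (on the simulator, Redirect routes such calls through a trampoline). Redirect and ExternalReference are V8-internal, so this standalone sketch only models the wrapper-plus-function-pointer half of the pattern:

#include <cstring>
#include <iostream>

// A wrapper with a fixed signature whose address generated code can embed.
void* wrapped_memcpy(void* dest, const void* src, size_t n) {
  return memcpy(dest, src, n);
}

using MemcpyFn = void* (*)(void*, const void*, size_t);

int main() {
  char src[] = "asm.js";
  char dst[sizeof(src)] = {};
  MemcpyFn fn = &wrapped_memcpy;  // stands in for the external reference
  fn(dst, src, sizeof(src));      // call back through the stored address
  std::cout << dst << "\n";       // prints "asm.js"
}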

81
deps/v8/src/assembler.h

@@ -64,18 +64,30 @@ enum class CodeObjectRequired { kNo, kYes };
 class AssemblerBase: public Malloced {
  public:
-  AssemblerBase(Isolate* isolate, void* buffer, int buffer_size);
+  struct IsolateData {
+    explicit IsolateData(Isolate* isolate);
+    IsolateData(const IsolateData&) = default;
+
+    bool serializer_enabled_;
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+    size_t max_old_generation_size_;
+#endif
+#if V8_TARGET_ARCH_X64
+    Address code_range_start_;
+#endif
+  };
+
+  AssemblerBase(IsolateData isolate_data, void* buffer, int buffer_size);
   virtual ~AssemblerBase();
 
-  Isolate* isolate() const { return isolate_; }
-  int jit_cookie() const { return jit_cookie_; }
+  IsolateData isolate_data() const { return isolate_data_; }
+
+  bool serializer_enabled() const { return isolate_data_.serializer_enabled_; }
+  void enable_serializer() { isolate_data_.serializer_enabled_ = true; }
 
   bool emit_debug_code() const { return emit_debug_code_; }
   void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
 
-  bool serializer_enabled() const { return serializer_enabled_; }
-  void enable_serializer() { serializer_enabled_ = true; }
-
   bool predictable_code_size() const { return predictable_code_size_; }
   void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
@@ -113,7 +125,7 @@ class AssemblerBase: public Malloced {
   virtual void AbortedCodeGeneration() { }
 
   // Debugging
-  void Print();
+  void Print(Isolate* isolate);
 
   static const int kMinimalBufferSize = 4*KB;
@@ -139,12 +151,10 @@ class AssemblerBase: public Malloced {
   byte* pc_;
 
  private:
-  Isolate* isolate_;
-  int jit_cookie_;
+  IsolateData isolate_data_;
   uint64_t enabled_cpu_features_;
   bool emit_debug_code_;
   bool predictable_code_size_;
-  bool serializer_enabled_;
 
   // Indicates whether the constant pool can be accessed, which is only possible
   // if the pp register points to the current code object's constant pool.
@@ -241,7 +251,7 @@ class CpuFeatures : public AllStatic {
   static inline bool SupportsCrankshaft();
 
-  static inline bool SupportsSimd128();
+  static inline bool SupportsWasmSimd128();
 
   static inline unsigned icache_line_size() {
     DCHECK(icache_line_size_ != 0);
@@ -372,14 +382,10 @@ class RelocInfo {
   STATIC_ASSERT(NUMBER_OF_MODES <= kBitsPerInt);
 
-  explicit RelocInfo(Isolate* isolate) : isolate_(isolate) {
-    DCHECK_NOT_NULL(isolate);
-  }
+  RelocInfo() = default;
 
-  RelocInfo(Isolate* isolate, byte* pc, Mode rmode, intptr_t data, Code* host)
-      : isolate_(isolate), pc_(pc), rmode_(rmode), data_(data), host_(host) {
-    DCHECK_NOT_NULL(isolate);
-  }
+  RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
+      : pc_(pc), rmode_(rmode), data_(data), host_(host) {}
 
   static inline bool IsRealRelocMode(Mode mode) {
     return mode >= FIRST_REAL_RELOC_MODE && mode <= LAST_REAL_RELOC_MODE;
@@ -478,7 +484,6 @@ class RelocInfo {
   static inline int ModeMask(Mode mode) { return 1 << mode; }
 
   // Accessors
-  Isolate* isolate() const { return isolate_; }
   byte* pc() const { return pc_; }
   void set_pc(byte* pc) { pc_ = pc; }
   Mode rmode() const { return rmode_; }
@@ -506,34 +511,34 @@ class RelocInfo {
   uint32_t wasm_function_table_size_reference();
   uint32_t wasm_memory_size_reference();
   void update_wasm_memory_reference(
-      Address old_base, Address new_base,
+      Isolate* isolate, Address old_base, Address new_base,
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
   void update_wasm_memory_size(
-      uint32_t old_size, uint32_t new_size,
+      Isolate* isolate, uint32_t old_size, uint32_t new_size,
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
   void update_wasm_global_reference(
-      Address old_base, Address new_base,
+      Isolate* isolate, Address old_base, Address new_base,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
   void update_wasm_function_table_size_reference(
-      uint32_t old_base, uint32_t new_base,
+      Isolate* isolate, uint32_t old_base, uint32_t new_base,
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
   void set_target_address(
-      Address target,
+      Isolate* isolate, Address target,
       WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
 
   // this relocation applies to;
   // can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
   INLINE(Address target_address());
-  INLINE(Object* target_object());
-  INLINE(Handle<Object> target_object_handle(Assembler* origin));
+  INLINE(HeapObject* target_object());
+  INLINE(Handle<HeapObject> target_object_handle(Assembler* origin));
   INLINE(void set_target_object(
-      Object* target,
+      HeapObject* target,
       WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
   INLINE(Address target_runtime_entry(Assembler* origin));
   INLINE(void set_target_runtime_entry(
-      Address target,
+      Isolate* isolate, Address target,
       WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
   INLINE(Cell* target_cell());
@@ -541,7 +546,7 @@ class RelocInfo {
   INLINE(void set_target_cell(
       Cell* cell, WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
-  INLINE(Handle<Object> code_age_stub_handle(Assembler* origin));
+  INLINE(Handle<Code> code_age_stub_handle(Assembler* origin));
   INLINE(Code* code_age_stub());
   INLINE(void set_code_age_stub(
       Code* stub, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
@@ -585,11 +590,11 @@ class RelocInfo {
   // the break points where straight-line code is patched with a call
   // instruction.
   INLINE(Address debug_call_address());
-  INLINE(void set_debug_call_address(Address target));
+  INLINE(void set_debug_call_address(Isolate*, Address target));
 
   // Wipe out a relocation to a fixed value, used for making snapshots
   // reproducible.
-  INLINE(void WipeOut());
+  INLINE(void WipeOut(Isolate* isolate));
 
   template<typename StaticVisitor> inline void Visit(Heap* heap);
@@ -603,7 +608,7 @@ class RelocInfo {
 #ifdef DEBUG
   // Check whether the given code contains relocation information that
   // either is position-relative or movable by the garbage collector.
-  static bool RequiresRelocation(const CodeDesc& desc);
+  static bool RequiresRelocation(Isolate* isolate, const CodeDesc& desc);
 #endif
 
 #ifdef ENABLE_DISASSEMBLER
@@ -623,11 +628,11 @@ class RelocInfo {
   static const int kApplyMask;  // Modes affected by apply.  Depends on arch.
 
  private:
-  void unchecked_update_wasm_memory_reference(Address address,
+  void unchecked_update_wasm_memory_reference(Isolate* isolate, Address address,
                                               ICacheFlushMode flush_mode);
-  void unchecked_update_wasm_size(uint32_t size, ICacheFlushMode flush_mode);
+  void unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
+                                  ICacheFlushMode flush_mode);
 
-  Isolate* isolate_;
   // On ARM, note that pc_ is the address of the constant pool entry
   // to be relocated and not the address of the instruction
   // referencing the constant pool entry (except when rmode_ ==
@@ -918,6 +923,9 @@ class ExternalReference BASE_EMBEDDED {
   // Static variable RegExpStack::limit_address()
   static ExternalReference address_of_regexp_stack_limit(Isolate* isolate);
 
+  // Direct access to FLAG_harmony_regexp_dotall.
+  static ExternalReference address_of_regexp_dotall_flag(Isolate* isolate);
+
   // Static variables for RegExp.
   static ExternalReference address_of_static_offsets_vector(Isolate* isolate);
   static ExternalReference address_of_regexp_stack_memory_address(
@@ -981,6 +989,8 @@ class ExternalReference BASE_EMBEDDED {
   static ExternalReference ieee754_tanh_function(Isolate* isolate);
 
   static ExternalReference libc_memchr_function(Isolate* isolate);
+  static ExternalReference libc_memcpy_function(Isolate* isolate);
+  static ExternalReference libc_memset_function(Isolate* isolate);
 
   static ExternalReference page_flags(Page* page);
@@ -1076,7 +1086,6 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ExternalReference);
 // -----------------------------------------------------------------------------
 // Utility functions
 
-void* libc_memchr(void* string, int character, size_t search_length);
 
 inline int NumberOfBitsSet(uint32_t x) {
   unsigned int num_bits_set;
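The IsolateData change above swaps the assembler's Isolate* for a small, trivially copyable snapshot of the isolate-derived values it actually needs, which is what allows assemblers to run off the main thread; anything still isolate-dependent (Print, the RelocInfo updaters, RequiresRelocation) now takes the isolate explicitly. A standalone sketch of the snapshot pattern, with hypothetical names (not V8 code):

#include <cstddef>
#include <iostream>

struct Environment {  // stand-in for Isolate
  bool serializer_enabled = false;
  size_t max_old_generation_size = 256 * 1024 * 1024;
};

struct EnvironmentData {  // stand-in for AssemblerBase::IsolateData
  explicit EnvironmentData(const Environment& env)
      : serializer_enabled_(env.serializer_enabled),
        max_old_generation_size_(env.max_old_generation_size) {}
  EnvironmentData(const EnvironmentData&) = default;

  bool serializer_enabled_;
  size_t max_old_generation_size_;
};

class ToyAssembler {
 public:
  explicit ToyAssembler(EnvironmentData data) : data_(data) {}
  bool serializer_enabled() const { return data_.serializer_enabled_; }

 private:
  EnvironmentData data_;  // a value copy; no back-pointer to Environment
};

int main() {
  Environment env;
  ToyAssembler assembler{EnvironmentData(env)};
  // The assembler no longer needs `env` after construction.
  std::cout << assembler.serializer_enabled() << "\n";  // prints 0
}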

9
deps/v8/src/ast/ast-expression-rewriter.cc

@@ -265,8 +265,7 @@ void AstExpressionRewriter::VisitAssignment(Assignment* node) {
   AST_REWRITE_PROPERTY(Expression, node, value);
 }
 
-void AstExpressionRewriter::VisitYield(Yield* node) {
+void AstExpressionRewriter::VisitSuspend(Suspend* node) {
   REWRITE_THIS(node);
   AST_REWRITE_PROPERTY(Expression, node, generator_object);
   AST_REWRITE_PROPERTY(Expression, node, expression);
@@ -377,6 +376,12 @@ void AstExpressionRewriter::VisitGetIterator(GetIterator* node) {
   AST_REWRITE_PROPERTY(Expression, node, iterable);
 }
 
+void AstExpressionRewriter::VisitImportCallExpression(
+    ImportCallExpression* node) {
+  REWRITE_THIS(node);
+  AST_REWRITE_PROPERTY(Expression, node, argument);
+}
+
 void AstExpressionRewriter::VisitDoExpression(DoExpression* node) {
   REWRITE_THIS(node);
   AST_REWRITE_PROPERTY(Block, node, block);

75
deps/v8/src/ast/ast-numbering.cc

@@ -15,17 +15,19 @@ namespace internal {
 class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
  public:
   AstNumberingVisitor(uintptr_t stack_limit, Zone* zone,
-                      Compiler::EagerInnerFunctionLiterals* eager_literals)
+                      Compiler::EagerInnerFunctionLiterals* eager_literals,
+                      bool collect_type_profile = false)
       : zone_(zone),
         eager_literals_(eager_literals),
         next_id_(BailoutId::FirstUsable().ToInt()),
-        yield_count_(0),
+        suspend_count_(0),
         properties_(zone),
         language_mode_(SLOPPY),
         slot_cache_(zone),
         disable_crankshaft_reason_(kNoReason),
         dont_optimize_reason_(kNoReason),
-        catch_prediction_(HandlerTable::UNCAUGHT) {
+        catch_prediction_(HandlerTable::UNCAUGHT),
+        collect_type_profile_(collect_type_profile) {
     InitializeAstVisitor(stack_limit);
   }
@@ -93,7 +95,7 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
   Zone* zone_;
   Compiler::EagerInnerFunctionLiterals* eager_literals_;
   int next_id_;
-  int yield_count_;
+  int suspend_count_;
   AstProperties properties_;
   LanguageMode language_mode_;
   // The slot cache allows us to reuse certain feedback slots.
@@ -101,6 +103,7 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
   BailoutReason disable_crankshaft_reason_;
   BailoutReason dont_optimize_reason_;
   HandlerTable::CatchPrediction catch_prediction_;
+  bool collect_type_profile_;
 
   DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
   DISALLOW_COPY_AND_ASSIGN(AstNumberingVisitor);
@@ -238,12 +241,11 @@ void AstNumberingVisitor::VisitReturnStatement(ReturnStatement* node) {
          properties_.flags() & AstProperties::kMustUseIgnitionTurbo);
 }
 
-void AstNumberingVisitor::VisitYield(Yield* node) {
-  node->set_yield_id(yield_count_);
-  yield_count_++;
+void AstNumberingVisitor::VisitSuspend(Suspend* node) {
+  node->set_suspend_id(suspend_count_);
+  suspend_count_++;
   IncrementNodeCount();
-  node->set_base_id(ReserveIdRange(Yield::num_ids()));
+  node->set_base_id(ReserveIdRange(Suspend::num_ids()));
   Visit(node->generator_object());
   Visit(node->expression());
 }
@@ -322,10 +324,17 @@ void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
   // has to stash it somewhere. Changing the runtime function into another
   // one in ast-numbering seemed like a simple and straightforward solution to
   // that problem.
-  if (node->is_jsruntime() &&
-      node->context_index() == Context::ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX &&
-      catch_prediction_ == HandlerTable::ASYNC_AWAIT) {
-    node->set_context_index(Context::ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX);
+  if (node->is_jsruntime() && catch_prediction_ == HandlerTable::ASYNC_AWAIT) {
+    switch (node->context_index()) {
+      case Context::ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX:
+        node->set_context_index(Context::ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX);
+        break;
+      case Context::ASYNC_GENERATOR_AWAIT_CAUGHT:
+        node->set_context_index(Context::ASYNC_GENERATOR_AWAIT_UNCAUGHT);
+        break;
+      default:
+        break;
+    }
   }
 }
@@ -342,10 +351,10 @@ void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
   IncrementNodeCount();
   DisableSelfOptimization();
   node->set_base_id(ReserveIdRange(DoWhileStatement::num_ids()));
-  node->set_first_yield_id(yield_count_);
+  node->set_first_suspend_id(suspend_count_);
   Visit(node->body());
   Visit(node->cond());
-  node->set_yield_count(yield_count_ - node->first_yield_id());
+  node->set_suspend_count(suspend_count_ - node->first_suspend_id());
 }
@@ -353,10 +362,10 @@ void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
   IncrementNodeCount();
   DisableSelfOptimization();
   node->set_base_id(ReserveIdRange(WhileStatement::num_ids()));
-  node->set_first_yield_id(yield_count_);
+  node->set_first_suspend_id(suspend_count_);
   Visit(node->cond());
   Visit(node->body());
-  node->set_yield_count(yield_count_ - node->first_yield_id());
+  node->set_suspend_count(suspend_count_ - node->first_suspend_id());
 }
@@ -463,15 +472,22 @@ void AstNumberingVisitor::VisitGetIterator(GetIterator* node) {
   ReserveFeedbackSlots(node);
 }
 
+void AstNumberingVisitor::VisitImportCallExpression(
+    ImportCallExpression* node) {
+  IncrementNodeCount();
+  DisableFullCodegenAndCrankshaft(kDynamicImport);
+  Visit(node->argument());
+}
+
 void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
   IncrementNodeCount();
   DisableSelfOptimization();
   node->set_base_id(ReserveIdRange(ForInStatement::num_ids()));
   Visit(node->enumerable());  // Not part of loop.
-  node->set_first_yield_id(yield_count_);
+  node->set_first_suspend_id(suspend_count_);
   Visit(node->each());
   Visit(node->body());
-  node->set_yield_count(yield_count_ - node->first_yield_id());
+  node->set_suspend_count(suspend_count_ - node->first_suspend_id());
   ReserveFeedbackSlots(node);
 }
@@ -481,12 +497,12 @@ void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
   DisableFullCodegenAndCrankshaft(kForOfStatement);
   node->set_base_id(ReserveIdRange(ForOfStatement::num_ids()));
   Visit(node->assign_iterator());  // Not part of loop.
-  node->set_first_yield_id(yield_count_);
+  node->set_first_suspend_id(suspend_count_);
   Visit(node->next_result());
   Visit(node->result_done());
   Visit(node->assign_each());
   Visit(node->body());
-  node->set_yield_count(yield_count_ - node->first_yield_id());
+  node->set_suspend_count(suspend_count_ - node->first_suspend_id());
 }
@@ -535,11 +551,11 @@ void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
   DisableSelfOptimization();
   node->set_base_id(ReserveIdRange(ForStatement::num_ids()));
   if (node->init() != NULL) Visit(node->init());  // Not part of loop.
-  node->set_first_yield_id(yield_count_);
+  node->set_first_suspend_id(suspend_count_);
   if (node->cond() != NULL) Visit(node->cond());
   if (node->next() != NULL) Visit(node->next());
   Visit(node->body());
-  node->set_yield_count(yield_count_ - node->first_yield_id());
+  node->set_suspend_count(suspend_count_ - node->first_suspend_id());
 }
@@ -616,6 +632,7 @@ void AstNumberingVisitor::VisitStatements(ZoneList<Statement*>* statements) {
   if (statements == NULL) return;
   for (int i = 0; i < statements->length(); i++) {
     Visit(statements->at(i));
+    if (statements->at(i)->IsJump()) break;
   }
 }
@@ -687,12 +704,16 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
 
   LanguageModeScope language_mode_scope(this, node->language_mode());
 
+  if (collect_type_profile_) {
+    properties_.get_spec()->AddTypeProfileSlot();
+  }
+
   VisitDeclarations(scope->declarations());
   VisitStatements(node->body());
 
   node->set_ast_properties(&properties_);
   node->set_dont_optimize_reason(dont_optimize_reason());
-  node->set_yield_count(yield_count_);
+  node->set_suspend_count(suspend_count_);
 
   if (FLAG_trace_opt) {
     if (disable_crankshaft_reason_ != kNoReason) {
@@ -714,12 +735,14 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
 
 bool AstNumbering::Renumber(
     uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
-    Compiler::EagerInnerFunctionLiterals* eager_literals) {
+    Compiler::EagerInnerFunctionLiterals* eager_literals,
+    bool collect_type_profile) {
   DisallowHeapAllocation no_allocation;
   DisallowHandleAllocation no_handles;
   DisallowHandleDereference no_deref;
 
-  AstNumberingVisitor visitor(stack_limit, zone, eager_literals);
+  AstNumberingVisitor visitor(stack_limit, zone, eager_literals,
+                              collect_type_profile);
   return visitor.Renumber(function);
 }
 }  // namespace internal
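The new IsJump() check in VisitStatements stops numbering at the first unconditional jump (return, break, continue, throw): everything after it in the statement list is unreachable and needs no IDs. A standalone sketch of the idea, with a hypothetical Statement type (not V8 code):

#include <iostream>
#include <string>
#include <vector>

struct Statement {
  std::string text;
  bool is_jump;
  bool IsJump() const { return is_jump; }
};

void VisitStatements(const std::vector<Statement>& statements) {
  for (const Statement& stmt : statements) {
    std::cout << "visit: " << stmt.text << "\n";
    if (stmt.IsJump()) break;  // the rest of the list is dead code
  }
}

int main() {
  // "x = 2" is never visited because "return x" is a jump.
  VisitStatements({{"x = 1", false}, {"return x", true}, {"x = 2", false}});
}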

30
deps/v8/src/ast/ast-numbering.h

@@ -22,29 +22,33 @@ template <typename T>
 class ZoneVector;
 
 namespace AstNumbering {
-// Assign type feedback IDs, bailout IDs, and generator yield IDs to an AST node
-// tree; perform catch prediction for TryStatements. If |eager_literals| is
+// Assign type feedback IDs, bailout IDs, and generator suspend IDs to an AST
+// node tree; perform catch prediction for TryStatements. If |eager_literals| is
 // non-null, adds any eager inner literal functions into it.
 bool Renumber(
     uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
-    ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>* eager_literals);
+    ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>* eager_literals,
+    bool collect_type_profile = false);
 }
 
-// Some details on yield IDs
+// Some details on suspend IDs
 // -------------------------
 //
 // In order to assist Ignition in generating bytecode for a generator function,
-// we assign a unique number (the yield ID) to each Yield node in its AST. We
-// also annotate loops with the number of yields they contain (loop.yield_count)
-// and the smallest ID of those (loop.first_yield_id), and we annotate the
-// function itself with the number of yields it contains (function.yield_count).
+// we assign a unique number (the suspend ID) to each Suspend node in its AST.
+// We also annotate loops with the number of suspends they contain
+// (loop.suspend_count) and the smallest ID of those (loop.first_suspend_id),
+// and we annotate the function itself with the number of suspends it contains
+// (function.suspend_count).
 //
-// The way in which we choose the IDs is simply by enumerating the Yield nodes.
+// The way in which we choose the IDs is simply by enumerating the Suspend
+// nodes.
 // Ignition relies on the following properties:
-// - For each loop l and each yield y of l:
-//     l.first_yield_id <= y.yield_id < l.first_yield_id + l.yield_count
-// - For the generator function f itself and each yield y of f:
-//     0 <= y.yield_id < f.yield_count
+// - For each loop l and each suspend s of l:
+//     l.first_suspend_id <=
+//         s.suspend_id < l.first_suspend_id + l.suspend_count
+// - For the generator function f itself and each suspend s of f:
+//     0 <= s.suspend_id < f.suspend_count
 
 }  // namespace internal
 }  // namespace v8
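A standalone check of the two invariants above on a hand-numbered example: a generator with three suspends, the last two inside one loop (all numbers chosen by hand for illustration, not produced by V8):

#include <cassert>
#include <initializer_list>

int main() {
  // Suspend IDs are assigned in visit order: 0, 1, 2.
  const int f_suspend_count = 3;
  // The loop contains the suspends with IDs 1 and 2.
  const int l_first_suspend_id = 1;
  const int l_suspend_count = 2;

  for (int s = 0; s < f_suspend_count; ++s) {
    assert(0 <= s && s < f_suspend_count);  // function-level invariant
  }
  for (int s : {1, 2}) {  // loop-level invariant for the loop's suspends
    assert(l_first_suspend_id <= s &&
           s < l_first_suspend_id + l_suspend_count);
  }
  return 0;
}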

9
deps/v8/src/ast/ast-traversal-visitor.h

@@ -357,7 +357,7 @@ void AstTraversalVisitor<Subclass>::VisitAssignment(Assignment* expr) {
 }
 
 template <class Subclass>
-void AstTraversalVisitor<Subclass>::VisitYield(Yield* expr) {
+void AstTraversalVisitor<Subclass>::VisitSuspend(Suspend* expr) {
   PROCESS_EXPRESSION(expr);
   RECURSE_EXPRESSION(Visit(expr->generator_object()));
   RECURSE_EXPRESSION(Visit(expr->expression()));
@@ -476,6 +476,13 @@ void AstTraversalVisitor<Subclass>::VisitGetIterator(GetIterator* expr) {
   RECURSE_EXPRESSION(Visit(expr->iterable()));
 }
 
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitImportCallExpression(
+    ImportCallExpression* expr) {
+  PROCESS_EXPRESSION(expr);
+  RECURSE_EXPRESSION(Visit(expr->argument()));
+}
+
 template <class Subclass>
 void AstTraversalVisitor<Subclass>::VisitSuperPropertyReference(
     SuperPropertyReference* expr) {
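For readers unfamiliar with the traversal pattern the new visitor method plugs into: AstTraversalVisitor dispatches statically to the derived class via CRTP, with PROCESS/RECURSE hooks. A simplified standalone sketch of that pattern (illustrative stand-ins, not the V8 macros):

#include <cstdio>
#include <vector>

struct Node {
  const char* name;
  std::vector<Node*> children;
};

template <class Subclass>
class TraversalVisitor {
 public:
  void Visit(Node* n) {
    impl()->VisitNode(n);                  // stand-in for PROCESS_EXPRESSION
    for (Node* c : n->children) Visit(c);  // stand-in for RECURSE_EXPRESSION
  }
  void VisitNode(Node*) {}  // default hook: do nothing

 private:
  Subclass* impl() { return static_cast<Subclass*>(this); }
};

class PrintingVisitor : public TraversalVisitor<PrintingVisitor> {
 public:
  void VisitNode(Node* n) { std::printf("%s\n", n->name); }
};

int main() {
  Node arg{"argument", {}};
  Node import{"ImportCallExpression", {&arg}};
  PrintingVisitor v;
  v.Visit(&import);  // prints the node, then recurses into its argument
  return 0;
}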

4
deps/v8/src/ast/ast-types.cc

@@ -186,7 +186,6 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
   if (map == heap->boolean_map()) return kBoolean;
   if (map == heap->the_hole_map()) return kHole;
   DCHECK(map == heap->uninitialized_map() ||
-         map == heap->no_interceptor_result_sentinel_map() ||
          map == heap->termination_exception_map() ||
          map == heap->arguments_marker_map() ||
          map == heap->optimized_out_map() ||
@@ -209,6 +208,7 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
     case JS_DATE_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
+    case JS_ASYNC_GENERATOR_OBJECT_TYPE:
     case JS_MODULE_NAMESPACE_TYPE:
     case JS_ARRAY_BUFFER_TYPE:
     case JS_ARRAY_TYPE:
@@ -287,6 +287,7 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
     case PROPERTY_CELL_TYPE:
     case MODULE_TYPE:
     case MODULE_INFO_ENTRY_TYPE:
+    case ASYNC_GENERATOR_REQUEST_TYPE:
       return kOtherInternal & kTaggedPointer;
 
     // Remaining instance types are unsupported for now. If any of them do
@@ -311,6 +312,7 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
     case ALIASED_ARGUMENTS_ENTRY_TYPE:
     case DEBUG_INFO_TYPE:
     case BREAK_POINT_INFO_TYPE:
+    case STACK_FRAME_INFO_TYPE:
     case CELL_TYPE:
     case WEAK_CELL_TYPE:
     case PROTOTYPE_INFO_TYPE:

86
deps/v8/src/ast/ast-value-factory.cc

@@ -84,21 +84,8 @@ class AstRawStringInternalizationKey : public HashTableKey {
   const AstRawString* string_;
 };
 
-int AstString::length() const {
-  if (IsRawStringBits::decode(bit_field_)) {
-    return reinterpret_cast<const AstRawString*>(this)->length();
-  }
-  return reinterpret_cast<const AstConsString*>(this)->length();
-}
-
-void AstString::Internalize(Isolate* isolate) {
-  if (IsRawStringBits::decode(bit_field_)) {
-    return reinterpret_cast<AstRawString*>(this)->Internalize(isolate);
-  }
-  return reinterpret_cast<AstConsString*>(this)->Internalize(isolate);
-}
-
 void AstRawString::Internalize(Isolate* isolate) {
+  DCHECK(!has_string_);
   if (literal_bytes_.length() == 0) {
     set_string(isolate->factory()->empty_string());
   } else {
@@ -121,18 +108,26 @@ bool AstRawString::AsArrayIndex(uint32_t* index) const {
 }
 
 bool AstRawString::IsOneByteEqualTo(const char* data) const {
-  int length = static_cast<int>(strlen(data));
-  if (is_one_byte() && literal_bytes_.length() == length) {
-    const char* token = reinterpret_cast<const char*>(literal_bytes_.start());
-    return !strncmp(token, data, length);
-  }
-  return false;
+  if (!is_one_byte()) return false;
+
+  size_t length = static_cast<size_t>(literal_bytes_.length());
+  if (length != strlen(data)) return false;
+
+  return 0 == strncmp(reinterpret_cast<const char*>(literal_bytes_.start()),
+                      data, length);
+}
+
+uint16_t AstRawString::FirstCharacter() const {
+  if (is_one_byte()) return literal_bytes_[0];
+  const uint16_t* c = reinterpret_cast<const uint16_t*>(literal_bytes_.start());
+  return *c;
 }
 
 bool AstRawString::Compare(void* a, void* b) {
   const AstRawString* lhs = static_cast<AstRawString*>(a);
   const AstRawString* rhs = static_cast<AstRawString*>(b);
   DCHECK_EQ(lhs->hash(), rhs->hash());
   if (lhs->length() != rhs->length()) return false;
   const unsigned char* l = lhs->raw_data();
   const unsigned char* r = rhs->raw_data();
@@ -161,11 +156,20 @@ bool AstRawString::Compare(void* a, void* b) {
 }
 
 void AstConsString::Internalize(Isolate* isolate) {
-  // AstRawStrings are internalized before AstConsStrings so left and right are
-  // already internalized.
-  set_string(isolate->factory()
-                 ->NewConsString(left_->string(), right_->string())
-                 .ToHandleChecked());
+  if (IsEmpty()) {
+    set_string(isolate->factory()->empty_string());
+    return;
+  }
+  // AstRawStrings are internalized before AstConsStrings, so
+  // AstRawString::string() will just work.
+  Handle<String> tmp(segment_.string->string());
+  for (AstConsString::Segment* current = segment_.next; current != nullptr;
+       current = current->next) {
+    tmp = isolate->factory()
+              ->NewConsString(current->string->string(), tmp)
+              .ToHandleChecked();
+  }
+  set_string(tmp);
 }
 
 bool AstValue::IsPropertyName() const {
@@ -285,22 +289,34 @@ const AstRawString* AstValueFactory::GetString(Handle<String> literal) {
   return result;
 }
 
-const AstConsString* AstValueFactory::NewConsString(
-    const AstString* left, const AstString* right) {
-  // This Vector will be valid as long as the Collector is alive (meaning that
-  // the AstRawString will not be moved).
-  AstConsString* new_string = new (zone_) AstConsString(left, right);
-  CHECK(new_string != nullptr);
-  AddString(new_string);
+AstConsString* AstValueFactory::NewConsString() {
+  AstConsString* new_string = new (zone_) AstConsString;
+  DCHECK_NOT_NULL(new_string);
+  AddConsString(new_string);
   return new_string;
 }
 
+AstConsString* AstValueFactory::NewConsString(const AstRawString* str) {
+  return NewConsString()->AddString(zone_, str);
+}
+
+AstConsString* AstValueFactory::NewConsString(const AstRawString* str1,
+                                              const AstRawString* str2) {
+  return NewConsString()->AddString(zone_, str1)->AddString(zone_, str2);
+}
+
 void AstValueFactory::Internalize(Isolate* isolate) {
   // Strings need to be internalized before values, because values refer to
   // strings.
-  for (AstString* current = strings_; current != nullptr;) {
-    AstString* next = current->next();
+  for (AstRawString* current = strings_; current != nullptr;) {
+    AstRawString* next = current->next();
+    current->Internalize(isolate);
+    current = next;
+  }
+
+  // AstConsStrings refer to AstRawStrings.
+  for (AstConsString* current = cons_strings_; current != nullptr;) {
+    AstConsString* next = current->next();
     current->Internalize(isolate);
     current = next;
  }
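Because AddString (see the header diff below) pushes each new segment onto the head of the list, the segments sit in reverse source order, and Internalize restores the original order by folding earlier segments around the accumulated tail. A minimal standalone sketch of that fold, with std::string standing in for internalized strings (simplified model, not the real classes):

#include <cassert>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> segments;  // front of this vector = list head
  auto add_string = [&](const std::string& s) {
    if (!s.empty()) segments.insert(segments.begin(), s);  // prepend, as AddString does
  };

  add_string("foo");
  add_string(".");
  add_string("bar");  // intended concatenation: "foo" "." "bar"

  // Internalize: start from the head segment (the last one added) and wrap
  // earlier segments around it, mirroring NewConsString(current->string, tmp).
  std::string tmp = segments.front();
  for (size_t i = 1; i < segments.size(); ++i) {
    tmp = segments[i] + tmp;  // stand-in for the cons-string wrap
  }
  assert(tmp == "foo.bar");  // original left-to-right order is recovered
  return 0;
}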

211
deps/v8/src/ast/ast-value-factory.h

@@ -35,123 +35,144 @@
 #include "src/isolate.h"
 #include "src/utils.h"
 
-// AstString, AstValue and AstValueFactory are for storing strings and values
-// independent of the V8 heap and internalizing them later. During parsing,
-// AstStrings and AstValues are created and stored outside the heap, in
-// AstValueFactory. After parsing, the strings and values are internalized
-// (moved into the V8 heap).
+// Ast(Raw|Cons)String, AstValue and AstValueFactory are for storing strings and
+// values independent of the V8 heap and internalizing them later. During
+// parsing, they are created and stored outside the heap, in AstValueFactory.
+// After parsing, the strings and values are internalized (moved into the V8
+// heap).
 namespace v8 {
 namespace internal {
 
-class AstString : public ZoneObject {
+class AstRawString final : public ZoneObject {
  public:
-  explicit AstString(bool is_raw)
-      : next_(nullptr), bit_field_(IsRawStringBits::encode(is_raw)) {}
-
-  int length() const;
-  bool IsEmpty() const { return length() == 0; }
-
-  // Puts the string into the V8 heap.
-  void Internalize(Isolate* isolate);
-
-  // This function can be called after internalizing.
-  V8_INLINE Handle<String> string() const {
-    DCHECK_NOT_NULL(string_);
-    return Handle<String>(string_);
-  }
-
-  AstString* next() { return next_; }
-  AstString** next_location() { return &next_; }
-
- protected:
-  void set_string(Handle<String> string) { string_ = string.location(); }
-  // {string_} is stored as String** instead of a Handle<String> so it can be
-  // stored in a union with {next_}.
-  union {
-    AstString* next_;
-    String** string_;
-  };
-  // Poor-man's virtual dispatch to AstRawString / AstConsString. Takes less
-  // memory.
-  class IsRawStringBits : public BitField<bool, 0, 1> {};
-  int bit_field_;
-};
-
-class AstRawString final : public AstString {
- public:
+  bool IsEmpty() const { return literal_bytes_.length() == 0; }
   int length() const {
-    if (is_one_byte()) return literal_bytes_.length();
-    return literal_bytes_.length() / 2;
+    return is_one_byte() ? literal_bytes_.length()
+                         : literal_bytes_.length() / 2;
   }
+  bool AsArrayIndex(uint32_t* index) const;
+  bool IsOneByteEqualTo(const char* data) const;
+  uint16_t FirstCharacter() const;
 
-  int byte_length() const { return literal_bytes_.length(); }
-
   void Internalize(Isolate* isolate);
 
-  bool AsArrayIndex(uint32_t* index) const;
-
-  // The string is not null-terminated, use length() to find out the length.
+  // Access the physical representation:
+  bool is_one_byte() const { return is_one_byte_; }
+  int byte_length() const { return literal_bytes_.length(); }
   const unsigned char* raw_data() const {
     return literal_bytes_.start();
   }
-  bool is_one_byte() const { return IsOneByteBits::decode(bit_field_); }
-  bool IsOneByteEqualTo(const char* data) const;
-  uint16_t FirstCharacter() const {
-    if (is_one_byte()) return literal_bytes_[0];
-    const uint16_t* c =
-        reinterpret_cast<const uint16_t*>(literal_bytes_.start());
-    return *c;
-  }
-
-  static bool Compare(void* a, void* b);
 
   // For storing AstRawStrings in a hash map.
   uint32_t hash() const {
     return hash_;
   }
 
+  // This function can be called after internalizing.
+  V8_INLINE Handle<String> string() const {
+    DCHECK_NOT_NULL(string_);
+    DCHECK(has_string_);
+    return Handle<String>(string_);
+  }
+
  private:
   friend class AstRawStringInternalizationKey;
   friend class AstStringConstants;
   friend class AstValueFactory;
 
+  // Members accessed only by the AstValueFactory & related classes:
+  static bool Compare(void* a, void* b);
   AstRawString(bool is_one_byte, const Vector<const byte>& literal_bytes,
                uint32_t hash)
-      : AstString(true), hash_(hash), literal_bytes_(literal_bytes) {
-    bit_field_ |= IsOneByteBits::encode(is_one_byte);
-  }
-
-  AstRawString() : AstString(true), hash_(0) {
-    bit_field_ |= IsOneByteBits::encode(true);
+      : next_(nullptr),
+        literal_bytes_(literal_bytes),
+        hash_(hash),
+        is_one_byte_(is_one_byte) {}
+  AstRawString* next() {
+    DCHECK(!has_string_);
+    return next_;
+  }
+  AstRawString** next_location() {
+    DCHECK(!has_string_);
+    return &next_;
   }
 
-  class IsOneByteBits : public BitField<bool, IsRawStringBits::kNext, 1> {};
+  void set_string(Handle<String> string) {
+    DCHECK(!string.is_null());
+    DCHECK(!has_string_);
+    string_ = string.location();
+#ifdef DEBUG
+    has_string_ = true;
+#endif
+  }
 
+  // {string_} is stored as String** instead of a Handle<String> so it can be
+  // stored in a union with {next_}.
+  union {
+    AstRawString* next_;
+    String** string_;
+  };
+
+  Vector<const byte> literal_bytes_;  // Memory owned by Zone.
   uint32_t hash_;
-  // Points to memory owned by Zone.
-  Vector<const byte> literal_bytes_;
+  bool is_one_byte_;
+#ifdef DEBUG
+  // (Debug-only:) Verify the object life-cylce: Some functions may only be
+  // called after internalization (that is, after a v8::internal::String has
+  // been set); some only before.
+  bool has_string_ = false;
+#endif
 };
 
-class AstConsString final : public AstString {
+class AstConsString final : public ZoneObject {
  public:
-  AstConsString(const AstString* left, const AstString* right)
-      : AstString(false),
-        length_(left->length() + right->length()),
-        left_(left),
-        right_(right) {}
+  AstConsString* AddString(Zone* zone, const AstRawString* s) {
+    if (s->IsEmpty()) return this;
+    if (!IsEmpty()) {
+      // We're putting the new string to the head of the list, meaning
+      // the string segments will be in reverse order.
+      Segment* tmp = new (zone->New(sizeof(Segment))) Segment;
+      *tmp = segment_;
+      segment_.next = tmp;
+    }
+    segment_.string = s;
+    return this;
+  }
 
-  int length() const { return length_; }
+  bool IsEmpty() const {
+    DCHECK_IMPLIES(segment_.string == nullptr, segment_.next == nullptr);
+    DCHECK_IMPLIES(segment_.string != nullptr, !segment_.string->IsEmpty());
+    return segment_.string == nullptr;
+  }
 
   void Internalize(Isolate* isolate);
 
+  V8_INLINE Handle<String> string() const {
+    DCHECK_NOT_NULL(string_);
+    return Handle<String>(string_);
+  }
+
  private:
-  const int length_;
-  const AstString* left_;
-  const AstString* right_;
+  friend class AstValueFactory;
+
+  AstConsString() : next_(nullptr), segment_({nullptr, nullptr}) {}
+
+  AstConsString* next() const { return next_; }
+  AstConsString** next_location() { return &next_; }
+
+  // {string_} is stored as String** instead of a Handle<String> so it can be
+  // stored in a union with {next_}.
+  void set_string(Handle<String> string) { string_ = string.location(); }
+  union {
+    AstConsString* next_;
+    String** string_;
+  };
+
+  struct Segment {
+    const AstRawString* string;
+    AstConsString::Segment* next;
+  };
+  Segment segment_;
 };
 
 enum class AstSymbol : uint8_t { kHomeObjectSymbol };
@@ -310,6 +331,7 @@ class AstValue : public ZoneObject {
   F(arguments, "arguments")           \
   F(async, "async")                   \
   F(await, "await")                   \
+  F(boolean, "boolean")               \
   F(constructor, "constructor")       \
   F(default, "default")               \
   F(done, "done")                     \
@@ -330,11 +352,15 @@ class AstValue : public ZoneObject {
   F(native, "native")                 \
   F(new_target, ".new.target")        \
   F(next, "next")                     \
+  F(number, "number")                 \
+  F(object, "object")                 \
   F(proto, "__proto__")               \
   F(prototype, "prototype")           \
   F(return, "return")                 \
   F(set_space, "set ")                \
   F(star_default_star, "*default*")   \
+  F(string, "string")                 \
+  F(symbol, "symbol")                 \
   F(this, "this")                     \
   F(this_function, ".this_function")  \
   F(throw, "throw")                   \
@@ -407,7 +433,10 @@ class AstValueFactory {
         values_(nullptr),
         strings_(nullptr),
         strings_end_(&strings_),
+        cons_strings_(nullptr),
+        cons_strings_end_(&cons_strings_),
         string_constants_(string_constants),
+        empty_cons_string_(nullptr),
         zone_(zone),
         hash_seed_(hash_seed) {
 #define F(name) name##_ = nullptr;
@@ -418,6 +447,7 @@ class AstValueFactory {
     std::fill(one_character_strings_,
               one_character_strings_ + arraysize(one_character_strings_),
               nullptr);
+    empty_cons_string_ = NewConsString();
   }
 
   Zone* zone() const { return zone_; }
@@ -433,17 +463,20 @@ class AstValueFactory {
     return GetTwoByteStringInternal(literal);
   }
   const AstRawString* GetString(Handle<String> literal);
-  const AstConsString* NewConsString(const AstString* left,
-                                     const AstString* right);
+  V8_EXPORT_PRIVATE AstConsString* NewConsString();
+  AstConsString* NewConsString(const AstRawString* str);
+  AstConsString* NewConsString(const AstRawString* str1,
+                               const AstRawString* str2);
 
   V8_EXPORT_PRIVATE void Internalize(Isolate* isolate);
 
 #define F(name, str)                           \
-  const AstRawString* name##_string() {        \
+  const AstRawString* name##_string() const {  \
     return string_constants_->name##_string(); \
   }
   STRING_CONSTANTS(F)
 #undef F
+  const AstConsString* empty_cons_string() const { return empty_cons_string_; }
 
   V8_EXPORT_PRIVATE const AstValue* NewString(const AstRawString* string);
   // A JavaScript symbol (ECMA-262 edition 6).
@@ -467,14 +500,21 @@ class AstValueFactory {
     values_ = value;
     return value;
   }
-  AstString* AddString(AstString* string) {
+  AstRawString* AddString(AstRawString* string) {
     *strings_end_ = string;
     strings_end_ = string->next_location();
     return string;
   }
+  AstConsString* AddConsString(AstConsString* string) {
+    *cons_strings_end_ = string;
+    cons_strings_end_ = string->next_location();
+    return string;
+  }
   void ResetStrings() {
     strings_ = nullptr;
     strings_end_ = &strings_;
+    cons_strings_ = nullptr;
+    cons_strings_end_ = &cons_strings_;
   }
   V8_EXPORT_PRIVATE AstRawString* GetOneByteStringInternal(
       Vector<const uint8_t> literal);
@@ -490,11 +530,14 @@ class AstValueFactory {
 
   // We need to keep track of strings_ in order since cons strings require their
   // members to be internalized first.
-  AstString* strings_;
-  AstString** strings_end_;
+  AstRawString* strings_;
+  AstRawString** strings_end_;
+  AstConsString* cons_strings_;
+  AstConsString** cons_strings_end_;
 
   // Holds constant string values which are shared across the isolate.
   const AstStringConstants* string_constants_;
+  const AstConsString* empty_cons_string_;
 
   // Caches for faster access: small numbers, one character lowercase strings
   // (for minified code).
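The union of {next_} and {string_} means the intrusive list pointer is destroyed the instant set_string() writes the handle location, which is exactly what the new debug-only has_string_ flag polices. A minimal standalone sketch of the same one-word, two-phase pattern (simplified types, illustrative only):

#include <cassert>
#include <cstdio>

struct Str {
  union {
    Str* next_;            // valid only before internalization
    const char** string_;  // valid only after internalization
  };
  bool has_string_ = false;  // debug-only in V8 (guarded by #ifdef DEBUG)

  Str() : next_(nullptr) {}
  Str* next() { assert(!has_string_); return next_; }
  void set_string(const char** location) {
    assert(!has_string_);
    string_ = location;  // overwrites next_: the list link is gone for good
    has_string_ = true;
  }
  const char* string() { assert(has_string_); return *string_; }
};

int main() {
  static const char* kHello = "hello";
  Str a, b;
  a.next_ = &b;           // linked while waiting for internalization
  assert(a.next() == &b);
  a.set_string(&kHello);  // from here on, next() would trip the assert
  std::printf("%s\n", a.string());
  return 0;
}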

68
deps/v8/src/ast/ast.cc

@@ -51,6 +51,7 @@ static const char* NameForNativeContextIntrinsicIndex(uint32_t idx) {
 void AstNode::Print() { Print(Isolate::Current()); }
 
 void AstNode::Print(Isolate* isolate) {
+  AllowHandleDereference allow_deref;
   AstPrinter::PrintOut(isolate, this);
 }
 
@@ -163,7 +164,7 @@ void Expression::MarkTail() {
 bool DoExpression::IsAnonymousFunctionDefinition() const {
   // This is specifically to allow DoExpressions to represent ClassLiterals.
   return represented_function_ != nullptr &&
-         represented_function_->raw_name()->length() == 0;
+         represented_function_->raw_name()->IsEmpty();
 }
 
 bool Statement::IsJump() const {
@@ -249,16 +250,16 @@ static void AssignVectorSlots(Expression* expr, FeedbackVectorSpec* spec,
                               FeedbackSlot* out_slot) {
   Property* property = expr->AsProperty();
   LhsKind assign_type = Property::GetAssignType(property);
-  if ((assign_type == VARIABLE &&
-       expr->AsVariableProxy()->var()->IsUnallocated()) ||
-      assign_type == NAMED_PROPERTY || assign_type == KEYED_PROPERTY) {
-    // TODO(ishell): consider using ICSlotCache for variables here.
-    if (assign_type == KEYED_PROPERTY) {
-      *out_slot = spec->AddKeyedStoreICSlot(language_mode);
-    } else {
-      *out_slot = spec->AddStoreICSlot(language_mode);
-    }
+  // TODO(ishell): consider using ICSlotCache for variables here.
+  if (assign_type == VARIABLE &&
+      expr->AsVariableProxy()->var()->IsUnallocated()) {
+    *out_slot = spec->AddStoreGlobalICSlot(language_mode);
+  } else if (assign_type == NAMED_PROPERTY) {
+    *out_slot = spec->AddStoreICSlot(language_mode);
+  } else if (assign_type == KEYED_PROPERTY) {
+    *out_slot = spec->AddKeyedStoreICSlot(language_mode);
   }
 }
 
@@ -681,8 +682,8 @@ bool ObjectLiteral::IsFastCloningSupported() const {
   // literals don't support copy-on-write (COW) elements for now.
   // TODO(mvstanton): make object literals support COW elements.
   return fast_elements() && has_shallow_properties() &&
-         properties_count() <= ConstructorBuiltinsAssembler::
-                                   kMaximumClonedShallowObjectProperties;
+         properties_count() <=
+             ConstructorBuiltins::kMaximumClonedShallowObjectProperties;
 }
 
 ElementsKind ArrayLiteral::constant_elements_kind() const {
@@ -786,7 +787,7 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
 bool ArrayLiteral::IsFastCloningSupported() const {
   return depth() <= 1 &&
          values()->length() <=
-             ConstructorBuiltinsAssembler::kMaximumClonedShallowArrayElements;
+             ConstructorBuiltins::kMaximumClonedShallowArrayElements;
 }
 
 void ArrayLiteral::RewindSpreads() {
@@ -883,6 +884,30 @@ void BinaryOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
   }
 }
 
+static bool IsCommutativeOperationWithSmiLiteral(Token::Value op) {
+  // Add is not commutative due to potential for string addition.
+  return op == Token::MUL || op == Token::BIT_AND || op == Token::BIT_OR ||
+         op == Token::BIT_XOR;
+}
+
+// Check for the pattern: x + 1.
+static bool MatchSmiLiteralOperation(Expression* left, Expression* right,
+                                     Expression** expr, Smi** literal) {
+  if (right->IsSmiLiteral()) {
+    *expr = left;
+    *literal = right->AsLiteral()->AsSmiLiteral();
+    return true;
+  }
+  return false;
+}
+
+bool BinaryOperation::IsSmiLiteralOperation(Expression** subexpr,
+                                            Smi** literal) {
+  return MatchSmiLiteralOperation(left_, right_, subexpr, literal) ||
+         (IsCommutativeOperationWithSmiLiteral(op()) &&
+          MatchSmiLiteralOperation(right_, left_, subexpr, literal));
+}
+
 static bool IsTypeof(Expression* expr) {
   UnaryOperation* maybe_unary = expr->AsUnaryOperation();
   return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF;
@@ -904,24 +929,21 @@ void CompareOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
 }
 
 // Check for the pattern: typeof <expression> equals <string literal>.
-static bool MatchLiteralCompareTypeof(Expression* left,
-                                      Token::Value op,
-                                      Expression* right,
-                                      Expression** expr,
-                                      Handle<String>* check) {
+static bool MatchLiteralCompareTypeof(Expression* left, Token::Value op,
+                                      Expression* right, Expression** expr,
+                                      Literal** literal) {
   if (IsTypeof(left) && right->IsStringLiteral() && Token::IsEqualityOp(op)) {
     *expr = left->AsUnaryOperation()->expression();
-    *check = Handle<String>::cast(right->AsLiteral()->value());
+    *literal = right->AsLiteral();
     return true;
   }
   return false;
 }
 
 bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
-                                              Handle<String>* check) {
-  return MatchLiteralCompareTypeof(left_, op(), right_, expr, check) ||
-         MatchLiteralCompareTypeof(right_, op(), left_, expr, check);
+                                              Literal** literal) {
+  return MatchLiteralCompareTypeof(left_, op(), right_, expr, literal) ||
+         MatchLiteralCompareTypeof(right_, op(), left_, expr, literal);
 }
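Both matchers above use the same two-sided strategy: the helper only ever checks "literal on the right", and the caller swaps the operands for the mirrored case (for Smi literals, only when the operator is commutative; ADD is excluded because JS string concatenation makes it order-sensitive). A standalone sketch of that strategy with simplified stand-in types:

#include <cassert>

enum class Op { kAdd, kMul, kBitAnd, kBitOr, kBitXor };

struct Expr {
  bool is_smi_literal = false;
  int smi_value = 0;
};

static bool IsCommutativeWithSmiLiteral(Op op) {
  // Add is excluded: "a" + 1 and 1 + "a" differ once strings are involved.
  return op == Op::kMul || op == Op::kBitAnd || op == Op::kBitOr ||
         op == Op::kBitXor;
}

// Matches the pattern <expr> op <smi literal>.
static bool MatchSmiLiteral(Expr* left, Expr* right, Expr** subexpr,
                            int* literal) {
  if (!right->is_smi_literal) return false;
  *subexpr = left;
  *literal = right->smi_value;
  return true;
}

static bool IsSmiLiteralOperation(Op op, Expr* left, Expr* right,
                                  Expr** subexpr, int* literal) {
  return MatchSmiLiteral(left, right, subexpr, literal) ||
         (IsCommutativeWithSmiLiteral(op) &&
          MatchSmiLiteral(right, left, subexpr, literal));
}

int main() {
  Expr x, one;
  one.is_smi_literal = true;
  one.smi_value = 1;
  Expr* sub = nullptr;
  int lit = 0;
  assert(IsSmiLiteralOperation(Op::kMul, &one, &x, &sub, &lit));   // 1 * x
  assert(sub == &x && lit == 1);
  assert(!IsSmiLiteralOperation(Op::kAdd, &one, &x, &sub, &lit));  // 1 + x
  (void)sub;
  (void)lit;
  return 0;
}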

175
deps/v8/src/ast/ast.h

@@ -91,7 +91,7 @@ namespace internal {
   V(Conditional)                \
   V(VariableProxy)              \
   V(Literal)                    \
-  V(Yield)                      \
+  V(Suspend)                    \
   V(Throw)                      \
   V(CallRuntime)                \
   V(UnaryOperation)             \
@@ -105,7 +105,8 @@ namespace internal {
   V(EmptyParentheses)           \
   V(GetIterator)                \
   V(DoExpression)               \
-  V(RewritableExpression)
+  V(RewritableExpression)       \
+  V(ImportCallExpression)
 
 #define AST_NODE_LIST(V)                        \
   DECLARATION_NODE_LIST(V)                      \
@@ -563,11 +564,11 @@ class IterationStatement : public BreakableStatement {
   Statement* body() const { return body_; }
   void set_body(Statement* s) { body_ = s; }
 
-  int yield_count() const { return yield_count_; }
-  int first_yield_id() const { return first_yield_id_; }
-  void set_yield_count(int yield_count) { yield_count_ = yield_count; }
-  void set_first_yield_id(int first_yield_id) {
-    first_yield_id_ = first_yield_id;
+  int suspend_count() const { return suspend_count_; }
+  int first_suspend_id() const { return first_suspend_id_; }
+  void set_suspend_count(int suspend_count) { suspend_count_ = suspend_count; }
+  void set_first_suspend_id(int first_suspend_id) {
+    first_suspend_id_ = first_suspend_id;
   }
 
   static int num_ids() { return parent_num_ids() + 1; }
@@ -581,8 +582,8 @@ class IterationStatement : public BreakableStatement {
                      NodeType type)
       : BreakableStatement(labels, TARGET_FOR_ANONYMOUS, pos, type),
         body_(NULL),
-        yield_count_(0),
-        first_yield_id_(0) {}
+        suspend_count_(0),
+        first_suspend_id_(0) {}
   static int parent_num_ids() { return BreakableStatement::num_ids(); }
   void Initialize(Statement* body) { body_ = body; }
@@ -594,8 +595,8 @@ class IterationStatement : public BreakableStatement {
   Statement* body_;
   Label continue_target_;
-  int yield_count_;
-  int first_yield_id_;
+  int suspend_count_;
+  int first_suspend_id_;
 };
@@ -1101,7 +1102,6 @@ class TryStatement : public Statement {
 class TryCatchStatement final : public TryStatement {
  public:
   Scope* scope() { return scope_; }
-  Variable* variable() { return variable_; }
   Block* catch_block() const { return catch_block_; }
   void set_catch_block(Block* b) { catch_block_ = b; }
@@ -1122,18 +1122,15 @@ class TryCatchStatement final : public TryStatement {
  private:
   friend class AstNodeFactory;
 
-  TryCatchStatement(Block* try_block, Scope* scope, Variable* variable,
-                    Block* catch_block,
+  TryCatchStatement(Block* try_block, Scope* scope, Block* catch_block,
                     HandlerTable::CatchPrediction catch_prediction, int pos)
       : TryStatement(try_block, pos, kTryCatchStatement),
         scope_(scope),
-        variable_(variable),
         catch_block_(catch_block) {
     catch_prediction_ = catch_prediction;
   }
 
   Scope* scope_;
-  Variable* variable_;
   Block* catch_block_;
 };
@@ -1205,6 +1202,11 @@ class Literal final : public Expression {
     return value_->AsString();
   }
 
+  Smi* AsSmiLiteral() {
+    DCHECK(IsSmiLiteral());
+    return raw_value()->AsSmi();
+  }
+
   bool ToBooleanIsTrue() const { return raw_value()->BooleanValue(); }
   bool ToBooleanIsFalse() const { return !raw_value()->BooleanValue(); }
@@ -2138,6 +2140,11 @@ class BinaryOperation final : public Expression {
   TypeFeedbackId BinaryOperationFeedbackId() const {
     return TypeFeedbackId(local_id(1));
   }
+
+  // Returns true if one side is a Smi literal, returning the other side's
+  // sub-expression in |subexpr| and the literal Smi in |literal|.
+  bool IsSmiLiteralOperation(Expression** subexpr, Smi** literal);
+
   Maybe<int> fixed_right_arg() const {
     return has_fixed_right_arg_ ? Just(fixed_right_arg_value_) : Nothing<int>();
   }
@@ -2279,7 +2286,7 @@ class CompareOperation final : public Expression {
   FeedbackSlot CompareOperationFeedbackSlot() const { return feedback_slot_; }
 
   // Match special cases.
-  bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
+  bool IsLiteralCompareTypeof(Expression** expr, Literal** literal);
   bool IsLiteralCompareUndefined(Expression** expr);
   bool IsLiteralCompareNull(Expression** expr);
@@ -2493,10 +2500,16 @@ class RewritableExpression final : public Expression {
       : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
 };
 
+// There are several types of Suspend node:
+//
+// Yield
+// YieldStar
+// Await
+//
 // Our Yield is different from the JS yield in that it "returns" its argument as
 // is, without wrapping it in an iterator result object. Such wrapping, if
 // desired, must be done beforehand (see the parser).
-class Yield final : public Expression {
+class Suspend final : public Expression {
  public:
   enum OnException { kOnExceptionThrow, kOnExceptionRethrow };
@@ -2508,30 +2521,59 @@ class Suspend final : public Expression {
   bool rethrow_on_exception() const {
     return on_exception() == kOnExceptionRethrow;
   }
-  int yield_id() const { return yield_id_; }
+
+  int suspend_id() const { return suspend_id_; }
+  SuspendFlags flags() const { return FlagsField::decode(bit_field_); }
+  SuspendFlags suspend_type() const {
+    return flags() & SuspendFlags::kSuspendTypeMask;
+  }
+  SuspendFlags generator_type() const {
+    return flags() & SuspendFlags::kGeneratorTypeMask;
+  }
+  bool is_yield() const { return suspend_type() == SuspendFlags::kYield; }
+  bool is_yield_star() const {
+    return suspend_type() == SuspendFlags::kYieldStar;
+  }
+  bool is_await() const { return suspend_type() == SuspendFlags::kAwait; }
+  bool is_async_generator() const {
+    return generator_type() == SuspendFlags::kAsyncGenerator;
+  }
+  inline bool IsNonInitialAsyncGeneratorYield() const {
+    // Return true if is_async_generator() && !is_await() && yield_id() > 0
+    return suspend_id() > 0 && (flags() & SuspendFlags::kAsyncGeneratorAwait) ==
+                                   SuspendFlags::kAsyncGenerator;
+  }
 
   void set_generator_object(Expression* e) { generator_object_ = e; }
   void set_expression(Expression* e) { expression_ = e; }
-  void set_yield_id(int yield_id) { yield_id_ = yield_id; }
+  void set_suspend_id(int id) { suspend_id_ = id; }
+  void set_suspend_type(SuspendFlags type) {
+    DCHECK_EQ(0, static_cast<int>(type & ~SuspendFlags::kSuspendTypeMask));
+    bit_field_ = FlagsField::update(bit_field_, type);
+  }
 
  private:
   friend class AstNodeFactory;
 
-  Yield(Expression* generator_object, Expression* expression, int pos,
-        OnException on_exception)
-      : Expression(pos, kYield),
-        yield_id_(-1),
+  Suspend(Expression* generator_object, Expression* expression, int pos,
+          OnException on_exception, SuspendFlags flags)
+      : Expression(pos, kSuspend),
+        suspend_id_(-1),
         generator_object_(generator_object),
         expression_(expression) {
-    bit_field_ |= OnExceptionField::encode(on_exception);
+    bit_field_ |=
+        OnExceptionField::encode(on_exception) | FlagsField::encode(flags);
   }
 
-  int yield_id_;
+  int suspend_id_;
   Expression* generator_object_;
   Expression* expression_;
 
   class OnExceptionField
       : public BitField<OnException, Expression::kNextBitFieldIndex, 1> {};
+  class FlagsField
+      : public BitField<SuspendFlags, OnExceptionField::kNext,
+                        static_cast<int>(SuspendFlags::kBitWidth)> {};
 };
@@ -2566,8 +2608,8 @@ class FunctionLiteral final : public Expression {
   enum EagerCompileHint { kShouldEagerCompile, kShouldLazyCompile };
 
   Handle<String> name() const { return raw_name_->string(); }
-  const AstString* raw_name() const { return raw_name_; }
-  void set_raw_name(const AstString* name) { raw_name_ = name; }
+  const AstConsString* raw_name() const { return raw_name_; }
+  void set_raw_name(const AstConsString* name) { raw_name_ = name; }
   DeclarationScope* scope() const { return scope_; }
   ZoneList<Statement*>* body() const { return body_; }
   void set_function_token_position(int pos) { function_token_position_ = pos; }
@@ -2593,7 +2635,11 @@ class FunctionLiteral final : public Expression {
 
   static bool NeedsHomeObject(Expression* expr);
 
-  int expected_property_count() { return expected_property_count_; }
+  int expected_property_count() {
+    // Not valid for lazy functions.
+    DCHECK_NOT_NULL(body_);
+    return expected_property_count_;
+  }
   int parameter_count() { return parameter_count_; }
   int function_length() { return function_length_; }
@@ -2626,7 +2672,7 @@ class FunctionLiteral final : public Expression {
     raw_inferred_name_ = NULL;
   }
 
-  void set_raw_inferred_name(const AstString* raw_inferred_name) {
+  void set_raw_inferred_name(const AstConsString* raw_inferred_name) {
     DCHECK(raw_inferred_name != NULL);
     raw_inferred_name_ = raw_inferred_name;
     DCHECK(inferred_name_.is_null());
@@ -2637,6 +2683,8 @@ class FunctionLiteral final : public Expression {
   void set_pretenure() { bit_field_ = Pretenure::update(bit_field_, true); }
 
   bool has_duplicate_parameters() const {
+    // Not valid for lazy functions.
+    DCHECK_NOT_NULL(body_);
     return HasDuplicateParameters::decode(bit_field_);
   }
@@ -2682,8 +2730,8 @@ class FunctionLiteral final : public Expression {
     return is_anonymous_expression();
   }
 
-  int yield_count() { return yield_count_; }
-  void set_yield_count(int yield_count) { yield_count_ = yield_count; }
+  int suspend_count() { return suspend_count_; }
+  void set_suspend_count(int suspend_count) { suspend_count_ = suspend_count; }
 
   int return_position() {
     return std::max(start_position(), end_position() - (has_braces_ ? 1 : 0));
@@ -2697,7 +2745,7 @@ class FunctionLiteral final : public Expression {
  private:
   friend class AstNodeFactory;
 
-  FunctionLiteral(Zone* zone, const AstString* name,
+  FunctionLiteral(Zone* zone, const AstRawString* name,
                   AstValueFactory* ast_value_factory, DeclarationScope* scope,
                   ZoneList<Statement*>* body, int expected_property_count,
                   int parameter_count, int function_length,
@@ -2710,12 +2758,12 @@ class FunctionLiteral final : public Expression {
         parameter_count_(parameter_count),
         function_length_(function_length),
         function_token_position_(kNoSourcePosition),
-        yield_count_(0),
+        suspend_count_(0),
         has_braces_(has_braces),
-        raw_name_(name),
+        raw_name_(ast_value_factory->NewConsString(name)),
         scope_(scope),
         body_(body),
-        raw_inferred_name_(ast_value_factory->empty_string()),
+        raw_inferred_name_(ast_value_factory->empty_cons_string()),
         ast_properties_(zone),
         function_literal_id_(function_literal_id) {
     bit_field_ |= FunctionTypeBits::encode(function_type) |
@@ -2725,6 +2773,7 @@ class FunctionLiteral final : public Expression {
                   ShouldNotBeUsedOnceHintField::encode(false) |
                   DontOptimizeReasonField::encode(kNoReason);
     if (eager_compile_hint == kShouldEagerCompile) SetShouldEagerCompile();
+    DCHECK_EQ(body == nullptr, expected_property_count < 0);
   }
 
   class FunctionTypeBits
@@ -2741,13 +2790,13 @@ class FunctionLiteral final : public Expression {
   int parameter_count_;
   int function_length_;
   int function_token_position_;
-  int yield_count_;
+  int suspend_count_;
   bool has_braces_;
 
-  const AstString* raw_name_;
+  const AstConsString* raw_name_;
   DeclarationScope* scope_;
   ZoneList<Statement*>* body_;
-  const AstString* raw_inferred_name_;
+  const AstConsString* raw_inferred_name_;
   Handle<String> inferred_name_;
   AstProperties ast_properties_;
   int function_literal_id_;
@@ -2925,6 +2974,21 @@ class SuperCallReference final : public Expression {
   VariableProxy* this_function_var_;
 };
 
+// This AST Node is used to represent a dynamic import call --
+// import(argument).
+class ImportCallExpression final : public Expression {
+ public:
+  Expression* argument() const { return argument_; }
+  void set_argument(Expression* argument) { argument_ = argument; }
+
+ private:
+  friend class AstNodeFactory;
+
+  ImportCallExpression(Expression* argument, int pos)
+      : Expression(pos, kImportCallExpression), argument_(argument) {}
+
+  Expression* argument_;
+};
+
 // This class is produced when parsing the () in arrow functions without any
 // arguments and is not actually a valid expression.
@@ -3245,38 +3309,33 @@ class AstNodeFactory final BASE_EMBEDDED {
   }
 
   TryCatchStatement* NewTryCatchStatement(Block* try_block, Scope* scope,
-                                          Variable* variable,
                                           Block* catch_block, int pos) {
-    return new (zone_) TryCatchStatement(
-        try_block, scope, variable, catch_block, HandlerTable::CAUGHT, pos);
+    return new (zone_) TryCatchStatement(try_block, scope, catch_block,
+                                         HandlerTable::CAUGHT, pos);
   }
 
   TryCatchStatement* NewTryCatchStatementForReThrow(Block* try_block,
                                                     Scope* scope,
-                                                    Variable* variable,
                                                     Block* catch_block,
                                                     int pos) {
-    return new (zone_) TryCatchStatement(
-        try_block, scope, variable, catch_block, HandlerTable::UNCAUGHT, pos);
+    return new (zone_) TryCatchStatement(try_block, scope, catch_block,
+                                         HandlerTable::UNCAUGHT, pos);
   }
 
   TryCatchStatement* NewTryCatchStatementForDesugaring(Block* try_block,
                                                        Scope* scope,
-                                                       Variable* variable,
                                                        Block* catch_block,
                                                        int pos) {
-    return new (zone_) TryCatchStatement(
-        try_block, scope, variable, catch_block, HandlerTable::DESUGARING, pos);
+    return new (zone_) TryCatchStatement(try_block, scope, catch_block,
+                                         HandlerTable::DESUGARING, pos);
   }
 
   TryCatchStatement* NewTryCatchStatementForAsyncAwait(Block* try_block,
                                                        Scope* scope,
-                                                       Variable* variable,
                                                        Block* catch_block,
                                                        int pos) {
-    return new (zone_)
-        TryCatchStatement(try_block, scope, variable, catch_block,
-                          HandlerTable::ASYNC_AWAIT, pos);
+    return new (zone_) TryCatchStatement(try_block, scope, catch_block,
+                                         HandlerTable::ASYNC_AWAIT, pos);
   }
 
   TryFinallyStatement* NewTryFinallyStatement(Block* try_block,
@@ -3481,10 +3540,12 @@ class AstNodeFactory final BASE_EMBEDDED {
     return assign;
   }
 
-  Yield* NewYield(Expression* generator_object, Expression* expression, int pos,
-                  Yield::OnException on_exception) {
+  Suspend* NewSuspend(Expression* generator_object, Expression* expression,
+                      int pos, Suspend::OnException on_exception,
+                      SuspendFlags flags) {
     if (!expression) expression = NewUndefinedLiteral(pos);
-    return new (zone_) Yield(generator_object, expression, pos, on_exception);
+    return new (zone_)
+        Suspend(generator_object, expression, pos, on_exception, flags);
   }
 
   Throw* NewThrow(Expression* exception, int pos) {
@@ -3578,6 +3639,10 @@ class AstNodeFactory final BASE_EMBEDDED {
     return new (zone_) GetIterator(iterable, hint, pos);
   }
 
+  ImportCallExpression* NewImportCallExpression(Expression* args, int pos) {
+    return new (zone_) ImportCallExpression(args, pos);
+  }
+
   Zone* zone() const { return zone_; }
   void set_zone(Zone* zone) { zone_ = zone; }
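The Suspend accessors above are plain mask-and-compare operations over one packed flags value. A standalone sketch of the masking, with an assumed layout (two low bits for the suspend type, one bit for the generator kind; the real SuspendFlags constants live in V8's globals and may differ):

#include <cassert>
#include <cstdint>

enum SuspendFlags : uint8_t {
  kYield = 0,
  kYieldStar = 1,
  kAwait = 2,
  kSuspendTypeMask = 3,         // low two bits select the suspend type
  kGenerator = 0 << 2,
  kAsyncGenerator = 1 << 2,
  kGeneratorTypeMask = 1 << 2   // one bit selects the generator kind
};

static SuspendFlags SuspendType(uint8_t flags) {
  return static_cast<SuspendFlags>(flags & kSuspendTypeMask);
}
static SuspendFlags GeneratorType(uint8_t flags) {
  return static_cast<SuspendFlags>(flags & kGeneratorTypeMask);
}

int main() {
  uint8_t flags = kAwait | kAsyncGenerator;  // an await in an async generator
  assert(SuspendType(flags) == kAwait);
  assert(GeneratorType(flags) == kAsyncGenerator);
  assert(SuspendType(flags) != kYieldStar);
  return 0;
}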

2
deps/v8/src/ast/context-slot-cache.h

@@ -38,7 +38,7 @@ class ContextSlotCache {
     for (int i = 0; i < kLength; ++i) {
       keys_[i].data = NULL;
       keys_[i].name = NULL;
-      values_[i] = kNotFound;
+      values_[i] = static_cast<uint32_t>(kNotFound);
     }
   }
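The cast is needed because the sentinel is a negative constant being stored into an unsigned slot; making the conversion explicit keeps sign-conversion warnings quiet, and the wrapped bit pattern still compares equal when the lookup side casts the same way. A tiny illustration (kNotFound here is an assumed stand-in for the real constant):

#include <cassert>
#include <cstdint>

static const int kNotFound = -1;

int main() {
  uint32_t slot = static_cast<uint32_t>(kNotFound);  // stores 0xFFFFFFFF
  // A lookup that casts the same sentinel the same way still matches:
  assert(slot == static_cast<uint32_t>(kNotFound));
  assert(slot == UINT32_MAX);
  return 0;
}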

3
deps/v8/src/ast/modules.h

@@ -214,8 +214,9 @@ class ModuleDescriptor : public ZoneObject {
   int AddModuleRequest(const AstRawString* specifier) {
     DCHECK_NOT_NULL(specifier);
+    int module_requests_count = static_cast<int>(module_requests_.size());
     auto it = module_requests_
-                  .insert(std::make_pair(specifier, module_requests_.size()))
+                  .insert(std::make_pair(specifier, module_requests_count))
                   .first;
     return it->second;
   }
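AddModuleRequest relies on map insert() being a no-op for an existing key, so re-importing the same specifier returns the index assigned the first time; the new local only avoids mixing size_t into the pair. The idiom in isolation, with std::string standing in for const AstRawString*:

#include <cassert>
#include <map>
#include <string>

static std::map<std::string, int> module_requests;

static int AddModuleRequest(const std::string& specifier) {
  int count = static_cast<int>(module_requests.size());
  // insert() leaves an existing entry untouched; .first points at the entry
  // either way, so duplicate specifiers get their original index back.
  auto it = module_requests.insert(std::make_pair(specifier, count)).first;
  return it->second;
}

int main() {
  assert(AddModuleRequest("./a.js") == 0);
  assert(AddModuleRequest("./b.js") == 1);
  assert(AddModuleRequest("./a.js") == 0);  // deduplicated
  return 0;
}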

73
deps/v8/src/ast/prettyprinter.cc

@@ -254,9 +254,7 @@ void CallPrinter::VisitAssignment(Assignment* node) {
   Find(node->value());
 }
 
-void CallPrinter::VisitYield(Yield* node) { Find(node->expression()); }
+void CallPrinter::VisitSuspend(Suspend* node) { Find(node->expression()); }
 
 void CallPrinter::VisitThrow(Throw* node) { Find(node->exception()); }
@@ -372,8 +370,23 @@ void CallPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
 }
 
 void CallPrinter::VisitGetIterator(GetIterator* node) {
-  Print("GetIterator(");
+  // Because CallPrinter is used by RenderCallSite() in runtime-internal.cc,
+  // and the GetIterator node results in a Call, either to a [@@iterator] or
+  // [@@asyncIterator]. It's unknown which call this error refers to, so we
+  // assume it's the first call.
+  bool was_found = !found_ && node->position() == position_;
+  if (was_found) {
+    found_ = true;
+  }
   Find(node->iterable(), true);
+  Print(node->hint() == IteratorType::kNormal ? "[Symbol.iterator]"
+                                              : "[Symbol.asyncIterator]");
+  if (was_found) done_ = true;
+}
+
+void CallPrinter::VisitImportCallExpression(ImportCallExpression* node) {
+  Print("ImportCall(");
+  Find(node->argument(), true);
   Print(")");
 }
@@ -623,7 +636,8 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info,
   } else {
     EmbeddedVector<char, 256> buf;
     int pos =
-        SNPrintF(buf, "%s (mode = %s", info, VariableMode2String(var->mode()));
+        SNPrintF(buf, "%s (%p) (mode = %s", info, reinterpret_cast<void*>(var),
+                 VariableMode2String(var->mode()));
     SNPrintF(buf + pos, ")");
     PrintLiteralIndented(buf.start(), value, true);
   }
@@ -649,8 +663,8 @@ const char* AstPrinter::PrintProgram(FunctionLiteral* program) {
   { IndentedScope indent(this, "FUNC", program->position());
     PrintIndented("KIND");
     Print(" %d\n", program->kind());
-    PrintIndented("YIELD COUNT");
-    Print(" %d\n", program->yield_count());
+    PrintIndented("SUSPEND COUNT");
+    Print(" %d\n", program->suspend_count());
     PrintLiteralIndented("NAME", program->name(), true);
     PrintLiteralIndented("INFERRED NAME", program->inferred_name(), true);
     PrintParameters(program->scope());
@@ -801,8 +815,8 @@ void AstPrinter::VisitCaseClause(CaseClause* clause) {
 
 void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
   IndentedScope indent(this, "DO", node->position());
-  PrintIndented("YIELD COUNT");
-  Print(" %d\n", node->yield_count());
+  PrintIndented("SUSPEND COUNT");
+  Print(" %d\n", node->suspend_count());
   PrintLabelsIndented(node->labels());
   PrintIndentedVisit("BODY", node->body());
   PrintIndentedVisit("COND", node->cond());
@@ -811,8 +825,8 @@ void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
 
 void AstPrinter::VisitWhileStatement(WhileStatement* node) {
   IndentedScope indent(this, "WHILE", node->position());
-  PrintIndented("YIELD COUNT");
-  Print(" %d\n", node->yield_count());
+  PrintIndented("SUSPEND COUNT");
+  Print(" %d\n", node->suspend_count());
   PrintLabelsIndented(node->labels());
   PrintIndentedVisit("COND", node->cond());
   PrintIndentedVisit("BODY", node->body());
@@ -821,8 +835,8 @@ void AstPrinter::VisitWhileStatement(WhileStatement* node) {
 
 void AstPrinter::VisitForStatement(ForStatement* node) {
   IndentedScope indent(this, "FOR", node->position());
-  PrintIndented("YIELD COUNT");
-  Print(" %d\n", node->yield_count());
+  PrintIndented("SUSPEND COUNT");
+  Print(" %d\n", node->suspend_count());
   PrintLabelsIndented(node->labels());
   if (node->init()) PrintIndentedVisit("INIT", node->init());
   if (node->cond()) PrintIndentedVisit("COND", node->cond());
@@ -833,8 +847,8 @@ void AstPrinter::VisitForStatement(ForStatement* node) {
 
 void AstPrinter::VisitForInStatement(ForInStatement* node) {
   IndentedScope indent(this, "FOR IN", node->position());
-  PrintIndented("YIELD COUNT");
-  Print(" %d\n", node->yield_count());
+  PrintIndented("SUSPEND COUNT");
+  Print(" %d\n", node->suspend_count());
   PrintIndentedVisit("FOR", node->each());
   PrintIndentedVisit("IN", node->enumerable());
   PrintIndentedVisit("BODY", node->body());
@@ -843,8 +857,8 @@ void AstPrinter::VisitForInStatement(ForInStatement* node) {
 
 void AstPrinter::VisitForOfStatement(ForOfStatement* node) {
   IndentedScope indent(this, "FOR OF", node->position());
-  PrintIndented("YIELD COUNT");
-  Print(" %d\n", node->yield_count());
+  PrintIndented("SUSPEND COUNT");
+  Print(" %d\n", node->suspend_count());
   PrintIndentedVisit("INIT", node->assign_iterator());
   PrintIndentedVisit("NEXT", node->next_result());
   PrintIndentedVisit("DONE", node->result_done());
@@ -856,9 +870,8 @@ void AstPrinter::VisitForOfStatement(ForOfStatement* node) {
 void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
   IndentedScope indent(this, "TRY CATCH", node->position());
   PrintTryStatement(node);
-  PrintLiteralWithModeIndented("CATCHVAR",
-                               node->variable(),
-                               node->variable()->name());
+  PrintLiteralWithModeIndented("CATCHVAR", node->scope()->catch_variable(),
+                               node->scope()->catch_variable()->name());
   PrintIndentedVisit("CATCH", node->catch_block());
 }
@@ -1095,10 +1108,9 @@ void AstPrinter::VisitAssignment(Assignment* node) {
   Visit(node->value());
 }
 
-void AstPrinter::VisitYield(Yield* node) {
+void AstPrinter::VisitSuspend(Suspend* node) {
   EmbeddedVector<char, 128> buf;
-  SNPrintF(buf, "YIELD id %d", node->yield_id());
+  SNPrintF(buf, "SUSPEND id %d", node->suspend_id());
   IndentedScope indent(this, buf.start(), node->position());
   Visit(node->expression());
 }
@@ -1146,14 +1158,8 @@ void AstPrinter::VisitCallNew(CallNew* node) {
 
 void AstPrinter::VisitCallRuntime(CallRuntime* node) {
   EmbeddedVector<char, 128> buf;
-  if (node->is_jsruntime()) {
-    SNPrintF(
-        buf, "CALL RUNTIME %s code = %p", node->debug_name(),
-        static_cast<void*>(isolate_->context()->get(node->context_index())));
-  } else {
-    SNPrintF(buf, "CALL RUNTIME %s", node->debug_name());
-  }
+  SNPrintF(buf, "CALL RUNTIME %s%s", node->debug_name(),
+           node->is_jsruntime() ? " (JS function)" : "");
   IndentedScope indent(this, buf.start(), node->position());
   PrintArguments(node->arguments());
 }
@@ -1203,6 +1209,11 @@ void AstPrinter::VisitGetIterator(GetIterator* node) {
   Visit(node->iterable());
 }
 
+void AstPrinter::VisitImportCallExpression(ImportCallExpression* node) {
+  IndentedScope indent(this, "IMPORT-CALL", node->position());
+  Visit(node->argument());
+}
+
 void AstPrinter::VisitThisFunction(ThisFunction* node) {
   IndentedScope indent(this, "THIS-FUNCTION", node->position());
 }
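The was_found/done_ dance in VisitGetIterator follows CallPrinter's general strategy: walk the whole AST, start emitting when entering the node whose source position matches the error position, and stop once that subtree is left. A toy standalone version of that walk (tree shape and names are illustrative only, not the real printer):

#include <cstdio>
#include <vector>

struct Node {
  int position;
  const char* text;
  std::vector<Node*> children;
};

struct Printer {
  int target_position = 0;
  bool found = false, done = false;

  void Find(Node* n) {
    if (done) return;
    bool was_found = !found && n->position == target_position;
    if (was_found) found = true;           // start printing at this subtree
    if (found) std::printf("%s", n->text);
    for (Node* c : n->children) Find(c);
    if (was_found) done = true;            // stop after leaving the subtree
  }
};

int main() {
  Node leaf{12, "iterable[Symbol.iterator]", {}};
  Node root{0, "", {&leaf}};
  Printer p;
  p.target_position = 12;
  p.Find(&root);
  std::printf("\n");
  return 0;
}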

264
deps/v8/src/ast/scopes.cc

@@ -112,11 +112,12 @@ void SloppyBlockFunctionMap::Delegate::set_statement(Statement* statement) {
 }
 
 SloppyBlockFunctionMap::SloppyBlockFunctionMap(Zone* zone)
-    : ZoneHashMap(8, ZoneAllocationPolicy(zone)) {}
+    : ZoneHashMap(8, ZoneAllocationPolicy(zone)), count_(0) {}
 
-void SloppyBlockFunctionMap::Declare(
-    Zone* zone, const AstRawString* name,
-    SloppyBlockFunctionMap::Delegate* delegate) {
+void SloppyBlockFunctionMap::Declare(Zone* zone, const AstRawString* name,
+                                     Scope* scope,
+                                     SloppyBlockFunctionStatement* statement) {
+  auto* delegate = new (zone) Delegate(scope, statement, count_++);
   // AstRawStrings are unambiguous, i.e., the same string is always represented
   // by the same AstRawString*.
   Entry* p =
@@ -155,14 +156,22 @@ Scope::Snapshot::Snapshot(Scope* scope)
       top_inner_scope_(scope->inner_scope_),
       top_unresolved_(scope->unresolved_),
       top_local_(scope->GetClosureScope()->locals_.end()),
-      top_decl_(scope->GetClosureScope()->decls_.end()) {}
+      top_decl_(scope->GetClosureScope()->decls_.end()),
+      outer_scope_calls_eval_(scope->scope_calls_eval_) {
+  // Reset in order to record eval calls during this Snapshot's lifetime.
+  outer_scope_->scope_calls_eval_ = false;
+}
+
+Scope::Snapshot::~Snapshot() {
+  // Restore previous calls_eval bit if needed.
+  if (outer_scope_calls_eval_) {
+    outer_scope_->scope_calls_eval_ = true;
+  }
+}
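With this change the Snapshot doubles as a RAII guard: it saves and clears the outer scope's calls_eval bit on construction, so eval calls made during the snapshot's lifetime are recorded fresh, then ORs the saved bit back in on destruction. The save/restore behavior in a minimal standalone sketch (Scope here is a one-field stand-in for the real class):

#include <cassert>

struct Scope {
  bool scope_calls_eval = false;
};

class Snapshot {
 public:
  explicit Snapshot(Scope* scope)
      : outer_scope_(scope), outer_scope_calls_eval_(scope->scope_calls_eval) {
    // Reset so eval calls during this Snapshot's lifetime are recorded fresh.
    outer_scope_->scope_calls_eval = false;
  }
  ~Snapshot() {
    // Restore the previous bit; a fresh eval seen meanwhile stays set too.
    if (outer_scope_calls_eval_) outer_scope_->scope_calls_eval = true;
  }

 private:
  Scope* outer_scope_;
  bool outer_scope_calls_eval_;
};

int main() {
  Scope s;
  s.scope_calls_eval = true;
  {
    Snapshot snap(&s);
    assert(!s.scope_calls_eval);  // cleared for the snapshot's lifetime
  }
  assert(s.scope_calls_eval);     // restored on destruction
  return 0;
}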
 DeclarationScope::DeclarationScope(Zone* zone,
                                    AstValueFactory* ast_value_factory)
-    : Scope(zone),
-      function_kind_(kNormalFunction),
-      params_(4, zone),
-      sloppy_block_function_map_(zone) {
+    : Scope(zone), function_kind_(kNormalFunction), params_(4, zone) {
   DCHECK_EQ(scope_type_, SCRIPT_SCOPE);
   SetDefaults();
@@ -176,8 +185,7 @@ DeclarationScope::DeclarationScope(Zone* zone, Scope* outer_scope,
                                    FunctionKind function_kind)
     : Scope(zone, outer_scope, scope_type),
       function_kind_(function_kind),
-      params_(4, zone),
-      sloppy_block_function_map_(zone) {
+      params_(4, zone) {
   DCHECK_NE(scope_type, SCRIPT_SCOPE);
   SetDefaults();
   asm_function_ = outer_scope_->IsAsmModule();
@@ -193,10 +201,11 @@ ModuleScope::ModuleScope(DeclarationScope* script_scope,
   DeclareThis(ast_value_factory);
 }

-ModuleScope::ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
+ModuleScope::ModuleScope(Handle<ScopeInfo> scope_info,
                          AstValueFactory* avfactory)
     : DeclarationScope(avfactory->zone(), MODULE_SCOPE, scope_info) {
   Zone* zone = avfactory->zone();
+  Isolate* isolate = scope_info->GetIsolate();
   Handle<ModuleInfo> module_info(scope_info->ModuleDescriptorInfo(), isolate);

   set_language_mode(STRICT);
@@ -254,20 +263,22 @@ Scope::Scope(Zone* zone, ScopeType scope_type, Handle<ScopeInfo> scope_info)
   set_language_mode(scope_info->language_mode());
   num_heap_slots_ = scope_info->ContextLength();
   DCHECK_LE(Context::MIN_CONTEXT_SLOTS, num_heap_slots_);
+  // We don't really need to use the preparsed scope data; this is just to
+  // shorten the recursion in SetMustUsePreParsedScopeData.
+  must_use_preparsed_scope_data_ = true;
 }

 DeclarationScope::DeclarationScope(Zone* zone, ScopeType scope_type,
                                    Handle<ScopeInfo> scope_info)
     : Scope(zone, scope_type, scope_info),
       function_kind_(scope_info->function_kind()),
-      params_(0, zone),
-      sloppy_block_function_map_(zone) {
+      params_(0, zone) {
   DCHECK_NE(scope_type, SCRIPT_SCOPE);
   SetDefaults();
 }

 Scope::Scope(Zone* zone, const AstRawString* catch_variable_name,
-             Handle<ScopeInfo> scope_info)
+             MaybeAssignedFlag maybe_assigned, Handle<ScopeInfo> scope_info)
     : zone_(zone),
       outer_scope_(nullptr),
       variables_(zone),
@@ -280,7 +291,8 @@ Scope::Scope(Zone* zone, const AstRawString* catch_variable_name,
   // Cache the catch variable, even though it's also available via the
   // scope_info, as the parser expects that a catch scope always has the catch
   // variable as first and only variable.
-  Variable* variable = Declare(zone, catch_variable_name, VAR);
+  Variable* variable = Declare(zone, catch_variable_name, VAR, NORMAL_VARIABLE,
+                               kCreatedInitialized, maybe_assigned);
   AllocateHeapSlot(variable);
 }
@@ -293,6 +305,7 @@ void DeclarationScope::SetDefaults() {
   has_arguments_parameter_ = false;
   scope_uses_super_property_ = false;
   has_rest_ = false;
+  sloppy_block_function_map_ = nullptr;
   receiver_ = nullptr;
   new_target_ = nullptr;
   function_ = nullptr;
@@ -300,6 +313,7 @@ void DeclarationScope::SetDefaults() {
   rare_data_ = nullptr;
   should_eager_compile_ = false;
   was_lazily_parsed_ = false;
+  is_skipped_function_ = false;
 #ifdef DEBUG
   DeclarationScope* outer_declaration_scope =
       outer_scope_ ? outer_scope_->GetDeclarationScope() : nullptr;
@@ -336,6 +350,8 @@ void Scope::SetDefaults() {
   force_context_allocation_ = false;

   is_declaration_scope_ = false;
+
+  must_use_preparsed_scope_data_ = false;
 }
 bool Scope::HasSimpleParameters() {
@@ -369,8 +385,7 @@ bool Scope::IsAsmFunction() const {
   return is_function_scope() && AsDeclarationScope()->asm_function();
 }

-Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
-                                    ScopeInfo* scope_info,
+Scope* Scope::DeserializeScopeChain(Zone* zone, ScopeInfo* scope_info,
                                     DeclarationScope* script_scope,
                                     AstValueFactory* ast_value_factory,
                                     DeserializationMode deserialization_mode) {
@@ -415,15 +430,20 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
       outer_scope = new (zone) Scope(zone, BLOCK_SCOPE, handle(scope_info));
     }
   } else if (scope_info->scope_type() == MODULE_SCOPE) {
-    outer_scope = new (zone)
-        ModuleScope(isolate, handle(scope_info), ast_value_factory);
+    outer_scope =
+        new (zone) ModuleScope(handle(scope_info), ast_value_factory);
   } else {
     DCHECK_EQ(scope_info->scope_type(), CATCH_SCOPE);
     DCHECK_EQ(scope_info->LocalCount(), 1);
-    String* name = scope_info->LocalName(0);
-    outer_scope = new (zone)
-        Scope(zone, ast_value_factory->GetString(handle(name, isolate)),
-              handle(scope_info));
+    DCHECK_EQ(scope_info->ContextLocalCount(), 1);
+    DCHECK_EQ(scope_info->ContextLocalMode(0), VAR);
+    DCHECK_EQ(scope_info->ContextLocalInitFlag(0), kCreatedInitialized);
+    String* name = scope_info->ContextLocalName(0);
+    MaybeAssignedFlag maybe_assigned =
+        scope_info->ContextLocalMaybeAssignedFlag(0);
+    outer_scope =
+        new (zone) Scope(zone, ast_value_factory->GetString(handle(name)),
+                         maybe_assigned, handle(scope_info));
   }
   if (deserialization_mode == DeserializationMode::kScopesOnly) {
     outer_scope->scope_info_ = Handle<ScopeInfo>::null();
@@ -469,9 +489,12 @@ int Scope::num_parameters() const {
 void DeclarationScope::DeclareSloppyBlockFunction(
     const AstRawString* name, Scope* scope,
     SloppyBlockFunctionStatement* statement) {
-  auto* delegate =
-      new (zone()) SloppyBlockFunctionMap::Delegate(scope, statement);
-  sloppy_block_function_map_.Declare(zone(), name, delegate);
+  if (sloppy_block_function_map_ == nullptr) {
+    sloppy_block_function_map_ =
+        new (zone()->New(sizeof(SloppyBlockFunctionMap)))
+            SloppyBlockFunctionMap(zone());
+  }
+  sloppy_block_function_map_->Declare(zone(), name, scope, statement);
 }
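Note: DeclareSloppyBlockFunction above now allocates its map lazily, placement-new'ing it into zone memory, so scopes that never see a sloppy block function pay only for a null pointer. A rough sketch of that pattern, with a toy Arena standing in for V8's Zone (names here are illustrative):

#include <cstddef>
#include <new>
#include <vector>

class Arena {
 public:
  // Tiny allocator standing in for Zone::New; the arena owns all memory.
  void* New(size_t size) {
    blocks_.push_back(new char[size]);
    return blocks_.back();
  }
  ~Arena() {
    for (char* b : blocks_) delete[] b;
  }

 private:
  std::vector<char*> blocks_;
};

struct FunctionMap {
  explicit FunctionMap(Arena* arena) { (void)arena; }
  // ... hash-map state would live here ...
};

class Scope {
 public:
  explicit Scope(Arena* arena) : arena_(arena) {}
  FunctionMap* EnsureFunctionMap() {
    if (map_ == nullptr) {
      // Raw storage from the arena, then placement-new into it; no delete
      // is ever issued because the arena reclaims everything at once.
      map_ = new (arena_->New(sizeof(FunctionMap))) FunctionMap(arena_);
    }
    return map_;
  }

 private:
  Arena* arena_;
  FunctionMap* map_ = nullptr;
};

int main() {
  Arena arena;
  Scope scope(&arena);
  scope.EnsureFunctionMap();  // allocated on first use only
}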
void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) { void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
@ -481,12 +504,19 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
DCHECK(HasSimpleParameters() || is_block_scope() || is_being_lazily_parsed_); DCHECK(HasSimpleParameters() || is_block_scope() || is_being_lazily_parsed_);
DCHECK_EQ(factory == nullptr, is_being_lazily_parsed_); DCHECK_EQ(factory == nullptr, is_being_lazily_parsed_);
bool has_simple_parameters = HasSimpleParameters(); SloppyBlockFunctionMap* map = sloppy_block_function_map();
if (map == nullptr) return;
const bool has_simple_parameters = HasSimpleParameters();
// The declarations need to be added in the order they were seen,
// so accumulate declared names sorted by index.
ZoneMap<int, const AstRawString*> names_to_declare(zone());
// For each variable which is used as a function declaration in a sloppy // For each variable which is used as a function declaration in a sloppy
// block, // block,
SloppyBlockFunctionMap* map = sloppy_block_function_map();
for (ZoneHashMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) { for (ZoneHashMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) {
AstRawString* name = static_cast<AstRawString*>(p->key); const AstRawString* name = static_cast<AstRawString*>(p->key);
// If the variable wouldn't conflict with a lexical declaration // If the variable wouldn't conflict with a lexical declaration
// or parameter, // or parameter,
@ -509,7 +539,7 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
} }
} }
Variable* created_variable = nullptr; bool declaration_queued = false;
// Write in assignments to var for each block-scoped function declaration // Write in assignments to var for each block-scoped function declaration
auto delegates = static_cast<SloppyBlockFunctionMap::Delegate*>(p->value); auto delegates = static_cast<SloppyBlockFunctionMap::Delegate*>(p->value);
@@ -543,50 +573,59 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
       if (!should_hoist) continue;

-      // Declare a var-style binding for the function in the outer scope
+      if (!declaration_queued) {
+        declaration_queued = true;
+        names_to_declare.insert({delegate->index(), name});
+      }
+
       if (factory) {
         DCHECK(!is_being_lazily_parsed_);
-        if (created_variable == nullptr) {
-          VariableProxy* proxy =
-              factory->NewVariableProxy(name, NORMAL_VARIABLE);
-          auto declaration =
-              factory->NewVariableDeclaration(proxy, this, kNoSourcePosition);
-          // Based on the preceding check, it doesn't matter what we pass as
-          // allow_harmony_restrictive_generators and
-          // sloppy_mode_block_scope_function_redefinition.
-          bool ok = true;
-          created_variable = DeclareVariable(
-              declaration, VAR, Variable::DefaultInitializationFlag(VAR), false,
-              nullptr, &ok);
-          CHECK(ok);  // Based on the preceding check, this should not fail
-        }
         Expression* assignment = factory->NewAssignment(
             Token::ASSIGN, NewUnresolved(factory, name),
             delegate->scope()->NewUnresolved(factory, name), kNoSourcePosition);
         Statement* statement =
             factory->NewExpressionStatement(assignment, kNoSourcePosition);
         delegate->set_statement(statement);
-      } else {
-        DCHECK(is_being_lazily_parsed_);
-        if (created_variable == nullptr) {
-          created_variable = DeclareVariableName(name, VAR);
-          if (created_variable != kDummyPreParserVariable &&
-              created_variable != kDummyPreParserLexicalVariable) {
-            DCHECK(FLAG_preparser_scope_analysis);
-            created_variable->set_maybe_assigned();
-          }
-        }
       }
     }
   }
+
+  if (names_to_declare.empty()) return;
+
+  for (const auto& index_and_name : names_to_declare) {
+    const AstRawString* name = index_and_name.second;
+    if (factory) {
+      DCHECK(!is_being_lazily_parsed_);
+      VariableProxy* proxy = factory->NewVariableProxy(name, NORMAL_VARIABLE);
+      auto declaration =
+          factory->NewVariableDeclaration(proxy, this, kNoSourcePosition);
+      // Based on the preceding checks, it doesn't matter what we pass as
+      // allow_harmony_restrictive_generators and
+      // sloppy_mode_block_scope_function_redefinition.
+      bool ok = true;
+      DeclareVariable(declaration, VAR,
+                      Variable::DefaultInitializationFlag(VAR), false, nullptr,
+                      &ok);
+      DCHECK(ok);
+    } else {
+      DCHECK(is_being_lazily_parsed_);
+      Variable* var = DeclareVariableName(name, VAR);
+      if (var != kDummyPreParserVariable &&
+          var != kDummyPreParserLexicalVariable) {
+        DCHECK(FLAG_preparser_scope_analysis);
+        var->set_maybe_assigned();
+      }
+    }
+  }
 }
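Note: the rewritten HoistSloppyBlockFunctions runs in two passes because hash-map iteration order is arbitrary: hoistable names are first queued keyed by the index recorded in their Delegate, then declared in index order. A small sketch of the same re-sorting trick using standard containers:

#include <iostream>
#include <map>
#include <string>
#include <unordered_map>

int main() {
  // Pass 1: a hash map paired with an insertion counter
  // (cf. Delegate::index() above).
  std::unordered_map<std::string, int> seen;
  int count = 0;
  for (const char* name : {"f", "g", "h"}) seen.emplace(name, count++);

  // Pass 2: re-key by index so declarations happen in source order.
  std::map<int, std::string> by_index;
  for (const auto& entry : seen) by_index.emplace(entry.second, entry.first);
  for (const auto& entry : by_index) std::cout << entry.second << '\n';
  // Prints f, g, h regardless of unordered_map iteration order.
}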
-void DeclarationScope::Analyze(ParseInfo* info, AnalyzeMode mode) {
-  RuntimeCallTimerScope runtimeTimer(info->isolate(),
+void DeclarationScope::Analyze(ParseInfo* info, Isolate* isolate,
+                               AnalyzeMode mode) {
+  RuntimeCallTimerScope runtimeTimer(isolate,
                                      &RuntimeCallStats::CompileScopeAnalysis);
   DCHECK(info->literal() != NULL);
   DeclarationScope* scope = info->literal()->scope();
+  DCHECK(scope->scope_info_.is_null());

   Handle<ScopeInfo> outer_scope_info;
   if (info->maybe_outer_scope_info().ToHandle(&outer_scope_info)) {
@@ -595,7 +634,7 @@ void DeclarationScope::Analyze(ParseInfo* info, AnalyzeMode mode) {
           DeclarationScope(info->zone(), info->ast_value_factory());
       info->set_script_scope(script_scope);
       scope->ReplaceOuterScope(Scope::DeserializeScopeChain(
-          info->isolate(), info->zone(), *outer_scope_info, script_scope,
+          info->zone(), *outer_scope_info, script_scope,
           info->ast_value_factory(),
           Scope::DeserializationMode::kIncludingVariables));
     } else {
@@ -622,13 +661,19 @@ void DeclarationScope::Analyze(ParseInfo* info, AnalyzeMode mode) {
   // The outer scope is never lazy.
   scope->set_should_eager_compile();

-  scope->AllocateVariables(info, mode);
+  if (scope->must_use_preparsed_scope_data_) {
+    DCHECK(FLAG_preparser_scope_analysis);
+    DCHECK_NOT_NULL(info->preparsed_scope_data());
+    DCHECK_EQ(scope->scope_type_, ScopeType::FUNCTION_SCOPE);
+    info->preparsed_scope_data()->RestoreData(scope);
+  }
+
+  scope->AllocateVariables(info, isolate, mode);

   // Ensuring that the outer script scope has a scope info avoids having
   // special case for native contexts vs other contexts.
   if (info->script_scope()->scope_info_.is_null()) {
-    info->script_scope()->scope_info_ =
-        handle(ScopeInfo::Empty(info->isolate()));
+    info->script_scope()->scope_info_ = handle(ScopeInfo::Empty(isolate));
   }

 #ifdef DEBUG
@@ -722,6 +767,16 @@ Variable* DeclarationScope::DeclarePromiseVar(const AstRawString* name) {
   return result;
 }

+Variable* DeclarationScope::DeclareAsyncGeneratorAwaitVar(
+    const AstRawString* name) {
+  DCHECK(is_function_scope());
+  DCHECK_NULL(async_generator_await_var());
+  Variable* result = EnsureRareData()->promise = NewTemporary(name);
+  DCHECK_NULL(promise_var());  // promise is alias for generator await var
+  result->set_is_used();
+  return result;
+}
+
 bool Scope::HasBeenRemoved() const {
   if (sibling() == this) {
     DCHECK_NULL(inner_scope_);
@@ -778,7 +833,9 @@ Scope* Scope::FinalizeBlockScope() {
     unresolved_ = nullptr;
   }

-  PropagateUsageFlagsToScope(outer_scope_);
+  if (scope_calls_eval_) outer_scope()->scope_calls_eval_ = true;
+  if (inner_scope_calls_eval_) outer_scope()->inner_scope_calls_eval_ = true;

   // This block does not need a context.
   num_heap_slots_ = 0;
@@ -820,10 +877,15 @@ void Scope::Snapshot::Reparent(DeclarationScope* new_parent) const {
   for (; inner_scope->sibling() != top_inner_scope_;
        inner_scope = inner_scope->sibling()) {
     inner_scope->outer_scope_ = new_parent;
+    if (inner_scope->inner_scope_calls_eval_) {
+      new_parent->inner_scope_calls_eval_ = true;
+    }
     DCHECK_NE(inner_scope, new_parent);
   }
   inner_scope->outer_scope_ = new_parent;
+  if (inner_scope->inner_scope_calls_eval_) {
+    new_parent->inner_scope_calls_eval_ = true;
+  }
   new_parent->inner_scope_ = new_parent->sibling_;
   inner_scope->sibling_ = nullptr;
   // Reset the sibling rather than the inner_scope_ since we
@@ -860,6 +922,15 @@ void Scope::Snapshot::Reparent(DeclarationScope* new_parent) const {
   }
   outer_closure->locals_.Rewind(top_local_);
   outer_closure->decls_.Rewind(top_decl_);
+
+  // Move eval calls since Snapshot's creation into new_parent.
+  if (outer_scope_->scope_calls_eval_) {
+    new_parent->scope_calls_eval_ = true;
+    new_parent->inner_scope_calls_eval_ = true;
+  }
+
+  // Reset the outer_scope's eval state. It will be restored to its
+  // original value as necessary in the destructor of this class.
+  outer_scope_->scope_calls_eval_ = false;
 }
 void Scope::ReplaceOuterScope(Scope* outer) {
@@ -871,15 +942,6 @@ void Scope::ReplaceOuterScope(Scope* outer) {
   outer_scope_ = outer;
 }

-void Scope::PropagateUsageFlagsToScope(Scope* other) {
-  DCHECK_NOT_NULL(other);
-  DCHECK(!already_resolved_);
-  DCHECK(!other->already_resolved_);
-  if (calls_eval()) other->RecordEvalCall();
-  if (inner_scope_calls_eval_) other->inner_scope_calls_eval_ = true;
-}
-
 Variable* Scope::LookupInScopeInfo(const AstRawString* name) {
   Handle<String> name_handle = name->string();
   // The Scope is backed up by ScopeInfo. This means it cannot operate in a
@@ -946,7 +1008,7 @@ Variable* Scope::Lookup(const AstRawString* name) {
 Variable* DeclarationScope::DeclareParameter(
     const AstRawString* name, VariableMode mode, bool is_optional, bool is_rest,
-    bool* is_duplicate, AstValueFactory* ast_value_factory) {
+    bool* is_duplicate, AstValueFactory* ast_value_factory, int position) {
   DCHECK(!already_resolved_);
   DCHECK(is_function_scope() || is_module_scope());
   DCHECK(!has_rest_);
@@ -963,6 +1025,7 @@ Variable* DeclarationScope::DeclareParameter(
     *is_duplicate = IsDeclaredParameter(name);
   }
   has_rest_ = is_rest;
+  var->set_initializer_position(position);
   params_.Add(var, zone());
   if (name == ast_value_factory->arguments_string()) {
     has_arguments_parameter_ = true;
@@ -1071,12 +1134,14 @@ Variable* Scope::DeclareVariable(
       // will be a permitted duplicate.
       FunctionKind function_kind =
           declaration->AsFunctionDeclaration()->fun()->kind();
-      duplicate_allowed =
-          GetDeclarationScope()->sloppy_block_function_map()->Lookup(
-              const_cast<AstRawString*>(name), name->hash()) != nullptr &&
-          !IsAsyncFunction(function_kind) &&
-          !(allow_harmony_restrictive_generators &&
-            IsGeneratorFunction(function_kind));
+      SloppyBlockFunctionMap* map =
+          GetDeclarationScope()->sloppy_block_function_map();
+      duplicate_allowed = map != nullptr &&
+                          map->Lookup(const_cast<AstRawString*>(name),
+                                      name->hash()) != nullptr &&
+                          !IsAsyncFunction(function_kind) &&
+                          !(allow_harmony_restrictive_generators &&
+                            IsGeneratorFunction(function_kind));
     }
     if (duplicate_allowed) {
       *sloppy_mode_block_scope_function_redefinition = true;
@@ -1264,7 +1329,8 @@ Declaration* Scope::CheckLexDeclarationsConflictingWith(
   return nullptr;
 }

-void DeclarationScope::AllocateVariables(ParseInfo* info, AnalyzeMode mode) {
+void DeclarationScope::AllocateVariables(ParseInfo* info, Isolate* isolate,
+                                         AnalyzeMode mode) {
   // Module variables must be allocated before variable resolution
   // to ensure that AccessNeedsHoleCheck() can detect import variables.
   if (is_module_scope()) AsModuleScope()->AllocateModuleVariables();
@@ -1275,16 +1341,16 @@ void DeclarationScope::AllocateVariables(ParseInfo* info, AnalyzeMode mode) {
   MaybeHandle<ScopeInfo> outer_scope;
   if (outer_scope_ != nullptr) outer_scope = outer_scope_->scope_info_;

-  AllocateScopeInfosRecursively(info->isolate(), outer_scope);
+  AllocateScopeInfosRecursively(isolate, outer_scope);
   if (mode == AnalyzeMode::kDebugger) {
-    AllocateDebuggerScopeInfos(info->isolate(), outer_scope);
+    AllocateDebuggerScopeInfos(isolate, outer_scope);
   }
   // The debugger expects all shared function infos to contain a scope info.
   // Since the top-most scope will end up in a shared function info, make sure
   // it has one, even if it doesn't need a scope info.
   // TODO(jochen|yangguo): Remove this requirement.
   if (scope_info_.is_null()) {
-    scope_info_ = ScopeInfo::Create(info->isolate(), zone(), this, outer_scope);
+    scope_info_ = ScopeInfo::Create(isolate, zone(), this, outer_scope);
   }
 }
@@ -1439,12 +1505,12 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
   locals_.Clear();
   inner_scope_ = nullptr;
   unresolved_ = nullptr;
+  sloppy_block_function_map_ = nullptr;

   if (aborted) {
     // Prepare scope for use in the outer zone.
     zone_ = ast_value_factory->zone();
     variables_.Reset(ZoneAllocationPolicy(zone_));
-    sloppy_block_function_map_.Reset(ZoneAllocationPolicy(zone_));
     if (!IsArrowFunction(function_kind_)) {
       DeclareDefaultFunctionVariables(ast_value_factory);
     }
@@ -1452,7 +1518,6 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
     // Make sure this scope isn't used for allocation anymore.
     zone_ = nullptr;
     variables_.Invalidate();
-    sloppy_block_function_map_.Invalidate();
   }

 #ifdef DEBUG
@@ -1487,11 +1552,10 @@ void DeclarationScope::AnalyzePartially(
     arguments_ = nullptr;
   }

-  if (FLAG_preparser_scope_analysis) {
-    // Decide context allocation for the locals and parameters and store the
-    // info away.
-    AllocateVariablesRecursively();
-    CollectVariableData(preparsed_scope_data);
+  if (FLAG_preparser_scope_analysis && preparsed_scope_data->Producing()) {
+    // Store the information needed for allocating the locals of this scope
+    // and its inner scopes.
+    preparsed_scope_data->SaveData(this);
   }
 }

 #ifdef DEBUG
@@ -1564,7 +1628,7 @@ void PrintVar(int indent, Variable* var) {
     PrintF(".%p", reinterpret_cast<void*>(var));
   else
     PrintName(var->raw_name());
-  PrintF(";  // ");
+  PrintF(";  // (%p) ", reinterpret_cast<void*>(var));
   PrintLocation(var);
   bool comma = !var->IsUnallocated();
   if (var->has_forced_context_allocation()) {
@@ -1637,7 +1701,8 @@ void Scope::Print(int n) {
     function = AsDeclarationScope()->function_var();
   }

-  PrintF(" { // (%d, %d)\n", start_position(), end_position());
+  PrintF(" { // (%p) (%d, %d)\n", reinterpret_cast<void*>(this),
+         start_position(), end_position());
   if (is_hidden()) {
     Indent(n1, "// is hidden\n");
   }
@@ -2269,17 +2334,6 @@ void Scope::AllocateDebuggerScopeInfos(Isolate* isolate,
   }
 }

-void Scope::CollectVariableData(PreParsedScopeData* data) {
-  PreParsedScopeData::ScopeScope scope_scope(data, scope_type(),
-                                             start_position(), end_position());
-  for (Variable* local : locals_) {
-    scope_scope.MaybeAddVariable(local);
-  }
-  for (Scope* inner = inner_scope_; inner != nullptr; inner = inner->sibling_) {
-    inner->CollectVariableData(data);
-  }
-}
-
 int Scope::StackLocalCount() const {
   Variable* function =
       is_function_scope() ? AsDeclarationScope()->function_var() : nullptr;

77 deps/v8/src/ast/scopes.h

@@ -53,22 +53,27 @@ class SloppyBlockFunctionMap : public ZoneHashMap {
  public:
   class Delegate : public ZoneObject {
    public:
-    explicit Delegate(Scope* scope,
-                      SloppyBlockFunctionStatement* statement = nullptr)
-        : scope_(scope), statement_(statement), next_(nullptr) {}
+    Delegate(Scope* scope, SloppyBlockFunctionStatement* statement, int index)
+        : scope_(scope), statement_(statement), next_(nullptr), index_(index) {}
     void set_statement(Statement* statement);

     void set_next(Delegate* next) { next_ = next; }
     Delegate* next() const { return next_; }
     Scope* scope() const { return scope_; }
+    int index() const { return index_; }

    private:
     Scope* scope_;
     SloppyBlockFunctionStatement* statement_;
     Delegate* next_;
+    int index_;
   };

   explicit SloppyBlockFunctionMap(Zone* zone);
-  void Declare(Zone* zone, const AstRawString* name, Delegate* delegate);
+  void Declare(Zone* zone, const AstRawString* name, Scope* scope,
+               SloppyBlockFunctionStatement* statement);
+
+ private:
+  int count_;
 };

 enum class AnalyzeMode { kRegular, kDebugger };
@@ -112,6 +117,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
   class Snapshot final BASE_EMBEDDED {
    public:
     explicit Snapshot(Scope* scope);
+    ~Snapshot();

     void Reparent(DeclarationScope* new_parent) const;
@@ -121,12 +127,12 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
     VariableProxy* top_unresolved_;
     ThreadedList<Variable>::Iterator top_local_;
     ThreadedList<Declaration>::Iterator top_decl_;
+    const bool outer_scope_calls_eval_;
   };

   enum class DeserializationMode { kIncludingVariables, kScopesOnly };

-  static Scope* DeserializeScopeChain(Isolate* isolate, Zone* zone,
-                                      ScopeInfo* scope_info,
+  static Scope* DeserializeScopeChain(Zone* zone, ScopeInfo* scope_info,
                                       DeclarationScope* script_scope,
                                       AstValueFactory* ast_value_factory,
                                       DeserializationMode deserialization_mode);
@@ -146,12 +152,22 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
   // Assumes outer_scope_ is non-null.
   void ReplaceOuterScope(Scope* outer_scope);

-  // Propagates any eagerly-gathered scope usage flags (such as calls_eval())
-  // to the passed-in scope.
-  void PropagateUsageFlagsToScope(Scope* other);
-
   Zone* zone() const { return zone_; }

+  void SetMustUsePreParsedScopeData() {
+    if (must_use_preparsed_scope_data_) {
+      return;
+    }
+    must_use_preparsed_scope_data_ = true;
+    if (outer_scope_) {
+      outer_scope_->SetMustUsePreParsedScopeData();
+    }
+  }
+
+  bool must_use_preparsed_scope_data() const {
+    return must_use_preparsed_scope_data_;
+  }
+
   // ---------------------------------------------------------------------------
   // Declarations
@@ -357,10 +373,10 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
   // The scope immediately surrounding this scope, or NULL.
   Scope* outer_scope() const { return outer_scope_; }

-  const AstRawString* catch_variable_name() const {
+  Variable* catch_variable() const {
     DCHECK(is_catch_scope());
     DCHECK_EQ(1, num_var());
-    return static_cast<AstRawString*>(variables_.Start()->key);
+    return static_cast<Variable*>(variables_.Start()->value);
   }

   // ---------------------------------------------------------------------------
@@ -546,12 +562,15 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
   // Temporary workaround that allows masking of 'this' in debug-evalute scopes.
   bool is_debug_evaluate_scope_ : 1;

+  // True if one of the inner scopes or the scope itself calls eval.
   bool inner_scope_calls_eval_ : 1;
   bool force_context_allocation_ : 1;

   // True if it holds 'var' declarations.
   bool is_declaration_scope_ : 1;

+  bool must_use_preparsed_scope_data_ : 1;
+
   // Create a non-local variable with a given name.
   // These variables are looked up dynamically at runtime.
   Variable* NonLocal(const AstRawString* name, VariableMode mode);
@@ -590,14 +609,12 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
   void AllocateDebuggerScopeInfos(Isolate* isolate,
                                   MaybeHandle<ScopeInfo> outer_scope);

-  void CollectVariableData(PreParsedScopeData* data);
-
   // Construct a scope based on the scope info.
   Scope(Zone* zone, ScopeType type, Handle<ScopeInfo> scope_info);

   // Construct a catch scope with a binding for the name.
   Scope(Zone* zone, const AstRawString* catch_variable_name,
-        Handle<ScopeInfo> scope_info);
+        MaybeAssignedFlag maybe_assigned, Handle<ScopeInfo> scope_info);

   void AddInnerScope(Scope* inner_scope) {
     inner_scope->sibling_ = inner_scope_;
@@ -686,13 +703,14 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
   // Ignition without ScopeInfo.
   Variable* DeclareGeneratorObjectVar(const AstRawString* name);
   Variable* DeclarePromiseVar(const AstRawString* name);
+  Variable* DeclareAsyncGeneratorAwaitVar(const AstRawString* name);

   // Declare a parameter in this scope. When there are duplicated
   // parameters the rightmost one 'wins'. However, the implementation
   // expects all parameters to be declared and from left to right.
   Variable* DeclareParameter(const AstRawString* name, VariableMode mode,
                              bool is_optional, bool is_rest, bool* is_duplicate,
-                             AstValueFactory* ast_value_factory);
+                             AstValueFactory* ast_value_factory, int position);

   // Declares that a parameter with the name exists. Creates a Variable and
   // returns it if FLAG_preparser_scope_analysis is on.
@@ -738,9 +756,16 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
   Variable* promise_var() const {
     DCHECK(is_function_scope());
     DCHECK(IsAsyncFunction(function_kind_));
+    if (IsAsyncGeneratorFunction(function_kind_)) return nullptr;
     return GetRareVariable(RareVariable::kPromise);
   }

+  Variable* async_generator_await_var() const {
+    DCHECK(is_function_scope());
+    DCHECK(IsAsyncGeneratorFunction(function_kind_));
+    return GetRareVariable(RareVariable::kAsyncGeneratorAwaitResult);
+  }
+
   // Parameters. The left-most parameter has index 0.
   // Only valid for function and module scopes.
   Variable* parameter(int index) const {
@@ -805,13 +830,13 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
   void HoistSloppyBlockFunctions(AstNodeFactory* factory);

   SloppyBlockFunctionMap* sloppy_block_function_map() {
-    return &sloppy_block_function_map_;
+    return sloppy_block_function_map_;
   }

   // Compute top scope and allocate variables. For lazy compilation the top
   // scope only contains the single lazily compiled function, so this
   // doesn't re-allocate variables repeatedly.
-  static void Analyze(ParseInfo* info, AnalyzeMode mode);
+  static void Analyze(ParseInfo* info, Isolate* isolate, AnalyzeMode mode);

   // To be called during parsing. Do just enough scope analysis that we can
   // discard the Scope for lazily compiled functions. In particular, this
void ResetAfterPreparsing(AstValueFactory* ast_value_factory, bool aborted); void ResetAfterPreparsing(AstValueFactory* ast_value_factory, bool aborted);
bool is_skipped_function() const { return is_skipped_function_; }
void set_is_skipped_function(bool is_skipped_function) {
is_skipped_function_ = is_skipped_function;
}
private: private:
void AllocateParameter(Variable* var, int index); void AllocateParameter(Variable* var, int index);
@@ -859,7 +889,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
   // In the case of code compiled and run using 'eval', the context
   // parameter is the context in which eval was called. In all other
   // cases the context parameter is an empty handle.
-  void AllocateVariables(ParseInfo* info, AnalyzeMode mode);
+  void AllocateVariables(ParseInfo* info, Isolate* isolate, AnalyzeMode mode);

   void SetDefaults();
@@ -884,11 +914,12 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
 #if DEBUG
   bool is_being_lazily_parsed_ : 1;
 #endif
+  bool is_skipped_function_ : 1;

   // Parameter list in source order.
   ZoneList<Variable*> params_;
   // Map of function names to lists of functions defined in sloppy blocks
-  SloppyBlockFunctionMap sloppy_block_function_map_;
+  SloppyBlockFunctionMap* sloppy_block_function_map_;
   // Convenience variable.
   Variable* receiver_;
   // Function variable, if any; function scopes only.
@@ -912,7 +943,8 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
   enum class RareVariable {
     kThisFunction = offsetof(RareData, this_function),
     kGeneratorObject = offsetof(RareData, generator_object),
-    kPromise = offsetof(RareData, promise)
+    kPromise = offsetof(RareData, promise),
+    kAsyncGeneratorAwaitResult = kPromise
   };

   V8_INLINE RareData* EnsureRareData() {
@@ -950,8 +982,7 @@ class ModuleScope final : public DeclarationScope {
   // The generated ModuleDescriptor does not preserve all information. In
   // particular, its module_requests map will be empty because we no longer need
   // the map after parsing.
-  ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
-              AstValueFactory* ast_value_factory);
+  ModuleScope(Handle<ScopeInfo> scope_info, AstValueFactory* ast_value_factory);

   ModuleDescriptor* module() const {
     DCHECK_NOT_NULL(module_descriptor_);
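Note: the RareVariable enumerators above are byte offsets into RareData (via offsetof), which is what lets kAsyncGeneratorAwaitResult alias kPromise and share its storage slot. A self-contained sketch of that offset-keyed access (types here are illustrative, not V8's):

#include <cassert>
#include <cstddef>

struct Variable {};

struct RareData {
  Variable* this_function = nullptr;
  Variable* generator_object = nullptr;
  Variable* promise = nullptr;
};

enum class RareVariable : size_t {
  kThisFunction = offsetof(RareData, this_function),
  kGeneratorObject = offsetof(RareData, generator_object),
  kPromise = offsetof(RareData, promise),
  kAsyncGeneratorAwaitResult = kPromise  // alias: shares the promise slot
};

// Treat the enumerator as a byte offset into the struct.
Variable** SlotFor(RareData* data, RareVariable id) {
  char* base = reinterpret_cast<char*>(data);
  return reinterpret_cast<Variable**>(base + static_cast<size_t>(id));
}

int main() {
  RareData data;
  Variable v;
  *SlotFor(&data, RareVariable::kAsyncGeneratorAwaitResult) = &v;
  assert(data.promise == &v);  // the alias wrote the promise field
}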

6 deps/v8/src/ast/variables.h

@@ -100,6 +100,12 @@ class Variable final : public ZoneObject {
   int index() const { return index_; }

+  bool IsReceiver() const {
+    DCHECK(IsParameter());
+
+    return index_ == -1;
+  }
+
   bool IsExport() const {
     DCHECK_EQ(location(), VariableLocation::MODULE);
     DCHECK_NE(index(), 0);

9 deps/v8/src/background-parsing-task.cc

@@ -29,12 +29,11 @@ BackgroundParsingTask::BackgroundParsingTask(
   // Prepare the data for the internalization phase and compilation phase, which
   // will happen in the main thread after parsing.
   ParseInfo* info = new ParseInfo(isolate->allocator());
+  info->InitFromIsolate(isolate);
   info->set_toplevel();
   source->info.reset(info);
-  info->set_isolate(isolate);
   info->set_source_stream(source->source_stream.get());
   info->set_source_stream_encoding(source->encoding);
-  info->set_hash_seed(isolate->heap()->HashSeed());
   info->set_unicode_cache(&source_->unicode_cache);
   info->set_compile_options(options);
   info->set_allow_lazy_parsing();
@@ -58,11 +57,6 @@ void BackgroundParsingTask::Run() {
   uintptr_t stack_limit = GetCurrentStackPosition() - stack_size_ * KB;
   source_->parser->set_stack_limit(stack_limit);

-  // Nullify the Isolate temporarily so that the background parser doesn't
-  // accidentally use it.
-  Isolate* isolate = source_->info->isolate();
-  source_->info->set_isolate(nullptr);
-
   source_->parser->ParseOnBackground(source_->info.get());

   if (script_data_ != nullptr) {
@@ -73,7 +67,6 @@ void BackgroundParsingTask::Run() {
     delete script_data_;
     script_data_ = nullptr;
   }
-  source_->info->set_isolate(isolate);
 }

 }  // namespace internal
 }  // namespace v8
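Note: with InitFromIsolate above, everything the background parse needs is snapshotted out of the Isolate on the main thread, which is why the old set_isolate(nullptr)/restore dance in Run() disappears. A speculative, simplified sketch of the pattern; the types and fields here are stand-ins, not the actual ParseInfo API:

#include <cstdint>

struct Isolate {
  uint32_t HashSeed() const { return 0x5eed; }  // stand-in value
  int stack_limit() const { return 1 << 20; }
};

class ParseInfo {
 public:
  // Copy isolate-derived values once, on the main thread.
  void InitFromIsolate(const Isolate* isolate) {
    hash_seed_ = isolate->HashSeed();
    stack_limit_ = isolate->stack_limit();
  }
  uint32_t hash_seed() const { return hash_seed_; }

 private:
  uint32_t hash_seed_ = 0;
  int stack_limit_ = 0;
};

void ParseOnBackground(const ParseInfo* info) {
  // Only the snapshotted values are reachable here; there is no Isolate
  // pointer to misuse, so nothing needs to be nulled out and restored.
  (void)info->hash_seed();
}

int main() {
  Isolate isolate;
  ParseInfo info;
  info.InitFromIsolate(&isolate);  // main thread
  ParseOnBackground(&info);        // worker thread (conceptually)
}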

12 deps/v8/src/bailout-reason.h

@@ -58,6 +58,7 @@ namespace internal {
     "Encountered a do-expression with unmodelable control statements")       \
   V(kDoPushArgumentNotImplementedForDoubleType,                              \
     "DoPushArgument not implemented for double type")                        \
+  V(kDynamicImport, "Dynamic module import")                                 \
   V(kEliminatedBoundsCheckFailed, "Eliminated bounds check failed")          \
   V(kEmitLoadRegisterUnsupportedDoubleImmediate,                             \
     "EmitLoadRegister: Unsupported double immediate")                        \
@@ -119,8 +120,6 @@ namespace internal {
   V(kInvalidLhsInCountOperation, "Invalid lhs in count operation")           \
   V(kInvalidMinLength, "Invalid min_length")                                 \
   V(kInvalidRegisterFileInGenerator, "invalid register file in generator")   \
-  V(kJSObjectWithFastElementsMapHasSlowElements,                             \
-    "JSObject with fast elements map has slow elements")                     \
   V(kLetBindingReInitialization, "Let binding re-initialization")            \
   V(kLiveEdit, "LiveEdit")                                                   \
   V(kLookupVariableInCountOperation, "Lookup variable in count operation")   \
@@ -142,7 +141,6 @@ namespace internal {
     "Not enough virtual registers (regalloc)")                               \
   V(kObjectLiteralWithComplexProperty, "Object literal with complex property") \
   V(kOffsetOutOfRange, "Offset out of range")                                \
-  V(kOperandIsANumber, "Operand is a number")                                \
   V(kOperandIsASmiAndNotABoundFunction,                                      \
     "Operand is a smi and not a bound function")                             \
   V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function")    \
@@ -155,13 +153,10 @@ namespace internal {
   V(kOperandIsNotABoundFunction, "Operand is not a bound function")          \
   V(kOperandIsNotAFunction, "Operand is not a function")                     \
   V(kOperandIsNotAGeneratorObject, "Operand is not a generator object")      \
-  V(kOperandIsNotAName, "Operand is not a name")                             \
-  V(kOperandIsNotANumber, "Operand is not a number")                         \
   V(kOperandIsNotAReceiver, "Operand is not a receiver")                     \
   V(kOperandIsNotASmi, "Operand is not a smi")                               \
   V(kOperandIsNotAString, "Operand is not a string")                         \
   V(kOperandIsNotSmi, "Operand is not smi")                                  \
-  V(kOperandNotANumber, "Operand not a number")                              \
   V(kObjectTagged, "The object is tagged")                                   \
   V(kObjectNotTagged, "The object is not tagged")                            \
   V(kOptimizationDisabled, "Optimization disabled")                          \
@@ -237,8 +232,9 @@ namespace internal {
   V(kUnexpectedStackDepth, "Unexpected operand stack depth in full-codegen") \
   V(kUnexpectedStackPointer, "The stack pointer is not the expected value")  \
   V(kUnexpectedStringType, "Unexpected string type")                         \
-  V(kUnexpectedTypeForRegExpDataFixedArrayExpected,                          \
-    "Unexpected type for RegExp data, FixedArray expected")                  \
+  V(kUnexpectedTestTypeofLiteralFlag,                                        \
+    "Unexpected literal flag for TestTypeof bytecode")                       \
+  V(kUnexpectedRegExpExecCall, "Unexpected call to the RegExpExecStub")      \
   V(kUnexpectedValue, "Unexpected value")                                    \
   V(kUnsupportedDoubleImmediate, "Unsupported double immediate")             \
   V(kUnsupportedLetCompoundAssignment, "Unsupported let compound assignment") \

3 deps/v8/src/base/cpu.cc

@@ -596,7 +596,10 @@ CPU::CPU()
     CPUInfo cpu_info;
     char* cpu_model = cpu_info.ExtractField("cpu model");
     has_fpu_ = HasListItem(cpu_model, "FPU");
+    char* ASEs = cpu_info.ExtractField("ASEs implemented");
+    has_msa_ = HasListItem(ASEs, "msa");
     delete[] cpu_model;
+    delete[] ASEs;
 #ifdef V8_HOST_ARCH_MIPS
     is_fp64_mode_ = __detect_fp64_mode();
     architecture_ = __detect_mips_arch_revision();
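Note: the hunk above probes the "ASEs implemented" line of /proc/cpuinfo for the "msa" token to detect the MIPS SIMD extension. A standalone approximation of what CPUInfo::ExtractField and HasListItem do (simplified; the real parser handles field boundaries more carefully):

#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

// Returns the value part of a "field<whitespace>: value" line, or "".
std::string ExtractField(const std::string& field) {
  std::ifstream cpuinfo("/proc/cpuinfo");
  std::string line;
  while (std::getline(cpuinfo, line)) {
    if (line.compare(0, field.size(), field) == 0) {
      size_t colon = line.find(':');
      if (colon != std::string::npos) return line.substr(colon + 1);
    }
  }
  return "";
}

// True if `item` occurs as a whitespace-separated token in `list`.
bool HasListItem(const std::string& list, const std::string& item) {
  std::istringstream tokens(list);
  std::string token;
  while (tokens >> token)
    if (token == item) return true;
  return false;
}

int main() {
  bool has_msa = HasListItem(ExtractField("ASEs implemented"), "msa");
  std::cout << "msa: " << (has_msa ? "yes" : "no") << '\n';
}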

2 deps/v8/src/base/cpu.h

@@ -113,6 +113,7 @@ class V8_BASE_EXPORT CPU final {
   // mips features
   bool is_fp64_mode() const { return is_fp64_mode_; }
+  bool has_msa() const { return has_msa_; }

  private:
   char vendor_[13];
@@ -154,6 +155,7 @@ class V8_BASE_EXPORT CPU final {
   bool has_vfp3_d32_;
   bool is_fp64_mode_;
   bool has_non_stop_time_stamp_counter_;
+  bool has_msa_;
 };

 }  // namespace base

2 deps/v8/src/base/debug/stack_trace.h

@@ -38,7 +38,7 @@ V8_BASE_EXPORT void DisableSignalStackDump();
 // A stacktrace can be helpful in debugging. For example, you can include a
 // stacktrace member in a object (probably around #ifndef NDEBUG) so that you
 // can later see where the given object was created from.
-class StackTrace {
+class V8_BASE_EXPORT StackTrace {
  public:
   // Creates a stacktrace from the current location.
   StackTrace();

7 deps/v8/src/base/iterator.h

@@ -26,9 +26,10 @@ class iterator_range {
       typename std::iterator_traits<iterator>::difference_type difference_type;

   iterator_range() : begin_(), end_() {}
-  template <typename ForwardIterator2>
-  iterator_range(ForwardIterator2 const& begin, ForwardIterator2 const& end)
-      : begin_(begin), end_(end) {}
+  template <typename ForwardIterator1, typename ForwardIterator2>
+  iterator_range(ForwardIterator1&& begin, ForwardIterator2&& end)
+      : begin_(std::forward<ForwardIterator1>(begin)),
+        end_(std::forward<ForwardIterator2>(end)) {}

   iterator begin() { return begin_; }
   iterator end() { return end_; }
16
deps/v8/src/base/logging.cc

@@ -4,6 +4,7 @@

 #include "src/base/logging.h"

+#include <cstdarg>
 #include <cstdio>
 #include <cstdlib>

@@ -13,6 +14,16 @@
 namespace v8 {
 namespace base {

+namespace {
+
+void (*g_print_stack_trace)() = nullptr;
+
+}  // namespace
+
+void SetPrintStackTrace(void (*print_stack_trace)()) {
+  g_print_stack_trace = print_stack_trace;
+}
+
 // Explicit instantiations for commonly used comparisons.
 #define DEFINE_MAKE_CHECK_OP_STRING(type) \
   template std::string* MakeCheckOpString<type, type>(type, type, char const*);
@@ -57,11 +68,8 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
   va_end(arguments);
   v8::base::OS::PrintError("\n#\n");

-  v8::base::debug::StackTrace trace;
-  trace.Print();
+  if (v8::base::g_print_stack_trace) v8::base::g_print_stack_trace();

   fflush(stderr);
-  // Avoid dumping stack trace on abort signal.
-  v8::base::debug::DisableSignalStackDump();
   v8::base::OS::Abort();
 }
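Note: V8_Fatal above no longer hard-wires the stack-trace printer; it calls through an injectable function pointer that is set via SetPrintStackTrace, so the base library need not depend on the stack-trace code. A standalone sketch of this hook pattern (Fatal here is a stand-in, not V8's V8_Fatal):

#include <cstdio>
#include <cstdlib>

namespace {
void (*g_print_stack_trace)() = nullptr;
}  // namespace

void SetPrintStackTrace(void (*print_stack_trace)()) {
  g_print_stack_trace = print_stack_trace;
}

void Fatal(const char* message) {
  std::fprintf(stderr, "#\n# Fatal error: %s\n#\n", message);
  // Only print a trace if someone installed a printer; by default the
  // fatal path has no stack-trace dependency at all.
  if (g_print_stack_trace) g_print_stack_trace();
  std::fflush(stderr);
  std::abort();
}

int main() {
  SetPrintStackTrace([] { std::fprintf(stderr, "<stack trace here>\n"); });
  // Fatal("example");  // would print the trace and abort
}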

3 deps/v8/src/base/logging.h

@@ -37,6 +37,9 @@ extern "C" PRINTF_FORMAT(3, 4) V8_NORETURN V8_BASE_EXPORT
 namespace v8 {
 namespace base {

+// Overwrite the default function that prints a stack trace.
+V8_BASE_EXPORT void SetPrintStackTrace(void (*print_stack_trace_)());
+
 // CHECK dies with a fatal error if condition is not true. It is *not*
 // controlled by DEBUG, so the check will be executed regardless of
 // compilation mode.

2 deps/v8/src/base/platform/mutex.cc

@@ -104,7 +104,7 @@ static V8_INLINE void UnlockNativeHandle(PCRITICAL_SECTION cs) {

 static V8_INLINE bool TryLockNativeHandle(PCRITICAL_SECTION cs) {
-  return TryEnterCriticalSection(cs);
+  return TryEnterCriticalSection(cs) != FALSE;
 }

 #endif  // V8_OS_POSIX
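Note: TryEnterCriticalSection returns Win32's BOOL (an int typedef), so the added "!= FALSE" converts it to a genuine bool explicitly, which is the usual way to avoid MSVC's int-to-bool truncation warning (C4800). A tiny sketch with the Win32 types mimicked locally (these are stand-ins, not windows.h):

typedef int BOOL;   // stand-in for Win32's BOOL
#define FALSE 0     // stand-in for Win32's FALSE

BOOL TryEnterCriticalSectionStub() { return 1; }  // pretend the lock is free

bool TryLock() {
  // Explicit comparison yields a real bool, with no narrowing conversion.
  return TryEnterCriticalSectionStub() != FALSE;
}

int main() { return TryLock() ? 0 : 1; }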

15 deps/v8/src/base/platform/platform-aix.cc

@@ -29,9 +29,9 @@
 #undef MAP_TYPE

 #include "src/base/macros.h"
+#include "src/base/platform/platform-posix.h"
 #include "src/base/platform/platform.h"

 namespace v8 {
 namespace base {

@@ -42,8 +42,15 @@ static inline void* mmapHelper(size_t len, int prot, int flags, int fildes,
   return mmap(addr, len, prot, flags, fildes, off);
 }

-const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+class AIXTimezoneCache : public PosixTimezoneCache {
+  const char* LocalTimezone(double time) override;
+  double LocalTimeOffset() override;
+  ~AIXTimezoneCache() override {}
+};
+
+const char* AIXTimezoneCache::LocalTimezone(double time) {
   if (std::isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time / msPerSecond));
   struct tm tm;
@@ -52,8 +59,7 @@ const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
   return tzname[0];  // The location of the timezone string on AIX.
 }

-
-double OS::LocalTimeOffset(TimezoneCache* cache) {
+double AIXTimezoneCache::LocalTimeOffset() {
   // On AIX, struct tm does not contain a tm_gmtoff field.
   time_t utc = time(NULL);
   DCHECK(utc != -1);
@@ -63,6 +69,7 @@ double OS::LocalTimeOffset(TimezoneCache* cache) {
   return static_cast<double>((mktime(loc) - utc) * msPerSecond);
 }

+TimezoneCache* OS::CreateTimezoneCache() { return new AIXTimezoneCache(); }
+
 void* OS::Allocate(const size_t requested, size_t* allocated, bool executable) {
   const size_t msize = RoundUp(requested, getpagesize());

13 deps/v8/src/base/platform/platform-cygwin.cc

@@ -19,14 +19,22 @@
 #undef MAP_TYPE

 #include "src/base/macros.h"
+#include "src/base/platform/platform-posix.h"
 #include "src/base/platform/platform.h"
 #include "src/base/win32-headers.h"

 namespace v8 {
 namespace base {

-const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+class CygwinTimezoneCache : public PosixTimezoneCache {
+  const char* LocalTimezone(double time) override;
+  double LocalTimeOffset() override;
+  ~CygwinTimezoneCache() override {}
+};
+
+const char* CygwinTimezoneCache::LocalTimezone(double time) {
   if (std::isnan(time)) return "";
   time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
   struct tm tm;
@@ -35,8 +43,7 @@ const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
   return tzname[0];  // The location of the timezone string on Cygwin.
 }

-
-double OS::LocalTimeOffset(TimezoneCache* cache) {
+double CygwinTimezoneCache::LocalTimeOffset() {
   // On Cygwin, struct tm does not contain a tm_gmtoff field.
   time_t utc = time(NULL);
   DCHECK(utc != -1);
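Note: the AIX, Cygwin, and FreeBSD hunks here all follow the same refactoring: shared POSIX timezone logic moves into PosixTimezoneCache, and each platform keeps a thin subclass overriding only what differs (or, like FreeBSD below, uses the base class directly). A compilable sketch of the shape (interfaces here are illustrative, not V8's exact ones):

class TimezoneCache {
 public:
  virtual const char* LocalTimezone(double time) = 0;
  virtual double LocalTimeOffset() = 0;
  virtual ~TimezoneCache() {}
};

// Common POSIX behaviour lives in one place...
class PosixTimezoneCache : public TimezoneCache {
 public:
  const char* LocalTimezone(double time) override { return "UTC"; }
  double LocalTimeOffset() override { return 0.0; }
};

// ...and a platform overrides only the pieces that differ
// (cf. AIXTimezoneCache / CygwinTimezoneCache above).
class CygwinLikeTimezoneCache : public PosixTimezoneCache {
 public:
  double LocalTimeOffset() override {
    // Platform-specific computation would go here.
    return -3600.0 * 1000;  // example: UTC+1, in milliseconds
  }
};

TimezoneCache* CreateTimezoneCache() { return new CygwinLikeTimezoneCache(); }

int main() {
  TimezoneCache* cache = CreateTimezoneCache();
  double offset = cache->LocalTimeOffset();
  delete cache;
  return offset != 0 ? 0 : 1;
}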

23 deps/v8/src/base/platform/platform-freebsd.cc

@@ -29,32 +29,13 @@
 #undef MAP_TYPE

 #include "src/base/macros.h"
+#include "src/base/platform/platform-posix.h"
 #include "src/base/platform/platform.h"

 namespace v8 {
 namespace base {

-const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
-  if (std::isnan(time)) return "";
-  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
-  struct tm tm;
-  struct tm* t = localtime_r(&tv, &tm);
-  if (NULL == t) return "";
-  return t->tm_zone;
-}
-
-double OS::LocalTimeOffset(TimezoneCache* cache) {
-  time_t tv = time(NULL);
-  struct tm tm;
-  struct tm* t = localtime_r(&tv, &tm);
-  // tm_gmtoff includes any daylight savings offset, so subtract it.
-  return static_cast<double>(t->tm_gmtoff * msPerSecond -
-                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
+TimezoneCache* OS::CreateTimezoneCache() { return new PosixTimezoneCache(); }

 void* OS::Allocate(const size_t requested,
                    size_t* allocated,

Some files were not shown because too many files changed in this diff
