
deps: update V8 to 5.9.211.32

PR-URL: https://github.com/nodejs/node/pull/13263
Reviewed-By: Gibson Fahnestock <gibfahn@gmail.com>
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Branch: v6
Michaël Zasso, 8 years ago
Commit 3dc8c3bed4

Changed files (lines changed in parentheses):

1. deps/v8/.gitignore (46)
2. deps/v8/.gn (6)
3. deps/v8/AUTHORS (3)
4. deps/v8/BUILD.gn (444)
5. deps/v8/ChangeLog (1145)
6. deps/v8/DEPS (20)
7. deps/v8/OWNERS (1)
8. deps/v8/PRESUBMIT.py (28)
9. deps/v8/gni/isolate.gni (7)
10. deps/v8/gni/v8.gni (6)
11. deps/v8/gypfiles/all.gyp (12)
12. deps/v8/gypfiles/features.gypi (3)
13. deps/v8/gypfiles/isolate.gypi (1)
14. deps/v8/gypfiles/standalone.gypi (25)
15. deps/v8/gypfiles/toolchain.gypi (1)
16. deps/v8/include/libplatform/libplatform.h (8)
17. deps/v8/include/v8-debug.h (43)
18. deps/v8/include/v8-experimental.h (58)
19. deps/v8/include/v8-inspector.h (2)
20. deps/v8/include/v8-platform.h (8)
21. deps/v8/include/v8-profiler.h (5)
22. deps/v8/include/v8-version.h (6)
23. deps/v8/include/v8.h (646)
24. deps/v8/infra/config/cq.cfg (6)
25. deps/v8/infra/mb/mb_config.pyl (69)
26. deps/v8/src/DEPS (3)
27. deps/v8/src/OWNERS (1)
28. deps/v8/src/api-experimental.cc (139)
29. deps/v8/src/api-experimental.h (28)
30. deps/v8/src/api-natives.cc (42)
31. deps/v8/src/api.cc (791)
32. deps/v8/src/api.h (15)
33. deps/v8/src/arm/assembler-arm-inl.h (67)
34. deps/v8/src/arm/assembler-arm.cc (576)
35. deps/v8/src/arm/assembler-arm.h (503)
36. deps/v8/src/arm/code-stubs-arm.cc (406)
37. deps/v8/src/arm/code-stubs-arm.h (4)
38. deps/v8/src/arm/codegen-arm.cc (14)
39. deps/v8/src/arm/deoptimizer-arm.cc (40)
40. deps/v8/src/arm/disasm-arm.cc (162)
41. deps/v8/src/arm/interface-descriptors-arm.cc (57)
42. deps/v8/src/arm/macro-assembler-arm.cc (217)
43. deps/v8/src/arm/macro-assembler-arm.h (102)
44. deps/v8/src/arm/simulator-arm.cc (1188)
45. deps/v8/src/arm/simulator-arm.h (8)
46. deps/v8/src/arm64/assembler-arm64-inl.h (55)
47. deps/v8/src/arm64/assembler-arm64.cc (63)
48. deps/v8/src/arm64/assembler-arm64.h (170)
49. deps/v8/src/arm64/code-stubs-arm64.cc (487)
50. deps/v8/src/arm64/code-stubs-arm64.h (40)
51. deps/v8/src/arm64/codegen-arm64.cc (2)
52. deps/v8/src/arm64/constants-arm64.h (11)
53. deps/v8/src/arm64/deoptimizer-arm64.cc (53)
54. deps/v8/src/arm64/disasm-arm64.cc (11)
55. deps/v8/src/arm64/eh-frame-arm64.cc (1)
56. deps/v8/src/arm64/instructions-arm64.cc (21)
57. deps/v8/src/arm64/instructions-arm64.h (10)
58. deps/v8/src/arm64/instrument-arm64.cc (4)
59. deps/v8/src/arm64/interface-descriptors-arm64.cc (56)
60. deps/v8/src/arm64/macro-assembler-arm64-inl.h (27)
61. deps/v8/src/arm64/macro-assembler-arm64.cc (176)
62. deps/v8/src/arm64/macro-assembler-arm64.h (93)
63. deps/v8/src/arm64/simulator-arm64.cc (314)
64. deps/v8/src/arm64/simulator-arm64.h (91)
65. deps/v8/src/asmjs/asm-js.cc (82)
66. deps/v8/src/asmjs/asm-names.h (110)
67. deps/v8/src/asmjs/asm-parser.cc (2449)
68. deps/v8/src/asmjs/asm-parser.h (316)
69. deps/v8/src/asmjs/asm-scanner.cc (431)
70. deps/v8/src/asmjs/asm-scanner.h (165)
71. deps/v8/src/asmjs/asm-wasm-builder.cc (12)
72. deps/v8/src/assembler.cc (120)
73. deps/v8/src/assembler.h (81)
74. deps/v8/src/ast/ast-expression-rewriter.cc (9)
75. deps/v8/src/ast/ast-numbering.cc (73)
76. deps/v8/src/ast/ast-numbering.h (30)
77. deps/v8/src/ast/ast-traversal-visitor.h (9)
78. deps/v8/src/ast/ast-types.cc (4)
79. deps/v8/src/ast/ast-value-factory.cc (84)
80. deps/v8/src/ast/ast-value-factory.h (211)
81. deps/v8/src/ast/ast.cc (64)
82. deps/v8/src/ast/ast.h (173)
83. deps/v8/src/ast/context-slot-cache.h (2)
84. deps/v8/src/ast/modules.h (3)
85. deps/v8/src/ast/prettyprinter.cc (73)
86. deps/v8/src/ast/scopes.cc (252)
87. deps/v8/src/ast/scopes.h (77)
88. deps/v8/src/ast/variables.h (6)
89. deps/v8/src/background-parsing-task.cc (9)
90. deps/v8/src/bailout-reason.h (12)
91. deps/v8/src/base/cpu.cc (3)
92. deps/v8/src/base/cpu.h (2)
93. deps/v8/src/base/debug/stack_trace.h (2)
94. deps/v8/src/base/iterator.h (7)
95. deps/v8/src/base/logging.cc (16)
96. deps/v8/src/base/logging.h (3)
97. deps/v8/src/base/platform/mutex.cc (2)
98. deps/v8/src/base/platform/platform-aix.cc (15)
99. deps/v8/src/base/platform/platform-cygwin.cc (13)
100. deps/v8/src/base/platform/platform-freebsd.cc (23)

deps/v8/.gitignore (46 lines changed)

@@ -1,3 +1,5 @@
#*#
*.Makefile
*.a
*.exe
*.idb
@@ -18,9 +20,9 @@
*.vcxproj
*.vcxproj.filters
*.xcodeproj
#*#
*~
.#*
.*.sw?
.cpplint-cache
.cproject
.d8_history
@@ -30,26 +32,23 @@
.project
.pydevproject
.settings
.*.sw?
bsuite
compile_commands.json
d8
d8_g
gccauses
gcsuspects
shell
shell_g
/_*
/build
/gypfiles/win_toolchain.json
/buildtools
/gypfiles/win_toolchain.json
/hydrogen.cfg
/obj
/out
/out.gn
/perf.data
/perf.data.old
/src/inspector/build/closure-compiler
/src/inspector/build/closure-compiler.tar.gz
/test/benchmarks/data
/test/fuzzer/wasm
/test/fuzzer/wasm.tar.gz
/test/fuzzer/wasm_asmjs
/test/fuzzer/wasm_asmjs.tar.gz
/test/mozilla/data
/test/promises-aplus/promises-tests
/test/promises-aplus/promises-tests.tar.gz
@@ -57,6 +56,7 @@ shell_g
/test/test262/data
/test/test262/data.tar
/test/test262/harness
/test/wasm-js
/testing/gmock
/testing/gtest/*
!/testing/gtest/include
@@ -81,26 +81,26 @@ shell_g
/tools/swarming_client
/tools/visual_studio/Debug
/tools/visual_studio/Release
/test/fuzzer/wasm
/test/fuzzer/wasm_asmjs
/v8.log.ll
/xcodebuild
TAGS
*.Makefile
GTAGS
GPATH
GRTAGS
GSYMS
GPATH
tags
GTAGS
TAGS
bsuite
compile_commands.json
d8
d8_g
gccauses
gcsuspects
gtags.files
shell
shell_g
tags
turbo*.cfg
turbo*.dot
turbo*.json
v8.ignition_dispatches_table.json
/test/fuzzer/wasm.tar.gz
/test/fuzzer/wasm_asmjs.tar.gz
/src/inspector/build/closure-compiler.tar.gz
/src/inspector/build/closure-compiler
/test/wasm-js
!/third_party/jinja2
!/third_party/markupsafe

deps/v8/.gn (6 lines changed)

@@ -21,5 +21,7 @@ check_targets = []
# This is the list of GN files that run exec_script. This whitelist exists
# to force additional review for new uses of exec_script, which is strongly
# discouraged except for gypi_to_gn calls.
exec_script_whitelist =
build_dotfile_settings.exec_script_whitelist + [ "//test/test262/BUILD.gn" ]
exec_script_whitelist = build_dotfile_settings.exec_script_whitelist + [
"//test/test262/BUILD.gn",
"//BUILD.gn",
]

deps/v8/AUTHORS (3 lines changed)

@@ -1,4 +1,4 @@
# Below is a list of people and organizations that have contributed
# Below is a list of people and organizations that have contributed
# to the V8 project. Names should be added to the list like so:
#
# Name/Organization <email address>
@@ -82,6 +82,7 @@ JunHo Seo <sejunho@gmail.com>
Kang-Hao (Kenny) Lu <kennyluck@csail.mit.edu>
Karl Skomski <karl@skomski.com>
Kevin Gibbons <bakkot@gmail.com>
Loo Rong Jie <loorongjie@gmail.com>
Luis Reis <luis.m.reis@gmail.com>
Luke Zarko <lukezarko@gmail.com>
Maciej Małecki <me@mmalecki.com>

deps/v8/BUILD.gn (444 lines changed)

@@ -20,6 +20,12 @@ declare_args() {
# Print to stdout on Android.
v8_android_log_stdout = false
# Sets -DV8_ENABLE_FUTURE.
v8_enable_future = false
# Sets -DV8_DISABLE_TURBO.
v8_disable_turbo = false
# Sets -DVERIFY_HEAP.
v8_enable_verify_heap = ""
@@ -69,6 +75,9 @@ declare_args() {
# Sets -dV8_ENABLE_CHECKS.
v8_enable_v8_checks = ""
# Builds the snapshot with --trace-ignition
v8_trace_ignition = false
# With post mortem support enabled, metadata is embedded into libv8 that
# describes various parameters of the VM for use by debuggers. See
# tools/gen-postmortem-metadata.py for details.
@@ -101,6 +110,19 @@ declare_args() {
v8_enable_gdbjit = ((v8_current_cpu == "x86" || v8_current_cpu == "x64" ||
v8_current_cpu == "x87") && (is_linux || is_mac)) ||
(v8_current_cpu == "ppc64" && is_linux)
# Set v8_host_byteorder
v8_host_byteorder = "little"
# ppc64 can be either BE or LE
if (host_cpu == "ppc64") {
v8_host_byteorder =
exec_script("//tools/get_byteorder.py", [], "trim string")
}
if (host_cpu == "ppc" || host_cpu == "s390" || host_cpu == "s390x" ||
host_cpu == "mips" || host_cpu == "mips64") {
v8_host_byteorder = "big"
}
}
# Derived defaults.
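
The tools/get_byteorder.py script invoked above via exec_script is not shown in this diff; it simply reports the host's byte order as "little" or "big". A rough C++ equivalent of the probe it performs (a sketch under that assumption, not the actual script):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const std::uint16_t probe = 1;
      // If the low-order byte is stored first in memory, the host is
      // little-endian; otherwise report big-endian.
      const bool little =
          *reinterpret_cast<const std::uint8_t*>(&probe) == 1;
      std::printf("%s\n", little ? "little" : "big");
      return 0;
    }
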
@@ -125,7 +147,6 @@ if (v8_enable_v8_checks == "") {
# snapshots.
is_target_simulator = target_cpu != v8_target_cpu
v8_generated_peephole_source = "$target_gen_dir/bytecode-peephole-table.cc"
v8_random_seed = "314159265"
v8_toolset_for_shell = "host"
@@ -178,10 +199,10 @@ config("external_config") {
if (is_component_build) {
defines = [ "USING_V8_SHARED" ]
}
include_dirs = [ "include" ]
if (v8_enable_inspector) {
include_dirs += [ "$target_gen_dir/include" ]
}
include_dirs = [
"include",
"$target_gen_dir/include",
]
}
# This config should only be applied to code that needs to be explicitly
@@ -204,6 +225,12 @@ config("features") {
defines +=
[ "V8_PROMISE_INTERNAL_FIELD_COUNT=${v8_promise_internal_field_count}" ]
}
if (v8_enable_future) {
defines += [ "V8_ENABLE_FUTURE" ]
}
if (v8_disable_turbo) {
defines += [ "V8_DISABLE_TURBO" ]
}
if (v8_enable_gdbjit) {
defines += [ "ENABLE_GDB_JIT_INTERFACE" ]
}
@@ -240,6 +267,9 @@ config("features") {
if (v8_enable_handle_zapping) {
defines += [ "ENABLE_HANDLE_ZAPPING" ]
}
if (v8_use_snapshot) {
defines += [ "V8_USE_SNAPSHOT" ]
}
if (v8_use_external_startup_data) {
defines += [ "V8_USE_EXTERNAL_STARTUP_DATA" ]
}
@@ -356,8 +386,31 @@ config("toolchain") {
if (v8_current_cpu == "s390x") {
defines += [ "V8_TARGET_ARCH_S390X" ]
}
if (host_cpu == "x64" || host_cpu == "x86") {
if (v8_host_byteorder == "little") {
defines += [ "V8_TARGET_ARCH_S390_LE_SIM" ]
} else {
cflags += [ "-march=z196" ]
}
}
if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
defines += [ "V8_TARGET_ARCH_PPC" ]
if (v8_current_cpu == "ppc64") {
defines += [ "V8_TARGET_ARCH_PPC64" ]
}
if (v8_host_byteorder == "little") {
defines += [ "V8_TARGET_ARCH_PPC_LE" ]
} else if (v8_host_byteorder == "big") {
defines += [ "V8_TARGET_ARCH_PPC_BE" ]
if (current_os == "aix") {
cflags += [
# Work around AIX ceil, trunc and round oddities.
"-mcpu=power5+",
"-mfprnd",
# Work around AIX assembler popcntb bug.
"-mno-popcntb",
]
}
}
}
if (v8_current_cpu == "x86") {
@@ -414,10 +467,25 @@ config("toolchain") {
# TODO(hans): Remove once http://crbug.com/428099 is resolved.
"-Winconsistent-missing-override",
]
#if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
# v8_current_cpu == "mips64el") {
# cflags += [ "-Wshorten-64-to-32" ]
#}
if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
v8_current_cpu == "mips64el") {
cflags += [ "-Wshorten-64-to-32" ]
}
}
if (is_win) {
cflags += [
"/wd4245", # Conversion with signed/unsigned mismatch.
"/wd4267", # Conversion with possible loss of data.
"/wd4324", # Padding structure due to alignment.
"/wd4701", # Potentially uninitialized local variable.
"/wd4702", # Unreachable code.
"/wd4703", # Potentially uninitialized local pointer variable.
"/wd4709", # Comma operator within array index expr (bugged).
"/wd4714", # Function marked forceinline not inlined.
"/wd4718", # Recursive call has no side-effect.
"/wd4800", # Forcing value to bool.
]
}
}
@@ -445,7 +513,6 @@ action("js2c") {
"src/js/v8natives.js",
"src/js/array.js",
"src/js/string.js",
"src/js/arraybuffer.js",
"src/js/typedarray.js",
"src/js/collection.js",
"src/js/weak-collection.js",
@@ -483,43 +550,6 @@ action("js2c") {
}
}
action("js2c_experimental") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
script = "tools/js2c.py"
# The script depends on this other script, this rule causes a rebuild if it
# changes.
inputs = [
"tools/jsmin.py",
]
# NOSORT
sources = [
"src/js/macros.py",
"src/messages.h",
"src/js/harmony-atomics.js",
]
outputs = [
"$target_gen_dir/experimental-libraries.cc",
]
args = [
rebase_path("$target_gen_dir/experimental-libraries.cc",
root_build_dir),
"EXPERIMENTAL",
] + rebase_path(sources, root_build_dir)
if (v8_use_external_startup_data) {
outputs += [ "$target_gen_dir/libraries_experimental.bin" ]
args += [
"--startup_blob",
rebase_path("$target_gen_dir/libraries_experimental.bin", root_build_dir),
]
}
}
action("js2c_extras") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
@@ -630,7 +660,6 @@ if (v8_use_external_startup_data) {
deps = [
":js2c",
":js2c_experimental",
":js2c_experimental_extras",
":js2c_extras",
]
@@ -638,7 +667,6 @@ if (v8_use_external_startup_data) {
# NOSORT
sources = [
"$target_gen_dir/libraries.bin",
"$target_gen_dir/libraries_experimental.bin",
"$target_gen_dir/libraries_extras.bin",
"$target_gen_dir/libraries_experimental_extras.bin",
]
@@ -714,6 +742,10 @@ action("run_mksnapshot") {
]
}
if (v8_trace_ignition) {
args += [ "--trace-ignition" ]
}
if (v8_use_external_startup_data) {
outputs += [ "$root_out_dir/snapshot_blob.bin" ]
args += [
@@ -728,29 +760,6 @@
}
}
action("run_mkpeephole") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
":mkpeephole($v8_snapshot_toolchain)",
]
outputs = [
v8_generated_peephole_source,
]
sources = []
script = "tools/run.py"
args = [
"./" + rebase_path(get_label_info(":mkpeephole($v8_snapshot_toolchain)",
"root_out_dir") + "/mkpeephole",
root_build_dir),
rebase_path(v8_generated_peephole_source, root_build_dir),
]
}
action("v8_dump_build_config") {
script = "tools/testrunner/utils/dump_build_config.py"
outputs = [
@@ -769,7 +778,6 @@ action("v8_dump_build_config") {
"target_cpu=\"$target_cpu\"",
"v8_current_cpu=\"$v8_current_cpu\"",
"v8_enable_i18n_support=$v8_enable_i18n_support",
"v8_enable_inspector=$v8_enable_inspector",
"v8_target_cpu=\"$v8_target_cpu\"",
"v8_use_snapshot=$v8_use_snapshot",
]
@@ -791,6 +799,7 @@ source_set("v8_maybe_snapshot") {
} else {
# Ignore v8_use_external_startup_data setting if no snapshot is used.
public_deps = [
":v8_builtins_setup",
":v8_nosnapshot",
]
}
@@ -801,7 +810,6 @@ v8_source_set("v8_nosnapshot") {
deps = [
":js2c",
":js2c_experimental",
":js2c_experimental_extras",
":js2c_extras",
":v8_base",
@@ -809,7 +817,6 @@ v8_source_set("v8_nosnapshot") {
sources = [
"$target_gen_dir/experimental-extras-libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
"$target_gen_dir/libraries.cc",
"src/snapshot/snapshot-empty.cc",
@@ -828,7 +835,6 @@ v8_source_set("v8_snapshot") {
deps = [
":js2c",
":js2c_experimental",
":js2c_experimental_extras",
":js2c_extras",
":v8_base",
@@ -841,10 +847,10 @@ v8_source_set("v8_snapshot") {
sources = [
"$target_gen_dir/experimental-extras-libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
"$target_gen_dir/libraries.cc",
"$target_gen_dir/snapshot.cc",
"src/setup-isolate-deserialize.cc",
]
configs = [ ":internal_config" ]
@@ -856,7 +862,6 @@ if (v8_use_external_startup_data) {
deps = [
":js2c",
":js2c_experimental",
":js2c_experimental_extras",
":js2c_extras",
":v8_base",
@@ -867,6 +872,7 @@ if (v8_use_external_startup_data) {
]
sources = [
"src/setup-isolate-deserialize.cc",
"src/snapshot/natives-external.cc",
"src/snapshot/snapshot-external.cc",
]
@@ -875,6 +881,138 @@ if (v8_use_external_startup_data) {
}
}
v8_source_set("v8_builtins_generators") {
visibility = [
":*",
"test/cctest:*",
"test/unittests:*",
]
deps = [
":v8_base",
]
sources = [
### gcmole(all) ###
"src/builtins/builtins-arguments-gen.cc",
"src/builtins/builtins-arguments-gen.h",
"src/builtins/builtins-array-gen.cc",
"src/builtins/builtins-async-function-gen.cc",
"src/builtins/builtins-async-gen.cc",
"src/builtins/builtins-async-gen.h",
"src/builtins/builtins-async-generator-gen.cc",
"src/builtins/builtins-async-iterator-gen.cc",
"src/builtins/builtins-boolean-gen.cc",
"src/builtins/builtins-call-gen.cc",
"src/builtins/builtins-constructor-gen.cc",
"src/builtins/builtins-constructor-gen.h",
"src/builtins/builtins-constructor.h",
"src/builtins/builtins-conversion-gen.cc",
"src/builtins/builtins-date-gen.cc",
"src/builtins/builtins-forin-gen.cc",
"src/builtins/builtins-forin-gen.h",
"src/builtins/builtins-function-gen.cc",
"src/builtins/builtins-generator-gen.cc",
"src/builtins/builtins-global-gen.cc",
"src/builtins/builtins-handler-gen.cc",
"src/builtins/builtins-ic-gen.cc",
"src/builtins/builtins-internal-gen.cc",
"src/builtins/builtins-interpreter-gen.cc",
"src/builtins/builtins-math-gen.cc",
"src/builtins/builtins-number-gen.cc",
"src/builtins/builtins-object-gen.cc",
"src/builtins/builtins-promise-gen.cc",
"src/builtins/builtins-promise-gen.h",
"src/builtins/builtins-regexp-gen.cc",
"src/builtins/builtins-regexp-gen.h",
"src/builtins/builtins-sharedarraybuffer-gen.cc",
"src/builtins/builtins-string-gen.cc",
"src/builtins/builtins-symbol-gen.cc",
"src/builtins/builtins-typedarray-gen.cc",
"src/builtins/builtins-utils-gen.h",
"src/builtins/builtins-wasm-gen.cc",
"src/builtins/setup-builtins-internal.cc",
"src/ic/accessor-assembler.cc",
"src/ic/accessor-assembler.h",
"src/ic/binary-op-assembler.cc",
"src/ic/binary-op-assembler.h",
"src/ic/keyed-store-generic.cc",
"src/ic/keyed-store-generic.h",
"src/interpreter/interpreter-assembler.cc",
"src/interpreter/interpreter-assembler.h",
"src/interpreter/interpreter-generator.cc",
"src/interpreter/interpreter-generator.h",
"src/interpreter/interpreter-intrinsics-generator.cc",
"src/interpreter/interpreter-intrinsics-generator.h",
"src/interpreter/setup-interpreter-internal.cc",
"src/interpreter/setup-interpreter.h",
]
if (v8_current_cpu == "x86") {
sources += [
### gcmole(arch:ia32) ###
"src/builtins/ia32/builtins-ia32.cc",
]
} else if (v8_current_cpu == "x64") {
sources += [
### gcmole(arch:x64) ###
"src/builtins/x64/builtins-x64.cc",
]
} else if (v8_current_cpu == "arm") {
sources += [
### gcmole(arch:arm) ###
"src/builtins/arm/builtins-arm.cc",
]
} else if (v8_current_cpu == "arm64") {
sources += [
### gcmole(arch:arm64) ###
"src/builtins/arm64/builtins-arm64.cc",
]
} else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
sources += [
### gcmole(arch:mipsel) ###
"src/builtins/mips/builtins-mips.cc",
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [
### gcmole(arch:mips64el) ###
"src/builtins/mips64/builtins-mips64.cc",
]
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
sources += [
### gcmole(arch:ppc) ###
"src/builtins/ppc/builtins-ppc.cc",
]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
sources += [
### gcmole(arch:s390) ###
"src/builtins/s390/builtins-s390.cc",
]
} else if (v8_current_cpu == "x87") {
sources += [
### gcmole(arch:x87) ###
"src/builtins/x87/builtins-x87.cc",
]
}
configs = [ ":internal_config" ]
}
v8_source_set("v8_builtins_setup") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
":v8_builtins_generators",
]
sources = [
### gcmole(all) ###
"src/setup-isolate-full.cc",
]
configs = [ ":internal_config" ]
}
# This is split out to be a non-code containing target that the Chromium browser
# DLL can depend upon to get only a version string.
v8_header_set("v8_version") {
@@ -894,7 +1032,8 @@ v8_source_set("v8_base") {
### gcmole(all) ###
"include/v8-debug.h",
"include/v8-experimental.h",
"include/v8-inspector-protocol.h",
"include/v8-inspector.h",
"include/v8-platform.h",
"include/v8-profiler.h",
"include/v8-testing.h",
@@ -912,8 +1051,6 @@ v8_source_set("v8_base") {
"src/api-arguments-inl.h",
"src/api-arguments.cc",
"src/api-arguments.h",
"src/api-experimental.cc",
"src/api-experimental.h",
"src/api-natives.cc",
"src/api-natives.h",
"src/api.cc",
@@ -922,6 +1059,11 @@ v8_source_set("v8_base") {
"src/arguments.h",
"src/asmjs/asm-js.cc",
"src/asmjs/asm-js.h",
"src/asmjs/asm-names.h",
"src/asmjs/asm-parser.cc",
"src/asmjs/asm-parser.h",
"src/asmjs/asm-scanner.cc",
"src/asmjs/asm-scanner.h",
"src/asmjs/asm-typer.cc",
"src/asmjs/asm-typer.h",
"src/asmjs/asm-types.cc",
@@ -976,52 +1118,40 @@ v8_source_set("v8_base") {
"src/bootstrapper.cc",
"src/bootstrapper.h",
"src/builtins/builtins-api.cc",
"src/builtins/builtins-arguments.cc",
"src/builtins/builtins-arguments.h",
"src/builtins/builtins-array.cc",
"src/builtins/builtins-arraybuffer.cc",
"src/builtins/builtins-async-function.cc",
"src/builtins/builtins-async-iterator.cc",
"src/builtins/builtins-async.cc",
"src/builtins/builtins-async.h",
"src/builtins/builtins-boolean.cc",
"src/builtins/builtins-call.cc",
"src/builtins/builtins-callsite.cc",
"src/builtins/builtins-constructor.cc",
"src/builtins/builtins-constructor.h",
"src/builtins/builtins-conversion.cc",
"src/builtins/builtins-dataview.cc",
"src/builtins/builtins-date.cc",
"src/builtins/builtins-debug.cc",
"src/builtins/builtins-definitions.h",
"src/builtins/builtins-descriptors.h",
"src/builtins/builtins-error.cc",
"src/builtins/builtins-function.cc",
"src/builtins/builtins-generator.cc",
"src/builtins/builtins-global.cc",
"src/builtins/builtins-handler.cc",
"src/builtins/builtins-ic.cc",
"src/builtins/builtins-internal.cc",
"src/builtins/builtins-interpreter.cc",
"src/builtins/builtins-intl.cc",
"src/builtins/builtins-json.cc",
"src/builtins/builtins-math.cc",
"src/builtins/builtins-number.cc",
"src/builtins/builtins-object.cc",
"src/builtins/builtins-object.h",
"src/builtins/builtins-promise.cc",
"src/builtins/builtins-promise.h",
"src/builtins/builtins-proxy.cc",
"src/builtins/builtins-reflect.cc",
"src/builtins/builtins-regexp.cc",
"src/builtins/builtins-regexp.h",
"src/builtins/builtins-sharedarraybuffer.cc",
"src/builtins/builtins-string.cc",
"src/builtins/builtins-symbol.cc",
"src/builtins/builtins-typedarray.cc",
"src/builtins/builtins-utils.h",
"src/builtins/builtins-wasm.cc",
"src/builtins/builtins.cc",
"src/builtins/builtins.h",
"src/cached-powers.cc",
"src/cached-powers.h",
"src/callable.h",
"src/cancelable-task.cc",
"src/cancelable-task.h",
"src/char-predicates-inl.h",
@@ -1034,6 +1164,7 @@ v8_source_set("v8_base") {
"src/code-stub-assembler.cc",
"src/code-stub-assembler.h",
"src/code-stubs-hydrogen.cc",
"src/code-stubs-utils.h",
"src/code-stubs.cc",
"src/code-stubs.h",
"src/codegen.cc",
@@ -1120,8 +1251,6 @@ v8_source_set("v8_base") {
"src/compiler/graph-assembler.h",
"src/compiler/graph-reducer.cc",
"src/compiler/graph-reducer.h",
"src/compiler/graph-replay.cc",
"src/compiler/graph-replay.h",
"src/compiler/graph-trimmer.cc",
"src/compiler/graph-trimmer.h",
"src/compiler/graph-visualizer.cc",
@@ -1265,8 +1394,6 @@ v8_source_set("v8_base") {
"src/compiler/wasm-linkage.cc",
"src/compiler/zone-stats.cc",
"src/compiler/zone-stats.h",
"src/context-measure.cc",
"src/context-measure.h",
"src/contexts-inl.h",
"src/contexts.cc",
"src/contexts.h",
@@ -1393,8 +1520,6 @@ v8_source_set("v8_base") {
"src/external-reference-table.h",
"src/factory.cc",
"src/factory.h",
"src/fast-accessor-assembler.cc",
"src/fast-accessor-assembler.h",
"src/fast-dtoa.cc",
"src/fast-dtoa.h",
"src/feedback-vector-inl.h",
@@ -1433,6 +1558,8 @@ v8_source_set("v8_base") {
"src/heap/array-buffer-tracker.h",
"src/heap/code-stats.cc",
"src/heap/code-stats.h",
"src/heap/concurrent-marking.cc",
"src/heap/concurrent-marking.h",
"src/heap/embedder-tracing.cc",
"src/heap/embedder-tracing.h",
"src/heap/gc-idle-time-handler.cc",
@@ -1476,8 +1603,6 @@ v8_source_set("v8_base") {
"src/ic/access-compiler-data.h",
"src/ic/access-compiler.cc",
"src/ic/access-compiler.h",
"src/ic/accessor-assembler.cc",
"src/ic/accessor-assembler.h",
"src/ic/call-optimization.cc",
"src/ic/call-optimization.h",
"src/ic/handler-compiler.cc",
@@ -1491,8 +1616,6 @@ v8_source_set("v8_base") {
"src/ic/ic-stats.h",
"src/ic/ic.cc",
"src/ic/ic.h",
"src/ic/keyed-store-generic.cc",
"src/ic/keyed-store-generic.h",
"src/ic/stub-cache.cc",
"src/ic/stub-cache.h",
"src/icu_util.cc",
@@ -1511,8 +1634,6 @@ v8_source_set("v8_base") {
"src/interpreter/bytecode-array-random-iterator.h",
"src/interpreter/bytecode-array-writer.cc",
"src/interpreter/bytecode-array-writer.h",
"src/interpreter/bytecode-dead-code-optimizer.cc",
"src/interpreter/bytecode-dead-code-optimizer.h",
"src/interpreter/bytecode-decoder.cc",
"src/interpreter/bytecode-decoder.h",
"src/interpreter/bytecode-flags.cc",
@@ -1523,9 +1644,6 @@ v8_source_set("v8_base") {
"src/interpreter/bytecode-label.h",
"src/interpreter/bytecode-operands.cc",
"src/interpreter/bytecode-operands.h",
"src/interpreter/bytecode-peephole-optimizer.cc",
"src/interpreter/bytecode-peephole-optimizer.h",
"src/interpreter/bytecode-peephole-table.h",
"src/interpreter/bytecode-pipeline.cc",
"src/interpreter/bytecode-pipeline.h",
"src/interpreter/bytecode-register-allocator.h",
@@ -1542,8 +1660,7 @@ v8_source_set("v8_base") {
"src/interpreter/control-flow-builders.h",
"src/interpreter/handler-table-builder.cc",
"src/interpreter/handler-table-builder.h",
"src/interpreter/interpreter-assembler.cc",
"src/interpreter/interpreter-assembler.h",
"src/interpreter/interpreter-generator.h",
"src/interpreter/interpreter-intrinsics.cc",
"src/interpreter/interpreter-intrinsics.h",
"src/interpreter/interpreter.cc",
@@ -1577,6 +1694,7 @@ v8_source_set("v8_base") {
"src/lookup.h",
"src/machine-type.cc",
"src/machine-type.h",
"src/macro-assembler-inl.h",
"src/macro-assembler.h",
"src/managed.h",
"src/map-updater.cc",
@@ -1591,6 +1709,15 @@ v8_source_set("v8_base") {
"src/objects-printer.cc",
"src/objects.cc",
"src/objects.h",
"src/objects/code-cache-inl.h",
"src/objects/code-cache.h",
"src/objects/compilation-cache-inl.h",
"src/objects/compilation-cache.h",
"src/objects/descriptor-array.h",
"src/objects/dictionary.h",
"src/objects/frame-array-inl.h",
"src/objects/frame-array.h",
"src/objects/hash-table.h",
"src/objects/literal-objects.cc",
"src/objects/literal-objects.h",
"src/objects/module-info.h",
@@ -1599,9 +1726,9 @@ v8_source_set("v8_base") {
"src/objects/regexp-match-info.h",
"src/objects/scope-info.cc",
"src/objects/scope-info.h",
"src/objects/string-table.h",
"src/ostreams.cc",
"src/ostreams.h",
"src/parsing/duplicate-finder.cc",
"src/parsing/duplicate-finder.h",
"src/parsing/expression-classifier.h",
"src/parsing/func-name-inferrer.cc",
@@ -1729,6 +1856,7 @@ v8_source_set("v8_base") {
"src/runtime/runtime.h",
"src/safepoint-table.cc",
"src/safepoint-table.h",
"src/setup-isolate.h",
"src/signature.h",
"src/simulator.h",
"src/small-pointer-list.h",
@@ -1776,6 +1904,9 @@ v8_source_set("v8_base") {
"src/transitions-inl.h",
"src/transitions.cc",
"src/transitions.h",
"src/trap-handler/handler-outside.cc",
"src/trap-handler/handler-shared.cc",
"src/trap-handler/trap-handler-internal.h",
"src/trap-handler/trap-handler.h",
"src/type-hints.cc",
"src/type-hints.h",
@@ -1852,7 +1983,6 @@ v8_source_set("v8_base") {
if (v8_current_cpu == "x86") {
sources += [ ### gcmole(arch:ia32) ###
"src/builtins/ia32/builtins-ia32.cc",
"src/compiler/ia32/code-generator-ia32.cc",
"src/compiler/ia32/instruction-codes-ia32.h",
"src/compiler/ia32/instruction-scheduler-ia32.cc",
@@ -1882,6 +2012,7 @@ v8_source_set("v8_base") {
"src/ia32/macro-assembler-ia32.h",
"src/ia32/simulator-ia32.cc",
"src/ia32/simulator-ia32.h",
"src/ia32/sse-instr.h",
"src/ic/ia32/access-compiler-ia32.cc",
"src/ic/ia32/handler-compiler-ia32.cc",
"src/ic/ia32/ic-ia32.cc",
@@ -1890,7 +2021,6 @@ v8_source_set("v8_base") {
]
} else if (v8_current_cpu == "x64") {
sources += [ ### gcmole(arch:x64) ###
"src/builtins/x64/builtins-x64.cc",
"src/compiler/x64/code-generator-x64.cc",
"src/compiler/x64/instruction-codes-x64.h",
"src/compiler/x64/instruction-scheduler-x64.cc",
@@ -1931,6 +2061,9 @@ v8_source_set("v8_base") {
"src/x64/simulator-x64.h",
"src/x64/sse-instr.h",
]
if (is_linux) {
sources += [ "src/trap-handler/handler-inside.cc" ]
}
} else if (v8_current_cpu == "arm") {
sources += [ ### gcmole(arch:arm) ###
"src/arm/assembler-arm-inl.h",
@@ -1954,7 +2087,6 @@ v8_source_set("v8_base") {
"src/arm/macro-assembler-arm.h",
"src/arm/simulator-arm.cc",
"src/arm/simulator-arm.h",
"src/builtins/arm/builtins-arm.cc",
"src/compiler/arm/code-generator-arm.cc",
"src/compiler/arm/instruction-codes-arm.h",
"src/compiler/arm/instruction-scheduler-arm.cc",
@@ -2008,7 +2140,6 @@ v8_source_set("v8_base") {
"src/arm64/simulator-arm64.h",
"src/arm64/utils-arm64.cc",
"src/arm64/utils-arm64.h",
"src/builtins/arm64/builtins-arm64.cc",
"src/compiler/arm64/code-generator-arm64.cc",
"src/compiler/arm64/instruction-codes-arm64.h",
"src/compiler/arm64/instruction-scheduler-arm64.cc",
@@ -2034,7 +2165,6 @@ v8_source_set("v8_base") {
]
} else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
sources += [ ### gcmole(arch:mipsel) ###
"src/builtins/mips/builtins-mips.cc",
"src/compiler/mips/code-generator-mips.cc",
"src/compiler/mips/instruction-codes-mips.h",
"src/compiler/mips/instruction-scheduler-mips.cc",
@@ -2074,7 +2204,6 @@ v8_source_set("v8_base") {
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [ ### gcmole(arch:mips64el) ###
"src/builtins/mips64/builtins-mips64.cc",
"src/compiler/mips64/code-generator-mips64.cc",
"src/compiler/mips64/instruction-codes-mips64.h",
"src/compiler/mips64/instruction-scheduler-mips64.cc",
@@ -2114,7 +2243,6 @@ v8_source_set("v8_base") {
]
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
sources += [ ### gcmole(arch:ppc) ###
"src/builtins/ppc/builtins-ppc.cc",
"src/compiler/ppc/code-generator-ppc.cc",
"src/compiler/ppc/instruction-codes-ppc.h",
"src/compiler/ppc/instruction-scheduler-ppc.cc",
@@ -2154,7 +2282,6 @@ v8_source_set("v8_base") {
]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
sources += [ ### gcmole(arch:s390) ###
"src/builtins/s390/builtins-s390.cc",
"src/compiler/s390/code-generator-s390.cc",
"src/compiler/s390/instruction-codes-s390.h",
"src/compiler/s390/instruction-scheduler-s390.cc",
@@ -2194,7 +2321,6 @@ v8_source_set("v8_base") {
]
} else if (v8_current_cpu == "x87") {
sources += [ ### gcmole(arch:x87) ###
"src/builtins/x87/builtins-x87.cc",
"src/compiler/x87/code-generator-x87.cc",
"src/compiler/x87/instruction-codes-x87.h",
"src/compiler/x87/instruction-scheduler-x87.cc",
@@ -2239,16 +2365,9 @@ v8_source_set("v8_base") {
":v8_libbase",
":v8_libsampler",
":v8_version",
"src/inspector:inspector",
]
sources += [ v8_generated_peephole_source ]
deps += [ ":run_mkpeephole" ]
if (is_win) {
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
cflags = [ "/wd4267" ]
}
if (v8_enable_i18n_support) {
deps += [ "//third_party/icu" ]
if (is_win) {
@@ -2265,10 +2384,6 @@ v8_source_set("v8_base") {
sources += [ "$target_gen_dir/debug-support.cc" ]
deps += [ ":postmortem-metadata" ]
}
if (v8_enable_inspector) {
deps += [ "src/inspector:inspector" ]
}
}
v8_component("v8_libbase") {
@@ -2325,6 +2440,7 @@ v8_component("v8_libbase") {
"src/base/safe_math_impl.h",
"src/base/sys-info.cc",
"src/base/sys-info.h",
"src/base/timezone-cache.h",
"src/base/utils/random-number-generator.cc",
"src/base/utils/random-number-generator.h",
]
@@ -2340,7 +2456,10 @@ v8_component("v8_libbase") {
}
if (is_posix) {
sources += [ "src/base/platform/platform-posix.cc" ]
sources += [
"src/base/platform/platform-posix.cc",
"src/base/platform/platform-posix.h",
]
}
if (is_linux) {
@@ -2491,6 +2610,7 @@ if (current_toolchain == v8_snapshot_toolchain) {
deps = [
":v8_base",
":v8_builtins_setup",
":v8_libbase",
":v8_libplatform",
":v8_nosnapshot",
@@ -2500,34 +2620,6 @@ if (current_toolchain == v8_snapshot_toolchain) {
}
}
v8_executable("mkpeephole") {
# mkpeephole needs to be built for the build host so the peephole lookup
# table can be built during build. The table depends on the properties of
# bytecodes that are described in bytecodes.{cc,h}.
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
"src/interpreter/bytecode-operands.cc",
"src/interpreter/bytecode-operands.h",
"src/interpreter/bytecode-peephole-optimizer.h",
"src/interpreter/bytecode-traits.h",
"src/interpreter/bytecodes.cc",
"src/interpreter/bytecodes.h",
"src/interpreter/mkpeephole.cc",
]
configs = [
":external_config",
":internal_config",
]
deps = [
":v8_libbase",
"//build/config/sanitizers:deps",
"//build/win:default_exe_manifest",
]
}
###############################################################################
# Public targets
#
@@ -2667,9 +2759,6 @@ v8_executable("d8") {
}
defines = []
if (v8_enable_inspector) {
defines += [ "V8_INSPECTOR_ENABLED" ]
}
if (v8_enable_vtunejit) {
deps += [ "//src/third_party/vtune:v8_vtune" ]
@@ -2869,17 +2958,6 @@ v8_source_set("wasm_module_runner") {
]
}
v8_source_set("wasm_test_signatures") {
sources = [
"test/common/wasm/test-signatures.h",
]
configs = [
":external_config",
":internal_config_base",
]
}
v8_source_set("wasm_fuzzer") {
sources = [
"test/fuzzer/wasm.cc",
@@ -2920,13 +2998,13 @@ v8_fuzzer("wasm_asmjs_fuzzer") {
v8_source_set("wasm_code_fuzzer") {
sources = [
"test/common/wasm/test-signatures.h",
"test/fuzzer/wasm-code.cc",
]
deps = [
":fuzzer_support",
":wasm_module_runner",
":wasm_test_signatures",
]
configs = [
@@ -2940,13 +3018,13 @@ v8_fuzzer("wasm_code_fuzzer") {
v8_source_set("wasm_call_fuzzer") {
sources = [
"test/common/wasm/test-signatures.h",
"test/fuzzer/wasm-call.cc",
]
deps = [
":fuzzer_support",
":wasm_module_runner",
":wasm_test_signatures",
]
configs = [
@@ -3112,13 +3190,13 @@ v8_fuzzer("wasm_data_section_fuzzer") {
v8_source_set("wasm_compile_fuzzer") {
sources = [
"test/common/wasm/test-signatures.h",
"test/fuzzer/wasm-compile.cc",
]
deps = [
":fuzzer_support",
":wasm_module_runner",
":wasm_test_signatures",
]
configs = [

deps/v8/ChangeLog (1145 lines changed)

File diff suppressed because it is too large

deps/v8/DEPS (20 lines changed)

@@ -8,15 +8,15 @@ vars = {
deps = {
"v8/build":
Var("chromium_url") + "/chromium/src/build.git" + "@" + "c7c2db69cd571523ce728c4d3dceedbd1896b519",
Var("chromium_url") + "/chromium/src/build.git" + "@" + "94c06fe70f3f6429c59e3ec0f6acd4f6710050b2",
"v8/tools/gyp":
Var("chromium_url") + "/external/gyp.git" + "@" + "e7079f0e0e14108ab0dba58728ff219637458563",
"v8/third_party/icu":
Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "450be73c9ee8ae29d43d4fdc82febb2a5f62bfb5",
"v8/third_party/instrumented_libraries":
Var("chromium_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "5b6f777da671be977f56f0e8fc3469a3ccbb4474",
Var("chromium_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "05d5695a73e78b9cae55b8579fd8bf22b85eb283",
"v8/buildtools":
Var("chromium_url") + "/chromium/buildtools.git" + "@" + "94cdccbebc7a634c27145a3d84089e85fbb42e69",
Var("chromium_url") + "/chromium/buildtools.git" + "@" + "d3074448541662f242bcee623049c13a231b5648",
"v8/base/trace_event/common":
Var("chromium_url") + "/chromium/src/base/trace_event/common.git" + "@" + "06294c8a4a6f744ef284cd63cfe54dbf61eea290",
"v8/third_party/jinja2":
@@ -34,26 +34,22 @@ deps = {
"v8/test/mozilla/data":
Var("chromium_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be",
"v8/test/test262/data":
Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "a72ee6d91275aa6524e84a9b7070103411ef2689",
Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "230f9fc5688ce76bfaa99aba5f680a159eaac9e2",
"v8/test/test262/harness":
Var("chromium_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "0f2acdd882c84cff43b9d60df7574a1901e2cdcd",
"v8/tools/clang":
Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "9913fb19b687b0c858f697efd7bd2468d789a3d5",
Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "49df471350a60efaec6951f321dd65475496ba17",
"v8/test/wasm-js":
Var("chromium_url") + "/external/github.com/WebAssembly/spec.git" + "@" + "b8b919e4a0d52db4d3d762e731e615bc3a38b3b2",
Var("chromium_url") + "/external/github.com/WebAssembly/spec.git" + "@" + "07fd6430f879d36928d179a62d9bdeed82286065",
}
deps_os = {
"android": {
"v8/third_party/android_tools":
Var("chromium_url") + "/android_tools.git" + "@" + "b43a6a289a7588b1769814f04dd6c7d7176974cc",
Var("chromium_url") + "/android_tools.git" + "@" + "b65c4776dac2cf1b80e969b3b2d4e081b9c84f29",
"v8/third_party/catapult":
Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "246a39a82c2213d913a96fff020a263838dc76e6",
Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "9a55abab029cb9ae94f5160ded11b09a4638a955",
},
"win": {
"v8/third_party/cygwin":
Var("chromium_url") + "/chromium/deps/cygwin.git" + "@" + "c89e446b273697fadf3a10ff1007a97c0b7de6df",
}
}
recursedeps = [

deps/v8/OWNERS (1 line changed)

@@ -7,7 +7,6 @@ bradnelson@chromium.org
cbruni@chromium.org
clemensh@chromium.org
danno@chromium.org
epertoso@chromium.org
franzih@chromium.org
gsathya@chromium.org
hablich@chromium.org

deps/v8/PRESUBMIT.py (28 lines changed)

@@ -31,6 +31,7 @@ See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import re
import sys
@@ -250,6 +251,7 @@ def _CheckMissingFiles(input_api, output_api):
def _CommonChecks(input_api, output_api):
  """Checks common to both upload and commit."""
  results = []
  results.extend(_CheckCommitMessageBugEntry(input_api, output_api))
  results.extend(input_api.canned_checks.CheckOwners(
      input_api, output_api, source_file_filter=None))
  results.extend(input_api.canned_checks.CheckPatchFormatted(
@@ -276,6 +278,32 @@
  return input_api.environ.get('PRESUBMIT_TREE_CHECK') == 'skip'
def _CheckCommitMessageBugEntry(input_api, output_api):
  """Check that bug entries are well-formed in commit message."""
  bogus_bug_msg = (
      'Bogus BUG entry: %s. Please specify the issue tracker prefix and the '
      'issue number, separated by a colon, e.g. v8:123 or chromium:12345.')
  results = []
  for bug in (input_api.change.BUG or '').split(','):
    bug = bug.strip()
    if 'none'.startswith(bug.lower()):
      continue
    if ':' not in bug:
      try:
        if int(bug) > 100000:
          # Rough indicator for current chromium bugs.
          prefix_guess = 'chromium'
        else:
          prefix_guess = 'v8'
        results.append('BUG entry requires issue tracker prefix, e.g. %s:%s' %
                       (prefix_guess, bug))
      except ValueError:
        results.append(bogus_bug_msg % bug)
    elif not re.match(r'\w+:\d+', bug):
      results.append(bogus_bug_msg % bug)
  return [output_api.PresubmitError(r) for r in results]
def CheckChangeOnUpload(input_api, output_api):
  results = []
  results.extend(_CommonChecks(input_api, output_api))
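
In practice the new bug-entry check accepts entries such as BUG=v8:6222, BUG=chromium:700364, and BUG=none; a bare BUG=6222 draws the suggestion v8:6222 (numbers above 100000 get a chromium: prefix guess instead), and a non-numeric, colon-free entry such as BUG=foo is reported as bogus. (The entry values here are illustrative, not from the commit.)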

deps/v8/gni/isolate.gni (7 lines changed)

@@ -101,11 +101,6 @@ template("v8_isolate_run") {
} else {
icu_use_data_file_flag = "0"
}
if (v8_enable_inspector) {
enable_inspector = "1"
} else {
enable_inspector = "0"
}
if (v8_use_external_startup_data) {
use_external_startup_data = "1"
} else {
@@ -177,8 +172,6 @@ template("v8_isolate_run") {
"--config-variable",
"target_arch=$target_arch",
"--config-variable",
"v8_enable_inspector=$enable_inspector",
"--config-variable",
"v8_use_external_startup_data=$use_external_startup_data",
"--config-variable",
"v8_use_snapshot=$use_snapshot",

deps/v8/gni/v8.gni (6 lines changed)

@@ -37,9 +37,6 @@ declare_args() {
# add a dependency on the ICU library.
v8_enable_i18n_support = true
# Enable inspector. See include/v8-inspector.h.
v8_enable_inspector = true
# Use static libraries instead of source_sets.
v8_static_library = false
}
@@ -66,9 +63,8 @@ v8_inspector_js_protocol = v8_path_prefix + "/src/inspector/js_protocol.json"
#
# Common configs to remove or add in all v8 targets.
v8_remove_configs = [ "//build/config/compiler:chromium_code" ]
v8_remove_configs = []
v8_add_configs = [
"//build/config/compiler:no_chromium_code",
v8_path_prefix + ":features",
v8_path_prefix + ":toolchain",
]

deps/v8/gypfiles/all.gyp (12 lines changed)

@@ -9,6 +9,7 @@
'type': 'none',
'dependencies': [
'../src/d8.gyp:d8',
'../test/inspector/inspector.gyp:*',
],
'conditions': [
['component!="shared_library"', {
@@ -25,20 +26,11 @@
'../test/unittests/unittests.gyp:*',
],
}],
['v8_enable_inspector==1', {
'dependencies': [
'../test/inspector/inspector.gyp:*',
],
}],
['v8_enable_inspector==1 and test_isolation_mode != "noop"', {
'dependencies': [
'../test/debugger/debugger.gyp:*',
],
}],
['test_isolation_mode != "noop"', {
'dependencies': [
'../test/bot_default.gyp:*',
'../test/benchmarks/benchmarks.gyp:*',
'../test/debugger/debugger.gyp:*',
'../test/default.gyp:*',
'../test/intl/intl.gyp:*',
'../test/message/message.gyp:*',

deps/v8/gypfiles/features.gypi (3 lines changed)

@@ -142,5 +142,8 @@
], # conditions
}, # Release
}, # configurations
'defines': [
'V8_GYP_BUILD',
], # defines
}, # target_defaults
}

deps/v8/gypfiles/isolate.gypi (1 line changed)

@@ -82,7 +82,6 @@
'--config-variable', 'sanitizer_coverage=<(sanitizer_coverage)',
'--config-variable', 'component=<(component)',
'--config-variable', 'target_arch=<(target_arch)',
'--config-variable', 'v8_enable_inspector=<(v8_enable_inspector)',
'--config-variable', 'v8_use_external_startup_data=<(v8_use_external_startup_data)',
'--config-variable', 'v8_use_snapshot=<(v8_use_snapshot)',
],

deps/v8/gypfiles/standalone.gypi (25 lines changed)

@@ -46,7 +46,6 @@
'msvs_multi_core_compile%': '1',
'mac_deployment_target%': '10.7',
'release_extra_cflags%': '',
'v8_enable_inspector%': 0,
'variables': {
'variables': {
'variables': {
@@ -93,16 +92,16 @@
['OS=="linux" and use_sysroot==1', {
'conditions': [
['target_arch=="arm"', {
'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_wheezy_arm-sysroot',
'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_jessie_arm-sysroot',
}],
['target_arch=="x64"', {
'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_wheezy_amd64-sysroot',
'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_jessie_amd64-sysroot',
}],
['target_arch=="ia32"', {
'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_wheezy_i386-sysroot',
'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_jessie_i386-sysroot',
}],
['target_arch=="mipsel"', {
'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_wheezy_mips-sysroot',
'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_jessie_mips-sysroot',
}],
],
}], # OS=="linux" and use_sysroot==1
@@ -243,9 +242,6 @@
# Relative path to icu.gyp from this file.
'icu_gyp_path': '../third_party/icu/icu.gyp',
# Relative path to inspector.gyp from this file.
'inspector_gyp_path': '../src/v8-inspector/inspector.gyp',
'conditions': [
['(v8_target_arch=="arm" and host_arch!="arm") or \
(v8_target_arch=="arm64" and host_arch!="arm64") or \
@@ -257,18 +253,6 @@
}, {
'want_separate_host_toolset': 0,
}],
['(v8_target_arch=="arm" and host_arch!="arm") or \
(v8_target_arch=="arm64" and host_arch!="arm64") or \
(v8_target_arch=="mipsel" and host_arch!="mipsel") or \
(v8_target_arch=="mips64el" and host_arch!="mips64el") or \
(v8_target_arch=="mips" and host_arch!="mips") or \
(v8_target_arch=="mips64" and host_arch!="mips64") or \
(v8_target_arch=="x64" and host_arch!="x64") or \
(OS=="android" or OS=="qnx")', {
'want_separate_host_toolset_mkpeephole': 1,
}, {
'want_separate_host_toolset_mkpeephole': 0,
}],
['OS == "win"', {
'os_posix%': 0,
}, {
@@ -870,6 +854,7 @@
],
}],
],
'msvs_cygwin_shell': 0,
'msvs_cygwin_dirs': ['<(DEPTH)/third_party/cygwin'],
'msvs_disabled_warnings': [
# C4091: 'typedef ': ignored on left of 'X' when no variable is

deps/v8/gypfiles/toolchain.gypi (1 line changed)

@@ -74,7 +74,6 @@
# Chrome needs this definition unconditionally. For standalone V8 builds,
# it's handled in gypfiles/standalone.gypi.
'want_separate_host_toolset%': 1,
'want_separate_host_toolset_mkpeephole%': 1,
# Toolset the shell binary should be compiled for. Possible values are
# 'host' and 'target'.

deps/v8/include/libplatform/libplatform.h (8 lines changed)

@@ -12,6 +12,8 @@
namespace v8 {
namespace platform {
enum class IdleTaskSupport { kDisabled, kEnabled };
/**
* Returns a new instance of the default v8::Platform implementation.
*
@@ -19,9 +21,13 @@ namespace platform {
* is the number of worker threads to allocate for background jobs. If a value
* of zero is passed, a suitable default based on the current number of
* processors online will be chosen.
* If |idle_task_support| is enabled then the platform will accept idle
* tasks (IdleTasksEnabled will return true) and will rely on the embedder
* calling v8::platform::RunIdleTasks to process the idle tasks.
*/
V8_PLATFORM_EXPORT v8::Platform* CreateDefaultPlatform(
int thread_pool_size = 0);
int thread_pool_size = 0,
IdleTaskSupport idle_task_support = IdleTaskSupport::kDisabled);
/**
* Pumps the message loop for the given isolate.
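
The new parameter is opt-in: it defaults to IdleTaskSupport::kDisabled, so existing CreateDefaultPlatform() callers keep their behavior. A minimal embedder sketch of enabling idle tasks (illustrative only; it assumes the declarations above plus the v8::platform::RunIdleTasks entry point the comment refers to):

    #include "include/libplatform/libplatform.h"
    #include "include/v8.h"

    int main() {
      v8::Platform* platform = v8::platform::CreateDefaultPlatform(
          0,  // thread_pool_size: zero picks a default from the CPU count
          v8::platform::IdleTaskSupport::kEnabled);
      v8::V8::InitializePlatform(platform);
      v8::V8::Initialize();
      // ... create an isolate and run scripts; the embedder must then grant
      // idle time periodically, e.g.:
      //   v8::platform::RunIdleTasks(platform, isolate, 0.01 /* seconds */);
      v8::V8::Dispose();
      v8::V8::ShutdownPlatform();
      delete platform;
      return 0;
    }
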

deps/v8/include/v8-debug.h (43 lines changed)

@@ -8,7 +8,9 @@
#include "v8.h" // NOLINT(build/include)
/**
* Debugger support for the V8 JavaScript engine.
* ATTENTION: The debugger API exposed by this file is deprecated and will be
* removed by the end of 2017. Please use the V8 inspector declared
* in include/v8-inspector.h instead.
*/
namespace v8 {
@@ -140,21 +142,19 @@ class V8_EXPORT Debug {
*/
typedef void (*MessageHandler)(const Message& message);
/**
* This is now a no-op.
*/
typedef void (*DebugMessageDispatchHandler)();
static bool SetDebugEventListener(Isolate* isolate, EventCallback that,
Local<Value> data = Local<Value>());
V8_DEPRECATED("No longer supported", static bool SetDebugEventListener(
Isolate* isolate, EventCallback that,
Local<Value> data = Local<Value>()));
// Schedule a debugger break to happen when JavaScript code is run
// in the given isolate.
static void DebugBreak(Isolate* isolate);
V8_DEPRECATED("No longer supported",
static void DebugBreak(Isolate* isolate));
// Remove scheduled debugger break in given isolate if it has not
// happened yet.
static void CancelDebugBreak(Isolate* isolate);
V8_DEPRECATED("No longer supported",
static void CancelDebugBreak(Isolate* isolate));
// Check if a debugger break is scheduled in the given isolate.
V8_DEPRECATED("No longer supported",
@@ -189,10 +189,10 @@ class V8_EXPORT Debug {
* }
* \endcode
*/
// TODO(dcarney): data arg should be a MaybeLocal
static MaybeLocal<Value> Call(Local<Context> context,
v8::Local<v8::Function> fun,
Local<Value> data = Local<Value>());
V8_DEPRECATED("No longer supported",
static MaybeLocal<Value> Call(
Local<Context> context, v8::Local<v8::Function> fun,
Local<Value> data = Local<Value>()));
// This is now a no-op.
V8_DEPRECATED("No longer supported",
@@ -221,23 +221,28 @@ class V8_EXPORT Debug {
* (default Isolate if not provided). V8 will abort if LiveEdit is
* unexpectedly used. LiveEdit is enabled by default.
*/
static void SetLiveEditEnabled(Isolate* isolate, bool enable);
V8_DEPRECATED("No longer supported",
static void SetLiveEditEnabled(Isolate* isolate, bool enable));
/**
* Returns array of internal properties specific to the value type. Result has
* the following format: [<name>, <value>,...,<name>, <value>]. Result array
* will be allocated in the current context.
*/
static MaybeLocal<Array> GetInternalProperties(Isolate* isolate,
Local<Value> value);
V8_DEPRECATED("No longer supported",
static MaybeLocal<Array> GetInternalProperties(
Isolate* isolate, Local<Value> value));
/**
* Defines if the ES2015 tail call elimination feature is enabled or not.
* The change of this flag triggers deoptimization of all functions that
* contain calls at tail position.
*/
static bool IsTailCallEliminationEnabled(Isolate* isolate);
static void SetTailCallEliminationEnabled(Isolate* isolate, bool enabled);
V8_DEPRECATED("No longer supported",
static bool IsTailCallEliminationEnabled(Isolate* isolate));
V8_DEPRECATED("No longer supported",
static void SetTailCallEliminationEnabled(Isolate* isolate,
bool enabled));
};

deps/v8/include/v8-experimental.h (58 lines changed)

@@ -1,58 +0,0 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/**
* This header contains a set of experimental V8 APIs. We hope these will
* become a part of standard V8, but they may also be removed if we deem the
* experiment to not be successful.
*/
#ifndef V8_INCLUDE_V8_EXPERIMENTAL_H_
#define V8_INCLUDE_V8_EXPERIMENTAL_H_
#include "v8.h" // NOLINT(build/include)
namespace v8 {
namespace experimental {
// Allow the embedder to construct accessors that V8 can compile and use
// directly, without jumping into the runtime.
class V8_EXPORT FastAccessorBuilder {
public:
struct ValueId {
size_t value_id;
};
struct LabelId {
size_t label_id;
};
static FastAccessorBuilder* New(Isolate* isolate);
ValueId IntegerConstant(int int_constant);
ValueId GetReceiver();
ValueId LoadInternalField(ValueId value_id, int field_no);
ValueId LoadInternalFieldUnchecked(ValueId value_id, int field_no);
ValueId LoadValue(ValueId value_id, int offset);
ValueId LoadObject(ValueId value_id, int offset);
ValueId ToSmi(ValueId value_id);
void ReturnValue(ValueId value_id);
void CheckFlagSetOrReturnNull(ValueId value_id, int mask);
void CheckNotZeroOrReturnNull(ValueId value_id);
LabelId MakeLabel();
void SetLabel(LabelId label_id);
void Goto(LabelId label_id);
void CheckNotZeroOrJump(ValueId value_id, LabelId label_id);
ValueId Call(v8::FunctionCallback callback, ValueId value_id);
private:
FastAccessorBuilder() = delete;
FastAccessorBuilder(const FastAccessorBuilder&) = delete;
~FastAccessorBuilder() = delete;
void operator=(const FastAccessorBuilder&) = delete;
};
} // namespace experimental
} // namespace v8
#endif // V8_INCLUDE_V8_EXPERIMENTAL_H_

deps/v8/include/v8-inspector.h (2 lines changed)

@@ -224,8 +224,6 @@ class V8_EXPORT V8Inspector {
virtual void resetContextGroup(int contextGroupId) = 0;
// Various instrumentation.
virtual void willExecuteScript(v8::Local<v8::Context>, int scriptId) = 0;
virtual void didExecuteScript(v8::Local<v8::Context>) = 0;
virtual void idleStarted() = 0;
virtual void idleFinished() = 0;

deps/v8/include/v8-platform.h (8 lines changed)

@@ -212,6 +212,14 @@ class Platform {
/** Removes tracing state change observer. */
virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
typedef void (*StackTracePrinter)();
/**
* Returns a function pointer that prints a stack trace of the current stack
* on invocation. Disables printing of the stack trace if nullptr.
*/
virtual StackTracePrinter GetStackTracePrinter() { return nullptr; }
};
} // namespace v8
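
A sketch of the embedder side of this hook (names hypothetical; only StackTracePrinter and GetStackTracePrinter come from the header above):

    #include <cstdio>
    #include "include/v8-platform.h"

    // A real printer would walk the stack, e.g. via backtrace(3) on POSIX.
    static void PrintEmbedderStack() {
      std::fprintf(stderr, "(embedder stack trace goes here)\n");
    }

    // In a v8::Platform subclass, the override simply returns the pointer:
    //
    //   StackTracePrinter GetStackTracePrinter() override {
    //     return PrintEmbedderStack;
    //   }
    //
    // Returning nullptr (the default above) leaves printing disabled.
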

deps/v8/include/v8-profiler.h (5 lines changed)

@@ -812,11 +812,6 @@ class V8_EXPORT HeapProfiler {
/** Returns memory used for profiler internal data and snapshots. */
size_t GetProfilerMemorySize();
/**
* Sets a RetainedObjectInfo for an object group (see V8::SetObjectGroupId).
*/
void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
private:
HeapProfiler();
~HeapProfiler();

deps/v8/include/v8-version.h (6 lines changed)

@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 5
#define V8_MINOR_VERSION 8
#define V8_BUILD_NUMBER 283
#define V8_PATCH_LEVEL 41
#define V8_MINOR_VERSION 9
#define V8_BUILD_NUMBER 211
#define V8_PATCH_LEVEL 32
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
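
These four macros compose exactly the version in the commit title; a quick sanity-check sketch (not part of the diff):

    #include <cstdio>
    #include "include/v8-version.h"

    int main() {
      // With the values defined above this prints "5.9.211.32".
      std::printf("%d.%d.%d.%d\n", V8_MAJOR_VERSION, V8_MINOR_VERSION,
                  V8_BUILD_NUMBER, V8_PATCH_LEVEL);
      return 0;
    }
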

deps/v8/include/v8.h (646 lines changed)

File diff suppressed because it is too large

deps/v8/infra/config/cq.cfg (6 lines changed)

@@ -32,6 +32,10 @@ verifiers {
buckets {
name: "master.tryserver.v8"
builders { name: "v8_android_arm_compile_rel" }
builders {
name: "v8_node_linux64_rel"
experiment_percentage: 100
}
builders { name: "v8_linux64_asan_rel_ng" }
builders {
name: "v8_linux64_asan_rel_ng_triggered"
@@ -119,7 +123,7 @@
}
builders {
name: "v8_linux64_sanitizer_coverage_rel"
experiment_percentage: 20
experiment_percentage: 100
}
}
buckets {

deps/v8/infra/mb/mb_config.pyl (69 lines changed)

@@ -24,6 +24,18 @@
'mips64el.debug': 'default_debug_mips64el',
'mips64el.optdebug': 'default_optdebug_mips64el',
'mips64el.release': 'default_release_mips64el',
'ppc.debug': 'default_debug_ppc',
'ppc.optdebug': 'default_optdebug_ppc',
'ppc.release': 'default_release_ppc',
'ppc64.debug': 'default_debug_ppc64',
'ppc64.optdebug': 'default_optdebug_ppc64',
'ppc64.release': 'default_release_ppc64',
's390.debug': 'default_debug_s390',
's390.optdebug': 'default_optdebug_s390',
's390.release': 'default_release_s390',
's390x.debug': 'default_debug_s390x',
's390x.optdebug': 'default_optdebug_s390x',
's390x.release': 'default_release_s390x',
'x64.debug': 'default_debug_x64',
'x64.optdebug': 'default_optdebug_x64',
'x64.release': 'default_release_x64',
@@ -92,11 +104,11 @@
# original config also specified -O1, which we dropped because chromium
# doesn't have it (anymore).
'V8 Linux64 - cfi': 'gyp_release_x64_cfi_symbolized',
'V8 Linux - vtunejit': 'gyp_debug_x86_vtunejit',
'V8 Linux - vtunejit': 'gn_debug_x86_vtunejit',
'V8 Linux64 - gcov coverage': 'gyp_release_x64_gcc_coverage',
'V8 Linux - predictable': 'gyp_release_x86_predictable',
'V8 Linux - predictable': 'gn_release_x86_predictable',
'V8 Linux - full debug': 'gyp_full_debug_x86',
'V8 Linux - interpreted regexp': 'gyp_release_x86_interpreted_regexp',
'V8 Linux - interpreted regexp': 'gn_release_x86_interpreted_regexp',
'V8 Random Deopt Fuzzer - debug': 'gyp_debug_x86',
},
@@ -234,6 +246,34 @@
'gn', 'debug', 'simulate_mips64el', 'v8_enable_slow_dchecks'],
'default_release_mips64el': [
'gn', 'release', 'simulate_mips64el'],
'default_debug_ppc': [
'gn', 'debug', 'simulate_ppc', 'v8_enable_slow_dchecks',
'v8_full_debug'],
'default_optdebug_ppc': [
'gn', 'debug', 'simulate_ppc', 'v8_enable_slow_dchecks'],
'default_release_ppc': [
'gn', 'release', 'simulate_ppc'],
'default_debug_ppc64': [
'gn', 'debug', 'simulate_ppc64', 'v8_enable_slow_dchecks',
'v8_full_debug'],
'default_optdebug_ppc64': [
'gn', 'debug', 'simulate_ppc64', 'v8_enable_slow_dchecks'],
'default_release_ppc64': [
'gn', 'release', 'simulate_ppc64'],
'default_debug_s390': [
'gn', 'debug', 'simulate_s390', 'v8_enable_slow_dchecks',
'v8_full_debug'],
'default_optdebug_s390': [
'gn', 'debug', 'simulate_s390', 'v8_enable_slow_dchecks'],
'default_release_s390': [
'gn', 'release', 'simulate_s390'],
'default_debug_s390x': [
'gn', 'debug', 'simulate_s390x', 'v8_enable_slow_dchecks',
'v8_full_debug'],
'default_optdebug_s390x': [
'gn', 'debug', 'simulate_s390x', 'v8_enable_slow_dchecks'],
'default_release_s390x': [
'gn', 'release', 'simulate_s390x'],
'default_debug_x64': [
'gn', 'debug', 'x64', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_x64': [
@@ -350,14 +390,15 @@
'gn_debug_x86_minimal_symbols': [
'gn', 'debug_bot', 'x86', 'minimal_symbols', 'swarming'],
'gn_debug_x86_no_i18n': [
'gn', 'debug_bot', 'x86', 'swarming', 'v8_disable_inspector',
'v8_no_i18n'],
'gn', 'debug_bot', 'x86', 'swarming', 'v8_no_i18n'],
'gn_debug_x86_no_snap': [
'gn', 'debug_bot', 'x86', 'swarming', 'v8_snapshot_none'],
'gn_debug_x86_no_snap_trybot': [
'gn', 'debug_trybot', 'x86', 'swarming', 'v8_snapshot_none'],
'gn_debug_x86_trybot': [
'gn', 'debug_trybot', 'x86', 'swarming'],
'gn_debug_x86_vtunejit': [
'gn', 'debug_bot', 'x86', 'v8_enable_vtunejit'],
# GN release configs for x86.
'gn_release_x86': [
@@ -370,11 +411,12 @@
'gn', 'release_bot', 'x86', 'gcmole', 'swarming'],
'gn_release_x86_gcmole_trybot': [
'gn', 'release_trybot', 'x86', 'gcmole', 'swarming'],
'gn_release_x86_interpreted_regexp': [
'gn', 'release_bot', 'x86', 'v8_interpreted_regexp'],
'gn_release_x86_minimal_symbols': [
'gn', 'release_bot', 'x86', 'minimal_symbols', 'swarming'],
'gn_release_x86_no_i18n_trybot': [
'gn', 'release_trybot', 'x86', 'swarming', 'v8_disable_inspector',
'v8_no_i18n'],
'gn', 'release_trybot', 'x86', 'swarming', 'v8_no_i18n'],
'gn_release_x86_no_snap': [
'gn', 'release_bot', 'x86', 'swarming', 'v8_snapshot_none'],
'gn_release_x86_no_snap_shared_minimal_symbols': [
@@ -382,6 +424,8 @@
'v8_snapshot_none'],
'gn_release_x86_no_snap_trybot': [
'gn', 'release_trybot', 'x86', 'swarming', 'v8_snapshot_none'],
'gn_release_x86_predictable': [
'gn', 'release_bot', 'x86', 'v8_enable_verify_predictable'],
'gn_release_x86_shared_verify_heap': [
'gn', 'release', 'x86', 'goma', 'shared', 'swarming', 'v8_verify_heap'],
'gn_release_x86_trybot': [
@@ -397,8 +441,6 @@
# Gyp debug configs for x86.
'gyp_debug_x86': [
'gyp', 'debug_bot', 'x86', 'swarming'],
'gyp_debug_x86_vtunejit': [
'gyp', 'debug_bot', 'x86', 'v8_enable_vtunejit'],
'gyp_full_debug_x86': [
'gyp', 'debug', 'x86', 'goma', 'static', 'v8_enable_slow_dchecks',
'v8_full_debug'],
@@ -432,10 +474,6 @@
# Gyp release configs for x86.
'gyp_release_x86_disassembler': [
'gyp', 'release_bot', 'x86', 'v8_enable_disassembler'],
'gyp_release_x86_interpreted_regexp': [
'gyp', 'release_bot', 'x86', 'v8_interpreted_regexp'],
'gyp_release_x86_predictable': [
'gyp', 'release_bot', 'x86', 'v8_enable_verify_predictable'],
},
'mixins': {
@@ -661,11 +699,6 @@
'gn_args': 'v8_correctness_fuzzer=true v8_multi_arch_build=true',
},
'v8_disable_inspector': {
'gn_args': 'v8_enable_inspector=false',
'gyp_defines': 'v8_enable_inspector=0 ',
},
'v8_enable_disassembler': {
'gn_args': 'v8_enable_disassembler=true',
'gyp_defines': 'v8_enable_disassembler=1',

3
deps/v8/src/DEPS

@@ -18,6 +18,9 @@ include_rules = [
"+src/interpreter/bytecode-register.h",
"+src/interpreter/bytecodes.h",
"+src/interpreter/interpreter.h",
"+src/interpreter/setup-interpreter.h",
"-src/trap-handler",
"+src/trap-handler/trap-handler.h",
"+testing/gtest/include/gtest/gtest_prod.h",
"-src/libplatform",
"-include/libplatform"

1
deps/v8/src/OWNERS

@@ -1,4 +1,5 @@
per-file i18n.*=cira@chromium.org
per-file i18n.*=mnita@google.com
per-file i18n.*=jshin@chromium.org
per-file typing-asm.*=aseemgarg@chromium.org
per-file typing-asm.*=bradnelson@chromium.org

139
deps/v8/src/api-experimental.cc

@@ -1,139 +0,0 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/**
* Implementation for v8-experimental.h.
*/
#include "src/api-experimental.h"
#include "include/v8-experimental.h"
#include "include/v8.h"
#include "src/api.h"
#include "src/fast-accessor-assembler.h"
#include "src/objects-inl.h"
namespace {
v8::internal::FastAccessorAssembler* FromApi(
v8::experimental::FastAccessorBuilder* builder) {
return reinterpret_cast<v8::internal::FastAccessorAssembler*>(builder);
}
v8::experimental::FastAccessorBuilder* FromInternal(
v8::internal::FastAccessorAssembler* fast_accessor_assembler) {
return reinterpret_cast<v8::experimental::FastAccessorBuilder*>(
fast_accessor_assembler);
}
} // namespace
namespace v8 {
namespace internal {
namespace experimental {
MaybeHandle<Code> BuildCodeFromFastAccessorBuilder(
v8::experimental::FastAccessorBuilder* fast_handler) {
i::MaybeHandle<i::Code> code;
if (fast_handler != nullptr) {
auto faa = FromApi(fast_handler);
code = faa->Build();
CHECK(!code.is_null());
delete faa;
}
return code;
}
} // namespace experimental
} // namespace internal
namespace experimental {
FastAccessorBuilder* FastAccessorBuilder::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
internal::FastAccessorAssembler* faa =
new internal::FastAccessorAssembler(i_isolate);
return FromInternal(faa);
}
FastAccessorBuilder::ValueId FastAccessorBuilder::IntegerConstant(
int const_value) {
return FromApi(this)->IntegerConstant(const_value);
}
FastAccessorBuilder::ValueId FastAccessorBuilder::GetReceiver() {
return FromApi(this)->GetReceiver();
}
FastAccessorBuilder::ValueId FastAccessorBuilder::LoadInternalField(
ValueId value, int field_no) {
return FromApi(this)->LoadInternalField(value, field_no);
}
FastAccessorBuilder::ValueId FastAccessorBuilder::LoadInternalFieldUnchecked(
ValueId value, int field_no) {
return FromApi(this)->LoadInternalFieldUnchecked(value, field_no);
}
FastAccessorBuilder::ValueId FastAccessorBuilder::LoadValue(ValueId value_id,
int offset) {
return FromApi(this)->LoadValue(value_id, offset);
}
FastAccessorBuilder::ValueId FastAccessorBuilder::LoadObject(ValueId value_id,
int offset) {
return FromApi(this)->LoadObject(value_id, offset);
}
FastAccessorBuilder::ValueId FastAccessorBuilder::ToSmi(ValueId value_id) {
return FromApi(this)->ToSmi(value_id);
}
void FastAccessorBuilder::ReturnValue(ValueId value) {
FromApi(this)->ReturnValue(value);
}
void FastAccessorBuilder::CheckFlagSetOrReturnNull(ValueId value_id, int mask) {
FromApi(this)->CheckFlagSetOrReturnNull(value_id, mask);
}
void FastAccessorBuilder::CheckNotZeroOrReturnNull(ValueId value_id) {
FromApi(this)->CheckNotZeroOrReturnNull(value_id);
}
FastAccessorBuilder::LabelId FastAccessorBuilder::MakeLabel() {
return FromApi(this)->MakeLabel();
}
void FastAccessorBuilder::SetLabel(LabelId label_id) {
FromApi(this)->SetLabel(label_id);
}
void FastAccessorBuilder::Goto(LabelId label_id) {
FromApi(this)->Goto(label_id);
}
void FastAccessorBuilder::CheckNotZeroOrJump(ValueId value_id,
LabelId label_id) {
FromApi(this)->CheckNotZeroOrJump(value_id, label_id);
}
FastAccessorBuilder::ValueId FastAccessorBuilder::Call(
v8::FunctionCallback callback, ValueId value_id) {
return FromApi(this)->Call(callback, value_id);
}
} // namespace experimental
} // namespace v8
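
For context on the deletion above: FastAccessorBuilder exposed a small instruction-like API for assembling fast property accessors. A minimal sketch of how an embedder would have driven it, using only calls from the deleted file and assuming a live v8::Isolate* named isolate (the function name is hypothetical):

// Build an accessor that returns embedder field 0 of the receiver.
v8::experimental::FastAccessorBuilder* BuildFieldAccessor(
    v8::Isolate* isolate) {
  using v8::experimental::FastAccessorBuilder;
  FastAccessorBuilder* builder = FastAccessorBuilder::New(isolate);
  FastAccessorBuilder::ValueId receiver = builder->GetReceiver();
  FastAccessorBuilder::ValueId field = builder->LoadInternalField(receiver, 0);
  builder->ReturnValue(field);
  return builder;  // Consumed (and freed) by V8 when the code is built.
}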

28
deps/v8/src/api-experimental.h

@@ -1,28 +0,0 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_API_EXPERIMENTAL_H_
#define V8_API_EXPERIMENTAL_H_
namespace v8 {
namespace internal {
class Code;
template <typename T>
class MaybeHandle;
} // internal;
namespace experimental {
class FastAccessorBuilder;
} // experimental
namespace internal {
namespace experimental {
v8::internal::MaybeHandle<v8::internal::Code> BuildCodeFromFastAccessorBuilder(
v8::experimental::FastAccessorBuilder* fast_handler);
} // namespace experimental
} // namespace internal
} // namespace v8
#endif // V8_API_EXPERIMENTAL_H_

42
deps/v8/src/api-natives.cc

@@ -36,7 +36,8 @@ class InvokeScope {
MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
Handle<ObjectTemplateInfo> data,
Handle<JSReceiver> new_target,
bool is_hidden_prototype);
bool is_hidden_prototype,
bool is_prototype);
MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
Handle<FunctionTemplateInfo> data,
@@ -49,7 +50,7 @@ MaybeHandle<Object> Instantiate(Isolate* isolate, Handle<Object> data,
Handle<FunctionTemplateInfo>::cast(data), name);
} else if (data->IsObjectTemplateInfo()) {
return InstantiateObject(isolate, Handle<ObjectTemplateInfo>::cast(data),
Handle<JSReceiver>(), false);
Handle<JSReceiver>(), false, false);
} else {
return data;
}
@@ -338,7 +339,8 @@ bool IsSimpleInstantiation(Isolate* isolate, ObjectTemplateInfo* info,
MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
Handle<ObjectTemplateInfo> info,
Handle<JSReceiver> new_target,
bool is_hidden_prototype) {
bool is_hidden_prototype,
bool is_prototype) {
Handle<JSFunction> constructor;
int serial_number = Smi::cast(info->serial_number())->value();
if (!new_target.is_null()) {
@@ -379,19 +381,26 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
Handle<JSObject> object;
ASSIGN_RETURN_ON_EXCEPTION(isolate, object,
JSObject::New(constructor, new_target), JSObject);
if (is_prototype) JSObject::OptimizeAsPrototype(object, FAST_PROTOTYPE);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result,
ConfigureInstance(isolate, object, info, is_hidden_prototype), JSObject);
if (info->immutable_proto()) {
JSObject::SetImmutableProto(object);
}
if (!is_prototype) {
// Keep prototypes in slow-mode. Let them be lazily turned fast later on.
// TODO(dcarney): is this necessary?
JSObject::MigrateSlowToFast(result, 0, "ApiNatives::InstantiateObject");
// Don't cache prototypes.
if (serial_number) {
CacheTemplateInstantiation(isolate, serial_number, result);
result = isolate->factory()->CopyJSObject(result);
}
}
return result;
}
@@ -446,7 +455,7 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
InstantiateObject(
isolate,
handle(ObjectTemplateInfo::cast(prototype_templ), isolate),
Handle<JSReceiver>(), data->hidden_prototype()),
Handle<JSReceiver>(), data->hidden_prototype(), true),
JSFunction);
}
Object* parent = data->parent_template();
@@ -514,7 +523,8 @@ MaybeHandle<JSObject> ApiNatives::InstantiateObject(
Handle<ObjectTemplateInfo> data, Handle<JSReceiver> new_target) {
Isolate* isolate = data->GetIsolate();
InvokeScope invoke_scope(isolate);
return ::v8::internal::InstantiateObject(isolate, data, new_target, false);
return ::v8::internal::InstantiateObject(isolate, data, new_target, false,
false);
}
MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
@@ -524,22 +534,14 @@ MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
Handle<FunctionTemplateInfo> constructor(
FunctionTemplateInfo::cast(data->constructor()));
Handle<SharedFunctionInfo> shared =
FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate, constructor);
Handle<Map> initial_map = isolate->factory()->CreateSloppyFunctionMap(
FUNCTION_WITH_WRITEABLE_PROTOTYPE);
Handle<JSFunction> object_function =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
initial_map, shared, isolate->factory()->undefined_value());
Handle<Map> object_map = isolate->factory()->NewMap(
JS_SPECIAL_API_OBJECT_TYPE,
JSObject::kHeaderSize + data->internal_field_count() * kPointerSize,
JSObject::kHeaderSize + data->embedder_field_count() * kPointerSize,
FAST_HOLEY_SMI_ELEMENTS);
JSFunction::SetInitialMap(object_function, object_map,
isolate->factory()->null_value());
object_map->SetConstructor(*constructor);
object_map->set_is_access_check_needed(true);
Handle<JSObject> object = isolate->factory()->NewJSObject(object_function);
Handle<JSObject> object = isolate->factory()->NewJSObjectFromMap(object_map);
JSObject::ForceSetPrototype(object, isolate->factory()->null_value());
return object;
@@ -629,18 +631,18 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
DONT_ENUM);
}
int internal_field_count = 0;
int embedder_field_count = 0;
bool immutable_proto = false;
if (!obj->instance_template()->IsUndefined(isolate)) {
Handle<ObjectTemplateInfo> instance_template = Handle<ObjectTemplateInfo>(
ObjectTemplateInfo::cast(obj->instance_template()));
internal_field_count = instance_template->internal_field_count();
embedder_field_count = instance_template->embedder_field_count();
immutable_proto = instance_template->immutable_proto();
}
// TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing
// JSObject::GetHeaderSize.
int instance_size = kPointerSize * internal_field_count;
int instance_size = kPointerSize * embedder_field_count;
InstanceType type;
switch (instance_type) {
case JavaScriptObjectType:
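
The new is_prototype flag threads through every InstantiateObject() call site: objects created as prototypes are switched into prototype mode up front (OptimizeAsPrototype), and they are neither migrated to fast properties nor cached by serial number. A hedged embedder-side sketch of the one path that passes true, assuming isolate and a current context are in scope:

v8::Local<v8::FunctionTemplate> tmpl = v8::FunctionTemplate::New(isolate);
// Properties set here land on the prototype object, which InstantiateObject()
// now creates with is_prototype == true.
tmpl->PrototypeTemplate()->Set(isolate, "answer",
                               v8::Number::New(isolate, 42));
v8::Local<v8::Function> fn = tmpl->GetFunction(context).ToLocalChecked();

All other callers (Instantiate, ApiNatives::InstantiateObject) keep passing false, so only prototype-template instances change behavior.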

791
deps/v8/src/api.cc

File diff suppressed because it is too large

15
deps/v8/src/api.h

@@ -106,12 +106,13 @@ class RegisteredExtension {
V(Context, Context) \
V(External, Object) \
V(StackTrace, JSArray) \
V(StackFrame, JSObject) \
V(StackFrame, StackFrameInfo) \
V(Proxy, JSProxy) \
V(NativeWeakMap, JSWeakMap) \
V(debug::GeneratorObject, JSGeneratorObject) \
V(debug::Script, Script) \
V(Promise, JSPromise)
V(Promise, JSPromise) \
V(DynamicImportResult, JSPromise)
class Utils {
public:
@@ -185,10 +186,12 @@ class Utils {
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<Promise> PromiseToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<DynamicImportResult> PromiseToDynamicImportResult(
v8::internal::Handle<v8::internal::JSPromise> obj);
static inline Local<StackTrace> StackTraceToLocal(
v8::internal::Handle<v8::internal::JSArray> obj);
static inline Local<StackFrame> StackFrameToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
v8::internal::Handle<v8::internal::StackFrameInfo> obj);
static inline Local<Number> NumberToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<Integer> IntegerToLocal(
@@ -317,8 +320,9 @@ MAKE_TO_LOCAL(SignatureToLocal, FunctionTemplateInfo, Signature)
MAKE_TO_LOCAL(AccessorSignatureToLocal, FunctionTemplateInfo, AccessorSignature)
MAKE_TO_LOCAL(MessageToLocal, Object, Message)
MAKE_TO_LOCAL(PromiseToLocal, JSObject, Promise)
MAKE_TO_LOCAL(PromiseToDynamicImportResult, JSPromise, DynamicImportResult)
MAKE_TO_LOCAL(StackTraceToLocal, JSArray, StackTrace)
MAKE_TO_LOCAL(StackFrameToLocal, JSObject, StackFrame)
MAKE_TO_LOCAL(StackFrameToLocal, StackFrameInfo, StackFrame)
MAKE_TO_LOCAL(NumberToLocal, Object, Number)
MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
@@ -347,6 +351,8 @@ OPEN_HANDLE_LIST(MAKE_OPEN_HANDLE)
#undef MAKE_OPEN_HANDLE
#undef OPEN_HANDLE_LIST
extern Isolate* IsolateNewImpl(internal::Isolate* isolate,
const Isolate::CreateParams& params);
namespace internal {
@@ -645,7 +651,6 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
(!blocks_.is_empty() && prev_limit != NULL));
}
// Interceptor functions called from generated inline caches to notify
// CPU profiler that external callbacks are invoked.
void InvokeAccessorGetterCallback(
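
The Utils converters are generated by the MAKE_TO_LOCAL macro, so retargeting StackFrame from JSObject to StackFrameInfo is a one-line change per converter. Roughly what the updated invocation expands to (a sketch of the macro's shape, not a verbatim expansion):

// MAKE_TO_LOCAL(StackFrameToLocal, StackFrameInfo, StackFrame) produces:
Local<v8::StackFrame> Utils::StackFrameToLocal(
    v8::internal::Handle<v8::internal::StackFrameInfo> obj) {
  return Convert<v8::internal::StackFrameInfo, v8::StackFrame>(obj);
}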

67
deps/v8/src/arm/assembler-arm-inl.h

@@ -48,7 +48,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
bool CpuFeatures::SupportsSimd128() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(NEON); }
int DoubleRegister::NumRegisters() {
return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
@@ -98,32 +98,28 @@ int RelocInfo::target_address_size() {
return kPointerSize;
}
Object* RelocInfo::target_object() {
HeapObject* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
return HeapObject::cast(
reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<Object>(reinterpret_cast<Object**>(
Assembler::target_address_at(pc_, host_)));
return Handle<HeapObject>(
reinterpret_cast<HeapObject**>(Assembler::target_address_at(pc_, host_)));
}
void RelocInfo::set_target_object(Object* target,
void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(isolate_, pc_, host_,
Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target));
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target);
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
}
}
@@ -152,13 +148,12 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return target_address();
}
void RelocInfo::set_target_runtime_entry(Address target,
void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target)
set_target_address(target, write_barrier_mode, icache_flush_mode);
set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
}
@@ -187,13 +182,9 @@ void RelocInfo::set_target_cell(Cell* cell,
}
}
static const int kNoCodeAgeSequenceLength = 3 * Assembler::kInstrSize;
Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
Handle<Code> RelocInfo::code_age_stub_handle(Assembler* origin) {
UNREACHABLE(); // This should never be reached on Arm.
return Handle<Object>();
return Handle<Code>();
}
@@ -221,27 +212,25 @@ Address RelocInfo::debug_call_address() {
return Memory::Address_at(pc_ + Assembler::kPatchDebugBreakSlotAddressOffset);
}
void RelocInfo::set_debug_call_address(Address target) {
void RelocInfo::set_debug_call_address(Isolate* isolate, Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
Memory::Address_at(pc_ + Assembler::kPatchDebugBreakSlotAddressOffset) =
target;
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
Code* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target_code);
}
}
void RelocInfo::WipeOut() {
void RelocInfo::WipeOut(Isolate* isolate) {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_));
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = NULL;
} else {
Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
Assembler::set_target_address_at(isolate, pc_, host_, NULL);
}
}
@@ -299,6 +288,7 @@ Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
rmode_ = rmode;
}
Operand Operand::Zero() { return Operand(static_cast<int32_t>(0)); }
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
@@ -322,14 +312,6 @@ Operand::Operand(Register rm) {
}
bool Operand::is_reg() const {
return rm_.is_valid() &&
rs_.is(no_reg) &&
shift_op_ == LSL &&
shift_imm_ == 0;
}
void Assembler::CheckBuffer() {
if (buffer_space() <= kGap) {
GrowBuffer();
@@ -542,6 +524,7 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
void Assembler::set_target_address_at(Isolate* isolate, Address pc,
Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode) {
DCHECK_IMPLIES(isolate == nullptr, icache_flush_mode == SKIP_ICACHE_FLUSH);
if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Update the entry in the constant pool.
Memory::Address_at(constant_pool_entry_address(pc, constant_pool)) = target;
@@ -602,6 +585,8 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
}
EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
} // namespace internal
} // namespace v8
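
The pattern in this file is mechanical: RelocInfo methods that rewrite the instruction stream (set_target_runtime_entry, set_debug_call_address, WipeOut) now take an Isolate* parameter instead of reading a stored isolate_ field, while set_target_object recovers the isolate from the target HeapObject itself. A minimal sketch of the new call shape, with pc (a byte* into the instruction stream), host_code, and isolate assumed to be in scope:

RelocInfo rinfo(pc, RelocInfo::EMBEDDED_OBJECT, 0, host_code);
rinfo.WipeOut(isolate);  // previously WipeOut() used the stored isolate_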

576
deps/v8/src/arm/assembler-arm.cc

@@ -39,9 +39,11 @@
#if V8_TARGET_ARCH_ARM
#include "src/arm/assembler-arm-inl.h"
#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -357,13 +359,13 @@ uint32_t RelocInfo::wasm_function_table_size_reference() {
}
void RelocInfo::unchecked_update_wasm_memory_reference(
Address address, ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
Isolate* isolate, Address address, ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
}
void RelocInfo::unchecked_update_wasm_size(uint32_t size,
void RelocInfo::unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_,
Assembler::set_target_address_at(isolate, pc_, host_,
reinterpret_cast<Address>(size), flush_mode);
}
@@ -466,7 +468,6 @@ NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {
SetAlignment(align);
}
void NeonMemOperand::SetAlignment(int align) {
switch (align) {
case 0:
@@ -549,8 +550,8 @@ const Instr kStrRegFpNegOffsetPattern =
al | B26 | NegOffset | Register::kCode_fp * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
: AssemblerBase(isolate_data, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
pending_32_bit_constants_(),
pending_64_bit_constants_(),
@@ -939,25 +940,25 @@ void Assembler::target_at_put(int pos, int target_pos) {
if (is_uint8(target24)) {
// If the target fits in a byte then only patch with a mov
// instruction.
CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 1,
CodePatcher::DONT_FLUSH);
patcher.masm()->mov(dst, Operand(target24));
PatchingAssembler patcher(isolate_data(),
reinterpret_cast<byte*>(buffer_ + pos), 1);
patcher.mov(dst, Operand(target24));
} else {
uint16_t target16_0 = target24 & kImm16Mask;
uint16_t target16_1 = target24 >> 16;
if (CpuFeatures::IsSupported(ARMv7)) {
// Patch with movw/movt.
if (target16_1 == 0) {
CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
1, CodePatcher::DONT_FLUSH);
CpuFeatureScope scope(patcher.masm(), ARMv7);
patcher.masm()->movw(dst, target16_0);
PatchingAssembler patcher(isolate_data(),
reinterpret_cast<byte*>(buffer_ + pos), 1);
CpuFeatureScope scope(&patcher, ARMv7);
patcher.movw(dst, target16_0);
} else {
CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
2, CodePatcher::DONT_FLUSH);
CpuFeatureScope scope(patcher.masm(), ARMv7);
patcher.masm()->movw(dst, target16_0);
patcher.masm()->movt(dst, target16_1);
PatchingAssembler patcher(isolate_data(),
reinterpret_cast<byte*>(buffer_ + pos), 2);
CpuFeatureScope scope(&patcher, ARMv7);
patcher.movw(dst, target16_0);
patcher.movt(dst, target16_1);
}
} else {
// Patch with a sequence of mov/orr/orr instructions.
@@ -965,16 +966,16 @@
uint8_t target8_1 = target16_0 >> 8;
uint8_t target8_2 = target16_1 & kImm8Mask;
if (target8_2 == 0) {
CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
2, CodePatcher::DONT_FLUSH);
patcher.masm()->mov(dst, Operand(target8_0));
patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
PatchingAssembler patcher(isolate_data(),
reinterpret_cast<byte*>(buffer_ + pos), 2);
patcher.mov(dst, Operand(target8_0));
patcher.orr(dst, dst, Operand(target8_1 << 8));
} else {
CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
3, CodePatcher::DONT_FLUSH);
patcher.masm()->mov(dst, Operand(target8_0));
patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
PatchingAssembler patcher(isolate_data(),
reinterpret_cast<byte*>(buffer_ + pos), 3);
patcher.mov(dst, Operand(target8_0));
patcher.orr(dst, dst, Operand(target8_1 << 8));
patcher.orr(dst, dst, Operand(target8_2 << 16));
}
}
}
@@ -1523,6 +1524,10 @@ void Assembler::sub(Register dst, Register src1, const Operand& src2,
addrmod1(cond | SUB | s, src1, dst, src2);
}
void Assembler::sub(Register dst, Register src1, Register src2, SBit s,
Condition cond) {
sub(dst, src1, Operand(src2), s, cond);
}
void Assembler::rsb(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
@@ -1535,6 +1540,10 @@ void Assembler::add(Register dst, Register src1, const Operand& src2,
addrmod1(cond | ADD | s, src1, dst, src2);
}
void Assembler::add(Register dst, Register src1, Register src2, SBit s,
Condition cond) {
add(dst, src1, Operand(src2), s, cond);
}
void Assembler::adc(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
@@ -1558,6 +1567,9 @@ void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
addrmod1(cond | TST | S, src1, r0, src2);
}
void Assembler::tst(Register src1, Register src2, Condition cond) {
tst(src1, Operand(src2), cond);
}
void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
addrmod1(cond | TEQ | S, src1, r0, src2);
@@ -1568,6 +1580,9 @@ void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
addrmod1(cond | CMP | S, src1, r0, src2);
}
void Assembler::cmp(Register src1, Register src2, Condition cond) {
cmp(src1, Operand(src2), cond);
}
void Assembler::cmp_raw_immediate(
Register src, int raw_immediate, Condition cond) {
@@ -1586,6 +1601,10 @@ void Assembler::orr(Register dst, Register src1, const Operand& src2,
addrmod1(cond | ORR | s, src1, dst, src2);
}
void Assembler::orr(Register dst, Register src1, Register src2, SBit s,
Condition cond) {
orr(dst, src1, Operand(src2), s, cond);
}
void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
// Don't allow nop instructions in the form mov rn, rn to be generated using
@@ -1595,6 +1614,9 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
addrmod1(cond | MOV | s, r0, dst, src);
}
void Assembler::mov(Register dst, Register src, SBit s, Condition cond) {
mov(dst, Operand(src), s, cond);
}
void Assembler::mov_label_offset(Register dst, Label* label) {
if (label->is_bound()) {
@@ -1657,6 +1679,32 @@ void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
addrmod1(cond | MVN | s, r0, dst, src);
}
void Assembler::asr(Register dst, Register src1, const Operand& src2, SBit s,
Condition cond) {
if (src2.is_reg()) {
mov(dst, Operand(src1, ASR, src2.rm()), s, cond);
} else {
mov(dst, Operand(src1, ASR, src2.immediate()), s, cond);
}
}
void Assembler::lsl(Register dst, Register src1, const Operand& src2, SBit s,
Condition cond) {
if (src2.is_reg()) {
mov(dst, Operand(src1, LSL, src2.rm()), s, cond);
} else {
mov(dst, Operand(src1, LSL, src2.immediate()), s, cond);
}
}
void Assembler::lsr(Register dst, Register src1, const Operand& src2, SBit s,
Condition cond) {
if (src2.is_reg()) {
mov(dst, Operand(src1, LSR, src2.rm()), s, cond);
} else {
mov(dst, Operand(src1, LSR, src2.immediate()), s, cond);
}
}
// Multiply instructions.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
@@ -2233,19 +2281,12 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code) {
#ifndef __arm__
DCHECK(code >= kDefaultStopCode);
{
// The Simulator will handle the stop instruction and get the message
// address. It expects to find the address just after the svc instruction.
BlockConstPoolScope block_const_pool(this);
if (code >= 0) {
svc(kStopCode + code, cond);
} else {
svc(kStopCode + kMaxStopCode, cond);
}
// Do not embed the message string address! We used to do this, but that
// made snapshots created from position-independent executable builds
// non-deterministic.
// TODO(yangguo): remove this field entirely.
nop();
}
#else // def __arm__
if (cond != al) {
@@ -3005,13 +3046,9 @@ static void SplitRegCode(VFPType reg_type,
int* m) {
DCHECK((reg_code >= 0) && (reg_code <= 31));
if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
// 32 bit type.
*m = reg_code & 0x1;
*vm = reg_code >> 1;
SwVfpRegister::split_code(reg_code, vm, m);
} else {
// 64 bit type.
*m = (reg_code & 0x10) >> 4;
*vm = reg_code & 0x0F;
DwVfpRegister::split_code(reg_code, vm, m);
}
}
@@ -3854,9 +3891,7 @@ void Assembler::vld1(NeonSize size,
dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code());
}
void Assembler::vst1(NeonSize size,
const NeonListOperand& src,
void Assembler::vst1(NeonSize size, const NeonListOperand& src,
const NeonMemOperand& dst) {
// Instruction details available in ARM DDI 0406C.b, A8.8.404.
// 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
@@ -3884,6 +3919,21 @@ void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
0xA * B8 | m * B5 | B4 | vm);
}
void Assembler::vqmovn(NeonDataType dt, DwVfpRegister dst, QwNeonRegister src) {
// Instruction details available in ARM DDI 0406C.b, A8.8.1004.
// vqmovn.<type><size> Dd, Qm. ARM vector narrowing move with saturation.
DCHECK(IsEnabled(NEON));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
int size = NeonSz(dt);
int u = NeonU(dt);
int op = u != 0 ? 3 : 2;
emit(0x1E7U * B23 | d * B22 | 0x3 * B20 | size * B18 | 0x2 * B16 | vd * B12 |
0x2 * B8 | op * B6 | m * B5 | vm);
}
static int EncodeScalar(NeonDataType dt, int index) {
int opc1_opc2 = 0;
DCHECK_LE(0, index);
@@ -3935,51 +3985,13 @@ void Assembler::vmov(NeonDataType dt, Register dst, DwVfpRegister src,
n * B7 | B4 | opc1_opc2);
}
void Assembler::vmov(const QwNeonRegister dst, const QwNeonRegister src) {
void Assembler::vmov(QwNeonRegister dst, QwNeonRegister src) {
// Instruction details available in ARM DDI 0406C.b, A8-938.
// vmov is encoded as vorr.
vorr(dst, src, src);
}
void Assembler::vmvn(const QwNeonRegister dst, const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
// Instruction details available in ARM DDI 0406C.b, A8-966.
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(VfpRegisterIsAvailable(src));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(0x1E7U * B23 | d * B22 | 3 * B20 | vd * B12 | 0x17 * B6 | m * B5 | vm);
}
void Assembler::vswp(DwVfpRegister dst, DwVfpRegister src) {
// Instruction details available in ARM DDI 0406C.b, A8.8.418.
// 1111(31-28) | 00111(27-23) | D(22) | 110010(21-16) |
// Vd(15-12) | 000000(11-6) | M(5) | 0(4) | Vm(3-0)
DCHECK(IsEnabled(NEON));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(0xFU * B28 | 7 * B23 | d * B22 | 0x32 * B16 | vd * B12 | m * B5 | vm);
}
void Assembler::vswp(QwNeonRegister dst, QwNeonRegister src) {
// Instruction details available in ARM DDI 0406C.b, A8.8.418.
// 1111(31-28) | 00111(27-23) | D(22) | 110010(21-16) |
// Vd(15-12) | 000000(11-6) | M(5) | 0(4) | Vm(3-0)
DCHECK(IsEnabled(NEON));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(0xFU * B28 | 7 * B23 | d * B22 | 0x32 * B16 | vd * B12 | B6 | m * B5 |
vm);
}
void Assembler::vdup(NeonSize size, const QwNeonRegister dst,
const Register src) {
void Assembler::vdup(NeonSize size, QwNeonRegister dst, Register src) {
DCHECK(IsEnabled(NEON));
// Instruction details available in ARM DDI 0406C.b, A8-886.
int B = 0, E = 0;
@@ -4003,7 +4015,7 @@ void Assembler::vdup(NeonSize size, const QwNeonRegister dst,
0xB * B8 | d * B7 | E * B5 | B4);
}
void Assembler::vdup(const QwNeonRegister dst, const SwVfpRegister src) {
void Assembler::vdup(QwNeonRegister dst, SwVfpRegister src) {
DCHECK(IsEnabled(NEON));
// Instruction details available in ARM DDI 0406C.b, A8-884.
int index = src.code() & 1;
@@ -4019,8 +4031,8 @@ void Assembler::vdup(const QwNeonRegister dst, const SwVfpRegister src) {
}
// Encode NEON vcvt.src_type.dst_type instruction.
static Instr EncodeNeonVCVT(const VFPType dst_type, const QwNeonRegister dst,
const VFPType src_type, const QwNeonRegister src) {
static Instr EncodeNeonVCVT(VFPType dst_type, QwNeonRegister dst,
VFPType src_type, QwNeonRegister src) {
DCHECK(src_type != dst_type);
DCHECK(src_type == F32 || dst_type == F32);
// Instruction details available in ARM DDI 0406C.b, A8.8.868.
@@ -4042,103 +4054,142 @@ static Instr EncodeNeonVCVT(const VFPType dst_type, const QwNeonRegister dst,
B6 | m * B5 | vm;
}
void Assembler::vcvt_f32_s32(const QwNeonRegister dst,
const QwNeonRegister src) {
void Assembler::vcvt_f32_s32(QwNeonRegister dst, QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(VfpRegisterIsAvailable(src));
emit(EncodeNeonVCVT(F32, dst, S32, src));
}
void Assembler::vcvt_f32_u32(const QwNeonRegister dst,
const QwNeonRegister src) {
void Assembler::vcvt_f32_u32(QwNeonRegister dst, QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(VfpRegisterIsAvailable(src));
emit(EncodeNeonVCVT(F32, dst, U32, src));
}
void Assembler::vcvt_s32_f32(const QwNeonRegister dst,
const QwNeonRegister src) {
void Assembler::vcvt_s32_f32(QwNeonRegister dst, QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(VfpRegisterIsAvailable(src));
emit(EncodeNeonVCVT(S32, dst, F32, src));
}
void Assembler::vcvt_u32_f32(const QwNeonRegister dst,
const QwNeonRegister src) {
void Assembler::vcvt_u32_f32(QwNeonRegister dst, QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(VfpRegisterIsAvailable(src));
emit(EncodeNeonVCVT(U32, dst, F32, src));
}
// op is instr->Bits(11, 7).
static Instr EncodeNeonUnaryOp(int op, bool is_float, NeonSize size,
const QwNeonRegister dst,
const QwNeonRegister src) {
DCHECK_IMPLIES(is_float, size == Neon32);
enum NeonRegType { NEON_D, NEON_Q };
void NeonSplitCode(NeonRegType type, int code, int* vm, int* m, int* encoding) {
if (type == NEON_D) {
DwVfpRegister::split_code(code, vm, m);
} else {
DCHECK_EQ(type, NEON_Q);
QwNeonRegister::split_code(code, vm, m);
*encoding |= B6;
}
}
enum UnaryOp { VMVN, VSWP, VABS, VABSF, VNEG, VNEGF };
static Instr EncodeNeonUnaryOp(UnaryOp op, NeonRegType reg_type, NeonSize size,
int dst_code, int src_code) {
int op_encoding = 0;
switch (op) {
case VMVN:
DCHECK_EQ(Neon8, size); // size == 0 for vmvn
op_encoding = B10 | 0x3 * B7;
break;
case VSWP:
DCHECK_EQ(Neon8, size); // size == 0 for vswp
op_encoding = B17;
break;
case VABS:
op_encoding = B16 | 0x6 * B7;
break;
case VABSF:
DCHECK_EQ(Neon32, size);
op_encoding = B16 | B10 | 0x6 * B7;
break;
case VNEG:
op_encoding = B16 | 0x7 * B7;
break;
case VNEGF:
DCHECK_EQ(Neon32, size);
op_encoding = B16 | B10 | 0x7 * B7;
break;
default:
UNREACHABLE();
break;
}
int vd, d;
dst.split_code(&vd, &d);
NeonSplitCode(reg_type, dst_code, &vd, &d, &op_encoding);
int vm, m;
src.split_code(&vm, &m);
int F = is_float ? 1 : 0;
return 0x1E7U * B23 | d * B22 | 0x3 * B20 | size * B18 | B16 | vd * B12 |
F * B10 | B8 | op * B7 | B6 | m * B5 | vm;
NeonSplitCode(reg_type, src_code, &vm, &m, &op_encoding);
return 0x1E7U * B23 | d * B22 | 0x3 * B20 | size * B18 | vd * B12 | m * B5 |
vm | op_encoding;
}
void Assembler::vmvn(QwNeonRegister dst, QwNeonRegister src) {
// Qd = vmvn(Qn, Qm) SIMD bitwise negate.
// Instruction details available in ARM DDI 0406C.b, A8-966.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonUnaryOp(VMVN, NEON_Q, Neon8, dst.code(), src.code()));
}
void Assembler::vswp(DwVfpRegister dst, DwVfpRegister src) {
DCHECK(IsEnabled(NEON));
// Dd = vswp(Dn, Dm) SIMD d-register swap.
// Instruction details available in ARM DDI 0406C.b, A8.8.418.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonUnaryOp(VSWP, NEON_D, Neon8, dst.code(), src.code()));
}
void Assembler::vswp(QwNeonRegister dst, QwNeonRegister src) {
// Qd = vswp(Qn, Qm) SIMD q-register swap.
// Instruction details available in ARM DDI 0406C.b, A8.8.418.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonUnaryOp(VSWP, NEON_Q, Neon8, dst.code(), src.code()));
}
void Assembler::vabs(const QwNeonRegister dst, const QwNeonRegister src) {
void Assembler::vabs(QwNeonRegister dst, QwNeonRegister src) {
// Qd = vabs.f<size>(Qn, Qm) SIMD floating point absolute value.
// Instruction details available in ARM DDI 0406C.b, A8.8.824.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonUnaryOp(0x6, true, Neon32, dst, src));
emit(EncodeNeonUnaryOp(VABSF, NEON_Q, Neon32, dst.code(), src.code()));
}
void Assembler::vabs(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src) {
void Assembler::vabs(NeonSize size, QwNeonRegister dst, QwNeonRegister src) {
// Qd = vabs.s<size>(Qn, Qm) SIMD integer absolute value.
// Instruction details available in ARM DDI 0406C.b, A8.8.824.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonUnaryOp(0x6, false, size, dst, src));
emit(EncodeNeonUnaryOp(VABS, NEON_Q, size, dst.code(), src.code()));
}
void Assembler::vneg(const QwNeonRegister dst, const QwNeonRegister src) {
void Assembler::vneg(QwNeonRegister dst, QwNeonRegister src) {
// Qd = vneg.f<size>(Qn, Qm) SIMD floating point negate.
// Instruction details available in ARM DDI 0406C.b, A8.8.968.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonUnaryOp(0x7, true, Neon32, dst, src));
emit(EncodeNeonUnaryOp(VNEGF, NEON_Q, Neon32, dst.code(), src.code()));
}
void Assembler::vneg(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src) {
void Assembler::vneg(NeonSize size, QwNeonRegister dst, QwNeonRegister src) {
// Qd = vabs.s<size>(Qn, Qm) SIMD integer negate.
// Instruction details available in ARM DDI 0406C.b, A8.8.968.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonUnaryOp(0x7, false, size, dst, src));
}
void Assembler::veor(DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2) {
// Dd = veor(Dn, Dm) 64 bit integer exclusive OR.
// Instruction details available in ARM DDI 0406C.b, A8.8.888.
DCHECK(IsEnabled(NEON));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
emit(0x1E6U * B23 | d * B22 | vn * B16 | vd * B12 | B8 | n * B7 | m * B5 |
B4 | vm);
emit(EncodeNeonUnaryOp(VNEG, NEON_Q, size, dst.code(), src.code()));
}
enum BinaryBitwiseOp { VAND, VBIC, VBIF, VBIT, VBSL, VEOR, VORR, VORN };
static Instr EncodeNeonBinaryBitwiseOp(BinaryBitwiseOp op,
const QwNeonRegister dst,
const QwNeonRegister src1,
const QwNeonRegister src2) {
static Instr EncodeNeonBinaryBitwiseOp(BinaryBitwiseOp op, NeonRegType reg_type,
int dst_code, int src_code1,
int src_code2) {
int op_encoding = 0;
switch (op) {
case VBIC:
@@ -4170,13 +4221,14 @@ static Instr EncodeNeonBinaryBitwiseOp(BinaryBitwiseOp op,
break;
}
int vd, d;
dst.split_code(&vd, &d);
NeonSplitCode(reg_type, dst_code, &vd, &d, &op_encoding);
int vn, n;
src1.split_code(&vn, &n);
NeonSplitCode(reg_type, src_code1, &vn, &n, &op_encoding);
int vm, m;
src2.split_code(&vm, &m);
NeonSplitCode(reg_type, src_code2, &vm, &m, &op_encoding);
return 0x1E4U * B23 | op_encoding | d * B22 | vn * B16 | vd * B12 | B8 |
n * B7 | B6 | m * B5 | B4 | vm;
n * B7 | m * B5 | B4 | vm;
}
void Assembler::vand(QwNeonRegister dst, QwNeonRegister src1,
@@ -4184,15 +4236,26 @@ void Assembler::vand(QwNeonRegister dst, QwNeonRegister src1,
// Qd = vand(Qn, Qm) SIMD AND.
// Instruction details available in ARM DDI 0406C.b, A8.8.836.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonBinaryBitwiseOp(VAND, dst, src1, src2));
emit(EncodeNeonBinaryBitwiseOp(VAND, NEON_Q, dst.code(), src1.code(),
src2.code()));
}
void Assembler::vbsl(QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
void Assembler::vbsl(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
// Qd = vbsl(Qn, Qm) SIMD bitwise select.
// Instruction details available in ARM DDI 0406C.b, A8-844.
emit(EncodeNeonBinaryBitwiseOp(VBSL, dst, src1, src2));
DCHECK(IsEnabled(NEON));
emit(EncodeNeonBinaryBitwiseOp(VBSL, NEON_Q, dst.code(), src1.code(),
src2.code()));
}
void Assembler::veor(DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2) {
// Dd = veor(Dn, Dm) SIMD exclusive OR.
// Instruction details available in ARM DDI 0406C.b, A8.8.888.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonBinaryBitwiseOp(VEOR, NEON_D, dst.code(), src1.code(),
src2.code()));
}
void Assembler::veor(QwNeonRegister dst, QwNeonRegister src1,
@@ -4200,7 +4263,8 @@ void Assembler::veor(QwNeonRegister dst, QwNeonRegister src1,
// Qd = veor(Qn, Qm) SIMD exclusive OR.
// Instruction details available in ARM DDI 0406C.b, A8.8.888.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonBinaryBitwiseOp(VEOR, dst, src1, src2));
emit(EncodeNeonBinaryBitwiseOp(VEOR, NEON_Q, dst.code(), src1.code(),
src2.code()));
}
void Assembler::vorr(QwNeonRegister dst, QwNeonRegister src1,
@@ -4208,7 +4272,8 @@ void Assembler::vorr(QwNeonRegister dst, QwNeonRegister src1,
// Qd = vorr(Qn, Qm) SIMD OR.
// Instruction details available in ARM DDI 0406C.b, A8.8.976.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonBinaryBitwiseOp(VORR, dst, src1, src2));
emit(EncodeNeonBinaryBitwiseOp(VORR, NEON_Q, dst.code(), src1.code(),
src2.code()));
}
enum FPBinOp {
@@ -4287,9 +4352,8 @@
};
static Instr EncodeNeonBinOp(IntegerBinOp op, NeonDataType dt,
const QwNeonRegister dst,
const QwNeonRegister src1,
const QwNeonRegister src2) {
QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
int op_encoding = 0;
switch (op) {
case VADD:
@@ -4341,10 +4405,8 @@ static Instr EncodeNeonBinOp(IntegerBinOp op, NeonDataType dt,
n * B7 | B6 | m * B5 | vm | op_encoding;
}
static Instr EncodeNeonBinOp(IntegerBinOp op, NeonSize size,
const QwNeonRegister dst,
const QwNeonRegister src1,
const QwNeonRegister src2) {
static Instr EncodeNeonBinOp(IntegerBinOp op, NeonSize size, QwNeonRegister dst,
QwNeonRegister src1, QwNeonRegister src2) {
// Map NeonSize values to the signed values in NeonDataType, so the U bit
// will be 0.
return EncodeNeonBinOp(op, static_cast<NeonDataType>(size), dst, src1, src2);
@@ -4406,16 +4468,16 @@ void Assembler::vmul(QwNeonRegister dst, QwNeonRegister src1,
emit(EncodeNeonBinOp(VMULF, dst, src1, src2));
}
void Assembler::vmul(NeonSize size, QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2) {
void Assembler::vmul(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vmul(Qn, Qm) SIMD integer multiply.
// Instruction details available in ARM DDI 0406C.b, A8-960.
emit(EncodeNeonBinOp(VMUL, size, dst, src1, src2));
}
void Assembler::vmin(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
void Assembler::vmin(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vmin(Qn, Qm) SIMD floating point MIN.
// Instruction details available in ARM DDI 0406C.b, A8-928.
@@ -4529,6 +4591,51 @@ void Assembler::vrsqrts(QwNeonRegister dst, QwNeonRegister src1,
emit(EncodeNeonBinOp(VRSQRTS, dst, src1, src2));
}
enum NeonPairwiseOp { VPMIN, VPMAX };
static Instr EncodeNeonPairwiseOp(NeonPairwiseOp op, NeonDataType dt,
DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2) {
int op_encoding = 0;
switch (op) {
case VPMIN:
op_encoding = 0xA * B8 | B4;
break;
case VPMAX:
op_encoding = 0xA * B8;
break;
default:
UNREACHABLE();
break;
}
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int size = NeonSz(dt);
int u = NeonU(dt);
return 0x1E4U * B23 | u * B24 | d * B22 | size * B20 | vn * B16 | vd * B12 |
n * B7 | m * B5 | vm | op_encoding;
}
void Assembler::vpmin(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2) {
DCHECK(IsEnabled(NEON));
// Dd = vpmin(Dn, Dm) SIMD integer pairwise MIN.
// Instruction details available in ARM DDI 0406C.b, A8-986.
emit(EncodeNeonPairwiseOp(VPMIN, dt, dst, src1, src2));
}
void Assembler::vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2) {
DCHECK(IsEnabled(NEON));
// Dd = vpmax(Dn, Dm) SIMD integer pairwise MAX.
// Instruction details available in ARM DDI 0406C.b, A8-986.
emit(EncodeNeonPairwiseOp(VPMAX, dt, dst, src1, src2));
}
void Assembler::vtst(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
@@ -4585,8 +4692,8 @@ void Assembler::vcgt(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
emit(EncodeNeonBinOp(VCGT, dt, dst, src1, src2));
}
void Assembler::vext(QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2, int bytes) {
void Assembler::vext(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2, int bytes) {
DCHECK(IsEnabled(NEON));
// Qd = vext(Qn, Qm) SIMD byte extract.
// Instruction details available in ARM DDI 0406C.b, A8-890.
@@ -4601,57 +4708,110 @@ void Assembler::vext(QwNeonRegister dst, const QwNeonRegister src1,
n * B7 | B6 | m * B5 | vm);
}
void Assembler::vzip(NeonSize size, QwNeonRegister dst,
const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
// Qd = vzip.<size>(Qn, Qm) SIMD zip (interleave).
// Instruction details available in ARM DDI 0406C.b, A8-1102.
enum NeonSizedOp { VZIP, VUZP, VREV16, VREV32, VREV64, VTRN };
static Instr EncodeNeonSizedOp(NeonSizedOp op, NeonRegType reg_type,
NeonSize size, int dst_code, int src_code) {
int op_encoding = 0;
switch (op) {
case VZIP:
op_encoding = 0x2 * B16 | 0x3 * B7;
break;
case VUZP:
op_encoding = 0x2 * B16 | 0x2 * B7;
break;
case VREV16:
op_encoding = 0x2 * B7;
break;
case VREV32:
op_encoding = 0x1 * B7;
break;
case VREV64:
// op_encoding is 0;
break;
case VTRN:
op_encoding = 0x2 * B16 | B7;
break;
default:
UNREACHABLE();
break;
}
int vd, d;
dst.split_code(&vd, &d);
NeonSplitCode(reg_type, dst_code, &vd, &d, &op_encoding);
int vm, m;
src.split_code(&vm, &m);
NeonSplitCode(reg_type, src_code, &vm, &m, &op_encoding);
int sz = static_cast<int>(size);
emit(0x1E7U * B23 | d * B22 | 0x3 * B20 | sz * B18 | 2 * B16 | vd * B12 |
0x3 * B7 | B6 | m * B5 | vm);
return 0x1E7U * B23 | d * B22 | 0x3 * B20 | sz * B18 | vd * B12 | m * B5 |
vm | op_encoding;
}
void Assembler::vzip(NeonSize size, DwVfpRegister src1, DwVfpRegister src2) {
DCHECK(IsEnabled(NEON));
// vzip.<size>(Dn, Dm) SIMD zip (interleave).
// Instruction details available in ARM DDI 0406C.b, A8-1102.
emit(EncodeNeonSizedOp(VZIP, NEON_D, size, src1.code(), src2.code()));
}
static Instr EncodeNeonVREV(NeonSize op_size, NeonSize size,
const QwNeonRegister dst,
const QwNeonRegister src) {
// Qd = vrev<op_size>.<size>(Qn, Qm) SIMD scalar reverse.
void Assembler::vzip(NeonSize size, QwNeonRegister src1, QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// vzip.<size>(Qn, Qm) SIMD zip (interleave).
// Instruction details available in ARM DDI 0406C.b, A8-1102.
emit(EncodeNeonSizedOp(VZIP, NEON_Q, size, src1.code(), src2.code()));
}
void Assembler::vuzp(NeonSize size, DwVfpRegister src1, DwVfpRegister src2) {
DCHECK(IsEnabled(NEON));
// vuzp.<size>(Dn, Dm) SIMD un-zip (de-interleave).
// Instruction details available in ARM DDI 0406C.b, A8-1100.
emit(EncodeNeonSizedOp(VUZP, NEON_D, size, src1.code(), src2.code()));
}
void Assembler::vuzp(NeonSize size, QwNeonRegister src1, QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// vuzp.<size>(Qn, Qm) SIMD un-zip (de-interleave).
// Instruction details available in ARM DDI 0406C.b, A8-1100.
emit(EncodeNeonSizedOp(VUZP, NEON_Q, size, src1.code(), src2.code()));
}
void Assembler::vrev16(NeonSize size, QwNeonRegister dst, QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
// Qd = vrev16.<size>(Qm) SIMD element reverse.
// Instruction details available in ARM DDI 0406C.b, A8-1028.
DCHECK_GT(op_size, static_cast<int>(size));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
int sz = static_cast<int>(size);
int op = static_cast<int>(Neon64) - static_cast<int>(op_size);
return 0x1E7U * B23 | d * B22 | 0x3 * B20 | sz * B18 | vd * B12 | op * B7 |
B6 | m * B5 | vm;
emit(EncodeNeonSizedOp(VREV16, NEON_Q, size, dst.code(), src.code()));
}
void Assembler::vrev32(NeonSize size, QwNeonRegister dst, QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
// Qd = vrev32.<size>(Qm) SIMD element reverse.
// Instruction details available in ARM DDI 0406C.b, A8-1028.
emit(EncodeNeonSizedOp(VREV32, NEON_Q, size, dst.code(), src.code()));
}
void Assembler::vrev16(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src) {
void Assembler::vrev64(NeonSize size, QwNeonRegister dst, QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
emit(EncodeNeonVREV(Neon16, size, dst, src));
// Qd = vrev64.<size>(Qm) SIMD element reverse.
// Instruction details available in ARM DDI 0406C.b, A8-1028.
emit(EncodeNeonSizedOp(VREV64, NEON_Q, size, dst.code(), src.code()));
}
void Assembler::vrev32(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src) {
void Assembler::vtrn(NeonSize size, DwVfpRegister src1, DwVfpRegister src2) {
DCHECK(IsEnabled(NEON));
emit(EncodeNeonVREV(Neon32, size, dst, src));
// vtrn.<size>(Dn, Dm) SIMD element transpose.
// Instruction details available in ARM DDI 0406C.b, A8-1096.
emit(EncodeNeonSizedOp(VTRN, NEON_D, size, src1.code(), src2.code()));
}
void Assembler::vrev64(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src) {
void Assembler::vtrn(NeonSize size, QwNeonRegister src1, QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
emit(EncodeNeonVREV(Neon64, size, dst, src));
// vtrn.<size>(Qn, Qm) SIMD element transpose.
// Instruction details available in ARM DDI 0406C.b, A8-1096.
emit(EncodeNeonSizedOp(VTRN, NEON_Q, size, src1.code(), src2.code()));
}
// Encode NEON vtbl / vtbx instruction.
static Instr EncodeNeonVTB(const DwVfpRegister dst, const NeonListOperand& list,
const DwVfpRegister index, bool vtbx) {
static Instr EncodeNeonVTB(DwVfpRegister dst, const NeonListOperand& list,
DwVfpRegister index, bool vtbx) {
// Dd = vtbl(table, Dm) SIMD vector permute, zero at out of range indices.
// Instruction details available in ARM DDI 0406C.b, A8-1094.
// Dd = vtbx(table, Dm) SIMD vector permute, skip out of range indices.
@@ -4667,14 +4827,14 @@ static Instr EncodeNeonVTB(const DwVfpRegister dst, const NeonListOperand& list,
list.length() * B8 | n * B7 | op * B6 | m * B5 | vm;
}
void Assembler::vtbl(const DwVfpRegister dst, const NeonListOperand& list,
const DwVfpRegister index) {
void Assembler::vtbl(DwVfpRegister dst, const NeonListOperand& list,
DwVfpRegister index) {
DCHECK(IsEnabled(NEON));
emit(EncodeNeonVTB(dst, list, index, false));
}
void Assembler::vtbx(const DwVfpRegister dst, const NeonListOperand& list,
const DwVfpRegister index) {
void Assembler::vtbx(DwVfpRegister dst, const NeonListOperand& list,
DwVfpRegister index) {
DCHECK(IsEnabled(NEON));
emit(EncodeNeonVTB(dst, list, index, true));
}
@@ -4690,6 +4850,7 @@ void Assembler::nop(int type) {
emit(al | 13*B21 | type*B12 | type);
}
void Assembler::pop() { add(sp, sp, Operand(kPointerSize)); }
bool Assembler::IsMovT(Instr instr) {
instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
@@ -4873,7 +5034,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
data = RecordedAstId().ToInt();
ClearRecordedAstId();
}
RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
RelocInfo rinfo(pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
}
@@ -5227,6 +5388,29 @@ void Assembler::PatchConstantPoolAccessInstruction(
}
}
PatchingAssembler::PatchingAssembler(IsolateData isolate_data, byte* address,
int instructions)
: Assembler(isolate_data, address, instructions * kInstrSize + kGap) {
DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
}
PatchingAssembler::~PatchingAssembler() {
// Check that we don't have any pending constant pools.
DCHECK(pending_32_bit_constants_.empty());
DCHECK(pending_64_bit_constants_.empty());
// Check that the code was patched as expected.
DCHECK_EQ(pc_, buffer_ + buffer_size_ - kGap);
DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
}
void PatchingAssembler::Emit(Address addr) {
emit(reinterpret_cast<Instr>(addr));
}
void PatchingAssembler::FlushICache(Isolate* isolate) {
Assembler::FlushICache(isolate, buffer_, buffer_size_ - kGap);
}
} // namespace internal
} // namespace v8
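
Most of the NEON churn above funnels register encoding through the new NeonSplitCode() helper, which defers to the per-type static split_code() and ORs in the Q bit (B6) for quad registers. A self-contained sketch of the quad-register split, mirroring QwNeonRegister::split_code from assembler-arm.h (names are local to the example):

#include <cassert>

// q<n> occupies the d<2n>/d<2n+1> pair, so the code is doubled onto the
// d-register space before the usual M/Vm field split.
static void SplitQ(int reg_code, int* vm, int* m) {
  int encoded = reg_code << 1;
  *m = (encoded & 0x10) >> 4;  // high bit -> M field
  *vm = encoded & 0x0F;        // low four bits -> Vm field
}

int main() {
  int vm, m;
  SplitQ(15, &vm, &m);  // q15 starts at d30: M = 1, Vm = 14
  assert(m == 1 && vm == 14);
  return 0;
}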

503
deps/v8/src/arm/assembler-arm.h

@@ -114,7 +114,7 @@ struct Register {
kCode_no_reg = -1
};
static const int kNumRegisters = Code::kAfterLast;
static constexpr int kNumRegisters = Code::kAfterLast;
static Register from_code(int code) {
DCHECK(code >= 0);
@@ -144,13 +144,13 @@ struct Register {
// r7: context register
// r8: constant pool pointer register if FLAG_enable_embedded_constant_pool.
// r9: lithium scratch
#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
#define DECLARE_REGISTER(R) constexpr Register R = {Register::kCode_##R};
GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
const Register no_reg = {Register::kCode_no_reg};
constexpr Register no_reg = {Register::kCode_no_reg};
static const bool kSimpleFPAliasing = false;
static const bool kSimdMaskRegisters = false;
constexpr bool kSimpleFPAliasing = false;
constexpr bool kSimdMaskRegisters = false;
// Single word VFP register.
struct SwVfpRegister {
@@ -162,9 +162,9 @@ struct SwVfpRegister {
kCode_no_reg = -1
};
static const int kMaxNumRegisters = Code::kAfterLast;
static constexpr int kMaxNumRegisters = Code::kAfterLast;
static const int kSizeInBytes = 4;
static constexpr int kSizeInBytes = 4;
bool is_valid() const { return 0 <= reg_code && reg_code < 32; }
bool is(SwVfpRegister reg) const { return reg_code == reg.reg_code; }
@@ -180,11 +180,14 @@ struct SwVfpRegister {
SwVfpRegister r = {code};
return r;
}
void split_code(int* vm, int* m) const {
DCHECK(is_valid());
static void split_code(int reg_code, int* vm, int* m) {
DCHECK(from_code(reg_code).is_valid());
*m = reg_code & 0x1;
*vm = reg_code >> 1;
}
void split_code(int* vm, int* m) const {
split_code(reg_code, vm, m);
}
int reg_code;
};
@@ -201,7 +204,7 @@ struct DwVfpRegister {
kCode_no_reg = -1
};
static const int kMaxNumRegisters = Code::kAfterLast;
static constexpr int kMaxNumRegisters = Code::kAfterLast;
inline static int NumRegisters();
@@ -209,7 +212,7 @@ struct DwVfpRegister {
// hold 0.0, that does not fit in the immediate field of vmov instructions.
// d14: 0.0
// d15: scratch register.
static const int kSizeInBytes = 8;
static constexpr int kSizeInBytes = 8;
bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
bool is(DwVfpRegister reg) const { return reg_code == reg.reg_code; }
@@ -226,11 +229,14 @@ struct DwVfpRegister {
DwVfpRegister r = {code};
return r;
}
void split_code(int* vm, int* m) const {
DCHECK(is_valid());
static void split_code(int reg_code, int* vm, int* m) {
DCHECK(from_code(reg_code).is_valid());
*m = (reg_code & 0x10) >> 4;
*vm = reg_code & 0x0F;
}
void split_code(int* vm, int* m) const {
split_code(reg_code, vm, m);
}
int reg_code;
};
@@ -242,10 +248,9 @@ typedef DwVfpRegister DoubleRegister;
// Double word VFP register d0-15.
struct LowDwVfpRegister {
public:
static const int kMaxNumLowRegisters = 16;
operator DwVfpRegister() const {
DwVfpRegister r = { reg_code };
return r;
static constexpr int kMaxNumLowRegisters = 16;
constexpr operator DwVfpRegister() const {
return DwVfpRegister { reg_code };
}
static LowDwVfpRegister from_code(int code) {
LowDwVfpRegister r = { code };
@@ -282,7 +287,7 @@ struct LowDwVfpRegister {
// Quad word NEON register.
struct QwNeonRegister {
static const int kMaxNumRegisters = 16;
static constexpr int kMaxNumRegisters = 16;
static QwNeonRegister from_code(int code) {
QwNeonRegister r = { code };
@@ -297,12 +302,15 @@ struct QwNeonRegister {
DCHECK(is_valid());
return reg_code;
}
void split_code(int* vm, int* m) const {
DCHECK(is_valid());
static void split_code(int reg_code, int* vm, int* m) {
DCHECK(from_code(reg_code).is_valid());
int encoded_code = reg_code << 1;
*m = (encoded_code & 0x10) >> 4;
*vm = encoded_code & 0x0F;
}
void split_code(int* vm, int* m) const {
split_code(reg_code, vm, m);
}
DwVfpRegister low() const {
DwVfpRegister reg;
reg.reg_code = reg_code * 2;
@@ -328,101 +336,100 @@ typedef QwNeonRegister Simd128Register;
// Support for the VFP registers s0 to s31 (d0 to d15).
// Note that "s(N):s(N+1)" is the same as "d(N/2)".
const SwVfpRegister s0 = { 0 };
const SwVfpRegister s1 = { 1 };
const SwVfpRegister s2 = { 2 };
const SwVfpRegister s3 = { 3 };
const SwVfpRegister s4 = { 4 };
const SwVfpRegister s5 = { 5 };
const SwVfpRegister s6 = { 6 };
const SwVfpRegister s7 = { 7 };
const SwVfpRegister s8 = { 8 };
const SwVfpRegister s9 = { 9 };
const SwVfpRegister s10 = { 10 };
const SwVfpRegister s11 = { 11 };
const SwVfpRegister s12 = { 12 };
const SwVfpRegister s13 = { 13 };
const SwVfpRegister s14 = { 14 };
const SwVfpRegister s15 = { 15 };
const SwVfpRegister s16 = { 16 };
const SwVfpRegister s17 = { 17 };
const SwVfpRegister s18 = { 18 };
const SwVfpRegister s19 = { 19 };
const SwVfpRegister s20 = { 20 };
const SwVfpRegister s21 = { 21 };
const SwVfpRegister s22 = { 22 };
const SwVfpRegister s23 = { 23 };
const SwVfpRegister s24 = { 24 };
const SwVfpRegister s25 = { 25 };
const SwVfpRegister s26 = { 26 };
const SwVfpRegister s27 = { 27 };
const SwVfpRegister s28 = { 28 };
const SwVfpRegister s29 = { 29 };
const SwVfpRegister s30 = { 30 };
const SwVfpRegister s31 = { 31 };
const DwVfpRegister no_dreg = { -1 };
const LowDwVfpRegister d0 = { 0 };
const LowDwVfpRegister d1 = { 1 };
const LowDwVfpRegister d2 = { 2 };
const LowDwVfpRegister d3 = { 3 };
const LowDwVfpRegister d4 = { 4 };
const LowDwVfpRegister d5 = { 5 };
const LowDwVfpRegister d6 = { 6 };
const LowDwVfpRegister d7 = { 7 };
const LowDwVfpRegister d8 = { 8 };
const LowDwVfpRegister d9 = { 9 };
const LowDwVfpRegister d10 = { 10 };
const LowDwVfpRegister d11 = { 11 };
const LowDwVfpRegister d12 = { 12 };
const LowDwVfpRegister d13 = { 13 };
const LowDwVfpRegister d14 = { 14 };
const LowDwVfpRegister d15 = { 15 };
const DwVfpRegister d16 = { 16 };
const DwVfpRegister d17 = { 17 };
const DwVfpRegister d18 = { 18 };
const DwVfpRegister d19 = { 19 };
const DwVfpRegister d20 = { 20 };
const DwVfpRegister d21 = { 21 };
const DwVfpRegister d22 = { 22 };
const DwVfpRegister d23 = { 23 };
const DwVfpRegister d24 = { 24 };
const DwVfpRegister d25 = { 25 };
const DwVfpRegister d26 = { 26 };
const DwVfpRegister d27 = { 27 };
const DwVfpRegister d28 = { 28 };
const DwVfpRegister d29 = { 29 };
const DwVfpRegister d30 = { 30 };
const DwVfpRegister d31 = { 31 };
const QwNeonRegister q0 = { 0 };
const QwNeonRegister q1 = { 1 };
const QwNeonRegister q2 = { 2 };
const QwNeonRegister q3 = { 3 };
const QwNeonRegister q4 = { 4 };
const QwNeonRegister q5 = { 5 };
const QwNeonRegister q6 = { 6 };
const QwNeonRegister q7 = { 7 };
const QwNeonRegister q8 = { 8 };
const QwNeonRegister q9 = { 9 };
const QwNeonRegister q10 = { 10 };
const QwNeonRegister q11 = { 11 };
const QwNeonRegister q12 = { 12 };
const QwNeonRegister q13 = { 13 };
const QwNeonRegister q14 = { 14 };
const QwNeonRegister q15 = { 15 };
// Aliases for double registers. Defined using #define instead of
// "static const DwVfpRegister&" because Clang complains otherwise when a
// compilation unit that includes this header doesn't use the variables.
#define kFirstCalleeSavedDoubleReg d8
#define kLastCalleeSavedDoubleReg d15
// kDoubleRegZero and kScratchDoubleReg must pair to form kScratchQuadReg.
#define kDoubleRegZero d14
#define kScratchDoubleReg d15
constexpr SwVfpRegister s0 = { 0 };
constexpr SwVfpRegister s1 = { 1 };
constexpr SwVfpRegister s2 = { 2 };
constexpr SwVfpRegister s3 = { 3 };
constexpr SwVfpRegister s4 = { 4 };
constexpr SwVfpRegister s5 = { 5 };
constexpr SwVfpRegister s6 = { 6 };
constexpr SwVfpRegister s7 = { 7 };
constexpr SwVfpRegister s8 = { 8 };
constexpr SwVfpRegister s9 = { 9 };
constexpr SwVfpRegister s10 = { 10 };
constexpr SwVfpRegister s11 = { 11 };
constexpr SwVfpRegister s12 = { 12 };
constexpr SwVfpRegister s13 = { 13 };
constexpr SwVfpRegister s14 = { 14 };
constexpr SwVfpRegister s15 = { 15 };
constexpr SwVfpRegister s16 = { 16 };
constexpr SwVfpRegister s17 = { 17 };
constexpr SwVfpRegister s18 = { 18 };
constexpr SwVfpRegister s19 = { 19 };
constexpr SwVfpRegister s20 = { 20 };
constexpr SwVfpRegister s21 = { 21 };
constexpr SwVfpRegister s22 = { 22 };
constexpr SwVfpRegister s23 = { 23 };
constexpr SwVfpRegister s24 = { 24 };
constexpr SwVfpRegister s25 = { 25 };
constexpr SwVfpRegister s26 = { 26 };
constexpr SwVfpRegister s27 = { 27 };
constexpr SwVfpRegister s28 = { 28 };
constexpr SwVfpRegister s29 = { 29 };
constexpr SwVfpRegister s30 = { 30 };
constexpr SwVfpRegister s31 = { 31 };
constexpr DwVfpRegister no_dreg = { -1 };
constexpr LowDwVfpRegister d0 = { 0 };
constexpr LowDwVfpRegister d1 = { 1 };
constexpr LowDwVfpRegister d2 = { 2 };
constexpr LowDwVfpRegister d3 = { 3 };
constexpr LowDwVfpRegister d4 = { 4 };
constexpr LowDwVfpRegister d5 = { 5 };
constexpr LowDwVfpRegister d6 = { 6 };
constexpr LowDwVfpRegister d7 = { 7 };
constexpr LowDwVfpRegister d8 = { 8 };
constexpr LowDwVfpRegister d9 = { 9 };
constexpr LowDwVfpRegister d10 = { 10 };
constexpr LowDwVfpRegister d11 = { 11 };
constexpr LowDwVfpRegister d12 = { 12 };
constexpr LowDwVfpRegister d13 = { 13 };
constexpr LowDwVfpRegister d14 = { 14 };
constexpr LowDwVfpRegister d15 = { 15 };
constexpr DwVfpRegister d16 = { 16 };
constexpr DwVfpRegister d17 = { 17 };
constexpr DwVfpRegister d18 = { 18 };
constexpr DwVfpRegister d19 = { 19 };
constexpr DwVfpRegister d20 = { 20 };
constexpr DwVfpRegister d21 = { 21 };
constexpr DwVfpRegister d22 = { 22 };
constexpr DwVfpRegister d23 = { 23 };
constexpr DwVfpRegister d24 = { 24 };
constexpr DwVfpRegister d25 = { 25 };
constexpr DwVfpRegister d26 = { 26 };
constexpr DwVfpRegister d27 = { 27 };
constexpr DwVfpRegister d28 = { 28 };
constexpr DwVfpRegister d29 = { 29 };
constexpr DwVfpRegister d30 = { 30 };
constexpr DwVfpRegister d31 = { 31 };
constexpr QwNeonRegister q0 = { 0 };
constexpr QwNeonRegister q1 = { 1 };
constexpr QwNeonRegister q2 = { 2 };
constexpr QwNeonRegister q3 = { 3 };
constexpr QwNeonRegister q4 = { 4 };
constexpr QwNeonRegister q5 = { 5 };
constexpr QwNeonRegister q6 = { 6 };
constexpr QwNeonRegister q7 = { 7 };
constexpr QwNeonRegister q8 = { 8 };
constexpr QwNeonRegister q9 = { 9 };
constexpr QwNeonRegister q10 = { 10 };
constexpr QwNeonRegister q11 = { 11 };
constexpr QwNeonRegister q12 = { 12 };
constexpr QwNeonRegister q13 = { 13 };
constexpr QwNeonRegister q14 = { 14 };
constexpr QwNeonRegister q15 = { 15 };
// Aliases for double registers.
constexpr LowDwVfpRegister kFirstCalleeSavedDoubleReg = d8;
constexpr LowDwVfpRegister kLastCalleeSavedDoubleReg = d15;
// kDoubleRegZero and kScratchDoubleReg must pair to form kScratchQuadReg. SIMD
// code depends on kDoubleRegZero before kScratchDoubleReg.
constexpr LowDwVfpRegister kDoubleRegZero = d14;
constexpr LowDwVfpRegister kScratchDoubleReg = d15;
// After using kScratchQuadReg, kDoubleRegZero must be reset to 0.
#define kScratchQuadReg q7
constexpr QwNeonRegister kScratchQuadReg = q7;
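As a concrete reading of these aliases (an illustrative sketch; high() is
assumed to mirror the low() helper shown earlier in this header):
  // q7 overlays d14:d15, so kScratchQuadReg shares its storage with
  // kDoubleRegZero and kScratchDoubleReg.
  DCHECK(kScratchQuadReg.low().code() == kDoubleRegZero.code());      // 14
  DCHECK(kScratchQuadReg.high().code() == kScratchDoubleReg.code());  // 15
  // Likewise "s(N):s(N+1)" aliases "d(N/2)": kFirstCalleeSavedDoubleReg (d8)
  // overlays s16 and s17.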
// Coprocessor register
struct CRegister {
@@ -442,24 +449,24 @@ struct CRegister {
};
const CRegister no_creg = { -1 };
constexpr CRegister no_creg = { -1 };
const CRegister cr0 = { 0 };
const CRegister cr1 = { 1 };
const CRegister cr2 = { 2 };
const CRegister cr3 = { 3 };
const CRegister cr4 = { 4 };
const CRegister cr5 = { 5 };
const CRegister cr6 = { 6 };
const CRegister cr7 = { 7 };
const CRegister cr8 = { 8 };
const CRegister cr9 = { 9 };
const CRegister cr10 = { 10 };
const CRegister cr11 = { 11 };
const CRegister cr12 = { 12 };
const CRegister cr13 = { 13 };
const CRegister cr14 = { 14 };
const CRegister cr15 = { 15 };
constexpr CRegister cr0 = { 0 };
constexpr CRegister cr1 = { 1 };
constexpr CRegister cr2 = { 2 };
constexpr CRegister cr3 = { 3 };
constexpr CRegister cr4 = { 4 };
constexpr CRegister cr5 = { 5 };
constexpr CRegister cr6 = { 6 };
constexpr CRegister cr7 = { 7 };
constexpr CRegister cr8 = { 8 };
constexpr CRegister cr9 = { 9 };
constexpr CRegister cr10 = { 10 };
constexpr CRegister cr11 = { 11 };
constexpr CRegister cr12 = { 12 };
constexpr CRegister cr13 = { 13 };
constexpr CRegister cr14 = { 14 };
constexpr CRegister cr15 = { 15 };
// Coprocessor number
@@ -492,9 +499,7 @@ class Operand BASE_EMBEDDED {
// immediate
INLINE(explicit Operand(int32_t immediate,
RelocInfo::Mode rmode = RelocInfo::NONE32));
INLINE(static Operand Zero()) {
return Operand(static_cast<int32_t>(0));
}
INLINE(static Operand Zero());
INLINE(explicit Operand(const ExternalReference& f));
explicit Operand(Handle<Object> handle);
INLINE(explicit Operand(Smi* value));
@@ -520,7 +525,12 @@ class Operand BASE_EMBEDDED {
explicit Operand(Register rm, ShiftOp shift_op, Register rs);
// Return true if this is a register operand.
INLINE(bool is_reg() const);
INLINE(bool is_reg() const) {
return rm_.is_valid() &&
rs_.is(no_reg) &&
shift_op_ == LSL &&
shift_imm_ == 0;
}
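For example (a minimal sketch; the register and immediate constructors used
here are the ones declared above in this class):
  Operand a(r0);          // plain register: is_reg() returns true
  Operand b(r0, LSL, 2);  // shifted register: false, since shift_imm_ != 0
  Operand c(42);          // immediate: false, since rm_ is not valid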
// Return the number of actual instructions required to implement the given
// instruction for this particular operand. This can be a single instruction,
@@ -667,8 +677,8 @@ class NeonListOperand BASE_EMBEDDED {
struct VmovIndex {
unsigned char index;
};
const VmovIndex VmovIndexLo = { 0 };
const VmovIndex VmovIndexHi = { 1 };
constexpr VmovIndex VmovIndexLo = { 0 };
constexpr VmovIndex VmovIndexHi = { 1 };
class Assembler : public AssemblerBase {
public:
@@ -685,7 +695,9 @@ class Assembler : public AssemblerBase {
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
Assembler(Isolate* isolate, void* buffer, int buffer_size)
: Assembler(IsolateData(isolate), buffer, buffer_size) {}
Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
virtual ~Assembler();
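A minimal usage sketch of these constructors (assuming an Isolate* is at
hand; the buffer handling follows the comment above):
  byte buffer[4 * KB];
  Assembler assm(isolate, buffer, sizeof(buffer));
  assm.mov(r0, Operand(42));  // emit into the caller-owned buffer
  CodeDesc desc;
  assm.GetCode(&desc);        // flush pending code, fill the descriptor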
// GetCode emits any pending (non-emitted) code and fills the descriptor
@@ -725,6 +737,7 @@ class Assembler : public AssemblerBase {
Address constant_pool));
// Read/Modify the code target address in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
INLINE(static Address target_address_at(Address pc, Address constant_pool));
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
@@ -756,24 +769,24 @@ class Assembler : public AssemblerBase {
// Here we are patching the address in the constant pool, not the actual call
// instruction. The address in the constant pool is the same size as a
// pointer.
static const int kSpecialTargetSize = kPointerSize;
static constexpr int kSpecialTargetSize = kPointerSize;
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
static constexpr int kInstrSize = sizeof(Instr);
// Distance between start of patched debug break slot and the emitted address
// to jump to.
// Patched debug break slot code is:
// ldr ip, [pc, #0] @ emitted address and start
// blx ip
static const int kPatchDebugBreakSlotAddressOffset = 2 * kInstrSize;
static constexpr int kPatchDebugBreakSlotAddressOffset = 2 * kInstrSize;
// Difference between address of current opcode and value read from pc
// register.
static const int kPcLoadDelta = 8;
static constexpr int kPcLoadDelta = 8;
static const int kDebugBreakSlotInstructions = 4;
static const int kDebugBreakSlotLength =
static constexpr int kDebugBreakSlotInstructions = 4;
static constexpr int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
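Putting these constants together (an illustrative reading of the patched
slot; kInstrSize is 4 on ARM): the ldr at offset 0 observes pc as its own
address plus kPcLoadDelta, so [pc, #0] addresses offset 8, which is exactly
kPatchDebugBreakSlotAddressOffset:
  // offset 0:              ldr ip, [pc, #0]   ; reads the word at 0 + 8
  // offset kInstrSize:     blx ip
  // offset 2 * kInstrSize: <patched address>  ; the emitted jump target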
// ---------------------------------------------------------------------------
@@ -814,9 +827,7 @@ class Assembler : public AssemblerBase {
void sub(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
void sub(Register dst, Register src1, Register src2,
SBit s = LeaveCC, Condition cond = al) {
sub(dst, src1, Operand(src2), s, cond);
}
SBit s = LeaveCC, Condition cond = al);
void rsb(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
@@ -824,9 +835,7 @@ class Assembler : public AssemblerBase {
void add(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
void add(Register dst, Register src1, Register src2,
SBit s = LeaveCC, Condition cond = al) {
add(dst, src1, Operand(src2), s, cond);
}
SBit s = LeaveCC, Condition cond = al);
void adc(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
@@ -838,16 +847,13 @@ class Assembler : public AssemblerBase {
SBit s = LeaveCC, Condition cond = al);
void tst(Register src1, const Operand& src2, Condition cond = al);
void tst(Register src1, Register src2, Condition cond = al) {
tst(src1, Operand(src2), cond);
}
void tst(Register src1, Register src2, Condition cond = al);
void teq(Register src1, const Operand& src2, Condition cond = al);
void cmp(Register src1, const Operand& src2, Condition cond = al);
void cmp(Register src1, Register src2, Condition cond = al) {
cmp(src1, Operand(src2), cond);
}
void cmp(Register src1, Register src2, Condition cond = al);
void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond = al);
void cmn(Register src1, const Operand& src2, Condition cond = al);
@@ -855,15 +861,11 @@ class Assembler : public AssemblerBase {
void orr(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
void orr(Register dst, Register src1, Register src2,
SBit s = LeaveCC, Condition cond = al) {
orr(dst, src1, Operand(src2), s, cond);
}
SBit s = LeaveCC, Condition cond = al);
void mov(Register dst, const Operand& src,
SBit s = LeaveCC, Condition cond = al);
void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al) {
mov(dst, Operand(src), s, cond);
}
void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al);
// Load the position of the label relative to the generated code object
// pointer in a register.
@@ -883,31 +885,13 @@ class Assembler : public AssemblerBase {
// Shift instructions
void asr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
Condition cond = al) {
if (src2.is_reg()) {
mov(dst, Operand(src1, ASR, src2.rm()), s, cond);
} else {
mov(dst, Operand(src1, ASR, src2.immediate()), s, cond);
}
}
Condition cond = al);
void lsl(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
Condition cond = al) {
if (src2.is_reg()) {
mov(dst, Operand(src1, LSL, src2.rm()), s, cond);
} else {
mov(dst, Operand(src1, LSL, src2.immediate()), s, cond);
}
}
Condition cond = al);
void lsr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
Condition cond = al) {
if (src2.is_reg()) {
mov(dst, Operand(src1, LSR, src2.rm()), s, cond);
} else {
mov(dst, Operand(src1, LSR, src2.immediate()), s, cond);
}
}
Condition cond = al);
// Multiply instructions
@@ -1337,33 +1321,36 @@ class Assembler : public AssemblerBase {
void vst1(NeonSize size,
const NeonListOperand& src,
const NeonMemOperand& dst);
// dt represents the narrower type
void vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src);
// dt represents the narrower type.
void vqmovn(NeonDataType dt, DwVfpRegister dst, QwNeonRegister src);
// Only unconditional core <-> scalar moves are currently supported.
void vmov(NeonDataType dt, DwVfpRegister dst, int index, Register src);
void vmov(NeonDataType dt, Register dst, DwVfpRegister src, int index);
void vmov(const QwNeonRegister dst, const QwNeonRegister src);
void vmvn(const QwNeonRegister dst, const QwNeonRegister src);
void vmov(QwNeonRegister dst, QwNeonRegister src);
void vdup(NeonSize size, QwNeonRegister dst, Register src);
void vdup(QwNeonRegister dst, SwVfpRegister src);
void vcvt_f32_s32(QwNeonRegister dst, QwNeonRegister src);
void vcvt_f32_u32(QwNeonRegister dst, QwNeonRegister src);
void vcvt_s32_f32(QwNeonRegister dst, QwNeonRegister src);
void vcvt_u32_f32(QwNeonRegister dst, QwNeonRegister src);
void vmvn(QwNeonRegister dst, QwNeonRegister src);
void vswp(DwVfpRegister dst, DwVfpRegister src);
void vswp(QwNeonRegister dst, QwNeonRegister src);
// vdup conditional execution isn't supported.
void vdup(NeonSize size, const QwNeonRegister dst, const Register src);
void vdup(const QwNeonRegister dst, const SwVfpRegister src);
void vcvt_f32_s32(const QwNeonRegister dst, const QwNeonRegister src);
void vcvt_f32_u32(const QwNeonRegister dst, const QwNeonRegister src);
void vcvt_s32_f32(const QwNeonRegister dst, const QwNeonRegister src);
void vcvt_u32_f32(const QwNeonRegister dst, const QwNeonRegister src);
void vabs(const QwNeonRegister dst, const QwNeonRegister src);
void vabs(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src);
void vneg(const QwNeonRegister dst, const QwNeonRegister src);
void vneg(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src);
void veor(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2);
void vabs(QwNeonRegister dst, QwNeonRegister src);
void vabs(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
void vneg(QwNeonRegister dst, QwNeonRegister src);
void vneg(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
void vand(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vbsl(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void veor(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2);
void veor(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vbsl(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vorr(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vadd(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vadd(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
@@ -1385,6 +1372,10 @@ class Assembler : public AssemblerBase {
void vmax(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vmax(NeonDataType dt, QwNeonRegister dst,
QwNeonRegister src1, QwNeonRegister src2);
void vpmin(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2);
void vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2);
void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
void vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
// vrecpe and vrsqrte only support floating point lanes.
@@ -1398,24 +1389,26 @@ class Assembler : public AssemblerBase {
void vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
void vcge(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vcge(NeonDataType dt, QwNeonRegister dst,
QwNeonRegister src1, QwNeonRegister src2);
void vcge(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
void vcgt(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vcgt(NeonDataType dt, QwNeonRegister dst,
QwNeonRegister src1, QwNeonRegister src2);
void vext(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2, int bytes);
void vzip(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src);
void vrev16(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src);
void vrev32(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src);
void vrev64(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src);
void vtbl(const DwVfpRegister dst, const NeonListOperand& list,
const DwVfpRegister index);
void vtbx(const DwVfpRegister dst, const NeonListOperand& list,
const DwVfpRegister index);
void vcgt(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
void vext(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2,
int bytes);
void vzip(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
void vzip(NeonSize size, QwNeonRegister src1, QwNeonRegister src2);
void vuzp(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
void vuzp(NeonSize size, QwNeonRegister src1, QwNeonRegister src2);
void vrev16(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
void vrev32(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
void vrev64(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
void vtrn(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
void vtrn(NeonSize size, QwNeonRegister src1, QwNeonRegister src2);
void vtbl(DwVfpRegister dst, const NeonListOperand& list,
DwVfpRegister index);
void vtbx(DwVfpRegister dst, const NeonListOperand& list,
DwVfpRegister index);
// Pseudo instructions
@@ -1443,9 +1436,7 @@ class Assembler : public AssemblerBase {
ldr(dst, MemOperand(sp, 4, PostIndex), cond);
}
void pop() {
add(sp, sp, Operand(kPointerSize));
}
void pop();
void vpush(DwVfpRegister src, Condition cond = al) {
vstm(db_w, sp, src, src, cond);
@@ -1615,12 +1606,14 @@ class Assembler : public AssemblerBase {
// reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
// PC-relative loads, thereby defining a maximum distance between the
// instruction and the accessed constant.
static const int kMaxDistToIntPool = 4*KB;
static const int kMaxDistToFPPool = 1*KB;
static constexpr int kMaxDistToIntPool = 4 * KB;
static constexpr int kMaxDistToFPPool = 1 * KB;
// All relocations could be integer; the integer pool therefore acts as the limit.
static const int kMinNumPendingConstants = 4;
static const int kMaxNumPending32Constants = kMaxDistToIntPool / kInstrSize;
static const int kMaxNumPending64Constants = kMaxDistToFPPool / kInstrSize;
static constexpr int kMinNumPendingConstants = 4;
static constexpr int kMaxNumPending32Constants =
kMaxDistToIntPool / kInstrSize;
static constexpr int kMaxNumPending64Constants =
kMaxDistToFPPool / kInstrSize;
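Concretely, with kInstrSize == sizeof(Instr) == 4 on ARM, these bounds work
out to:
  // kMaxNumPending32Constants == 4 * KB / 4 == 1024 entries
  // kMaxNumPending64Constants == 1 * KB / 4 == 256 entries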
// Postpone the generation of the constant pool for the specified number of
// instructions.
@@ -1715,15 +1708,33 @@ class Assembler : public AssemblerBase {
(reg.reg_code < LowDwVfpRegister::kMaxNumLowRegisters / 2);
}
private:
int next_buffer_check_; // pc offset of next buffer check
inline void emit(Instr x);
// Code generation
// The relocation writer's position is at least kGap bytes below the end of
// the generated instructions. This is so that multi-instruction sequences do
// not have to check for overflow. The same is true for writes of large
// relocation info entries.
static const int kGap = 32;
static constexpr int kGap = 32;
// Relocation info generation
// Each relocation is encoded as a variable size value
static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer;
// ConstantPoolEntry records are used during code generation as temporary
// containers for constants and code target addresses until they are emitted
// to the constant pool. These records are temporarily stored in a separate
// buffer until a constant pool is emitted.
// If every instruction in a long sequence is accessing the pool, we need one
// pending relocation entry per instruction.
// The buffers of pending constant pool entries.
std::vector<ConstantPoolEntry> pending_32_bit_constants_;
std::vector<ConstantPoolEntry> pending_64_bit_constants_;
private:
int next_buffer_check_; // pc offset of next buffer check
// Constant pool generation
// Pools are emitted in the instruction stream, preferably after unconditional
@@ -1739,8 +1750,8 @@ class Assembler : public AssemblerBase {
// expensive. By default we only check again once a number of instructions
// has been generated. That also means that the sizing of the buffers is not
// an exact science, and that we rely on some slop to not overrun buffers.
static const int kCheckPoolIntervalInst = 32;
static const int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
static constexpr int kCheckPoolIntervalInst = 32;
static constexpr int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
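Worked out with the ARM instruction size (kInstrSize == 4):
  // kCheckPoolInterval == 32 * 4 == 128 bytes of emitted code per check.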
// Emission of the constant pool may be blocked in some code sequences.
@@ -1752,31 +1763,13 @@ class Assembler : public AssemblerBase {
int first_const_pool_32_use_;
int first_const_pool_64_use_;
// Relocation info generation
// Each relocation is encoded as a variable size value
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer;
// ConstantPoolEntry records are used during code generation as temporary
// containers for constants and code target addresses until they are emitted
// to the constant pool. These records are temporarily stored in a separate
// buffer until a constant pool is emitted.
// If every instruction in a long sequence is accessing the pool, we need one
// pending relocation entry per instruction.
// The buffers of pending constant pool entries.
std::vector<ConstantPoolEntry> pending_32_bit_constants_;
std::vector<ConstantPoolEntry> pending_64_bit_constants_;
ConstantPoolBuilder constant_pool_builder_;
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
// Code emission
inline void CheckBuffer();
void GrowBuffer();
inline void emit(Instr x);
// 32-bit immediate values
void move_32_bit_immediate(Register rd,
@@ -1808,12 +1801,20 @@ class Assembler : public AssemblerBase {
friend class EnsureSpace;
};
constexpr int kNoCodeAgeSequenceLength = 3 * Assembler::kInstrSize;
class EnsureSpace BASE_EMBEDDED {
public:
explicit EnsureSpace(Assembler* assembler) {
assembler->CheckBuffer();
}
INLINE(explicit EnsureSpace(Assembler* assembler));
};
class PatchingAssembler : public Assembler {
public:
PatchingAssembler(IsolateData isolate_data, byte* address, int instructions);
~PatchingAssembler();
void Emit(Address addr);
void FlushICache(Isolate* isolate);
};
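For reference, the deoptimizer changes later in this patch use the new class
like this (patch a single instruction in place, then flush):
  PatchingAssembler patcher(Assembler::IsolateData(isolate), pointer, 1);
  patcher.bkpt(0);
  patcher.FlushICache(isolate);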

406 deps/v8/src/arm/code-stubs-arm.cc

@@ -5,14 +5,19 @@
#if V8_TARGET_ARCH_ARM
#include "src/code-stubs.h"
#include "src/api-arguments.h"
#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/counters.h"
#include "src/heap/heap-inl.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/objects/regexp-match-info.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
@@ -1148,173 +1153,10 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
}
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off by a runtime
// switch or at compilation.
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec);
// This case is handled prior to the RegExpExecStub call.
__ Abort(kUnexpectedRegExpExecCall);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
// sp[0]: last_match_info (expected JSArray)
// sp[4]: previous index
// sp[8]: subject string
// sp[12]: JSRegExp object
const int kLastMatchInfoOffset = 0 * kPointerSize;
const int kPreviousIndexOffset = 1 * kPointerSize;
const int kSubjectOffset = 2 * kPointerSize;
const int kJSRegExpOffset = 3 * kPointerSize;
Label runtime;
// Allocation of registers for this function. These are in callee save
// registers and will be preserved by the call to the native RegExp code, as
// this code is called using the normal C calling convention. When calling
// directly from generated code the native RegExp code will not do a GC and
// therefore the contents of these registers are safe to use after the call.
Register subject = r4;
Register regexp_data = r5;
Register last_match_info_elements = no_reg; // will be r6;
// Ensure that a RegExp stack is allocated.
ExternalReference address_of_regexp_stack_memory_address =
ExternalReference::address_of_regexp_stack_memory_address(isolate());
ExternalReference address_of_regexp_stack_memory_size =
ExternalReference::address_of_regexp_stack_memory_size(isolate());
__ mov(r0, Operand(address_of_regexp_stack_memory_size));
__ ldr(r0, MemOperand(r0, 0));
__ cmp(r0, Operand::Zero());
__ b(eq, &runtime);
// Check that the first argument is a JSRegExp object.
__ ldr(r0, MemOperand(sp, kJSRegExpOffset));
__ JumpIfSmi(r0, &runtime);
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
__ b(ne, &runtime);
// Check that the RegExp has been compiled (data contains a fixed array).
__ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
__ SmiTst(regexp_data);
__ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
__ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
__ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
}
// regexp_data: RegExp data (FixedArray)
// Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
__ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
__ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
__ b(ne, &runtime);
// regexp_data: RegExp data (FixedArray)
// Check that the number of captures fit in the static offsets vector buffer.
__ ldr(r2,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
// Check (number_of_captures + 1) * 2 <= offsets vector size
// Or number_of_captures * 2 <= offsets vector size - 2
// Multiplying by 2 comes for free since r2 is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
__ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
__ b(hi, &runtime);
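// Worked example of the equivalence above, using a hypothetical offsets
// vector size of 32: r2 holds the smi-tagged count (value << 1), so up to
// 15 captures pass the check, since (15 << 1) == 30 <= 32 - 2.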
// Reset offset for possibly sliced string.
__ mov(r9, Operand::Zero());
__ ldr(subject, MemOperand(sp, kSubjectOffset));
__ JumpIfSmi(subject, &runtime);
__ mov(r3, subject); // Make a copy of the original subject string.
// subject: subject string
// r3: subject string
// regexp_data: RegExp data (FixedArray)
// Handle subject string according to its encoding and representation:
// (1) Sequential string? If yes, go to (4).
// (2) Sequential or cons? If not, go to (5).
// (3) Cons string. If the string is flat, replace subject with first string
// and go to (1). Otherwise bail out to runtime.
// (4) Sequential string. Load regexp code according to encoding.
// (E) Carry on.
/// [...]
// Deferred code at the end of the stub:
// (5) Long external string? If not, go to (7).
// (6) External string. Make it, offset-wise, look like a sequential string.
// Go to (4).
// (7) Short external string or not a string? If yes, bail out to runtime.
// (8) Sliced or thin string. Replace subject with parent. Go to (1).
Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
__ bind(&check_underlying);
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
// (1) Sequential string? If yes, go to (4).
__ and_(r1,
r0,
Operand(kIsNotStringMask |
kStringRepresentationMask |
kShortExternalStringMask),
SetCC);
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
__ b(eq, &seq_string); // Go to (4).
// (2) Sequential or cons? If not, go to (5).
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kThinStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmp(r1, Operand(kExternalStringTag));
__ b(ge, &not_seq_nor_cons); // Go to (5).
// (3) Cons string. Check that it's flat.
// Replace subject with first string and reload instance type.
__ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
__ CompareRoot(r0, Heap::kempty_stringRootIndex);
__ b(ne, &runtime);
__ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
__ jmp(&check_underlying);
// (4) Sequential string. Load regexp code according to encoding.
__ bind(&seq_string);
// subject: sequential subject string (or look-alike, external string)
// r3: original subject string
// Load previous index and check range before r3 is overwritten. We have to
// use r3 instead of subject here because subject might have been only made
// to look like a sequential string when it actually is an external string.
__ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
__ JumpIfNotSmi(r1, &runtime);
__ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
__ cmp(r3, Operand(r1));
__ b(ls, &runtime);
__ SmiUntag(r1);
STATIC_ASSERT(8 == kOneByteStringTag);
STATIC_ASSERT(kTwoByteStringTag == 0);
__ and_(r0, r0, Operand(kStringEncodingMask));
__ mov(r3, Operand(r0, ASR, 3), SetCC);
__ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset),
ne);
__ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
// (E) Carry on. String handling is done.
// r6: irregexp code
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object otherwise it contains
// a smi (code flushing support).
__ JumpIfSmi(r6, &runtime);
// r1: previous index
// r3: encoding of subject string (1 if one_byte, 0 if two_byte);
// r6: code
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// All checks done. Now push arguments for native regexp code.
__ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r0, r2);
// Isolates: note we add an additional parameter here (isolate pointer).
const int kRegExpExecuteArguments = 9;
const int kParameterRegisters = 4;
@@ -1324,228 +1166,61 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Arguments are before that on the stack or in registers.
// Argument 9 (sp[20]): Pass current isolate address.
__ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
__ str(r0, MemOperand(sp, 5 * kPointerSize));
__ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
__ str(r5, MemOperand(sp, 5 * kPointerSize));
// Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
__ mov(r0, Operand(1));
__ str(r0, MemOperand(sp, 4 * kPointerSize));
__ mov(r5, Operand(1));
__ str(r5, MemOperand(sp, 4 * kPointerSize));
// Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
__ mov(r0, Operand(address_of_regexp_stack_memory_address));
__ ldr(r0, MemOperand(r0, 0));
__ mov(r2, Operand(address_of_regexp_stack_memory_size));
__ ldr(r2, MemOperand(r2, 0));
__ add(r0, r0, Operand(r2));
__ str(r0, MemOperand(sp, 3 * kPointerSize));
ExternalReference address_of_regexp_stack_memory_address =
ExternalReference::address_of_regexp_stack_memory_address(isolate());
ExternalReference address_of_regexp_stack_memory_size =
ExternalReference::address_of_regexp_stack_memory_size(isolate());
__ mov(r5, Operand(address_of_regexp_stack_memory_address));
__ ldr(r5, MemOperand(r5, 0));
__ mov(r6, Operand(address_of_regexp_stack_memory_size));
__ ldr(r6, MemOperand(r6, 0));
__ add(r5, r5, Operand(r6));
__ str(r5, MemOperand(sp, 3 * kPointerSize));
// Argument 6: Set the number of capture registers to zero to force global
// regexps to behave as non-global. This does not affect non-global regexps.
__ mov(r0, Operand::Zero());
__ str(r0, MemOperand(sp, 2 * kPointerSize));
__ mov(r5, Operand::Zero());
__ str(r5, MemOperand(sp, 2 * kPointerSize));
// Argument 5 (sp[4]): static offsets vector buffer.
__ mov(r0,
Operand(ExternalReference::address_of_static_offsets_vector(
isolate())));
__ str(r0, MemOperand(sp, 1 * kPointerSize));
// For arguments 4 and 3 get string length, calculate start of string data and
// calculate the shift of the index (0 for one-byte and 1 for two-byte).
__ add(r7, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
__ eor(r3, r3, Operand(1));
// Load the length from the original subject string from the previous stack
// frame. Therefore we have to use fp, which points exactly to two pointer
// sizes below the previous sp. (Because creating a new stack frame pushes
// the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
__ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
// If slice offset is not 0, load the length from the original sliced string.
// Argument 4, r3: End of string data
// Argument 3, r2: Start of string data
// Prepare start and end index of the input.
__ add(r9, r7, Operand(r9, LSL, r3));
__ add(r2, r9, Operand(r1, LSL, r3));
__ ldr(r7, FieldMemOperand(subject, String::kLengthOffset));
__ SmiUntag(r7);
__ add(r3, r9, Operand(r7, LSL, r3));
__ mov(
r5,
Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
__ str(r5, MemOperand(sp, 1 * kPointerSize));
// Argument 4: End of string data
// Argument 3: Start of string data
CHECK(r3.is(RegExpExecDescriptor::StringEndRegister()));
CHECK(r2.is(RegExpExecDescriptor::StringStartRegister()));
// Argument 2 (r1): Previous index.
// Already there
CHECK(r1.is(RegExpExecDescriptor::LastIndexRegister()));
// Argument 1 (r0): Subject string.
__ mov(r0, subject);
CHECK(r0.is(RegExpExecDescriptor::StringRegister()));
// Locate the code entry and call it.
__ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
Register code_reg = RegExpExecDescriptor::CodeRegister();
__ add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
DirectCEntryStub stub(isolate());
stub.GenerateCall(masm, r6);
stub.GenerateCall(masm, code_reg);
__ LeaveExitFrame(false, no_reg, true);
last_match_info_elements = r6;
// r0: result
// subject: subject string (callee saved)
// regexp_data: RegExp data (callee saved)
// last_match_info_elements: Last match info elements (callee saved)
// Check the result.
Label success;
__ cmp(r0, Operand(1));
// We expect exactly one result since we force the called regexp to behave
// as non-global.
__ b(eq, &success);
Label failure;
__ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
__ b(eq, &failure);
__ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
// If not exception it can only be retry. Handle that in the runtime system.
__ b(ne, &runtime);
// Result must now be exception. If there is no pending exception already a
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
__ mov(r1, Operand(isolate()->factory()->the_hole_value()));
__ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate())));
__ ldr(r0, MemOperand(r2, 0));
__ cmp(r0, r1);
__ b(eq, &runtime);
// For exception, throw the exception again.
__ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ bind(&failure);
// For failure and exception return null.
__ mov(r0, Operand(isolate()->factory()->null_value()));
__ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
// Process the result from the native regexp code.
__ bind(&success);
__ ldr(r1,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
// Multiplying by 2 comes for free since r1 is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(r1, r1, Operand(2)); // r1 was a smi.
// Check that the last match info is a FixedArray.
__ ldr(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
__ JumpIfSmi(last_match_info_elements, &runtime);
// Check that the object has fast elements.
__ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
__ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
__ b(ne, &runtime);
// Check that the last match info has space for the capture registers and the
// additional information.
__ ldr(r0,
FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
__ add(r2, r1, Operand(RegExpMatchInfo::kLastMatchOverhead));
__ cmp(r2, Operand::SmiUntag(r0));
__ b(gt, &runtime);
// r1: number of capture registers
// r4: subject string
// Store the capture count.
__ SmiTag(r2, r1);
__ str(r2, FieldMemOperand(last_match_info_elements,
RegExpMatchInfo::kNumberOfCapturesOffset));
// Store last subject and last input.
__ str(subject, FieldMemOperand(last_match_info_elements,
RegExpMatchInfo::kLastSubjectOffset));
__ mov(r2, subject);
__ RecordWriteField(last_match_info_elements,
RegExpMatchInfo::kLastSubjectOffset, subject, r3,
kLRHasNotBeenSaved, kDontSaveFPRegs);
__ mov(subject, r2);
__ str(subject, FieldMemOperand(last_match_info_elements,
RegExpMatchInfo::kLastInputOffset));
__ RecordWriteField(last_match_info_elements,
RegExpMatchInfo::kLastInputOffset, subject, r3,
kLRHasNotBeenSaved, kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
ExternalReference::address_of_static_offsets_vector(isolate());
__ mov(r2, Operand(address_of_static_offsets_vector));
// r1: number of capture registers
// r2: offsets vector
Label next_capture, done;
// Capture register counter starts from number of capture registers and
// counts down until wrapping after zero.
__ add(r0, last_match_info_elements,
Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag));
__ bind(&next_capture);
__ sub(r1, r1, Operand(1), SetCC);
__ b(mi, &done);
// Read the value from the static offsets vector buffer.
__ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
// Store the smi value in the last match info.
__ SmiTag(r3);
__ str(r3, MemOperand(r0, kPointerSize, PostIndex));
__ jmp(&next_capture);
__ bind(&done);
// Return last match info.
__ mov(r0, last_match_info_elements);
__ add(sp, sp, Operand(4 * kPointerSize));
__ SmiTag(r0);
__ Ret();
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (5) Long external string? If not, go to (7).
__ bind(&not_seq_nor_cons);
// Compare flags are still set.
__ b(gt, &not_long_external); // Go to (7).
// (6) External string. Make it, offset-wise, look like a sequential string.
__ bind(&external_string);
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ tst(r0, Operand(kIsIndirectStringMask));
__ Assert(eq, kExternalStringExpectedButNotFound);
}
__ ldr(subject,
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ sub(subject,
subject,
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ jmp(&seq_string); // Go to (4).
// (7) Short external string or not a string? If yes, bail out to runtime.
__ bind(&not_long_external);
STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
__ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
__ b(ne, &runtime);
// (8) Sliced or thin string. Replace subject with parent. Go to (4).
Label thin_string;
__ cmp(r1, Operand(kThinStringTag));
__ b(eq, &thin_string);
// Load offset into r9 and replace subject string with parent.
__ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
__ SmiUntag(r9);
__ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
__ jmp(&check_underlying); // Go to (4).
__ bind(&thin_string);
__ ldr(subject, FieldMemOperand(subject, ThinString::kActualOffset));
__ jmp(&check_underlying); // Go to (4).
#endif // V8_INTERPRETED_REGEXP
}
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// r0 : number of arguments to the construct function
// r1 : the function to call
@@ -2566,6 +2241,9 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
void RecordWriteStub::Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,

4 deps/v8/src/arm/code-stubs-arm.h

@@ -197,9 +197,7 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
void Activate(Code* code) override {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
void Activate(Code* code) override;
Register object() const {
return Register::from_code(ObjectBits::decode(minor_key_));

14 deps/v8/src/arm/codegen-arm.cc

@@ -8,6 +8,7 @@
#include <memory>
#include "src/arm/assembler-arm-inl.h"
#include "src/arm/simulator-arm.h"
#include "src/codegen.h"
#include "src/macro-assembler.h"
@@ -167,7 +168,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
CodeDesc desc;
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
@@ -284,7 +285,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
@@ -464,11 +465,12 @@ void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age);
CodePatcher patcher(isolate, sequence,
PatchingAssembler patcher(Assembler::IsolateData(isolate), sequence,
young_length / Assembler::kInstrSize);
patcher.masm()->add(r0, pc, Operand(-8));
patcher.masm()->ldr(pc, MemOperand(pc, -4));
patcher.masm()->emit_code_stub_address(stub);
patcher.add(r0, pc, Operand(-8));
patcher.ldr(pc, MemOperand(pc, -4));
patcher.emit_code_stub_address(stub);
patcher.FlushICache(isolate);
}
}

40 deps/v8/src/arm/deoptimizer-arm.cc

@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/assembler-inl.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
@@ -40,16 +42,21 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
} else {
pointer = code->instruction_start();
}
CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->bkpt(0);
{
PatchingAssembler patcher(Assembler::IsolateData(isolate), pointer, 1);
patcher.bkpt(0);
patcher.FlushICache(isolate);
}
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
1);
osr_patcher.masm()->bkpt(0);
PatchingAssembler patcher(Assembler::IsolateData(isolate),
code->instruction_start() + osr_offset, 1);
patcher.bkpt(0);
patcher.FlushICache(isolate);
}
}
@@ -114,6 +121,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
const int kDoubleRegsSize = kDoubleSize * DwVfpRegister::kMaxNumRegisters;
const int kFloatRegsSize = kFloatSize * SwVfpRegister::kMaxNumRegisters;
// Save all allocatable VFP registers before messing with them.
DCHECK(kDoubleRegZero.code() == 14);
@@ -132,6 +140,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ vstm(db_w, sp, d16, d31, ne);
__ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
__ vstm(db_w, sp, d0, d15);
// Push registers s0-s15, and possibly s16-s31, on the stack.
// If s16-s31 are not pushed, decrease the stack pointer instead.
__ vstm(db_w, sp, s16, s31, ne);
__ sub(sp, sp, Operand(16 * kFloatSize), LeaveCC, eq);
__ vstm(db_w, sp, s0, s15);
}
// Push all 16 registers (needed to populate FrameDescription::registers_).
@@ -143,7 +157,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ str(fp, MemOperand(ip));
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
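// Worked example, assuming the ARM values of these constants (16 core
// registers, kPointerSize == 4; 32 double registers, kDoubleSize == 8;
// 32 float registers, kFloatSize == 4):
//   16 * 4 + 32 * 8 + 32 * 4 == 64 + 256 + 128 == 448 bytes saved.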
// Get the bailout id from the stack.
__ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize));
@@ -196,11 +210,23 @@ void Deoptimizer::TableEntryGenerator::Generate() {
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
int src_offset =
code * kDoubleSize + kNumberOfRegisters * kPointerSize + kFloatRegsSize;
__ vldr(d0, sp, src_offset);
__ vstr(d0, r1, dst_offset);
}
// Copy VFP registers to
// float_registers_[FloatRegister::kMaxNumAllocatableRegisters]
int float_regs_offset = FrameDescription::float_registers_offset();
for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
int code = config->GetAllocatableFloatCode(i);
int dst_offset = code * kFloatSize + float_regs_offset;
int src_offset = code * kFloatSize + kNumberOfRegisters * kPointerSize;
__ ldr(r2, MemOperand(sp, src_offset));
__ str(r2, MemOperand(r1, dst_offset));
}
// Remove the bailout id and the saved registers from the stack.
__ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));

162 deps/v8/src/arm/disasm-arm.cc

@@ -1364,12 +1364,6 @@ int Decoder::DecodeType7(Instruction* instr) {
if (instr->Bit(24) == 1) {
if (instr->SvcValue() >= kStopCode) {
Format(instr, "stop'cond 'svc");
out_buffer_pos_ += SNPrintF(
out_buffer_ + out_buffer_pos_, "\n %p %08x",
reinterpret_cast<void*>(instr + Instruction::kInstrSize),
*reinterpret_cast<uint32_t*>(instr + Instruction::kInstrSize));
// We have decoded 2 * Instruction::kInstrSize bytes.
return 2 * Instruction::kInstrSize;
} else {
Format(instr, "svc'cond 'svc");
}
@@ -1582,19 +1576,19 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
Format(instr, "vmov'cond.32 'rt, 'Dd[1]");
}
} else {
const char* sign = instr->Bit(23) != 0 ? "u" : "s";
char sign = instr->Bit(23) != 0 ? 'u' : 's';
int rt = instr->RtValue();
int vn = instr->VFPNRegValue(kDoublePrecision);
if ((opc1_opc2 & 0x8) != 0) {
// NeonS8 / NeonU8
int i = opc1_opc2 & 0x7;
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmov.%s8 r%d, d%d[%d]", sign, rt, vn, i);
"vmov.%c8 r%d, d%d[%d]", sign, rt, vn, i);
} else if ((opc1_opc2 & 0x1) != 0) {
// NeonS16 / NeonU16
int i = (opc1_opc2 >> 1) & 0x3;
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vmov.%s16 r%d, d%d[%d]",
SNPrintF(out_buffer_ + out_buffer_pos_, "vmov.%c16 r%d, d%d[%d]",
sign, rt, vn, i);
} else {
Unknown(instr);
@@ -1867,10 +1861,10 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
Vm = instr->VFPMRegValue(kSimd128Precision);
Vn = instr->VFPNRegValue(kSimd128Precision);
}
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
switch (instr->Bits(11, 8)) {
case 0x0: {
if (instr->Bit(4) == 1) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
// vqadd.s<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_,
@@ -1904,7 +1898,6 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
case 0x2: {
if (instr->Bit(4) == 1) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
// vqsub.s<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_,
@@ -1915,7 +1908,6 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
break;
}
case 0x3: {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
// vcge/vcgt.s<size> Qd, Qm, Qn.
out_buffer_pos_ +=
@@ -1924,7 +1916,6 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
break;
}
case 0x6: {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
// vmin/vmax.s<size> Qd, Qm, Qn.
const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
out_buffer_pos_ +=
@@ -1934,7 +1925,6 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
case 0x8: {
const char* op = (instr->Bit(4) == 0) ? "vadd" : "vtst";
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
// vadd/vtst.i<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.i%d q%d, q%d, q%d",
@@ -1943,7 +1933,6 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
case 0x9: {
if (instr->Bit(6) == 1 && instr->Bit(4) == 1) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
// vmul.i<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_,
@@ -1953,6 +1942,14 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
case 0xa: {
// vpmin/vpmax.s<size> Dd, Dm, Dn.
const char* op = instr->Bit(4) == 1 ? "vpmin" : "vpmax";
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d d%d, d%d, d%d",
op, size, Vd, Vn, Vm);
break;
}
case 0xd: {
if (instr->Bit(4) == 0) {
const char* op = (instr->Bits(21, 20) == 0) ? "vadd" : "vsub";
@@ -2052,10 +2049,10 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
Vm = instr->VFPMRegValue(kSimd128Precision);
Vn = instr->VFPNRegValue(kSimd128Precision);
}
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
switch (instr->Bits(11, 8)) {
case 0x0: {
if (instr->Bit(4) == 1) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
// vqadd.u<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_,
@@ -2087,7 +2084,6 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
case 0x2: {
if (instr->Bit(4) == 1) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
// vqsub.u<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_,
@@ -2098,7 +2094,6 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
break;
}
case 0x3: {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
// vcge/vcgt.u<size> Qd, Qm, Qn.
out_buffer_pos_ +=
@@ -2107,7 +2102,6 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
break;
}
case 0x6: {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
// vmin/vmax.u<size> Qd, Qm, Qn.
const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
out_buffer_pos_ +=
@@ -2116,7 +2110,6 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
break;
}
case 0x8: {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
if (instr->Bit(4) == 0) {
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_,
@@ -2128,6 +2121,14 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
case 0xa: {
// vpmin/vpmax.u<size> Dd, Dm, Dn.
const char* op = instr->Bit(4) == 1 ? "vpmin" : "vpmax";
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d d%d, d%d, d%d",
op, size, Vd, Vn, Vm);
break;
}
case 0xd: {
if (instr->Bit(21) == 0 && instr->Bit(6) == 1 && instr->Bit(4) == 1) {
// vmul.f32 Qd, Qn, Qm
@@ -2165,35 +2166,57 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmovl.u%d q%d, d%d", imm3 * 8, Vd, Vm);
} else if (instr->Opc1Value() == 7 && instr->Bits(21, 20) == 0x3 &&
instr->Bit(4) == 0) {
if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0) {
if (instr->Bit(6) == 0) {
} else if (instr->Opc1Value() == 7 && instr->Bit(4) == 0) {
if (instr->Bits(11, 7) == 0x18) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kDoublePrecision);
int index = instr->Bit(19);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vdup q%d, d%d[%d]", Vd, Vm, index);
} else if (instr->Bits(11, 10) == 0x2) {
int Vd = instr->VFPDRegValue(kDoublePrecision);
int Vn = instr->VFPNRegValue(kDoublePrecision);
int Vm = instr->VFPMRegValue(kDoublePrecision);
int len = instr->Bits(9, 8);
NeonListOperand list(DwVfpRegister::from_code(Vn), len + 1);
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s d%d, ",
instr->Bit(6) == 0 ? "vtbl.8" : "vtbx.8", Vd);
FormatNeonList(Vn, list.type());
Print(", ");
PrintDRegister(Vm);
} else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 8) == 0x2 &&
instr->Bits(7, 6) != 0) {
// vqmovn.<type><size> Dd, Qm.
int Vd = instr->VFPDRegValue(kDoublePrecision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
char type = instr->Bit(6) != 0 ? 'u' : 's';
int size = 2 * kBitsPerByte * (1 << instr->Bits(19, 18));
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vqmovn.%c%i d%d, q%d",
type, size, Vd, Vm);
} else {
int Vd, Vm;
if (instr->Bit(6) == 0) {
Vd = instr->VFPDRegValue(kDoublePrecision);
Vm = instr->VFPMRegValue(kDoublePrecision);
} else {
Vd = instr->VFPDRegValue(kSimd128Precision);
Vm = instr->VFPMRegValue(kSimd128Precision);
}
if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0) {
if (instr->Bit(6) == 0) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vswp d%d, d%d", Vd, Vm);
} else {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vswp q%d, q%d", Vd, Vm);
}
} else if (instr->Bits(11, 7) == 0x18) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kDoublePrecision);
int index = instr->Bit(19);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vdup q%d, d%d[%d]", Vd, Vm, index);
} else if (instr->Bits(19, 16) == 0 && instr->Bits(11, 6) == 0x17) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vmvn q%d, q%d", Vd, Vm);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmvn q%d, q%d", Vd, Vm);
} else if (instr->Bits(19, 16) == 0xB && instr->Bits(11, 9) == 0x3 &&
instr->Bit(6) == 1) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
const char* suffix = nullptr;
int op = instr->Bits(8, 7);
switch (op) {
@@ -2212,63 +2235,66 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vcvt.%s q%d, q%d", suffix, Vd, Vm);
} else if (instr->Bits(11, 10) == 0x2) {
int Vd = instr->VFPDRegValue(kDoublePrecision);
int Vn = instr->VFPNRegValue(kDoublePrecision);
int Vm = instr->VFPMRegValue(kDoublePrecision);
int len = instr->Bits(9, 8);
NeonListOperand list(DwVfpRegister::from_code(Vn), len + 1);
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s d%d, ",
instr->Bit(6) == 0 ? "vtbl.8" : "vtbx.8", Vd);
FormatNeonList(Vn, list.type());
Print(", ");
PrintDRegister(Vm);
} else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 6) == 0x7) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
} else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 8) == 0x1) {
int size = kBitsPerByte * (1 << instr->Bits(19, 18));
// vzip.<size> Qd, Qm.
const char* op = instr->Bit(7) != 0 ? "vzip" : "vuzp";
if (instr->Bit(6) == 0) {
// vzip/vuzp.<size> Dd, Dm.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vzip.%d q%d, q%d", size, Vd, Vm);
} else if (instr->Bits(17, 16) == 0 && instr->Bits(11, 9) == 0) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
"%s.%d d%d, d%d", op, size, Vd, Vm);
} else {
// vzip/vuzp.<size> Qd, Qm.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.%d q%d, q%d", op, size, Vd, Vm);
}
} else if (instr->Bits(17, 16) == 0 && instr->Bits(11, 9) == 0 &&
instr->Bit(6) == 1) {
int size = kBitsPerByte * (1 << instr->Bits(19, 18));
int op = kBitsPerByte
<< (static_cast<int>(Neon64) - instr->Bits(8, 7));
// vrev<op>.<size> Qd, Qm.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vrev%d.%d q%d, q%d", op, size, Vd, Vm);
} else if (instr->Bits(17, 16) == 0x1 && instr->Bit(11) == 0) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
} else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0x1) {
int size = kBitsPerByte * (1 << instr->Bits(19, 18));
const char* type = instr->Bit(10) != 0 ? "f" : "s";
if (instr->Bit(6) == 0) {
// vtrn.<size> Dd, Dm.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vtrn.%d d%d, d%d", size, Vd, Vm);
} else {
// vtrn.<size> Qd, Qm.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vtrn.%d q%d, q%d", size, Vd, Vm);
}
} else if (instr->Bits(17, 16) == 0x1 && instr->Bit(11) == 0 &&
instr->Bit(6) == 1) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int size = kBitsPerByte * (1 << instr->Bits(19, 18));
char type = instr->Bit(10) != 0 ? 'f' : 's';
if (instr->Bits(9, 6) == 0xd) {
// vabs<type>.<size> Qd, Qm.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vabs.%c%d q%d, q%d",
type, size, Vd, Vm);
} else if (instr->Bits(9, 6) == 0xf) {
// vneg<type>.<size> Qd, Qm.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vneg.%c%d q%d, q%d",
type, size, Vd, Vm);
} else {
Unknown(instr);
}
} else if (instr->Bits(19, 18) == 0x2 && instr->Bits(11, 8) == 0x5 &&
instr->Bit(6) == 1) {
// vrecpe/vrsqrte.f32 Qd, Qm.
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
const char* op = instr->Bit(7) == 0 ? "vrecpe" : "vrsqrte";
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.f32 q%d, q%d", op, Vd, Vm);
} else {
Unknown(instr);
}
} else if (instr->Bits(11, 7) == 0 && instr->Bit(4) == 1 &&
instr->Bit(6) == 1) {
// vshr.u<size> Qd, Qm, shift
int size = base::bits::RoundDownToPowerOfTwo32(instr->Bits(21, 16));
int shift = 2 * size - instr->Bits(21, 16);
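The two lines above are the whole immediate decode for NEON shifts: imm6 (bits 21:16) carries both the lane size and the shift amount. A standalone sketch of the arithmetic, with RoundDownToPowerOfTwo32 re-implemented here purely for illustration:

#include <cassert>
#include <cstdint>

// Illustrative stand-in for base::bits::RoundDownToPowerOfTwo32.
static uint32_t RoundDownToPowerOfTwo32(uint32_t value) {
  uint32_t result = 1;
  while (result * 2 <= value) result *= 2;
  return result;
}

int main() {
  // NEON shift-right immediates encode imm6 = 2 * size - shift, so the
  // lane size is the largest power of two not exceeding imm6.
  for (uint32_t imm6 = 8; imm6 < 64; ++imm6) {
    uint32_t size = RoundDownToPowerOfTwo32(imm6);  // 8, 16 or 32
    uint32_t shift = 2 * size - imm6;               // 1 .. size
    assert(shift >= 1 && shift <= size);
  }
  // Example: imm6 == 13 gives size 8, shift 3, i.e. vshr.u8 qd, qm, #3.
  return 0;
}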

57
deps/v8/src/arm/interface-descriptors-arm.cc

@ -54,11 +54,15 @@ const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
const Register MathPowTaggedDescriptor::exponent() { return r2; }
const Register MathPowIntegerDescriptor::exponent() {
return MathPowTaggedDescriptor::exponent();
}
const Register RegExpExecDescriptor::StringRegister() { return r0; }
const Register RegExpExecDescriptor::LastIndexRegister() { return r1; }
const Register RegExpExecDescriptor::StringStartRegister() { return r2; }
const Register RegExpExecDescriptor::StringEndRegister() { return r3; }
const Register RegExpExecDescriptor::CodeRegister() { return r4; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
@ -282,46 +286,6 @@ void StringAddDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void KeyedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
r2, // key
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&noInlineDescriptor);
}
void NamedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
r2, // name
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&noInlineDescriptor);
}
void CallHandlerDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
r0, // receiver
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
@ -360,7 +324,7 @@ void InterpreterDispatchDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r0, // argument count (not including receiver)
@ -370,7 +334,7 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r0, // argument count (not including receiver)
@ -382,8 +346,8 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
r0, // argument count (not including receiver)
r1, // target to call checked to be Array function
@ -408,7 +372,8 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
Register registers[] = {
r0, // the value to pass to the generator
r1, // the JSGeneratorObject to resume
r2,  // the resume mode (tagged)
r3, // SuspendFlags (tagged)
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
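Each hunk in this file follows the same shape: the descriptor lists the fixed registers its stub expects, in parameter order, and hands them to the platform-independent descriptor data. A stripped-down sketch of the pattern (the types here are simplified stand-ins, not V8's real declarations):

#include <cstddef>

struct Register { int code; };
constexpr Register r0{0}, r1{1}, r2{2}, r3{3};

struct CallInterfaceDescriptorData {
  void InitializePlatformSpecific(size_t count, const Register* regs) {
    // Records that parameter i of the call is passed in regs[i].
  }
};

void ResumeGeneratorInitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {
      r0,  // the value to pass to the generator
      r1,  // the JSGeneratorObject to resume
      r2,  // the resume mode (tagged)
      r3,  // SuspendFlags (tagged)
  };
  data->InitializePlatformSpecific(sizeof(registers) / sizeof(registers[0]),
                                   registers);
}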

217
deps/v8/src/arm/macro-assembler-arm.cc

@ -6,11 +6,15 @@
#if V8_TARGET_ARCH_ARM
#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@ -19,14 +23,19 @@
namespace v8 {
namespace internal {
MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: Assembler(isolate, buffer, size),
generating_stub_(false),
has_frame_(false),
isolate_(isolate),
jit_cookie_(0) {
if (FLAG_mask_constants_with_cookie) {
jit_cookie_ = isolate->random_number_generator()->NextInt();
}
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<Object>::New(isolate_->heap()->undefined_value(), isolate_);
}
}
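The new jit_cookie_ member backs FLAG_mask_constants_with_cookie: constants emitted into the instruction stream can be XOR-masked with a per-assembler random value so attacker-chosen immediates never appear verbatim in executable memory. A minimal sketch of the masking idea, not V8's actual emission path:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t jit_cookie = 0xC0FFEE42u;        // random per-assembler value
  uint32_t constant = 0xDEADBEEFu;          // attacker-influenced immediate
  uint32_t masked = constant ^ jit_cookie;  // what lands in the code stream
  // The generated code applies the XOR again at run time to recover it.
  assert((masked ^ jit_cookie) == constant);
  return 0;
}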
@ -236,6 +245,9 @@ void MacroAssembler::Push(Handle<Object> handle) {
push(ip);
}
void MacroAssembler::Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
void MacroAssembler::Move(Register dst, Smi* smi) { mov(dst, Operand(smi)); }
void MacroAssembler::Move(Register dst, Handle<Object> value) {
mov(dst, Operand(value));
@ -1154,6 +1166,15 @@ void MacroAssembler::ExtractLane(Register dst, QwNeonRegister src,
vmov(dt, dst, double_source, double_lane);
}
void MacroAssembler::ExtractLane(Register dst, DwVfpRegister src,
NeonDataType dt, int lane) {
int size = NeonSz(dt); // 0, 1, 2
int byte = lane << size;
int double_byte = byte & (kDoubleSize - 1);
int double_lane = double_byte >> size;
vmov(dt, dst, src, double_lane);
}
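The index arithmetic above maps a logical lane onto a D-register-relative index: NeonSz(dt) is log2 of the lane width in bytes, and masking the byte offset with kDoubleSize - 1 keeps it inside one 8-byte double register (for the Q-register overload it also selects which half). The same arithmetic, standalone:

#include <cassert>

int main() {
  const int kDoubleSize = 8;                   // bytes per D register
  int size = 1;                                // NeonS16: log2(2 bytes)
  int lane = 5;                                // lane 5 of a Q register
  int byte = lane << size;                     // byte offset 10
  int double_byte = byte & (kDoubleSize - 1);  // byte 2 within its D reg
  int double_lane = double_byte >> size;       // lane 1 of that D reg
  assert(double_byte == 2 && double_lane == 1);
  return 0;
}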
void MacroAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
Register scratch, int lane) {
int s_code = src.code() * 4 + lane;
@ -1892,14 +1913,13 @@ void MacroAssembler::IsObjectJSStringType(Register object,
b(ne, fail);
}
void MacroAssembler::IsObjectNameType(Register object,
Register scratch,
Label* fail) {
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
cmp(scratch, Operand(LAST_NAME_TYPE));
b(hi, fail);
}
Condition MacroAssembler::IsObjectStringType(Register obj, Register type,
Condition cond) {
ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
tst(type, Operand(kIsNotStringMask), cond);
DCHECK_EQ(0u, kStringTag);
return eq;
}
void MacroAssembler::MaybeDropFrames() {
@ -2362,29 +2382,6 @@ void MacroAssembler::CheckMap(Register obj,
}
void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
Register scratch2, Handle<WeakCell> cell,
Handle<Code> success,
SmiCheckType smi_check_type) {
Label fail;
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, &fail);
}
ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
CmpWeakValue(scratch1, cell, scratch2);
Jump(success, RelocInfo::CODE_TARGET, eq);
bind(&fail);
}
void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
Register scratch) {
mov(scratch, Operand(cell));
ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
cmp(value, scratch);
}
void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
mov(value, Operand(cell));
ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
@ -2397,7 +2394,6 @@ void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
JumpIfSmi(value, miss);
}
void MacroAssembler::GetMapConstructor(Register result, Register map,
Register temp, Register temp2) {
Label done, loop;
@ -2700,27 +2696,6 @@ void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
}
void MacroAssembler::AssertFastElements(Register elements) {
if (emit_debug_code()) {
DCHECK(!elements.is(ip));
Label ok;
push(elements);
ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
cmp(elements, ip);
b(eq, &ok);
LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
cmp(elements, ip);
b(eq, &ok);
LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
cmp(elements, ip);
b(eq, &ok);
Abort(kJSObjectWithFastElementsMapHasSlowElements);
bind(&ok);
pop(elements);
}
}
void MacroAssembler::Check(Condition cond, BailoutReason reason) {
Label L;
@ -2812,6 +2787,11 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
}
}
void MacroAssembler::InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(kRootRegister, Operand(roots_array_start));
}
void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
Register reg,
@ -2835,6 +2815,13 @@ void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
b(ne, not_power_of_two);
}
void MacroAssembler::SmiTag(Register reg, SBit s) {
add(reg, reg, Operand(reg), s);
}
void MacroAssembler::SmiTag(Register dst, Register src, SBit s) {
add(dst, src, Operand(src), s);
}
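SmiTag compiles to a single add because on 32-bit targets a smi is just the value shifted left by kSmiTagSize == 1 with a zero tag bit, and adding a register to itself is exactly that shift (with optional flag setting via SBit to catch overflow). The arithmetic in isolation:

#include <cassert>
#include <cstdint>

int main() {
  const int kSmiTagSize = 1, kSmiTagMask = 1, kSmiTag = 0;
  int32_t value = 42;
  int32_t smi = value + value;             // add(reg, reg, Operand(reg))
  assert(smi == value << kSmiTagSize);     // tagging is a doubling
  assert((smi & kSmiTagMask) == kSmiTag);  // tst(value, kSmiTagMask) -> eq
  assert((smi >> kSmiTagSize) == value);   // untag: arithmetic shift right
  return 0;
}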
void MacroAssembler::JumpIfNotBothSmi(Register reg1,
Register reg2,
@ -2853,6 +2840,24 @@ void MacroAssembler::UntagAndJumpIfSmi(
b(cc, smi_case); // Shifter carry is not set for a smi.
}
void MacroAssembler::SmiTst(Register value) {
tst(value, Operand(kSmiTagMask));
}
void MacroAssembler::NonNegativeSmiTst(Register value) {
tst(value, Operand(kSmiTagMask | kSmiSignMask));
}
void MacroAssembler::JumpIfSmi(Register value, Label* smi_label) {
tst(value, Operand(kSmiTagMask));
b(eq, smi_label);
}
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
tst(value, Operand(kSmiTagMask));
b(ne, not_smi_label);
}
void MacroAssembler::JumpIfEitherSmi(Register reg1,
Register reg2,
Label* on_either_smi) {
@ -2862,18 +2867,6 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1,
b(eq, on_either_smi);
}
void MacroAssembler::AssertNotNumber(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, kOperandIsANumber);
push(object);
CompareObjectType(object, object, object, HEAP_NUMBER_TYPE);
pop(object);
Check(ne, kOperandIsANumber);
}
}
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@ -2892,34 +2885,6 @@ void MacroAssembler::AssertSmi(Register object) {
}
void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, kOperandIsASmiAndNotAString);
push(object);
ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
pop(object);
Check(lo, kOperandIsNotAString);
}
}
void MacroAssembler::AssertName(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, kOperandIsASmiAndNotAName);
push(object);
ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(object, object, LAST_NAME_TYPE);
pop(object);
Check(le, kOperandIsNotAName);
}
}
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@ -2945,32 +2910,35 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
// `flags` should be an untagged integer. See `SuspendFlags` in src/globals.h
if (!emit_debug_code()) return;
tst(object, Operand(kSmiTagMask));
Check(ne, kOperandIsASmiAndNotAGeneratorObject);
// Load map
Register map = object;
push(object);
ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
Label async, do_check;
tst(flags, Operand(static_cast<int>(SuspendFlags::kGeneratorTypeMask)));
b(ne, &async);
// Check if JSGeneratorObject
CompareInstanceType(map, object, JS_GENERATOR_OBJECT_TYPE);
jmp(&do_check);
bind(&async);
// Check if JSAsyncGeneratorObject
CompareInstanceType(map, object, JS_ASYNC_GENERATOR_OBJECT_TYPE);
bind(&do_check);
// Restore generator object to register and perform assertion
pop(object);
Check(eq, kOperandIsNotAGeneratorObject);
}
void MacroAssembler::AssertReceiver(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, kOperandIsASmiAndNotAReceiver);
push(object);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE);
pop(object);
Check(hs, kOperandIsNotAReceiver);
}
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@ -3614,6 +3582,22 @@ void MacroAssembler::LoadAccessor(Register dst, Register holder,
ldr(dst, FieldMemOperand(dst, offset));
}
template <typename Field>
void MacroAssembler::DecodeFieldToSmi(Register dst, Register src) {
static const int shift = Field::kShift;
static const int mask = Field::kMask >> shift << kSmiTagSize;
STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
STATIC_ASSERT(kSmiTag == 0);
if (shift < kSmiTagSize) {
mov(dst, Operand(src, LSL, kSmiTagSize - shift));
and_(dst, dst, Operand(mask));
} else if (shift > kSmiTagSize) {
mov(dst, Operand(src, LSR, shift - kSmiTagSize));
and_(dst, dst, Operand(mask));
} else {
and_(dst, src, Operand(mask));
}
}
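DecodeFieldToSmi lands the decoded bitfield already smi-tagged: instead of shifting the field down to bit 0 and then tagging, it shifts it down to bit kSmiTagSize and masks once. A worked example with a hypothetical 4-bit field at bit 3 (so shift > kSmiTagSize, the LSR branch above):

#include <cassert>
#include <cstdint>

int main() {
  const int kShift = 3, kSmiTagSize = 1;
  const uint32_t kFieldMask = 0xFu << kShift;
  const uint32_t kSmiMask = kFieldMask >> kShift << kSmiTagSize;  // 0x1E

  uint32_t src = (9u << kShift) | 0x7u;  // field value 9, plus noise bits
  uint32_t dst = (src >> (kShift - kSmiTagSize)) & kSmiMask;  // LSR branch
  assert(dst == 9u << kSmiTagSize);      // 9, already tagged as a smi
  return 0;
}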
void MacroAssembler::CheckEnumCache(Label* call_runtime) {
Register null_value = r5;
@ -3758,7 +3742,6 @@ bool AreAliased(Register reg1,
}
#endif
CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache)
: address_(address),

102
deps/v8/src/arm/macro-assembler-arm.h

@ -5,6 +5,7 @@
#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
#include "src/arm/assembler-arm.h"
#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/frames.h"
@ -92,6 +93,9 @@ class MacroAssembler: public Assembler {
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
int jit_cookie() const { return jit_cookie_; }
Isolate* isolate() const { return isolate_; }
// Returns the size of a call in instructions. Note, the value returned is
// only valid as long as no entries are added to the constant pool between
@ -174,7 +178,7 @@ class MacroAssembler: public Assembler {
void Pop(Register dst) { pop(dst); }
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi* smi);
void Move(Register dst, Handle<Object> value);
void Move(Register dst, Register src, Condition cond = al);
void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
@ -332,7 +336,7 @@ class MacroAssembler: public Assembler {
// Push a handle.
void Push(Handle<Object> handle);
void Push(Smi* smi);
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
@ -563,6 +567,7 @@ class MacroAssembler: public Assembler {
void VmovExtended(const MemOperand& dst, int src_code, Register scratch);
void ExtractLane(Register dst, QwNeonRegister src, NeonDataType dt, int lane);
void ExtractLane(Register dst, DwVfpRegister src, NeonDataType dt, int lane);
void ExtractLane(SwVfpRegister dst, QwNeonRegister src, Register scratch,
int lane);
void ReplaceLane(QwNeonRegister dst, QwNeonRegister src, Register src_lane,
@ -658,11 +663,7 @@ class MacroAssembler: public Assembler {
Register map,
Register scratch);
void InitializeRootRegister();
// ---------------------------------------------------------------------------
// JavaScript invokes
@ -711,10 +712,6 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
void IsObjectNameType(Register object,
Register scratch,
Label* fail);
// Frame restart support
void MaybeDropFrames();
@ -884,17 +881,6 @@ class MacroAssembler: public Assembler {
Label* fail,
SmiCheckType smi_check_type);
// Check if the map of an object is equal to a specified weak map and branch
// to a specified target if equal. Skip the smi check if not required
// (object is known to be a heap object)
void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
Handle<WeakCell> cell, Handle<Code> success,
SmiCheckType smi_check_type);
// Compare the given value and the value of weak cell.
void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
void GetWeakValue(Register value, Handle<WeakCell> cell);
// Load the value of the weak cell in the value register. Branch to the given
@ -927,16 +913,8 @@ class MacroAssembler: public Assembler {
// Returns a condition that will be enabled if the object was a string
// and the passed-in condition passed. If the passed-in condition failed
// then flags remain unchanged.
Condition IsObjectStringType(Register obj, Register type,
Condition cond = al);
// Get the number of least significant bits from a register
void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
@ -1151,7 +1129,6 @@ class MacroAssembler: public Assembler {
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cond, BailoutReason reason);
void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
void Check(Condition cond, BailoutReason reason);
@ -1201,12 +1178,8 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Smi utilities
void SmiTag(Register reg, SBit s = LeaveCC);
void SmiTag(Register dst, Register src, SBit s = LeaveCC);
// Try to convert int32 to smi. If the value is too large, preserve
// the original value and jump to not_a_smi. Destroys scratch and
@ -1233,40 +1206,21 @@ class MacroAssembler: public Assembler {
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
// Test if the register contains a smi (Z == 0 (eq) if true).
void SmiTst(Register value);
void NonNegativeSmiTst(Register value);
// Jump if the register contains a smi.
void JumpIfSmi(Register value, Label* smi_label);
// Jump if the register contains a non-smi.
void JumpIfNotSmi(Register value, Label* not_smi_label);
// Jump if either of the registers contain a non-smi.
void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
// Jump if either of the registers contain a smi.
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
// Abort execution if argument is a number, enabled via --debug-code.
void AssertNotNumber(Register object);
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
void AssertSmi(Register object);
// Abort execution if argument is not a string, enabled via --debug-code.
void AssertString(Register object);
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
@ -1276,10 +1230,7 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a JSGeneratorObject,
// enabled via --debug-code.
void AssertGeneratorObject(Register object);
// Abort execution if argument is not a JSReceiver, enabled via --debug-code.
void AssertReceiver(Register object);
void AssertGeneratorObject(Register object, Register suspend_flags);
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
@ -1353,21 +1304,7 @@ class MacroAssembler: public Assembler {
}
template <typename Field>
void DecodeFieldToSmi(Register dst, Register src);
template<typename Field>
void DecodeFieldToSmi(Register reg) {
@ -1450,15 +1387,16 @@ class MacroAssembler: public Assembler {
bool generating_stub_;
bool has_frame_;
Isolate* isolate_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
int jit_cookie_;
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
friend class StandardFrame;
};
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit

1188
deps/v8/src/arm/simulator-arm.cc

File diff suppressed because it is too large

8
deps/v8/src/arm/simulator-arm.h

@ -154,10 +154,10 @@ class Simulator {
void get_d_register(int dreg, uint32_t* value);
void set_d_register(int dreg, const uint32_t* value);
// Support for NEON.
template <typename T>
void get_q_register(int qreg, T* value);
template <typename T>
void set_q_register(int qreg, const T* value);
template <typename T, int SIZE = kSimd128Size>
void get_neon_register(int reg, T (&value)[SIZE / sizeof(T)]);
template <typename T, int SIZE = kSimd128Size>
void set_neon_register(int reg, const T (&value)[SIZE / sizeof(T)]);
void set_s_register(int reg, unsigned int value);
unsigned int get_s_register(int reg) const;
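The switch from raw T* parameters to sized array references means the compiler now checks that the caller's buffer exactly covers the 128-bit register. A minimal sketch of the pattern, assuming a plain 16-byte backing store rather than the simulator's real register file:

#include <cstdint>
#include <cstring>

constexpr int kSimd128Size = 16;

template <typename T, int SIZE = kSimd128Size>
void get_neon_register(const uint8_t (&reg)[kSimd128Size],
                       T (&value)[SIZE / sizeof(T)]) {
  std::memcpy(value, reg, SIZE);  // element count is enforced by the type
}

int main() {
  uint8_t reg[kSimd128Size] = {1, 2, 3, 4};
  uint32_t lanes[4];
  get_neon_register(reg, lanes);  // OK: 4 lanes x 4 bytes == 16 bytes
  // uint32_t bad[2]; get_neon_register(reg, bad);  // would fail to compile
  return 0;
}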

55
deps/v8/src/arm64/assembler-arm64-inl.h

@ -16,7 +16,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return false; }
void RelocInfo::apply(intptr_t delta) {
// On arm64 only internal references need extra work.
@ -691,32 +691,28 @@ Address RelocInfo::constant_pool_entry_address() {
return Assembler::target_pointer_address_at(pc_);
}
HeapObject* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return HeapObject::cast(
reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<HeapObject>(
reinterpret_cast<HeapObject**>(Assembler::target_address_at(pc_, host_)));
}
void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target);
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
}
}
@ -745,13 +741,12 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return target_address();
}
void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target) {
set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
}
}
@ -776,13 +771,11 @@ void RelocInfo::set_target_cell(Cell* cell,
}
static const int kNoCodeAgeSequenceLength = 5 * kInstructionSize;
static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize;
Handle<Code> RelocInfo::code_age_stub_handle(Assembler* origin) {
UNREACHABLE(); // This should never be reached on ARM64.
return Handle<Code>();
}
@ -813,27 +806,25 @@ Address RelocInfo::debug_call_address() {
return Assembler::target_address_at(pc_, host_);
}
void RelocInfo::set_debug_call_address(Isolate* isolate, Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
STATIC_ASSERT(Assembler::kPatchDebugBreakSlotAddressOffset == 0);
Assembler::set_target_address_at(isolate, pc_, host_, target);
if (host() != NULL) {
Code* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target_code);
}
}
void RelocInfo::WipeOut(Isolate* isolate) {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_));
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = NULL;
} else {
Assembler::set_target_address_at(isolate, pc_, host_, NULL);
}
}

63
deps/v8/src/arm64/assembler-arm64.cc

@ -28,7 +28,6 @@
#if V8_TARGET_ARCH_ARM64
#define ARM64_DEFINE_REG_STATICS
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/assembler-arm64-inl.h"
@ -200,13 +199,14 @@ uint32_t RelocInfo::wasm_function_table_size_reference() {
}
void RelocInfo::unchecked_update_wasm_memory_reference(
Isolate* isolate, Address address, ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
}
void RelocInfo::unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode) {
Memory::uint32_at(Assembler::target_pointer_address_at(pc_)) = size;
// No icache flushing needed, see comment in set_target_address_at.
}
Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
@ -528,7 +528,7 @@ void ConstPool::EmitEntries() {
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
instr->SetImmPCOffsetTarget(assm_->isolate_data(), assm_->pc());
}
assm_->dc64(data);
}
@ -544,7 +544,7 @@ void ConstPool::EmitEntries() {
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
instr->SetImmPCOffsetTarget(assm_->isolate_data(), assm_->pc());
assm_->dc64(unique_it->first);
}
unique_entries_.clear();
@ -553,8 +553,8 @@ void ConstPool::EmitEntries() {
// Assembler
Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
: AssemblerBase(isolate_data, buffer, buffer_size),
constpool_(this),
recorded_ast_id_(TypeFeedbackId::None()),
unresolved_branches_() {
@ -675,22 +675,22 @@ void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
} else if (branch == next_link) {
// The branch is the last (but not also the first) instruction in the chain.
prev_link->SetImmPCOffsetTarget(isolate_data(), prev_link);
} else {
// The branch is in the middle of the chain.
if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
prev_link->SetImmPCOffsetTarget(isolate_data(), next_link);
} else if (label_veneer != NULL) {
// Use the veneer for all previous links in the chain.
prev_link->SetImmPCOffsetTarget(isolate_data(), prev_link);
end_of_chain = false;
link = next_link;
while (!end_of_chain) {
next_link = link->ImmPCOffsetTarget();
end_of_chain = (link == next_link);
link->SetImmPCOffsetTarget(isolate_data(), label_veneer);
link = next_link;
}
} else {
@ -761,10 +761,11 @@ void Assembler::bind(Label* label) {
// Internal references do not get patched to an instruction but directly
// to an address.
internal_reference_positions_.push_back(linkoffset);
PatchingAssembler patcher(isolate_data(), reinterpret_cast<byte*>(link),
2);
patcher.dc64(reinterpret_cast<uintptr_t>(pc_));
} else {
link->SetImmPCOffsetTarget(isolate_data(),
reinterpret_cast<Instruction*>(pc_));
}
@ -1697,19 +1698,19 @@ void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
void Assembler::ldar(const Register& rt, const Register& rn) {
DCHECK(rn.Is64Bits());
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAR_w : LDAR_x;
Emit(op | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::ldaxr(const Register& rt, const Register& rn) {
DCHECK(rn.Is64Bits());
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAXR_w : LDAXR_x;
Emit(op | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::stlr(const Register& rt, const Register& rn) {
DCHECK(rn.Is64Bits());
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLR_w : STLR_x;
Emit(op | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::stlxr(const Register& rs, const Register& rt,
@ -1717,25 +1718,25 @@ void Assembler::stlxr(const Register& rs, const Register& rt,
DCHECK(rs.Is32Bits());
DCHECK(rn.Is64Bits());
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLXR_w : STLXR_x;
Emit(op | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::ldarb(const Register& rt, const Register& rn) {
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
Emit(LDAR_b | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::ldaxrb(const Register& rt, const Register& rn) {
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
Emit(LDAXR_b | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::stlrb(const Register& rt, const Register& rn) {
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
Emit(STLR_b | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::stlxrb(const Register& rs, const Register& rt,
@ -1743,25 +1744,25 @@ void Assembler::stlxrb(const Register& rs, const Register& rt,
DCHECK(rs.Is32Bits());
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
Emit(STLXR_b | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::ldarh(const Register& rt, const Register& rn) {
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
Emit(LDAR_h | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::ldaxrh(const Register& rt, const Register& rn) {
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
Emit(LDAXR_h | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::stlrh(const Register& rt, const Register& rn) {
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
Emit(STLR_h | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
void Assembler::stlxrh(const Register& rs, const Register& rt,
@ -1769,7 +1770,7 @@ void Assembler::stlxrh(const Register& rs, const Register& rt,
DCHECK(rs.Is32Bits());
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
Emit(STLXR_h | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
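Every change in this run is Rn(rn) becoming RnSP(rn). In A64 load/store encodings, base-register field value 31 names the stack pointer rather than the zero register, so the acquire/release instructions must use the field encoder that accepts csp. A simplified model of the two encoders (the internal SP code of 63 is V8's convention; the constants here are illustrative):

#include <cassert>
#include <cstdint>

constexpr unsigned kSPRegInternalCode = 63;  // V8's internal code for csp
constexpr int kRnOffset = 5;                 // Rn field position in the instr

uint32_t RnSP(unsigned code) {
  if (code == kSPRegInternalCode) code = 31;  // SP encodes as register 31
  return code << kRnOffset;
}

uint32_t Rn(unsigned code) {
  assert(code != kSPRegInternalCode);  // a plain Rn field cannot take csp
  return code << kRnOffset;
}

int main() {
  assert(RnSP(kSPRegInternalCode) == 31u << kRnOffset);
  assert(Rn(16) == RnSP(16));  // ordinary registers encode identically
  return 0;
}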
void Assembler::mov(const Register& rd, const Register& rm) {
@ -2948,7 +2949,7 @@ void Assembler::GrowBuffer() {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
if (((rmode >= RelocInfo::COMMENT) &&
(rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL)) ||
(rmode == RelocInfo::INTERNAL_REFERENCE) ||
@ -2978,8 +2979,8 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
RelocInfo reloc_info_with_ast_id(reinterpret_cast<byte*>(pc_), rmode,
RecordedAstId().ToInt(), NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
@ -3068,7 +3069,7 @@ bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
void Assembler::RecordVeneerPool(int location_offset, int size) {
RelocInfo rinfo(buffer_ + location_offset, RelocInfo::VENEER_POOL,
static_cast<intptr_t>(size), NULL);
reloc_info_writer.Write(&rinfo);
}
@ -3111,7 +3112,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
// to the label.
Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
RemoveBranchFromLabelLinkChain(branch, label, veneer);
branch->SetImmPCOffsetTarget(isolate_data(), veneer);
b(label);
#ifdef DEBUG
DCHECK(SizeOfCodeGeneratedSince(&veneer_size_check) <=

170
deps/v8/src/arm64/assembler-arm64.h

@ -10,6 +10,7 @@
#include <map>
#include <vector>
#include "src/arm64/constants-arm64.h"
#include "src/arm64/instructions-arm64.h"
#include "src/assembler.h"
#include "src/globals.h"
@ -63,8 +64,8 @@ namespace internal {
R(d25) R(d26) R(d27) R(d28)
// clang-format on
constexpr int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
static const int kNoCodeAgeSequenceLength = 5 * kInstructionSize;
// Some CPURegister methods can return Register and FPRegister types, so we
// need to declare them in advance.
@ -90,6 +91,11 @@ struct CPURegister {
kNoRegister
};
constexpr CPURegister() : CPURegister(0, 0, CPURegister::kNoRegister) {}
constexpr CPURegister(int reg_code, int reg_size, RegisterType reg_type)
: reg_code(reg_code), reg_size(reg_size), reg_type(reg_type) {}
static CPURegister Create(int code, int size, RegisterType type) {
CPURegister r = {code, size, type};
return r;
@ -138,25 +144,9 @@ struct Register : public CPURegister {
return Register(CPURegister::Create(code, size, CPURegister::kRegister));
}
constexpr Register() : CPURegister() {}
constexpr explicit Register(const CPURegister& r) : CPURegister(r) {}
bool IsValid() const {
DCHECK(IsRegister() || IsNone());
@ -170,7 +160,7 @@ struct Register : public CPURegister {
// These members are necessary for compilation.
// A few of them may be unused for now.
static constexpr int kNumRegisters = kNumberOfRegisters;
STATIC_ASSERT(kNumRegisters == Code::kAfterLast);
static int NumRegisters() { return kNumRegisters; }
@ -197,8 +187,8 @@ struct Register : public CPURegister {
// End of V8 compatibility section -----------------------
};
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
struct FPRegister : public CPURegister {
enum Code {
@ -214,25 +204,9 @@ struct FPRegister : public CPURegister {
CPURegister::Create(code, size, CPURegister::kFPRegister));
}
constexpr FPRegister() : CPURegister() {}
constexpr explicit FPRegister(const CPURegister& r) : CPURegister(r) {}
bool IsValid() const {
DCHECK(IsFPRegister() || IsNone());
@ -243,7 +217,7 @@ struct FPRegister : public CPURegister {
static FPRegister DRegFromCode(unsigned code);
// Start of V8 compatibility section ---------------------
static constexpr int kMaxNumRegisters = kNumberOfFPRegisters;
STATIC_ASSERT(kMaxNumRegisters == Code::kAfterLast);
// Crankshaft can use all the FP registers except:
@ -261,54 +235,41 @@ struct FPRegister : public CPURegister {
STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));
#define DEFINE_REGISTER(register_class, name, code, size, type) \
constexpr register_class name { CPURegister(code, size, type) }
#define ALIAS_REGISTER(register_class, alias, name) \
constexpr register_class alias = name
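The rewritten macros replace the old two-phase scheme (define storage in one translation unit behind ARM64_DEFINE_REG_STATICS, expose extern references elsewhere) with genuine compile-time constants, which the new constexpr CPURegister constructor makes possible. The shape of the new pattern, reduced to its essentials:

struct Reg {
  int code, size, type;
  constexpr Reg(int c, int s, int t) : code(c), size(s), type(t) {}
};

#define DEFINE_REG(name, code, size, type) \
  constexpr Reg name{code, size, type}
#define ALIAS_REG(alias, name) constexpr Reg alias = name

DEFINE_REG(x16, 16, 64, 1);
ALIAS_REG(ip0, x16);
static_assert(ip0.code == 16, "usable in constant expressions");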
// No*Reg is used to indicate an unused argument, or an error case. Note that
// these all compare equal (using the Is() method). The Register and FPRegister
// variants are provided for convenience.
DEFINE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
DEFINE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
DEFINE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);
// v8 compatibility.
DEFINE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);
#define DEFINE_REGISTERS(N) \
DEFINE_REGISTER(Register, w##N, N, kWRegSizeInBits, CPURegister::kRegister); \
DEFINE_REGISTER(Register, x##N, N, kXRegSizeInBits, CPURegister::kRegister);
GENERAL_REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS
DEFINE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
CPURegister::kRegister);
DEFINE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
CPURegister::kRegister);
#define DEFINE_FPREGISTERS(N) \
DEFINE_REGISTER(FPRegister, s##N, N, kSRegSizeInBits, \
CPURegister::kFPRegister); \
DEFINE_REGISTER(FPRegister, d##N, N, kDRegSizeInBits, \
CPURegister::kFPRegister);
GENERAL_REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
#undef DEFINE_FPREGISTERS
#undef DEFINE_REGISTER
// Registers aliases.
ALIAS_REGISTER(Register, ip0, x16);
@ -566,8 +527,8 @@ class Immediate {
// -----------------------------------------------------------------------------
// Operands.
constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize;
constexpr uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
// Represents an operand in a machine instruction.
class Operand {
@ -756,7 +717,9 @@ class Assembler : public AssemblerBase {
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size)
: Assembler(IsolateData(isolate), buffer, buffer_size) {}
Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
virtual ~Assembler();
@ -807,6 +770,7 @@ class Assembler : public AssemblerBase {
inline static Address target_pointer_address_at(Address pc);
// Read/Modify the code target address in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
inline static Address target_address_at(Address pc, Address constant_pool);
inline static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
@ -836,7 +800,7 @@ class Assembler : public AssemblerBase {
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// All addresses in the constant pool are the same size as pointers.
static constexpr int kSpecialTargetSize = kPointerSize;
// The sizes of the call sequences emitted by MacroAssembler::Call.
// Wherever possible, use MacroAssembler::CallSize instead of these constants,
@ -851,8 +815,8 @@ class Assembler : public AssemblerBase {
// With relocation:
// ldr temp, =target
// blr temp
static constexpr int kCallSizeWithoutRelocation = 4 * kInstructionSize;
static constexpr int kCallSizeWithRelocation = 2 * kInstructionSize;
// Size of the generated code in bytes
uint64_t SizeOfGeneratedCode() const {
@ -884,11 +848,11 @@ class Assembler : public AssemblerBase {
return SizeOfCodeGeneratedSince(label) / kInstructionSize;
}
static constexpr int kPatchDebugBreakSlotAddressOffset = 0;
// Number of instructions necessary to be able to later patch it to a call.
static constexpr int kDebugBreakSlotInstructions = 5;
static constexpr int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstructionSize;
// Prevent constant pool emission until EndBlockConstPool is called.
@ -1847,7 +1811,7 @@ class Assembler : public AssemblerBase {
// The maximum code size generated for a veneer. Currently one branch
// instruction. This is for code size checking purposes, and can be extended
// in the future for example if we decide to add nops between the veneers.
static constexpr int kMaxVeneerCodeSize = 1 * kInstructionSize;
void RecordVeneerPool(int location_offset, int size);
// Emits veneers for branches that are approaching their maximum range.
@ -2000,7 +1964,7 @@ class Assembler : public AssemblerBase {
// suitable for fields that take instruction offsets.
inline int LinkAndGetInstructionOffsetTo(Label* label);
static constexpr int kStartOfLabelLinkChain = 0;
// Verify that a label's link chain is intact.
void CheckLabelLinkChain(Label const * label);
@ -2061,17 +2025,17 @@ class Assembler : public AssemblerBase {
// expensive. By default we only check again once a number of instructions
// has been generated. That also means that the sizing of the buffers is not
// an exact science, and that we rely on some slop to not overrun buffers.
static constexpr int kCheckConstPoolInterval = 128;
// Distance to first use after which a pool will be emitted. Pool entries
// are accessed with pc relative load therefore this cannot be more than
// 1 * MB. Since constant pool emission checks are interval based this value
// is an approximation.
static constexpr int kApproxMaxDistToConstPool = 64 * KB;
// Number of pool entries after which a pool will be emitted. Since constant
// pool emission checks are interval based this value is an approximation.
static constexpr int kApproxMaxPoolEntryCount = 512;
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
@ -2082,8 +2046,9 @@ class Assembler : public AssemblerBase {
// Relocation info generation
// Each relocation is encoded as a variable size value
static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer;
// Internal reference positions, required for (potential) patching in
// GrowBuffer(); contains only those internal references whose labels
// are already bound.
@ -2121,7 +2086,7 @@ class Assembler : public AssemblerBase {
// not have to check for overflow. The same is true for writes of large
// relocation info entries, and debug strings encoded in the instruction
// stream.
static constexpr int kGap = 128;
public:
class FarBranchInfo {
@ -2151,12 +2116,12 @@ class Assembler : public AssemblerBase {
// We generate a veneer for a branch if we reach within this distance of the
// limit of the range.
static constexpr int kVeneerDistanceMargin = 1 * KB;
// The factor of 2 is a finger in the air guess. With a default margin of
// 1KB, that leaves us an additional 256 instructions to avoid generating a
// protective branch.
static constexpr int kVeneerNoProtectionFactor = 2;
static constexpr int kVeneerDistanceCheckMargin =
kVeneerNoProtectionFactor * kVeneerDistanceMargin;
int unresolved_branches_first_limit() const {
DCHECK(!unresolved_branches_.empty());
@ -2195,14 +2160,18 @@ class PatchingAssembler : public Assembler {
// If more or fewer instructions than expected are generated or if some
// relocation information takes space in the buffer, the PatchingAssembler
// will crash trying to grow the buffer.
// This version will flush at destruction.
PatchingAssembler(Isolate* isolate, byte* start, unsigned count)
: PatchingAssembler(IsolateData(isolate), start, count) {
CHECK_NOT_NULL(isolate);
isolate_ = isolate;
}
// This version will not flush.
PatchingAssembler(IsolateData isolate_data, byte* start, unsigned count)
: Assembler(isolate_data, start, count * kInstructionSize + kGap),
isolate_(nullptr) {
// Block constant pool emission.
StartBlockPools();
}
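The two constructors now share one body through delegation: the Isolate* overload wraps the isolate into IsolateData and then records the pointer so the destructor knows to flush the instruction cache, while the IsolateData overload leaves isolate_ null and skips the flush. A reduced sketch of that split:

#include <cstddef>

struct Isolate;
struct IsolateData {
  explicit IsolateData(Isolate* = nullptr) {}  // constants only, no heap
};

class Patcher {
 public:
  // Flushing version: remembers the isolate for the destructor.
  Patcher(Isolate* isolate, void* start, size_t count)
      : Patcher(IsolateData(isolate), start, count) {
    isolate_ = isolate;
  }
  // Non-flushing version, usable without an Isolate.
  Patcher(IsolateData, void* start, size_t count) : isolate_(nullptr) {}
  ~Patcher() {
    if (isolate_ != nullptr) { /* Assembler::FlushICache(isolate_, ...) */ }
  }

 private:
  Isolate* isolate_;
};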
@ -2217,13 +2186,16 @@ class PatchingAssembler : public Assembler {
DCHECK(IsConstPoolEmpty());
// Flush the Instruction cache.
size_t length = buffer_size_ - kGap;
if (isolate_ != nullptr) Assembler::FlushICache(isolate_, buffer_, length);
}
// See definition of PatchAdrFar() for details.
static constexpr int kAdrFarPatchableNNops = 2;
static constexpr int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
void PatchAdrFar(int64_t target_offset);
private:
Isolate* isolate_;
};

487
deps/v8/src/arm64/code-stubs-arm64.cc

@ -4,20 +4,25 @@
#if V8_TARGET_ARCH_ARM64
#include "src/code-stubs.h"
#include "src/api-arguments.h"
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/frames-arm64.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/counters.h"
#include "src/heap/heap-inl.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/objects/regexp-match-info.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
#include "src/arm64/code-stubs-arm64.h"
#include "src/arm64/frames-arm64.h"
#include "src/arm64/code-stubs-arm64.h" // Cannot be the first include.
namespace v8 {
namespace internal {
@ -1264,223 +1269,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
// This case is handled prior to the RegExpExecStub call.
__ Abort(kUnexpectedRegExpExecCall);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
// jssp[0]: last_match_info (expected JSArray)
// jssp[8]: previous index
// jssp[16]: subject string
// jssp[24]: JSRegExp object
Label runtime;
// Use of registers for this function.
// Variable registers:
// x10-x13 used as scratch registers
// w0 string_type type of subject string
// x2 jsstring_length subject string length
// x3 jsregexp_object JSRegExp object
// w4 string_encoding Latin1 or UC16
// w5 sliced_string_offset if the string is a SlicedString
// offset to the underlying string
// w6 string_representation groups attributes of the string:
// - is a string
// - type of the string
// - is a short external string
Register string_type = w0;
Register jsstring_length = x2;
Register jsregexp_object = x3;
Register string_encoding = w4;
Register sliced_string_offset = w5;
Register string_representation = w6;
// These are in callee save registers and will be preserved by the call
// to the native RegExp code, as this code is called using the normal
// C calling convention. When calling directly from generated code the
// native RegExp code will not do a GC and therefore the contents of
// these registers are safe to use after the call.
// x19 subject subject string
// x20 regexp_data RegExp data (FixedArray)
// x21 last_match_info_elements info relative to the last match
// (FixedArray)
// x22 code_object generated regexp code
Register subject = x19;
Register regexp_data = x20;
Register last_match_info_elements = x21;
Register code_object = x22;
// Stack frame.
// jssp[00]: last_match_info (JSArray)
// jssp[08]: previous index
// jssp[16]: subject string
// jssp[24]: JSRegExp object
const int kLastMatchInfoOffset = 0 * kPointerSize;
const int kPreviousIndexOffset = 1 * kPointerSize;
const int kSubjectOffset = 2 * kPointerSize;
const int kJSRegExpOffset = 3 * kPointerSize;
// Ensure that a RegExp stack is allocated.
ExternalReference address_of_regexp_stack_memory_address =
ExternalReference::address_of_regexp_stack_memory_address(isolate());
ExternalReference address_of_regexp_stack_memory_size =
ExternalReference::address_of_regexp_stack_memory_size(isolate());
__ Mov(x10, address_of_regexp_stack_memory_size);
__ Ldr(x10, MemOperand(x10));
__ Cbz(x10, &runtime);
// Check that the first argument is a JSRegExp object.
DCHECK(jssp.Is(__ StackPointer()));
__ Peek(jsregexp_object, kJSRegExpOffset);
__ JumpIfSmi(jsregexp_object, &runtime);
__ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
// Check that the RegExp has been compiled (data contains a fixed array).
__ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
__ Tst(regexp_data, kSmiTagMask);
__ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
__ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE);
__ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
}
// Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
__ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
__ Cmp(x10, Smi::FromInt(JSRegExp::IRREGEXP));
__ B(ne, &runtime);
// Check that the number of captures fit in the static offsets vector buffer.
// We always have at least one capture for the whole match, plus additional
// ones due to capturing parentheses. A capture takes 2 registers.
// The number of capture registers then is (number_of_captures + 1) * 2.
__ Ldrsw(x10,
UntagSmiFieldMemOperand(regexp_data,
JSRegExp::kIrregexpCaptureCountOffset));
// Check (number_of_captures + 1) * 2 <= offsets vector size
// number_of_captures * 2 <= offsets vector size - 2
STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
__ Add(x10, x10, x10);
__ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
__ B(hi, &runtime);
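// Worked through (illustrative, not part of the stub): the Add above doubles
// the untagged capture count, and the unsigned compare enforces
// captures * 2 <= kVectorSize - 2, i.e. (captures + 1) * 2 <= kVectorSize.
// A vector of size 32, say, would admit at most 15 capturing parentheses
// before falling back to the runtime.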
// Initialize offset for possibly sliced string.
__ Mov(sliced_string_offset, 0);
DCHECK(jssp.Is(__ StackPointer()));
__ Peek(subject, kSubjectOffset);
__ JumpIfSmi(subject, &runtime);
__ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset));
// Handle subject string according to its encoding and representation:
// (1) Sequential string? If yes, go to (4).
// (2) Sequential or cons? If not, go to (5).
// (3) Cons string. If the string is flat, replace subject with first string
// and go to (1). Otherwise bail out to runtime.
// (4) Sequential string. Load regexp code according to encoding.
// (E) Carry on.
/// [...]
// Deferred code at the end of the stub:
// (5) Long external string? If not, go to (7).
// (6) External string. Make it, offset-wise, look like a sequential string.
// Go to (4).
// (7) Short external string or not a string? If yes, bail out to runtime.
// (8) Sliced or thin string. Replace subject with parent. Go to (1).
Label check_underlying; // (1)
Label seq_string; // (4)
Label not_seq_nor_cons; // (5)
Label external_string; // (6)
Label not_long_external; // (7)
__ Bind(&check_underlying);
__ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
__ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
// (1) Sequential string? If yes, go to (4).
__ And(string_representation,
string_type,
kIsNotStringMask |
kStringRepresentationMask |
kShortExternalStringMask);
// We rely on the fact that strings which are sequential (SeqString) and
// not short external are defined by the following bit pattern:
// string_type: 0XX0 XX00
// ^ ^ ^^
// | | ||
// | | is a SeqString
// | is not a short external String
// is a String
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
STATIC_ASSERT(kShortExternalStringTag != 0);
__ Cbz(string_representation, &seq_string); // Go to (4).
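A standalone C++ sketch of the combined mask test above; the mask values are assumptions chosen to be consistent with the STATIC_ASSERTs, not V8's actual definitions:

#include <cstdint>

bool IsSequentialNonShortString(uint32_t string_type) {
  const uint32_t kIsNotStringMask = 0x80;           // assumed
  const uint32_t kStringRepresentationMask = 0x07;  // assumed
  const uint32_t kShortExternalStringMask = 0x10;   // assumed
  // All three properties (is a string, sequential representation, not a
  // short external string) hold exactly when the masked bits are zero,
  // so one AND plus a compare-against-zero (Cbz) covers them all.
  return (string_type & (kIsNotStringMask | kStringRepresentationMask |
                         kShortExternalStringMask)) == 0;
}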
// (2) Sequential or cons? If not, go to (5).
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kThinStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ Cmp(string_representation, kExternalStringTag);
__ B(ge, &not_seq_nor_cons); // Go to (5).
// (3) Cons string. Check that it's flat.
__ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset));
__ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime);
// Replace subject with first string.
__ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
__ B(&check_underlying);
// (4) Sequential string. Load regexp code according to encoding.
__ Bind(&seq_string);
// Check that the third argument is a positive smi less than the subject
// string length. A negative value will be greater (unsigned comparison).
DCHECK(jssp.Is(__ StackPointer()));
__ Peek(x10, kPreviousIndexOffset);
__ JumpIfNotSmi(x10, &runtime);
__ Cmp(jsstring_length, x10);
__ B(ls, &runtime);
// Argument 2 (x1): We need to load argument 2 (the previous index) into x1
// before entering the exit frame.
__ SmiUntag(x1, x10);
// The fourth bit determines the string encoding in string_type.
STATIC_ASSERT(kOneByteStringTag == 0x08);
STATIC_ASSERT(kTwoByteStringTag == 0x00);
STATIC_ASSERT(kStringEncodingMask == 0x08);
// Find the code object based on the assumptions above.
// kDataOneByteCodeOffset and kDataUC16CodeOffset are adjacent; add an
// offset of kPointerSize to the former to reach the latter.
STATIC_ASSERT(JSRegExp::kDataOneByteCodeOffset + kPointerSize ==
JSRegExp::kDataUC16CodeOffset);
__ Mov(x10, kPointerSize);
// We will need the encoding later: Latin1 = 0x08
// UC16 = 0x00
__ Ands(string_encoding, string_type, kStringEncodingMask);
__ CzeroX(x10, ne);
__ Add(x10, regexp_data, x10);
__ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataOneByteCodeOffset));
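A sketch of the branchless field selection performed by the Mov/Ands/CzeroX/Add sequence above, assuming the encoding values from the STATIC_ASSERTs (Latin1 = 0x08, UC16 = 0x00):

#include <cstddef>
#include <cstdint>

size_t CodeFieldOffset(uint32_t string_type, size_t one_byte_code_offset,
                       size_t pointer_size) {
  const uint32_t kStringEncodingMask = 0x08;  // assumed, per the asserts
  // Latin1 (bit set): extra offset 0 -> one-byte code field.
  // UC16 (bit clear): extra offset kPointerSize -> adjacent two-byte field.
  size_t extra = (string_type & kStringEncodingMask) ? 0 : pointer_size;
  return one_byte_code_offset + extra;
}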
// (E) Carry on. String handling is done.
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object; otherwise it
// contains a smi (code flushing support).
__ JumpIfSmi(code_object, &runtime);
// All checks done. Now push arguments for native regexp code.
__ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1,
x10,
x11);
// Isolates: note we add an additional parameter here (isolate pointer).
__ EnterExitFrame(false, x10, 1);
DCHECK(csp.Is(__ StackPointer()));
@ -1496,50 +1287,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Mov(x10, ExternalReference::isolate_address(isolate()));
__ Poke(x10, kPointerSize);
Register length = w11;
Register previous_index_in_bytes = w12;
Register start = x13;
// Load start of the subject string.
__ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag);
// Load the length of the original subject string from the previous stack
// frame. We have to use fp, which points exactly two pointer sizes below
// the previous sp: creating a new stack frame pushes the previous fp onto
// the stack and decrements sp by 2 * kPointerSize.
__ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
__ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
// Handle UC16 encoding: two bytes make one character.
// string_encoding: if Latin1: 0x08
// if UC16: 0x00
STATIC_ASSERT(kStringEncodingMask == 0x08);
__ Ubfx(string_encoding, string_encoding, 3, 1);
__ Eor(string_encoding, string_encoding, 1);
// string_encoding: if Latin1: 0
// if UC16: 1
// Convert string positions from characters to bytes.
// Previous index is in x1.
__ Lsl(previous_index_in_bytes, w1, string_encoding);
__ Lsl(length, length, string_encoding);
__ Lsl(sliced_string_offset, sliced_string_offset, string_encoding);
// Argument 1 (x0): Subject string.
__ Mov(x0, subject);
CHECK(x0.is(RegExpExecDescriptor::StringRegister()));
// Argument 2 (x1): Previous index, already there.
CHECK(x1.is(RegExpExecDescriptor::LastIndexRegister()));
// Argument 3 (x2): Get the start of input.
// Start of input = start of string + previous index + substring offset
// (0 if the string is not sliced).
__ Add(w10, previous_index_in_bytes, sliced_string_offset);
__ Add(x2, start, Operand(w10, UXTW));
// Argument 4 (x3):
// End of input = start of input + (length of input - previous index)
__ Sub(w10, length, previous_index_in_bytes);
__ Add(x3, x2, Operand(w10, UXTW));
// Argument 3 (x2): Input start.
// Argument 4 (x3): Input end.
CHECK(x2.is(RegExpExecDescriptor::StringStartRegister()));
CHECK(x3.is(RegExpExecDescriptor::StringEndRegister()));
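A sketch of the position arithmetic above: the Ubfx/Eor pair turns the encoding bit into a shift amount (0 for Latin1, 1 for UC16), and the input start/end pointers follow from it. Names and types here are illustrative, not the stub's actual interface:

#include <cstdint>

struct InputRange {
  const uint8_t* start;
  const uint8_t* end;
};

InputRange ComputeInputRange(const uint8_t* string_data, uint32_t string_type,
                             uint32_t previous_index, uint32_t length,
                             uint32_t sliced_offset) {
  const uint32_t kStringEncodingMask = 0x08;  // assumed, per the asserts
  // Bit 3 set means Latin1; invert it so the shift is 0 for Latin1 (one
  // byte per character) and 1 for UC16 (two bytes per character).
  uint32_t shift = ((string_type & kStringEncodingMask) >> 3) ^ 1;
  uint32_t prev_bytes = previous_index << shift;
  uint32_t length_bytes = length << shift;
  uint32_t sliced_bytes = sliced_offset << shift;
  InputRange r;
  // Start of input = start of string data + previous index + slice offset.
  r.start = string_data + prev_bytes + sliced_bytes;
  // End of input = start of input + (length - previous index).
  r.end = r.start + (length_bytes - prev_bytes);
  return r;
}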
// Argument 5 (x4): static offsets vector buffer.
__ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate()));
@ -1550,6 +1307,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Mov(x5, 0);
// Argument 7 (x6): Start (high end) of backtracking stack memory area.
ExternalReference address_of_regexp_stack_memory_address =
ExternalReference::address_of_regexp_stack_memory_address(isolate());
ExternalReference address_of_regexp_stack_memory_size =
ExternalReference::address_of_regexp_stack_memory_size(isolate());
__ Mov(x10, address_of_regexp_stack_memory_address);
__ Ldr(x10, MemOperand(x10));
__ Mov(x11, address_of_regexp_stack_memory_size);
@ -1560,184 +1321,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Mov(x7, 1);
// Locate the code entry and call it.
Register code_object = RegExpExecDescriptor::CodeRegister();
__ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
DirectCEntryStub stub(isolate());
stub.GenerateCall(masm, code_object);
__ LeaveExitFrame(false, x10, true);
// The generated regexp code returns an int32 in w0.
Label failure, exception;
__ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure);
__ CompareAndBranch(w0,
NativeRegExpMacroAssembler::EXCEPTION,
eq,
&exception);
__ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime);
// Success: process the result from the native regexp code.
Register number_of_capture_registers = x12;
// Calculate number of capture registers (number_of_captures + 1) * 2
// and store it in the last match info.
__ Ldrsw(x10,
UntagSmiFieldMemOperand(regexp_data,
JSRegExp::kIrregexpCaptureCountOffset));
__ Add(x10, x10, x10);
__ Add(number_of_capture_registers, x10, 2);
// Check that the last match info is a FixedArray.
DCHECK(jssp.Is(__ StackPointer()));
__ Peek(last_match_info_elements, kLastMatchInfoOffset);
__ JumpIfSmi(last_match_info_elements, &runtime);
// Check that the object has fast elements.
__ Ldr(x10,
FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
__ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
// Check that the last match info has space for the capture registers and the
// additional information (overhead).
// (number_of_captures + 1) * 2 + overhead <= last match info size
// (number_of_captures * 2) + 2 + overhead <= last match info size
// number_of_capture_registers + overhead <= last match info size
__ Ldrsw(x10,
UntagSmiFieldMemOperand(last_match_info_elements,
FixedArray::kLengthOffset));
__ Add(x11, number_of_capture_registers, RegExpMatchInfo::kLastMatchOverhead);
__ Cmp(x11, x10);
__ B(gt, &runtime);
// Store the capture count.
__ SmiTag(x10, number_of_capture_registers);
__ Str(x10, FieldMemOperand(last_match_info_elements,
RegExpMatchInfo::kNumberOfCapturesOffset));
// Store last subject and last input.
__ Str(subject, FieldMemOperand(last_match_info_elements,
RegExpMatchInfo::kLastSubjectOffset));
// Use x10 as the subject string in order to only need
// one RecordWriteStub.
__ Mov(x10, subject);
__ RecordWriteField(last_match_info_elements,
RegExpMatchInfo::kLastSubjectOffset, x10, x11,
kLRHasNotBeenSaved, kDontSaveFPRegs);
__ Str(subject, FieldMemOperand(last_match_info_elements,
RegExpMatchInfo::kLastInputOffset));
__ Mov(x10, subject);
__ RecordWriteField(last_match_info_elements,
RegExpMatchInfo::kLastInputOffset, x10, x11,
kLRHasNotBeenSaved, kDontSaveFPRegs);
Register last_match_offsets = x13;
Register offsets_vector_index = x14;
Register current_offset = x15;
// Get the static offsets vector filled by the native regexp code
// and fill the last match info.
ExternalReference address_of_static_offsets_vector =
ExternalReference::address_of_static_offsets_vector(isolate());
__ Mov(offsets_vector_index, address_of_static_offsets_vector);
Label next_capture, done;
// The capture register counter starts at the number of capture registers
// and iterates down to zero (inclusive).
__ Add(last_match_offsets, last_match_info_elements,
RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag);
__ Bind(&next_capture);
__ Subs(number_of_capture_registers, number_of_capture_registers, 2);
__ B(mi, &done);
// Read two 32-bit values from the static offsets vector buffer into
// an X register.
__ Ldr(current_offset,
MemOperand(offsets_vector_index, kWRegSize * 2, PostIndex));
// Store the smi values in the last match info.
__ SmiTag(x10, current_offset);
// Clearing the bottom 32 bits gives us a Smi (see the sketch after this
// loop).
STATIC_ASSERT(kSmiTag == 0);
__ Bic(x11, current_offset, kSmiShiftMask);
__ Stp(x10,
x11,
MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
__ B(&next_capture);
__ Bind(&done);
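A sketch of the Smi packing used in the loop above, assuming V8's 64-bit Smi layout (payload in the upper 32 bits, kSmiShift = 32); the function name and pointer interface are illustrative:

#include <cstdint>

void TagOffsetPair(uint64_t packed_offsets, int64_t* smi_lo, int64_t* smi_hi) {
  const int kSmiShift = 32;
  const uint64_t kSmiShiftMask = (uint64_t{1} << kSmiShift) - 1;
  // Low 32 bits: shift up into the payload position (SmiTag above).
  *smi_lo = static_cast<int64_t>(packed_offsets << kSmiShift);
  // High 32 bits are already in payload position: just clear the bottom
  // 32 bits (Bic with kSmiShiftMask above).
  *smi_hi = static_cast<int64_t>(packed_offsets & ~kSmiShiftMask);
}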
// Return last match info.
__ Mov(x0, last_match_info_elements);
// Drop the 4 arguments of the stub from the stack.
__ Drop(4);
__ Ret();
__ Bind(&exception);
Register exception_value = x0;
// A stack overflow (on the backtrack stack) may have occurred
// in the RegExp code but no exception has been created yet.
// If there is no pending exception, handle that in the runtime system.
__ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
__ Mov(x11,
Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate())));
__ Ldr(exception_value, MemOperand(x11));
__ Cmp(x10, exception_value);
__ B(eq, &runtime);
// For exception, throw the exception again.
__ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ Bind(&failure);
__ Mov(x0, Operand(isolate()->factory()->null_value()));
// Drop the 4 arguments of the stub from the stack.
__ Drop(4);
// Return the smi-tagged result.
__ SmiTag(x0);
__ Ret();
__ Bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (5) Long external string? If not, go to (7).
__ Bind(&not_seq_nor_cons);
// Compare flags are still set.
__ B(ne, &not_long_external); // Go to (7).
// (6) External string. Make it, offset-wise, look like a sequential string.
__ Bind(&external_string);
if (masm->emit_debug_code()) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
__ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
__ Tst(x10, kIsIndirectStringMask);
__ Check(eq, kExternalStringExpectedButNotFound);
__ And(x10, x10, kStringRepresentationMask);
__ Cmp(x10, 0);
__ Check(ne, kExternalStringExpectedButNotFound);
}
__ Ldr(subject,
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
__ B(&seq_string); // Go to (4).
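A sketch of the pointer adjustment above, with an assumed header size: after subtracting (header size - heap tag) from the resource data pointer, the generic sequential-string address computation lands directly on the external backing store:

#include <cstdint>

const uint8_t* FakeSequentialBase(const uint8_t* resource_data) {
  const int kSeqHeaderSize = 16;  // assumed stand-in for SeqString::kHeaderSize
  const int kHeapObjectTag = 1;
  // After this subtraction, the generic sequential element address
  // base + kSeqHeaderSize - kHeapObjectTag + index == resource_data + index.
  return resource_data - (kSeqHeaderSize - kHeapObjectTag);
}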
// (7) If this is a short external string or not a string, bail out to
// runtime.
__ Bind(&not_long_external);
STATIC_ASSERT(kShortExternalStringTag != 0);
__ TestAndBranchIfAnySet(string_representation,
kShortExternalStringMask | kIsNotStringMask,
&runtime);
// (8) Sliced or thin string. Replace subject with parent.
Label thin_string;
__ Cmp(string_representation, kThinStringTag);
__ B(eq, &thin_string);
__ Ldr(sliced_string_offset,
UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
__ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
__ B(&check_underlying); // Go to (1).
__ bind(&thin_string);
__ Ldr(subject, FieldMemOperand(subject, ThinString::kActualOffset));
__ B(&check_underlying); // Go to (1).
#endif
}
@ -2509,6 +2102,37 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
RecordWriteStub::RegisterAllocation::RegisterAllocation(Register object,
Register address,
Register scratch)
: object_(object),
address_(address),
scratch0_(scratch),
saved_regs_(kCallerSaved),
saved_fp_regs_(kCallerSavedFP) {
DCHECK(!AreAliased(scratch, object, address));
// The SaveCallerSaveRegisters method needs to save caller-saved
// registers, but we don't bother saving MacroAssembler scratch registers.
saved_regs_.Remove(MacroAssembler::DefaultTmpList());
saved_fp_regs_.Remove(MacroAssembler::DefaultFPTmpList());
// We would like to require more scratch registers for this stub,
// but the number of registers comes down to the ones used in
// FullCodeGen::SetVar(), which is architecture independent.
// We allocate 2 extra scratch registers that we'll save on the stack.
CPURegList pool_available = GetValidRegistersForAllocation();
CPURegList used_regs(object, address, scratch);
pool_available.Remove(used_regs);
scratch1_ = Register(pool_available.PopLowestIndex());
scratch2_ = Register(pool_available.PopLowestIndex());
// The scratch registers will be restored by other means so we don't need
// to save them with the other caller saved registers.
saved_regs_.Remove(scratch0_);
saved_regs_.Remove(scratch1_);
saved_regs_.Remove(scratch2_);
}
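A bitmask sketch of the pool-based scratch allocation in the constructor above: remove the registers already in use, then pop the lowest-index survivors, mirroring what CPURegList::PopLowestIndex does. Register codes are plain ints here, an illustrative reduction of CPURegList:

#include <cstdint>

int PopLowestIndex(uint64_t* pool) {
  if (*pool == 0) return -1;  // empty pool
  int code = 0;
  while (((*pool >> code) & 1) == 0) code++;  // lowest set bit = lowest code
  *pool &= *pool - 1;                         // clear that bit
  return code;
}

// Usage: start from the valid-for-allocation set, remove object, address and
// scratch, then pop twice to obtain the two extra scratch registers.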
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// We need some extra registers for this stub, they have been allocated
@ -2566,6 +2190,9 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
void RecordWriteStub::Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,

40
deps/v8/src/arm64/code-stubs-arm64.h

@ -130,9 +130,7 @@ class RecordWriteStub: public PlatformCodeStub {
// so effectively a nop.
static void Patch(Code* stub, Mode mode) {
// We are going to patch the first two instructions of the stub.
PatchingAssembler patcher(
stub->GetIsolate(),
reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
PatchingAssembler patcher(stub->GetIsolate(), stub->instruction_start(), 2);
Instruction* instr1 = patcher.InstructionAt(0);
Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
// Instructions must be either 'adr' or 'b'.
@ -172,37 +170,7 @@ class RecordWriteStub: public PlatformCodeStub {
// The 'object' and 'address' registers must be preserved.
class RegisterAllocation {
public:
RegisterAllocation(Register object,
Register address,
Register scratch)
: object_(object),
address_(address),
scratch0_(scratch),
saved_regs_(kCallerSaved),
saved_fp_regs_(kCallerSavedFP) {
DCHECK(!AreAliased(scratch, object, address));
// The SaveCallerSaveRegisters method needs to save caller-saved
// registers, but we don't bother saving MacroAssembler scratch registers.
saved_regs_.Remove(MacroAssembler::DefaultTmpList());
saved_fp_regs_.Remove(MacroAssembler::DefaultFPTmpList());
// We would like to require more scratch registers for this stub,
// but the number of registers comes down to the ones used in
// FullCodeGen::SetVar(), which is architecture independent.
// We allocate 2 extra scratch registers that we'll save on the stack.
CPURegList pool_available = GetValidRegistersForAllocation();
CPURegList used_regs(object, address, scratch);
pool_available.Remove(used_regs);
scratch1_ = Register(pool_available.PopLowestIndex());
scratch2_ = Register(pool_available.PopLowestIndex());
// The scratch registers will be restored by other means so we don't need
// to save them with the other caller saved registers.
saved_regs_.Remove(scratch0_);
saved_regs_.Remove(scratch1_);
saved_regs_.Remove(scratch2_);
}
RegisterAllocation(Register object, Register address, Register scratch);
void Save(MacroAssembler* masm) {
// We don't have to save scratch0_ because it was given to us as
@ -288,9 +256,7 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
void Activate(Code* code) override {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
void Activate(Code* code) override;
Register object() const {
return Register::from_code(ObjectBits::decode(minor_key_));

2
deps/v8/src/arm64/codegen-arm64.cc

@ -6,6 +6,8 @@
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/arm64/simulator-arm64.h"
#include "src/codegen.h"
#include "src/macro-assembler.h"

11
deps/v8/src/arm64/constants-arm64.h

@ -199,7 +199,14 @@ const unsigned kFloatExponentBits = 8;
V_(SysOp1, 18, 16, Bits) \
V_(SysOp2, 7, 5, Bits) \
V_(CRn, 15, 12, Bits) \
V_(CRm, 11, 8, Bits)
V_(CRm, 11, 8, Bits) \
\
/* Load-/store-exclusive */ \
V_(LoadStoreXLoad, 22, 22, Bits) \
V_(LoadStoreXNotExclusive, 23, 23, Bits) \
V_(LoadStoreXAcquireRelease, 15, 15, Bits) \
V_(LoadStoreXSizeLog2, 31, 30, Bits) \
V_(LoadStoreXPair, 21, 21, Bits)
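Each V_(Name, HighBit, LowBit, Bits) entry above names an inclusive bit range of the 32-bit instruction word. A generic sketch of the extraction (not the actual macro expansion):

#include <cstdint>

uint32_t Bits(uint32_t instr, int high_bit, int low_bit) {
  return (instr >> low_bit) & ((1u << (high_bit - low_bit + 1)) - 1);
}

// Examples: Bits(instr, 22, 22) yields LoadStoreXLoad (1 for a load);
// Bits(instr, 31, 30) yields LoadStoreXSizeLog2, so the access size in
// bytes is 1 << Bits(instr, 31, 30).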
#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
/* NZCV */ \
@ -857,7 +864,7 @@ enum LoadStoreRegisterOffset {
#undef LOAD_STORE_REGISTER_OFFSET
};
// Load/store acquire/release
// Load/store acquire/release.
enum LoadStoreAcquireReleaseOp {
LoadStoreAcquireReleaseFixed = 0x08000000,
LoadStoreAcquireReleaseFMask = 0x3F000000,

53
deps/v8/src/arm64/deoptimizer-arm64.cc

@ -2,7 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/api.h"
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/frames-arm64.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
@ -94,11 +97,17 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// caller-saved registers here. Callee-saved registers can be stored directly
// in the input frame.
// Save all allocatable floating point registers.
CPURegList saved_fp_registers(
// Save all allocatable double registers.
CPURegList saved_double_registers(
CPURegister::kFPRegister, kDRegSizeInBits,
RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask());
__ PushCPURegList(saved_fp_registers);
__ PushCPURegList(saved_double_registers);
// Save all allocatable float registers.
CPURegList saved_float_registers(
CPURegister::kFPRegister, kSRegSizeInBits,
RegisterConfiguration::Crankshaft()->allocatable_float_codes_mask());
__ PushCPURegList(saved_float_registers);
// We save all the registers except jssp, sp and lr.
CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 27);
@ -110,10 +119,13 @@ void Deoptimizer::TableEntryGenerator::Generate() {
const int kSavedRegistersAreaSize =
(saved_registers.Count() * kXRegSize) +
(saved_fp_registers.Count() * kDRegSize);
(saved_double_registers.Count() * kDRegSize) +
(saved_float_registers.Count() * kSRegSize);
// Floating point registers are saved on the stack above core registers.
const int kFPRegistersOffset = saved_registers.Count() * kXRegSize;
const int kFloatRegistersOffset = saved_registers.Count() * kXRegSize;
const int kDoubleRegistersOffset =
kFloatRegistersOffset + saved_float_registers.Count() * kSRegSize;
// Get the bailout id from the stack.
Register bailout_id = x2;
@ -165,17 +177,28 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Str(x2, MemOperand(x1, offset));
}
// Copy FP registers to the input frame.
CPURegList copy_fp_to_input = saved_fp_registers;
for (int i = 0; i < saved_fp_registers.Count(); i++) {
int src_offset = kFPRegistersOffset + (i * kDoubleSize);
// Copy double registers to the input frame.
CPURegList copy_double_to_input = saved_double_registers;
for (int i = 0; i < saved_double_registers.Count(); i++) {
int src_offset = kDoubleRegistersOffset + (i * kDoubleSize);
__ Peek(x2, src_offset);
CPURegister reg = copy_fp_to_input.PopLowestIndex();
CPURegister reg = copy_double_to_input.PopLowestIndex();
int dst_offset = FrameDescription::double_registers_offset() +
(reg.code() * kDoubleSize);
__ Str(x2, MemOperand(x1, dst_offset));
}
// Copy float registers to the input frame.
CPURegList copy_float_to_input = saved_float_registers;
for (int i = 0; i < saved_float_registers.Count(); i++) {
int src_offset = kFloatRegistersOffset + (i * kFloatSize);
__ Peek(w2, src_offset);
CPURegister reg = copy_float_to_input.PopLowestIndex();
int dst_offset =
FrameDescription::float_registers_offset() + (reg.code() * kFloatSize);
__ Str(w2, MemOperand(x1, dst_offset));
}
// Remove the bailout id and the saved registers from the stack.
__ Drop(1 + (kSavedRegistersAreaSize / kXRegSize));
@ -241,11 +264,11 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ B(lt, &outer_push_loop);
__ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
DCHECK(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
!saved_fp_registers.IncludesAliasOf(fp_zero) &&
!saved_fp_registers.IncludesAliasOf(fp_scratch));
while (!saved_fp_registers.IsEmpty()) {
const CPURegister reg = saved_fp_registers.PopLowestIndex();
DCHECK(!saved_double_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
!saved_double_registers.IncludesAliasOf(fp_zero) &&
!saved_double_registers.IncludesAliasOf(fp_scratch));
while (!saved_double_registers.IsEmpty()) {
const CPURegister reg = saved_double_registers.PopLowestIndex();
int src_offset = FrameDescription::double_registers_offset() +
(reg.code() * kDoubleSize);
__ Ldr(reg, MemOperand(x1, src_offset));

11
deps/v8/src/arm64/disasm-arm64.cc

@ -916,10 +916,10 @@ void DisassemblingDecoder::VisitLoadStorePairOffset(Instruction* instr) {
void DisassemblingDecoder::VisitLoadStoreAcquireRelease(Instruction *instr) {
const char *mnemonic = "unimplemented";
const char *form = "'Wt, ['Xn]";
const char *form_x = "'Xt, ['Xn]";
const char *form_stlx = "'Ws, 'Wt, ['Xn]";
const char *form_stlx_x = "'Ws, 'Xt, ['Xn]";
const char* form = "'Wt, ['Xns]";
const char* form_x = "'Xt, ['Xns]";
const char* form_stlx = "'Ws, 'Wt, ['Xns]";
const char* form_stlx_x = "'Ws, 'Xt, ['Xns]";
switch (instr->Mask(LoadStoreAcquireReleaseMask)) {
case LDAXR_b: mnemonic = "ldaxrb"; break;
@ -938,7 +938,8 @@ void DisassemblingDecoder::VisitLoadStoreAcquireRelease(Instruction *instr) {
case STLXR_b: mnemonic = "stlxrb"; form = form_stlx; break;
case STLXR_w: mnemonic = "stlxr"; form = form_stlx; break;
case STLXR_x: mnemonic = "stlxr"; form = form_stlx_x; break;
default: form = "(LoadStoreAcquireReleaseMask)";
default:
form = "(LoadStoreAcquireRelease)";
}
Format(instr, mnemonic, form);
}

1
deps/v8/src/arm64/eh-frame-arm64.cc

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/arm64/assembler-arm64-inl.h"
#include "src/eh-frame.h"
namespace v8 {

21
deps/v8/src/arm64/instructions-arm64.cc

@ -218,22 +218,22 @@ bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
return IsValidImmPCOffset(BranchType(), DistanceTo(target));
}
void Instruction::SetImmPCOffsetTarget(Isolate* isolate, Instruction* target) {
void Instruction::SetImmPCOffsetTarget(Assembler::IsolateData isolate_data,
Instruction* target) {
if (IsPCRelAddressing()) {
SetPCRelImmTarget(isolate, target);
SetPCRelImmTarget(isolate_data, target);
} else if (BranchType() != UnknownBranchType) {
SetBranchImmTarget(target);
} else if (IsUnresolvedInternalReference()) {
SetUnresolvedInternalReferenceImmTarget(isolate, target);
SetUnresolvedInternalReferenceImmTarget(isolate_data, target);
} else {
// Load literal (offset from PC).
SetImmLLiteral(target);
}
}
void Instruction::SetPCRelImmTarget(Isolate* isolate, Instruction* target) {
void Instruction::SetPCRelImmTarget(Assembler::IsolateData isolate_data,
Instruction* target) {
// ADRP is not supported, so 'this' must point to an ADR instruction.
DCHECK(IsAdr());
@ -243,7 +243,7 @@ void Instruction::SetPCRelImmTarget(Isolate* isolate, Instruction* target) {
imm = Assembler::ImmPCRelAddress(static_cast<int>(target_offset));
SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
} else {
PatchingAssembler patcher(isolate, this,
PatchingAssembler patcher(isolate_data, reinterpret_cast<byte*>(this),
PatchingAssembler::kAdrFarPatchableNInstrs);
patcher.PatchAdrFar(target_offset);
}
@ -283,9 +283,8 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
SetInstructionBits(Mask(~imm_mask) | branch_imm);
}
void Instruction::SetUnresolvedInternalReferenceImmTarget(Isolate* isolate,
Instruction* target) {
void Instruction::SetUnresolvedInternalReferenceImmTarget(
Assembler::IsolateData isolate_data, Instruction* target) {
DCHECK(IsUnresolvedInternalReference());
DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
DCHECK(is_int32(DistanceTo(target) >> kInstructionSizeLog2));
@ -294,7 +293,7 @@ void Instruction::SetUnresolvedInternalReferenceImmTarget(Isolate* isolate,
uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);
PatchingAssembler patcher(isolate, this, 2);
PatchingAssembler patcher(isolate_data, reinterpret_cast<byte*>(this), 2);
patcher.brk(high16);
patcher.brk(low16);
}

10
deps/v8/src/arm64/instructions-arm64.h

@ -7,13 +7,13 @@
#include "src/arm64/constants-arm64.h"
#include "src/arm64/utils-arm64.h"
#include "src/assembler.h"
#include "src/globals.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
// ISA constants. --------------------------------------------------------------
typedef uint32_t Instr;
@ -373,8 +373,9 @@ class Instruction {
bool IsTargetInImmPCOffsetRange(Instruction* target);
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
void SetImmPCOffsetTarget(Isolate* isolate, Instruction* target);
void SetUnresolvedInternalReferenceImmTarget(Isolate* isolate,
void SetImmPCOffsetTarget(AssemblerBase::IsolateData isolate_data,
Instruction* target);
void SetUnresolvedInternalReferenceImmTarget(AssemblerBase::IsolateData,
Instruction* target);
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(Instruction* source);
@ -411,7 +412,8 @@ class Instruction {
static const int ImmPCRelRangeBitwidth = 21;
static bool IsValidPCRelOffset(ptrdiff_t offset) { return is_int21(offset); }
void SetPCRelImmTarget(Isolate* isolate, Instruction* target);
void SetPCRelImmTarget(AssemblerBase::IsolateData isolate_data,
Instruction* target);
void SetBranchImmTarget(Instruction* target);
};

4
deps/v8/src/arm64/instrument-arm64.cc

@ -61,7 +61,6 @@ typedef struct {
CounterType type;
} CounterDescriptor;
static const CounterDescriptor kCounterList[] = {
{"Instruction", Cumulative},
@ -83,17 +82,18 @@ static const CounterDescriptor kCounterList[] = {
{"Load FP", Gauge},
{"Load Pair", Gauge},
{"Load Literal", Gauge},
{"Load Acquire", Gauge},
{"Store Integer", Gauge},
{"Store FP", Gauge},
{"Store Pair", Gauge},
{"Store Release", Gauge},
{"PC Addressing", Gauge},
{"Other", Gauge},
{"SP Adjust", Gauge},
};
Instrument::Instrument(const char* datafile, uint64_t sample_period)
: output_stream_(stderr), sample_period_(sample_period) {

56
deps/v8/src/arm64/interface-descriptors-arm64.cc

@ -57,6 +57,11 @@ const Register MathPowTaggedDescriptor::exponent() { return x11; }
const Register MathPowIntegerDescriptor::exponent() { return x12; }
const Register RegExpExecDescriptor::StringRegister() { return x0; }
const Register RegExpExecDescriptor::LastIndexRegister() { return x1; }
const Register RegExpExecDescriptor::StringStartRegister() { return x2; }
const Register RegExpExecDescriptor::StringEndRegister() { return x3; }
const Register RegExpExecDescriptor::CodeRegister() { return x8; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
@ -310,46 +315,6 @@ void StringAddDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void KeyedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
x2, // key
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&noInlineDescriptor);
}
void NamedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
x2, // name
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&noInlineDescriptor);
}
void CallHandlerDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
x0, // receiver
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
@ -388,7 +353,7 @@ void InterpreterDispatchDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
x0, // argument count (not including receiver)
@ -398,7 +363,7 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
x0, // argument count (not including receiver)
@ -410,8 +375,8 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndConstructArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
x0, // argument count (not including receiver)
x1, // target to call checked to be Array function
@ -436,7 +401,8 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
Register registers[] = {
x0, // the value to pass to the generator
x1, // the JSGeneratorObject to resume
x2 // the resume mode (tagged)
x2, // the resume mode (tagged)
x3 // SuspendFlags (tagged)
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}

27
deps/v8/src/arm64/macro-assembler-arm64-inl.h

@ -12,9 +12,8 @@
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/instrument-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"
#include "src/base/bits.h"
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
@ -37,12 +36,6 @@ MemOperand UntagSmiMemOperand(Register object, int offset) {
}
Handle<Object> MacroAssembler::CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
}
void MacroAssembler::And(const Register& rd,
const Register& rn,
const Operand& operand) {
@ -1239,6 +1232,14 @@ void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
uxtw(rd, rn);
}
void MacroAssembler::AlignAndSetCSPForFrame() {
int sp_alignment = ActivationFrameAlignment();
// AAPCS64 mandates at least 16-byte alignment.
DCHECK(sp_alignment >= 16);
DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
Bic(csp, StackPointer(), sp_alignment - 1);
SetStackPointer(csp);
}
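A sketch of the rounding performed by the Bic above: for a power-of-two alignment, clearing the low bits rounds an address down to that alignment:

#include <cstdint>

uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
  // alignment must be a power of two; the DCHECKs above enforce >= 16.
  return sp & ~(alignment - 1);
}
// e.g. AlignDown(0x1007, 16) == 0x1000.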
void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
DCHECK(!csp.Is(sp_));
@ -1441,14 +1442,7 @@ void MacroAssembler::ObjectUntag(Register untagged_obj, Register obj) {
Bic(untagged_obj, obj, kHeapObjectTag);
}
void MacroAssembler::IsObjectNameType(Register object,
Register type,
Label* fail) {
CompareObjectType(object, type, type, LAST_NAME_TYPE);
B(hi, fail);
}
void MacroAssembler::jmp(Label* L) { B(L); }
void MacroAssembler::IsObjectJSStringType(Register object,
Register type,
@ -1477,6 +1471,7 @@ void MacroAssembler::Push(Handle<Object> handle) {
Push(tmp);
}
void MacroAssembler::Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
void MacroAssembler::Claim(int64_t count, uint64_t unit_size) {
DCHECK(count >= 0);

176
deps/v8/src/arm64/macro-assembler-arm64.cc

@ -4,16 +4,19 @@
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/frames-arm64.h"
#include "src/assembler.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/heap/heap-inl.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/arm64/frames-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/arm64/macro-assembler-arm64.h" // Cannot be the first include
namespace v8 {
namespace internal {
@ -21,23 +24,23 @@ namespace internal {
// Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
#define __
MacroAssembler::MacroAssembler(Isolate* arg_isolate, byte* buffer,
MacroAssembler::MacroAssembler(Isolate* isolate, byte* buffer,
unsigned buffer_size,
CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, buffer_size),
: Assembler(isolate, buffer, buffer_size),
generating_stub_(false),
#if DEBUG
allow_macro_instructions_(true),
#endif
has_frame_(false),
isolate_(isolate),
use_real_aborts_(true),
sp_(jssp),
tmp_list_(DefaultTmpList()),
fptmp_list_(DefaultFPTmpList()) {
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
Handle<Object>::New(isolate_->heap()->undefined_value(), isolate_);
}
}
@ -1232,6 +1235,12 @@ void MacroAssembler::PopPostamble(Operand total_size) {
}
}
void MacroAssembler::PushPreamble(int count, int size) {
PushPreamble(count * size);
}
void MacroAssembler::PopPostamble(int count, int size) {
PopPostamble(count * size);
}
void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
if (offset.IsImmediate()) {
@ -1428,6 +1437,21 @@ void MacroAssembler::LoadHeapObject(Register result,
Mov(result, Operand(object));
}
void MacroAssembler::LoadObject(Register result, Handle<Object> object) {
AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
DCHECK(object->IsSmi());
Mov(result, Operand(object));
}
}
void MacroAssembler::Move(Register dst, Register src) { Mov(dst, src); }
void MacroAssembler::Move(Register dst, Handle<Object> x) {
LoadObject(dst, x);
}
void MacroAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
@ -1595,20 +1619,6 @@ void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
}
void MacroAssembler::AssertName(Register object) {
if (emit_debug_code()) {
AssertNotSmi(object, kOperandIsASmiAndNotAName);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(temp, temp, LAST_NAME_TYPE);
Check(ls, kOperandIsNotAName);
}
}
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
AssertNotSmi(object, kOperandIsASmiAndNotAFunction);
@ -1634,31 +1644,36 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
void MacroAssembler::AssertGeneratorObject(Register object) {
if (emit_debug_code()) {
void MacroAssembler::AssertGeneratorObject(Register object, Register flags) {
// `flags` should be an untagged integer. See `SuspendFlags` in src/globals.h
if (!emit_debug_code()) return;
AssertNotSmi(object, kOperandIsASmiAndNotAGeneratorObject);
// Load map
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
CompareObjectType(object, temp, temp, JS_GENERATOR_OBJECT_TYPE);
Check(eq, kOperandIsNotAGeneratorObject);
}
}
// Load instance type
Ldrb(temp, FieldMemOperand(temp, Map::kInstanceTypeOffset));
void MacroAssembler::AssertReceiver(Register object) {
if (emit_debug_code()) {
AssertNotSmi(object, kOperandIsASmiAndNotAReceiver);
Label async, do_check;
STATIC_ASSERT(static_cast<int>(SuspendFlags::kGeneratorTypeMask) == 4);
DCHECK(!temp.is(flags));
B(&async, reg_bit_set, flags, 2);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
// Check if JSGeneratorObject
Cmp(temp, JS_GENERATOR_OBJECT_TYPE);
jmp(&do_check);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
CompareObjectType(object, temp, temp, FIRST_JS_RECEIVER_TYPE);
Check(hs, kOperandIsNotAReceiver);
}
}
bind(&async);
// Check if JSAsyncGeneratorObject
Cmp(temp, JS_ASYNC_GENERATOR_OBJECT_TYPE);
bind(&do_check);
// Restore generator object to register and perform assertion
Check(eq, kOperandIsNotAGeneratorObject);
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
@ -1674,20 +1689,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
STATIC_ASSERT(kSmiTag == 0);
Tst(object, kSmiTagMask);
Check(ne, kOperandIsASmiAndNotAString);
Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
Check(lo, kOperandIsNotAString);
}
}
void MacroAssembler::AssertPositiveOrZero(Register value) {
if (emit_debug_code()) {
Label done;
@ -1698,28 +1699,6 @@ void MacroAssembler::AssertPositiveOrZero(Register value) {
}
}
void MacroAssembler::AssertNotNumber(Register value) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
Tst(value, kSmiTagMask);
Check(ne, kOperandIsANumber);
Label done;
JumpIfNotHeapNumber(value, &done);
Abort(kOperandIsANumber);
Bind(&done);
}
}
void MacroAssembler::AssertNumber(Register value) {
if (emit_debug_code()) {
Label done;
JumpIfSmi(value, &done);
JumpIfHeapNumber(value, &done);
Abort(kOperandIsNotANumber);
Bind(&done);
}
}
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
@ -3331,30 +3310,6 @@ void MacroAssembler::CheckMap(Register obj_map,
}
void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
Register scratch2, Handle<WeakCell> cell,
Handle<Code> success,
SmiCheckType smi_check_type) {
Label fail;
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, &fail);
}
Ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
CmpWeakValue(scratch1, cell, scratch2);
B(ne, &fail);
Jump(success, RelocInfo::CODE_TARGET);
Bind(&fail);
}
void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
Register scratch) {
Mov(scratch, Operand(cell));
Ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
Cmp(value, scratch);
}
void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
Mov(value, Operand(cell));
Ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
@ -3384,7 +3339,6 @@ void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
DecodeField<Map::ElementsKindBits>(result);
}
void MacroAssembler::GetMapConstructor(Register result, Register map,
Register temp, Register temp2) {
Label done, loop;
@ -3683,6 +3637,13 @@ void MacroAssembler::PopSafepointRegistersAndDoubles() {
PopSafepointRegisters();
}
void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
}
void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
Peek(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
}
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// Make sure the safepoint registers list is what we expect.
@ -4082,20 +4043,6 @@ void MacroAssembler::AssertRegisterIsRoot(Register reg,
}
void MacroAssembler::AssertFastElements(Register elements) {
if (emit_debug_code()) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
Label ok;
Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
Abort(kJSObjectWithFastElementsMapHasSlowElements);
Bind(&ok);
}
}
void MacroAssembler::AssertIsString(const Register& object) {
if (emit_debug_code()) {
@ -4584,6 +4531,13 @@ CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
return reg;
}
MemOperand ContextMemOperand(Register context, int index) {
return MemOperand(context, Context::SlotOffset(index));
}
MemOperand NativeContextMemOperand() {
return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
#define __ masm->

93
deps/v8/src/arm64/macro-assembler-arm64.h

@ -167,7 +167,12 @@ class MacroAssembler : public Assembler {
MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size,
CodeObjectRequired create_code_object);
inline Handle<Object> CodeObject();
Isolate* isolate() const { return isolate_; }
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
}
// Instruction set functions ------------------------------------------------
// Logical macros.
@ -672,7 +677,7 @@ class MacroAssembler : public Assembler {
// This is a convenience method for pushing a single Handle<Object>.
inline void Push(Handle<Object> handle);
void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
inline void Push(Smi* smi);
// Aliases of Push and Pop, required for V8 compatibility.
inline void push(Register src) {
@ -872,14 +877,7 @@ class MacroAssembler : public Assembler {
// Align csp for a frame, as per ActivationFrameAlignment, and make it the
// current stack pointer.
inline void AlignAndSetCSPForFrame() {
int sp_alignment = ActivationFrameAlignment();
// AAPCS64 mandates at least 16-byte alignment.
DCHECK(sp_alignment >= 16);
DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
Bic(csp, StackPointer(), sp_alignment - 1);
SetStackPointer(csp);
}
inline void AlignAndSetCSPForFrame();
// Push the system stack pointer (csp) down to allow the same to be done to
// the current stack pointer (according to StackPointer()). This must be
@ -923,23 +921,15 @@ class MacroAssembler : public Assembler {
void LoadHeapObject(Register dst, Handle<HeapObject> object);
void LoadObject(Register result, Handle<Object> object) {
AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
DCHECK(object->IsSmi());
Mov(result, Operand(object));
}
}
void LoadObject(Register result, Handle<Object> object);
static int SafepointRegisterStackIndex(int reg_code);
// This is required for compatibility with architecture-independent code.
// Remove if not needed.
inline void Move(Register dst, Register src) { Mov(dst, src); }
inline void Move(Register dst, Handle<Object> x) { LoadObject(dst, x); }
inline void Move(Register dst, Smi* src) { Mov(dst, src); }
void Move(Register dst, Register src);
void Move(Register dst, Handle<Object> x);
void Move(Register dst, Smi* src);
void LoadInstanceDescriptors(Register map,
Register descriptors);
@ -1004,38 +994,25 @@ class MacroAssembler : public Assembler {
inline void ObjectTag(Register tagged_obj, Register obj);
inline void ObjectUntag(Register untagged_obj, Register obj);
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
// Abort execution if argument is not a JSGeneratorObject,
// enabled via --debug-code.
void AssertGeneratorObject(Register object);
void AssertGeneratorObject(Register object, Register suspend_flags);
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
void AssertBoundFunction(Register object);
// Abort execution if argument is not a JSReceiver, enabled via --debug-code.
void AssertReceiver(Register object);
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
// Abort execution if argument is not a string, enabled via --debug-code.
void AssertString(Register object);
// Abort execution if argument is not a positive or zero integer, enabled via
// --debug-code.
void AssertPositiveOrZero(Register value);
// Abort execution if argument is not a number (heap number or smi).
void AssertNumber(Register value);
void AssertNotNumber(Register value);
void JumpIfHeapNumber(Register object, Label* on_heap_number,
SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
@ -1112,7 +1089,7 @@ class MacroAssembler : public Assembler {
// ---- Calling / Jumping helpers ----
// This is required for compatibility with architecture-independent code.
inline void jmp(Label* L) { B(L); }
inline void jmp(Label* L);
void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
void TailCallStub(CodeStub* stub);
@ -1445,16 +1422,6 @@ class MacroAssembler : public Assembler {
Label* fail,
SmiCheckType smi_check_type);
// Check if the map of an object is equal to a specified weak map and branch
// to a specified target if equal. Skip the smi check if not required
// (object is known to be a heap object)
void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
Handle<WeakCell> cell, Handle<Code> success,
SmiCheckType smi_check_type);
// Compare the given value and the value of weak cell.
void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
void GetWeakValue(Register value, Handle<WeakCell> cell);
// Load the value of the weak cell in the value register. Branch to the given
@ -1485,13 +1452,6 @@ class MacroAssembler : public Assembler {
Heap::RootListIndex index,
Label* if_not_equal);
// Load and check the instance type of an object for being a unique name.
// Loads the type into the second argument register.
// The object and type arguments can be the same register; in that case it
// will be overwritten with the type.
// Fall-through if the object was a string and jump on fail otherwise.
inline void IsObjectNameType(Register object, Register type, Label* fail);
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
// The object and type arguments can be the same register; in that case it
@ -1665,15 +1625,11 @@ class MacroAssembler : public Assembler {
void PopSafepointRegistersAndDoubles();
// Store value in register src in the safepoint stack slot for register dst.
void StoreToSafepointRegisterSlot(Register src, Register dst) {
Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
}
void StoreToSafepointRegisterSlot(Register src, Register dst);
// Load the value of the src register from its safepoint stack slot
// into register dst.
void LoadFromSafepointRegisterSlot(Register dst, Register src) {
Peek(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
}
void LoadFromSafepointRegisterSlot(Register dst, Register src);
void CheckPageFlag(const Register& object, const Register& scratch, int mask,
Condition cc, Label* condition_met);
@ -1808,7 +1764,6 @@ class MacroAssembler : public Assembler {
Register reg,
Heap::RootListIndex index,
BailoutReason reason = kRegisterDidNotMatchExpectedRoot);
void AssertFastElements(Register elements);
// Abort if the specified register contains the invalid color bit pattern.
// The pattern must be in bits [1:0] of 'reg' register.
@ -1922,8 +1877,8 @@ class MacroAssembler : public Assembler {
void PushPreamble(Operand total_size);
void PopPostamble(Operand total_size);
void PushPreamble(int count, int size) { PushPreamble(count * size); }
void PopPostamble(int count, int size) { PopPostamble(count * size); }
void PushPreamble(int count, int size);
void PopPostamble(int count, int size);
private:
// The actual Push and Pop implementations. These don't generate any code
@ -1977,6 +1932,7 @@ class MacroAssembler : public Assembler {
bool allow_macro_instructions_;
#endif
bool has_frame_;
Isolate* isolate_;
// The Abort method should call a V8 runtime function, but the CallRuntime
// mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
@ -2118,15 +2074,8 @@ class UseScratchRegisterScope {
RegList old_availablefp_; // kFPRegister
};
inline MemOperand ContextMemOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}
inline MemOperand NativeContextMemOperand() {
return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
MemOperand ContextMemOperand(Register context, int index = 0);
MemOperand NativeContextMemOperand();
// Encode and decode information about patchable inline SMI checks.
class InlineSmiCheckInfo {

314
deps/v8/src/arm64/simulator-arm64.cc

@ -10,10 +10,11 @@
#include "src/arm64/decoder-arm64-inl.h"
#include "src/arm64/simulator-arm64.h"
#include "src/assembler.h"
#include "src/assembler-inl.h"
#include "src/codegen.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/ostreams.h"
#include "src/runtime/runtime-utils.h"
@ -55,6 +56,9 @@ TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR_BOLD(YELLOW) : "";
TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(YELLOW) : "";
TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : "";
// static
base::LazyInstance<Simulator::GlobalMonitor>::type Simulator::global_monitor_ =
LAZY_INSTANCE_INITIALIZER;
// This is basically the same as PrintF, with a guard for FLAG_trace_sim.
void Simulator::TraceSim(const char* format, ...) {
@ -429,6 +433,7 @@ void Simulator::ResetState() {
Simulator::~Simulator() {
global_monitor_.Pointer()->RemoveProcessor(&global_monitor_processor_);
delete[] reinterpret_cast<byte*>(stack_);
if (FLAG_log_instruction_stats) {
delete instrument_;
@ -1628,6 +1633,15 @@ void Simulator::LoadStoreHelper(Instruction* instr,
uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode);
uintptr_t stack = 0;
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
if (instr->IsLoad()) {
local_monitor_.NotifyLoad(address);
} else {
local_monitor_.NotifyStore(address);
global_monitor_.Pointer()->NotifyStore_Locked(address,
&global_monitor_processor_);
}
// Handle the writeback for stores before the store. On a CPU the writeback
// and the store are atomic, but when running on the simulator it is possible
// to be interrupted in between. The simulator is not thread safe and V8 does
@ -1730,6 +1744,19 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
uintptr_t address2 = address + access_size;
uintptr_t stack = 0;
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
if (instr->IsLoad()) {
local_monitor_.NotifyLoad(address);
local_monitor_.NotifyLoad(address2);
} else {
local_monitor_.NotifyStore(address);
local_monitor_.NotifyStore(address2);
global_monitor_.Pointer()->NotifyStore_Locked(address,
&global_monitor_processor_);
global_monitor_.Pointer()->NotifyStore_Locked(address2,
&global_monitor_processor_);
}
// Handle the writeback for stores before the store. On a CPU the writeback
// and the store are atomic, but when running on the simulator it is possible
// to be interrupted in between. The simulator is not thread safe and V8 does
@ -1853,6 +1880,9 @@ void Simulator::VisitLoadLiteral(Instruction* instr) {
uintptr_t address = instr->LiteralAddress();
unsigned rt = instr->Rt();
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
local_monitor_.NotifyLoad(address);
switch (instr->Mask(LoadLiteralMask)) {
// Use _no_log variants to suppress the register trace (LOG_REGS,
// LOG_FP_REGS), then print a more detailed log.
@ -1906,8 +1936,108 @@ void Simulator::LoadStoreWriteBack(unsigned addr_reg,
}
}
Simulator::TransactionSize Simulator::get_transaction_size(unsigned size) {
switch (size) {
case 0:
return TransactionSize::None;
case 1:
return TransactionSize::Byte;
case 2:
return TransactionSize::HalfWord;
case 4:
return TransactionSize::Word;
default:
UNREACHABLE();
}
return TransactionSize::None;
}
void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
// TODO(binji)
unsigned rt = instr->Rt();
unsigned rn = instr->Rn();
LoadStoreAcquireReleaseOp op = static_cast<LoadStoreAcquireReleaseOp>(
instr->Mask(LoadStoreAcquireReleaseMask));
int32_t is_acquire_release = instr->LoadStoreXAcquireRelease();
int32_t is_exclusive = (instr->LoadStoreXNotExclusive() == 0);
int32_t is_load = instr->LoadStoreXLoad();
int32_t is_pair = instr->LoadStoreXPair();
USE(is_acquire_release);
USE(is_pair);
DCHECK_NE(is_acquire_release, 0); // Non-acquire/release unimplemented.
DCHECK_EQ(is_pair, 0); // Pair unimplemented.
unsigned access_size = 1 << instr->LoadStoreXSizeLog2();
uintptr_t address = LoadStoreAddress(rn, 0, AddrMode::Offset);
DCHECK_EQ(address % access_size, 0);
base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
if (is_load != 0) {
if (is_exclusive) {
local_monitor_.NotifyLoadExcl(address, get_transaction_size(access_size));
global_monitor_.Pointer()->NotifyLoadExcl_Locked(
address, &global_monitor_processor_);
} else {
local_monitor_.NotifyLoad(address);
}
switch (op) {
case LDAR_b:
case LDAXR_b:
set_wreg_no_log(rt, MemoryRead<uint8_t>(address));
break;
case LDAR_h:
case LDAXR_h:
set_wreg_no_log(rt, MemoryRead<uint16_t>(address));
break;
case LDAR_w:
case LDAXR_w:
set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
break;
default:
UNIMPLEMENTED();
}
LogRead(address, access_size, rt);
} else {
if (is_exclusive) {
unsigned rs = instr->Rs();
if (local_monitor_.NotifyStoreExcl(address,
get_transaction_size(access_size)) &&
global_monitor_.Pointer()->NotifyStoreExcl_Locked(
address, &global_monitor_processor_)) {
switch (op) {
case STLXR_b:
MemoryWrite<uint8_t>(address, wreg(rt));
break;
case STLXR_h:
MemoryWrite<uint16_t>(address, wreg(rt));
break;
case STLXR_w:
MemoryWrite<uint32_t>(address, wreg(rt));
break;
default:
UNIMPLEMENTED();
}
LogWrite(address, access_size, rt);
set_wreg(rs, 0);
} else {
set_wreg(rs, 1);
}
} else {
local_monitor_.NotifyStore(address);
global_monitor_.Pointer()->NotifyStore_Locked(address,
&global_monitor_processor_);
switch (op) {
case STLR_b:
MemoryWrite<uint8_t>(address, wreg(rt));
break;
case STLR_h:
MemoryWrite<uint16_t>(address, wreg(rt));
break;
case STLR_w:
MemoryWrite<uint32_t>(address, wreg(rt));
break;
default:
UNIMPLEMENTED();
}
}
}
}
void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) {
@@ -3877,6 +4007,186 @@ void Simulator::DoPrintf(Instruction* instr) {
delete[] format;
}
Simulator::LocalMonitor::LocalMonitor()
: access_state_(MonitorAccess::Open),
tagged_addr_(0),
size_(TransactionSize::None) {}
void Simulator::LocalMonitor::Clear() {
access_state_ = MonitorAccess::Open;
tagged_addr_ = 0;
size_ = TransactionSize::None;
}
void Simulator::LocalMonitor::NotifyLoad(uintptr_t addr) {
if (access_state_ == MonitorAccess::Exclusive) {
// A non-exclusive load could clear the local monitor, so the strictest
// behavior is to unconditionally clear the local monitor on load.
Clear();
}
}
void Simulator::LocalMonitor::NotifyLoadExcl(uintptr_t addr,
TransactionSize size) {
access_state_ = MonitorAccess::Exclusive;
tagged_addr_ = addr;
size_ = size;
}
void Simulator::LocalMonitor::NotifyStore(uintptr_t addr) {
if (access_state_ == MonitorAccess::Exclusive) {
// A non-exclusive store could clear the local monitor, so the strictest
// behavior is to unconditionally clear the local monitor on store.
Clear();
}
}
bool Simulator::LocalMonitor::NotifyStoreExcl(uintptr_t addr,
TransactionSize size) {
if (access_state_ == MonitorAccess::Exclusive) {
// It is allowed for a processor to require that the address matches
// exactly (B2.10.1), so this comparison does not mask addr.
if (addr == tagged_addr_ && size_ == size) {
Clear();
return true;
} else {
// It is implementation-defined whether an exclusive store to a
// non-tagged address will update memory. The strictest behavior is
// therefore to unconditionally clear the local monitor.
Clear();
return false;
}
} else {
DCHECK(access_state_ == MonitorAccess::Open);
return false;
}
}
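// NOTE (illustrative, not in the commit): a typical walk through the local
// monitor's state machine for a word-sized exclusive pair at address A:
//
//   NotifyLoadExcl(A, Word);    // state := Exclusive, tag := A, size := Word
//   NotifyStoreExcl(A, Word);   // exact addr+size match -> Clear(), true
//
// whereas any intervening plain store poisons the reservation:
//
//   NotifyLoadExcl(A, Word);
//   NotifyStore(A);             // Exclusive -> Clear()
//   NotifyStoreExcl(A, Word);   // state is Open -> returns false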
Simulator::GlobalMonitor::Processor::Processor()
: access_state_(MonitorAccess::Open),
tagged_addr_(0),
next_(nullptr),
prev_(nullptr),
failure_counter_(0) {}
void Simulator::GlobalMonitor::Processor::Clear_Locked() {
access_state_ = MonitorAccess::Open;
tagged_addr_ = 0;
}
void Simulator::GlobalMonitor::Processor::NotifyLoadExcl_Locked(
uintptr_t addr) {
access_state_ = MonitorAccess::Exclusive;
tagged_addr_ = addr;
}
void Simulator::GlobalMonitor::Processor::NotifyStore_Locked(
uintptr_t addr, bool is_requesting_processor) {
if (access_state_ == MonitorAccess::Exclusive) {
// A non-exclusive store could clear the global monitor, so the strictest
// behavior is to unconditionally clear global monitors on store.
Clear_Locked();
}
}
bool Simulator::GlobalMonitor::Processor::NotifyStoreExcl_Locked(
uintptr_t addr, bool is_requesting_processor) {
if (access_state_ == MonitorAccess::Exclusive) {
if (is_requesting_processor) {
// It is allowed for a processor to require that the address matches
// exactly (B2.10.2), so this comparison does not mask addr.
if (addr == tagged_addr_) {
Clear_Locked();
// Introduce occasional stxr failures. This is to simulate the
// behavior of hardware, which can randomly fail due to background
// cache evictions.
if (failure_counter_++ >= kMaxFailureCounter) {
failure_counter_ = 0;
return false;
} else {
return true;
}
}
} else if ((addr & kExclusiveTaggedAddrMask) ==
(tagged_addr_ & kExclusiveTaggedAddrMask)) {
// Check the masked addresses when responding to a successful lock by
// another processor, so the implementation is more conservative (i.e. the
// granularity of locking is as large as possible).
Clear_Locked();
return false;
}
}
return false;
}
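// NOTE (illustrative): with kMaxFailureCounter == 5, a requesting processor's
// matching store-exclusives succeed while failure_counter_ runs from 0 to 4;
// the sixth call sees the counter at 5, resets it, and fails. The cycle then
// repeats, so about one in six exclusive stores fails spuriously.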
Simulator::GlobalMonitor::GlobalMonitor() : head_(nullptr) {}
void Simulator::GlobalMonitor::NotifyLoadExcl_Locked(uintptr_t addr,
Processor* processor) {
processor->NotifyLoadExcl_Locked(addr);
PrependProcessor_Locked(processor);
}
void Simulator::GlobalMonitor::NotifyStore_Locked(uintptr_t addr,
Processor* processor) {
// Notify each processor of the store operation.
for (Processor* iter = head_; iter; iter = iter->next_) {
bool is_requesting_processor = iter == processor;
iter->NotifyStore_Locked(addr, is_requesting_processor);
}
}
bool Simulator::GlobalMonitor::NotifyStoreExcl_Locked(uintptr_t addr,
Processor* processor) {
DCHECK(IsProcessorInLinkedList_Locked(processor));
if (processor->NotifyStoreExcl_Locked(addr, true)) {
// Notify the other processors that this StoreExcl succeeded.
for (Processor* iter = head_; iter; iter = iter->next_) {
if (iter != processor) {
iter->NotifyStoreExcl_Locked(addr, false);
}
}
return true;
} else {
return false;
}
}
bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
Processor* processor) const {
return head_ == processor || processor->next_ || processor->prev_;
}
void Simulator::GlobalMonitor::PrependProcessor_Locked(Processor* processor) {
if (IsProcessorInLinkedList_Locked(processor)) {
return;
}
if (head_) {
head_->prev_ = processor;
}
processor->prev_ = nullptr;
processor->next_ = head_;
head_ = processor;
}
void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
base::LockGuard<base::Mutex> lock_guard(&mutex);
if (!IsProcessorInLinkedList_Locked(processor)) {
return;
}
if (processor->prev_) {
processor->prev_->next_ = processor->next_;
} else {
head_ = processor->next_;
}
if (processor->next_) {
processor->next_->prev_ = processor->prev_;
}
processor->prev_ = nullptr;
processor->next_ = nullptr;
}
#endif // USE_SIMULATOR

91
deps/v8/src/arm64/simulator-arm64.h

@@ -865,6 +865,97 @@ class Simulator : public DecoderVisitor {
char* last_debugger_input() { return last_debugger_input_; }
char* last_debugger_input_;
// Synchronization primitives. See ARM DDI 0487A.a, B2.10. Pair types not
// implemented.
enum class MonitorAccess {
Open,
Exclusive,
};
enum class TransactionSize {
None = 0,
Byte = 1,
HalfWord = 2,
Word = 4,
};
TransactionSize get_transaction_size(unsigned size);
// The least-significant bits of the address are ignored. The number of bits
// is implementation-defined, between 3 and 11. See ARM DDI 0487A.a, B2.10.3.
static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 11) - 1);
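// NOTE (illustrative): with the 11-bit granule chosen here, the mask clears
// the low 0x7FF bits, so 0x12000 and 0x127FF tag the same reservation
// granule while 0x12800 does not:
//
//   (0x12000 & kExclusiveTaggedAddrMask) == (0x127FF & kExclusiveTaggedAddrMask)
//   (0x12000 & kExclusiveTaggedAddrMask) != (0x12800 & kExclusiveTaggedAddrMask)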
class LocalMonitor {
public:
LocalMonitor();
// These functions manage the state machine for the local monitor, but do
// not actually perform loads and stores. NotifyStoreExcl only returns
// true if the exclusive store is allowed; the global monitor will still
// have to be checked to see whether the memory should be updated.
void NotifyLoad(uintptr_t addr);
void NotifyLoadExcl(uintptr_t addr, TransactionSize size);
void NotifyStore(uintptr_t addr);
bool NotifyStoreExcl(uintptr_t addr, TransactionSize size);
private:
void Clear();
MonitorAccess access_state_;
uintptr_t tagged_addr_;
TransactionSize size_;
};
class GlobalMonitor {
public:
GlobalMonitor();
class Processor {
public:
Processor();
private:
friend class GlobalMonitor;
// These functions manage the state machine for the global monitor, but do
// not actually perform loads and stores.
void Clear_Locked();
void NotifyLoadExcl_Locked(uintptr_t addr);
void NotifyStore_Locked(uintptr_t addr, bool is_requesting_processor);
bool NotifyStoreExcl_Locked(uintptr_t addr, bool is_requesting_processor);
MonitorAccess access_state_;
uintptr_t tagged_addr_;
Processor* next_;
Processor* prev_;
// A stxr can fail due to background cache evictions. Rather than
// simulating this, we'll just occasionally introduce cases where an
// exclusive store fails. This will happen once after every
// kMaxFailureCounter exclusive stores.
static const int kMaxFailureCounter = 5;
int failure_counter_;
};
// Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
base::Mutex mutex;
void NotifyLoadExcl_Locked(uintptr_t addr, Processor* processor);
void NotifyStore_Locked(uintptr_t addr, Processor* processor);
bool NotifyStoreExcl_Locked(uintptr_t addr, Processor* processor);
// Called when the simulator is destroyed.
void RemoveProcessor(Processor* processor);
private:
bool IsProcessorInLinkedList_Locked(Processor* processor) const;
void PrependProcessor_Locked(Processor* processor);
Processor* head_;
};
LocalMonitor local_monitor_;
GlobalMonitor::Processor global_monitor_processor_;
static base::LazyInstance<GlobalMonitor>::type global_monitor_;
private:
void Init(FILE* stream);

82
deps/v8/src/asmjs/asm-js.cc

@@ -6,6 +6,7 @@
#include "src/api-natives.h"
#include "src/api.h"
#include "src/asmjs/asm-parser.h"
#include "src/asmjs/asm-typer.h"
#include "src/asmjs/asm-wasm-builder.h"
#include "src/assert-scope.h"
@@ -164,11 +165,52 @@ bool IsStdlibMemberValid(i::Isolate* isolate, Handle<JSReceiver> stdlib,
} // namespace
MaybeHandle<FixedArray> AsmJs::CompileAsmViaWasm(CompilationInfo* info) {
ErrorThrower thrower(info->isolate(), "Asm.js -> WebAssembly conversion");
wasm::ZoneBuffer* module = nullptr;
wasm::ZoneBuffer* asm_offsets = nullptr;
Handle<FixedArray> uses_array;
Handle<FixedArray> foreign_globals;
base::ElapsedTimer asm_wasm_timer;
asm_wasm_timer.Start();
wasm::AsmWasmBuilder builder(info);
Handle<FixedArray> foreign_globals;
if (FLAG_fast_validate_asm) {
wasm::AsmJsParser parser(info->isolate(), info->zone(), info->script(),
info->literal()->start_position(),
info->literal()->end_position());
if (!parser.Run()) {
DCHECK(!info->isolate()->has_pending_exception());
if (!FLAG_suppress_asm_messages) {
MessageLocation location(info->script(), parser.failure_location(),
parser.failure_location());
Handle<String> message =
info->isolate()
->factory()
->NewStringFromUtf8(CStrVector(parser.failure_message()))
.ToHandleChecked();
Handle<JSMessageObject> error_message =
MessageHandler::MakeMessageObject(
info->isolate(), MessageTemplate::kAsmJsInvalid, &location,
message, Handle<JSArray>::null());
error_message->set_error_level(v8::Isolate::kMessageWarning);
MessageHandler::ReportMessage(info->isolate(), &location,
error_message);
}
return MaybeHandle<FixedArray>();
}
Zone* zone = info->zone();
module = new (zone) wasm::ZoneBuffer(zone);
parser.module_builder()->WriteTo(*module);
asm_offsets = new (zone) wasm::ZoneBuffer(zone);
parser.module_builder()->WriteAsmJsOffsetTable(*asm_offsets);
// TODO(bradnelson): Remove foreign_globals plumbing (as we don't need it
// for the new parser).
foreign_globals = info->isolate()->factory()->NewFixedArray(0);
uses_array = info->isolate()->factory()->NewFixedArray(
static_cast<int>(parser.stdlib_uses()->size()));
int count = 0;
for (auto i : *parser.stdlib_uses()) {
uses_array->set(count++, Smi::FromInt(i));
}
} else {
auto asm_wasm_result = builder.Run(&foreign_globals);
if (!asm_wasm_result.success) {
DCHECK(!info->isolate()->has_pending_exception());
@@ -179,32 +221,34 @@ MaybeHandle<FixedArray> AsmJs::CompileAsmViaWasm(CompilationInfo* info) {
}
return MaybeHandle<FixedArray>();
}
double asm_wasm_time = asm_wasm_timer.Elapsed().InMillisecondsF();
module = asm_wasm_result.module_bytes;
asm_offsets = asm_wasm_result.asm_offset_table;
wasm::AsmTyper::StdlibSet uses = builder.typer()->StdlibUses();
uses_array = info->isolate()->factory()->NewFixedArray(
static_cast<int>(uses.size()));
int count = 0;
for (auto i : uses) {
uses_array->set(count++, Smi::FromInt(i));
}
}
wasm::ZoneBuffer* module = asm_wasm_result.module_bytes;
wasm::ZoneBuffer* asm_offsets = asm_wasm_result.asm_offset_table;
double asm_wasm_time = asm_wasm_timer.Elapsed().InMillisecondsF();
Vector<const byte> asm_offsets_vec(asm_offsets->begin(),
static_cast<int>(asm_offsets->size()));
base::ElapsedTimer compile_timer;
compile_timer.Start();
ErrorThrower thrower(info->isolate(), "Asm.js -> WebAssembly conversion");
MaybeHandle<JSObject> compiled = SyncCompileTranslatedAsmJs(
info->isolate(), &thrower,
wasm::ModuleWireBytes(module->begin(), module->end()), info->script(),
asm_offsets_vec);
DCHECK(!compiled.is_null());
DCHECK(!thrower.error());
double compile_time = compile_timer.Elapsed().InMillisecondsF();
DCHECK_GE(module->end(), module->begin());
uintptr_t wasm_size = module->end() - module->begin();
wasm::AsmTyper::StdlibSet uses = builder.typer()->StdlibUses();
Handle<FixedArray> uses_array =
info->isolate()->factory()->NewFixedArray(static_cast<int>(uses.size()));
int count = 0;
for (auto i : uses) {
uses_array->set(count++, Smi::FromInt(i));
}
Handle<FixedArray> result =
info->isolate()->factory()->NewFixedArray(kWasmDataEntryCount);
result->set(kWasmDataCompiledModule, *compiled.ToHandleChecked());
@@ -264,8 +308,6 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(i::Isolate* isolate,
i::Handle<i::FixedArray> foreign_globals(
i::FixedArray::cast(wasm_data->get(kWasmDataForeignGlobals)));
ErrorThrower thrower(isolate, "Asm.js -> WebAssembly instantiation");
// Create the ffi object for foreign functions {"": foreign}.
Handle<JSObject> ffi_object;
if (!foreign.is_null()) {
@@ -276,13 +318,17 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(i::Isolate* isolate,
foreign, NONE);
}
ErrorThrower thrower(isolate, "Asm.js -> WebAssembly instantiation");
i::MaybeHandle<i::Object> maybe_module_object =
i::wasm::SyncInstantiate(isolate, &thrower, module, ffi_object, memory);
if (maybe_module_object.is_null()) {
thrower.Reify(); // Ensure exceptions do not propagate.
return MaybeHandle<Object>();
}
DCHECK(!thrower.error());
i::Handle<i::Object> module_object = maybe_module_object.ToHandleChecked();
if (!FLAG_fast_validate_asm) {
i::Handle<i::Name> init_name(isolate->factory()->InternalizeUtf8String(
wasm::AsmWasmBuilder::foreign_init_name));
i::Handle<i::Object> init =
@@ -306,10 +352,12 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(i::Isolate* isolate,
}
foreign_args_array[j] = undefined;
}
i::MaybeHandle<i::Object> retval = i::Execution::Call(
isolate, init, undefined, foreign_globals->length(), foreign_args_array);
i::MaybeHandle<i::Object> retval =
i::Execution::Call(isolate, init, undefined, foreign_globals->length(),
foreign_args_array);
delete[] foreign_args_array;
DCHECK(!retval.is_null());
}
i::Handle<i::Name> single_function_name(
isolate->factory()->InternalizeUtf8String(

110
deps/v8/src/asmjs/asm-names.h

@@ -0,0 +1,110 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ASMJS_ASM_NAMES_H_
#define V8_ASMJS_ASM_NAMES_H_
#define STDLIB_MATH_VALUE_LIST(V) \
V(E) \
V(LN10) \
V(LN2) \
V(LOG2E) \
V(LOG10E) \
V(PI) \
V(SQRT1_2) \
V(SQRT2)
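// NOTE (illustrative, not in the commit): these X-macro lists are consumed by
// defining V locally before expanding them. For example, emitting an enum of
// the Math constants:
//
//   #define V(name) kMath##name,
//   enum class StdlibMathValue { STDLIB_MATH_VALUE_LIST(V) };
//   #undef V
//
// expands to { kMathE, kMathLN10, ..., kMathSQRT2 }.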
// V(stdlib.Math.<name>, Name, wasm-opcode, asm-js-type)
#define STDLIB_MATH_FUNCTION_MONOMORPHIC_LIST(V) \
V(acos, Acos, kExprF64Acos, dq2d) \
V(asin, Asin, kExprF64Asin, dq2d) \
V(atan, Atan, kExprF64Atan, dq2d) \
V(cos, Cos, kExprF64Cos, dq2d) \
V(sin, Sin, kExprF64Sin, dq2d) \
V(tan, Tan, kExprF64Tan, dq2d) \
V(exp, Exp, kExprF64Exp, dq2d) \
V(log, Log, kExprF64Log, dq2d) \
V(atan2, Atan2, kExprF64Atan2, dqdq2d) \
V(pow, Pow, kExprF64Pow, dqdq2d) \
V(imul, Imul, kExprI32Mul, ii2s) \
V(clz32, Clz32, kExprI32Clz, i2s)
// V(stdlib.Math.<name>, Name, unused, asm-js-type)
#define STDLIB_MATH_FUNCTION_CEIL_LIKE_LIST(V) \
V(ceil, Ceil, x, ceil_like) \
V(floor, Floor, x, ceil_like) \
V(sqrt, Sqrt, x, ceil_like)
// V(stdlib.Math.<name>, Name, unused, asm-js-type)
#define STDLIB_MATH_FUNCTION_LIST(V) \
V(min, Min, x, minmax) \
V(max, Max, x, minmax) \
V(abs, Abs, x, abs) \
V(fround, Fround, x, fround) \
STDLIB_MATH_FUNCTION_MONOMORPHIC_LIST(V) \
STDLIB_MATH_FUNCTION_CEIL_LIKE_LIST(V)
// V(stdlib.<name>, wasm-load-type, wasm-store-type, wasm-type)
#define STDLIB_ARRAY_TYPE_LIST(V) \
V(Int8Array, Mem8S, Mem8, I32) \
V(Uint8Array, Mem8U, Mem8, I32) \
V(Int16Array, Mem16S, Mem16, I32) \
V(Uint16Array, Mem16U, Mem16, I32) \
V(Int32Array, Mem, Mem, I32) \
V(Uint32Array, Mem, Mem, I32) \
V(Float32Array, Mem, Mem, F32) \
V(Float64Array, Mem, Mem, F64)
#define STDLIB_OTHER_LIST(V) \
V(Infinity) \
V(NaN) \
V(Math)
// clang-format off (for return)
#define KEYWORD_NAME_LIST(V) \
V(arguments) \
V(break) \
V(case) \
V(const) \
V(continue) \
V(default) \
V(do) \
V(else) \
V(eval) \
V(for) \
V(function) \
V(if) \
V(new) \
V(return ) \
V(switch) \
V(var) \
V(while)
// clang-format on
// V(token-string, token-name)
#define LONG_SYMBOL_NAME_LIST(V) \
V("<=", LE) \
V(">=", GE) \
V("==", EQ) \
V("!=", NE) \
V("<<", SHL) \
V(">>", SAR) \
V(">>>", SHR) \
V("'use asm'", UseAsm)
// clang-format off
#define SIMPLE_SINGLE_TOKEN_LIST(V) \
V('+') V('-') V('*') V('%') V('~') V('^') V('&') V('|') V('(') V(')') \
V('[') V(']') V('{') V('}') V(':') V(';') V(',') V('?')
// clang-format on
// V(name, value, string-name)
#define SPECIAL_TOKEN_LIST(V) \
V(kUninitialized, 0, "{uninitialized}") \
V(kEndOfInput, -1, "{end of input}") \
V(kParseError, -2, "{parse error}") \
V(kUnsigned, -3, "{unsigned value}") \
V(kDouble, -4, "{double value}")
#endif  // V8_ASMJS_ASM_NAMES_H_

2449
deps/v8/src/asmjs/asm-parser.cc

File diff suppressed because it is too large

316
deps/v8/src/asmjs/asm-parser.h

@@ -0,0 +1,316 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ASMJS_ASM_PARSER_H_
#define V8_ASMJS_ASM_PARSER_H_
#include <list>
#include <string>
#include <vector>
#include "src/asmjs/asm-scanner.h"
#include "src/asmjs/asm-typer.h"
#include "src/asmjs/asm-types.h"
#include "src/wasm/signature-map.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
namespace wasm {
// A custom parser + validator + wasm converter for asm.js:
// http://asmjs.org/spec/latest/
// This parser intentionally avoids the portions of JavaScript parsing
// that are not required to determine whether code is valid asm.js code.
// * It is mostly one pass.
// * It bails out on unexpected input.
// * It assumes strict ordering insofar as permitted by asm.js validation rules.
// * It relies on a custom scanner that provides de-duped identifiers in two
// scopes (local + module wide).
class AsmJsParser {
public:
explicit AsmJsParser(Isolate* isolate, Zone* zone, Handle<Script> script,
int start, int end);
bool Run();
const char* failure_message() const { return failure_message_.c_str(); }
int failure_location() const { return failure_location_; }
WasmModuleBuilder* module_builder() { return module_builder_; }
const AsmTyper::StdlibSet* stdlib_uses() const { return &stdlib_uses_; }
private:
// clang-format off
enum class VarKind {
kUnused,
kLocal,
kGlobal,
kSpecial,
kFunction,
kTable,
kImportedFunction,
#define V(_unused0, Name, _unused1, _unused2) kMath##Name,
STDLIB_MATH_FUNCTION_LIST(V)
#undef V
#define V(Name) kMath##Name,
STDLIB_MATH_VALUE_LIST(V)
#undef V
};
// clang-format on
struct FunctionImportInfo {
char* function_name;
size_t function_name_size;
SignatureMap cache;
std::vector<uint32_t> cache_index;
};
struct VarInfo {
AsmType* type;
WasmFunctionBuilder* function_builder;
FunctionImportInfo* import;
int32_t mask;
uint32_t index;
VarKind kind;
bool mutable_variable;
bool function_defined;
VarInfo();
void DeclareGlobalImport(AsmType* type, uint32_t index);
void DeclareStdlibFunc(VarKind kind, AsmType* type);
};
struct GlobalImport {
char* import_name;
size_t import_name_size;
uint32_t import_index;
uint32_t global_index;
bool needs_init;
};
enum class BlockKind { kRegular, kLoop, kOther };
struct BlockInfo {
BlockKind kind;
AsmJsScanner::token_t label;
};
// Helper class to make {TempVariable} safe for nesting.
class TemporaryVariableScope;
Zone* zone_;
AsmJsScanner scanner_;
WasmModuleBuilder* module_builder_;
WasmFunctionBuilder* current_function_builder_;
AsmType* return_type_;
std::uintptr_t stack_limit_;
AsmTyper::StdlibSet stdlib_uses_;
std::list<FunctionImportInfo> function_import_info_;
ZoneVector<VarInfo> global_var_info_;
ZoneVector<VarInfo> local_var_info_;
int function_temp_locals_offset_;
int function_temp_locals_used_;
int function_temp_locals_depth_;
// Error Handling related
bool failed_;
std::string failure_message_;
int failure_location_;
// Module Related.
AsmJsScanner::token_t stdlib_name_;
AsmJsScanner::token_t foreign_name_;
AsmJsScanner::token_t heap_name_;
static const AsmJsScanner::token_t kTokenNone = 0;
// Track if parsing a heap assignment.
bool inside_heap_assignment_;
AsmType* heap_access_type_;
ZoneVector<BlockInfo> block_stack_;
// Types used for stdlib functions and their setup.
AsmType* stdlib_dq2d_;
AsmType* stdlib_dqdq2d_;
AsmType* stdlib_fq2f_;
AsmType* stdlib_i2s_;
AsmType* stdlib_ii2s_;
AsmType* stdlib_minmax_;
AsmType* stdlib_abs_;
AsmType* stdlib_ceil_like_;
AsmType* stdlib_fround_;
// When making calls, the return type is needed to lookup signatures.
// For +callsite(..) or fround(callsite(..)) use this value to pass
// along the coercion.
AsmType* call_coercion_;
// The source position associated with the above {call_coercion}.
size_t call_coercion_position_;
// Used to track the last label we've seen so it can be matched to later
// statements it's attached to.
AsmJsScanner::token_t pending_label_;
// Global imports.
// NOTE: Holds the strings referenced in wasm-module-builder for imports.
ZoneLinkedList<GlobalImport> global_imports_;
Zone* zone() { return zone_; }
inline bool Peek(AsmJsScanner::token_t token) {
return scanner_.Token() == token;
}
inline bool Check(AsmJsScanner::token_t token) {
if (scanner_.Token() == token) {
scanner_.Next();
return true;
} else {
return false;
}
}
inline bool CheckForZero() {
if (scanner_.IsUnsigned() && scanner_.AsUnsigned() == 0) {
scanner_.Next();
return true;
} else {
return false;
}
}
inline bool CheckForDouble(double* value) {
if (scanner_.IsDouble()) {
*value = scanner_.AsDouble();
scanner_.Next();
return true;
} else {
return false;
}
}
inline bool CheckForUnsigned(uint64_t* value) {
if (scanner_.IsUnsigned()) {
*value = scanner_.AsUnsigned();
scanner_.Next();
return true;
} else {
return false;
}
}
inline bool CheckForUnsignedBelow(uint64_t limit, uint64_t* value) {
if (scanner_.IsUnsigned() && scanner_.AsUnsigned() < limit) {
*value = scanner_.AsUnsigned();
scanner_.Next();
return true;
} else {
return false;
}
}
inline AsmJsScanner::token_t Consume() {
AsmJsScanner::token_t ret = scanner_.Token();
scanner_.Next();
return ret;
}
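// NOTE (illustrative sketch, not in the commit): how these helpers compose in
// the recursive-descent methods below. Peek tests the current token without
// consuming it, Check consumes only on a match, and Consume takes the token
// unconditionally:
//
//   if (Check('?')) {          // '?' matched and consumed
//     // ... parse both arms of a conditional expression ...
//   } else if (Peek(',')) {    // ',' is next but remains unconsumed
//     // ... let the caller handle the comma ...
//   }
//   AsmJsScanner::token_t t = Consume();  // unconditionally take one token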
void SkipSemicolon();
VarInfo* GetVarInfo(AsmJsScanner::token_t token);
uint32_t VarIndex(VarInfo* info);
void DeclareGlobal(VarInfo* info, bool mutable_variable, AsmType* type,
ValueType vtype,
const WasmInitExpr& init = WasmInitExpr());
// Allocates a temporary local variable. The given {index} is absolute within
// the function body, consider using {TemporaryVariableScope} when nesting.
uint32_t TempVariable(int index);
void AddGlobalImport(std::string name, AsmType* type, ValueType vtype,
bool mutable_variable, VarInfo* info);
// Use to set up block stack layers (including synthetic ones for if-else).
// Begin/Loop/End below are implemented with these plus code generation.
void BareBegin(BlockKind kind = BlockKind::kOther,
AsmJsScanner::token_t label = 0);
void BareEnd();
int FindContinueLabelDepth(AsmJsScanner::token_t label);
int FindBreakLabelDepth(AsmJsScanner::token_t label);
// Use to set up actual wasm blocks/loops.
void Begin(AsmJsScanner::token_t label = 0);
void Loop(AsmJsScanner::token_t label = 0);
void End();
void InitializeStdlibTypes();
FunctionSig* ConvertSignature(AsmType* return_type,
const std::vector<AsmType*>& params);
// 6.1 ValidateModule
void ValidateModule();
void ValidateModuleParameters();
void ValidateModuleVars();
void ValidateModuleVar(bool mutable_variable);
bool ValidateModuleVarImport(VarInfo* info, bool mutable_variable);
void ValidateModuleVarStdlib(VarInfo* info);
void ValidateModuleVarNewStdlib(VarInfo* info);
void ValidateModuleVarFromGlobal(VarInfo* info, bool mutable_variable);
void ValidateExport(); // 6.2 ValidateExport
void ValidateFunctionTable(); // 6.3 ValidateFunctionTable
void ValidateFunction(); // 6.4 ValidateFunction
void ValidateFunctionParams(std::vector<AsmType*>* params);
void ValidateFunctionLocals(size_t param_count,
std::vector<ValueType>* locals);
void ValidateStatement(); // ValidateStatement
void Block(); // 6.5.1 Block
void ExpressionStatement(); // 6.5.2 ExpressionStatement
void EmptyStatement(); // 6.5.3 EmptyStatement
void IfStatement(); // 6.5.4 IfStatement
void ReturnStatement(); // 6.5.5 ReturnStatement
bool IterationStatement(); // 6.5.6 IterationStatement
void WhileStatement(); // 6.5.6 IterationStatement - while
void DoStatement(); // 6.5.6 IterationStatement - do
void ForStatement(); // 6.5.6 IterationStatement - for
void BreakStatement(); // 6.5.7 BreakStatement
void ContinueStatement(); // 6.5.8 ContinueStatement
void LabelledStatement(); // 6.5.9 LabelledStatement
void SwitchStatement(); // 6.5.10 SwitchStatement
void ValidateCase(); // 6.6. ValidateCase
void ValidateDefault(); // 6.7 ValidateDefault
AsmType* ValidateExpression(); // 6.8 ValidateExpression
AsmType* Expression(AsmType* expect); // 6.8.1 Expression
AsmType* NumericLiteral(); // 6.8.2 NumericLiteral
AsmType* Identifier(); // 6.8.3 Identifier
AsmType* CallExpression(); // 6.8.4 CallExpression
AsmType* MemberExpression(); // 6.8.5 MemberExpression
AsmType* AssignmentExpression(); // 6.8.6 AssignmentExpression
AsmType* UnaryExpression(); // 6.8.7 UnaryExpression
AsmType* MultiplicativeExpression(); // 6.8.8 MultiplicativeExpression
AsmType* AdditiveExpression(); // 6.8.9 AdditiveExpression
AsmType* ShiftExpression(); // 6.8.10 ShiftExpression
AsmType* RelationalExpression(); // 6.8.11 RelationalExpression
AsmType* EqualityExpression(); // 6.8.12 EqualityExpression
AsmType* BitwiseANDExpression(); // 6.8.13 BitwiseANDExpression
AsmType* BitwiseXORExpression(); // 6.8.14 BitwiseXORExpression
AsmType* BitwiseORExpression(); // 6.8.15 BitwiseORExpression
AsmType* ConditionalExpression(); // 6.8.16 ConditionalExpression
AsmType* ParenthesizedExpression(); // 6.8.17 ParenthesizedExpression
AsmType* ValidateCall(); // 6.9 ValidateCall
bool PeekCall(); // 6.9 ValidateCall - helper
void ValidateHeapAccess(); // 6.10 ValidateHeapAccess
void ValidateFloatCoercion(); // 6.11 ValidateFloatCoercion
void GatherCases(std::vector<int32_t>* cases);
};
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_ASMJS_ASM_PARSER_H_

431
deps/v8/src/asmjs/asm-scanner.cc

@@ -0,0 +1,431 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/asmjs/asm-scanner.h"
#include "src/conversions.h"
#include "src/flags.h"
#include "src/parsing/scanner.h"
#include "src/unicode-cache.h"
namespace v8 {
namespace internal {
namespace {
// Cap number of identifiers to ensure we can assign both global and
// local ones a token id in the range of an int32_t.
static const int kMaxIdentifierCount = 0xf000000;
}  // namespace
AsmJsScanner::AsmJsScanner()
: token_(kUninitialized),
preceding_token_(kUninitialized),
next_token_(kUninitialized),
position_(0),
preceding_position_(0),
next_position_(0),
rewind_(false),
in_local_scope_(false),
global_count_(0),
double_value_(0.0),
unsigned_value_(0),
preceded_by_newline_(false) {
#define V(name, _junk1, _junk2, _junk3) property_names_[#name] = kToken_##name;
STDLIB_MATH_FUNCTION_LIST(V)
STDLIB_ARRAY_TYPE_LIST(V)
#undef V
#define V(name) property_names_[#name] = kToken_##name;
STDLIB_MATH_VALUE_LIST(V)
STDLIB_OTHER_LIST(V)
#undef V
#define V(name) global_names_[#name] = kToken_##name;
KEYWORD_NAME_LIST(V)
#undef V
}
void AsmJsScanner::SetStream(std::unique_ptr<Utf16CharacterStream> stream) {
stream_ = std::move(stream);
Next();
}
void AsmJsScanner::Next() {
if (rewind_) {
preceding_token_ = token_;
preceding_position_ = position_;
token_ = next_token_;
position_ = next_position_;
next_token_ = kUninitialized;
next_position_ = 0;
rewind_ = false;
return;
}
if (token_ == kEndOfInput || token_ == kParseError) {
return;
}
#if DEBUG
if (FLAG_trace_asm_scanner) {
if (Token() == kDouble) {
PrintF("%lf ", AsDouble());
} else if (Token() == kUnsigned) {
PrintF("%" PRIu64 " ", AsUnsigned());
} else {
std::string name = Name(Token());
PrintF("%s ", name.c_str());
}
}
#endif
preceded_by_newline_ = false;
preceding_token_ = token_;
preceding_position_ = position_;
for (;;) {
position_ = stream_->pos();
uc32 ch = stream_->Advance();
switch (ch) {
case ' ':
case '\t':
case '\r':
// Ignore whitespace.
break;
case '\n':
// Track when we've passed a newline for optional semicolon support,
// but keep scanning.
preceded_by_newline_ = true;
break;
case kEndOfInput:
token_ = kEndOfInput;
return;
case '\'':
case '"':
ConsumeString(ch);
return;
case '/':
ch = stream_->Advance();
if (ch == '/') {
ConsumeCPPComment();
} else if (ch == '*') {
if (!ConsumeCComment()) {
token_ = kParseError;
return;
}
} else {
stream_->Back();
token_ = '/';
return;
}
// Breaks out of the switch, but loops again (i.e. the case where we
// parsed a comment and need to continue looking for the next token).
break;
case '<':
case '>':
case '=':
case '!':
ConsumeCompareOrShift(ch);
return;
#define V(single_char_token) case single_char_token:
SIMPLE_SINGLE_TOKEN_LIST(V)
#undef V
// Use fixed token IDs for ASCII.
token_ = ch;
return;
default:
if (IsIdentifierStart(ch)) {
ConsumeIdentifier(ch);
} else if (IsNumberStart(ch)) {
ConsumeNumber(ch);
} else {
// TODO(bradnelson): Support unicode (probably via UnicodeCache).
token_ = kParseError;
}
return;
}
}
}
void AsmJsScanner::Rewind() {
DCHECK_NE(kUninitialized, preceding_token_);
// TODO(bradnelson): Currently rewinding needs to leave in place the
// preceding newline state (in case a |0 ends a line).
// This is weird and stateful, fix me.
DCHECK(!rewind_);
next_token_ = token_;
next_position_ = position_;
token_ = preceding_token_;
position_ = preceding_position_;
preceding_token_ = kUninitialized;
preceding_position_ = 0;
rewind_ = true;
identifier_string_.clear();
}
void AsmJsScanner::ResetLocals() { local_names_.clear(); }
#if DEBUG
// Only used for debugging.
std::string AsmJsScanner::Name(token_t token) const {
if (token >= 32 && token < 127) {
return std::string(1, static_cast<char>(token));
}
for (auto& i : local_names_) {
if (i.second == token) {
return i.first;
}
}
for (auto& i : global_names_) {
if (i.second == token) {
return i.first;
}
}
for (auto& i : property_names_) {
if (i.second == token) {
return i.first;
}
}
switch (token) {
#define V(rawname, name) \
case kToken_##name: \
return rawname;
LONG_SYMBOL_NAME_LIST(V)
#undef V
#define V(name, value, string_name) \
case name: \
return string_name;
SPECIAL_TOKEN_LIST(V)
default:
break;
}
UNREACHABLE();
return "{unreachable}";
}
#endif
int AsmJsScanner::GetPosition() const {
DCHECK(!rewind_);
return static_cast<int>(stream_->pos());
}
void AsmJsScanner::Seek(int pos) {
stream_->Seek(pos);
preceding_token_ = kUninitialized;
token_ = kUninitialized;
next_token_ = kUninitialized;
preceding_position_ = 0;
position_ = 0;
next_position_ = 0;
rewind_ = false;
Next();
}
void AsmJsScanner::ConsumeIdentifier(uc32 ch) {
// Consume characters while still part of the identifier.
identifier_string_.clear();
while (IsIdentifierPart(ch)) {
identifier_string_ += ch;
ch = stream_->Advance();
}
// Go back one for next time.
stream_->Back();
// Decode what the identifier means.
if (preceding_token_ == '.') {
auto i = property_names_.find(identifier_string_);
if (i != property_names_.end()) {
token_ = i->second;
return;
}
} else {
{
auto i = local_names_.find(identifier_string_);
if (i != local_names_.end()) {
token_ = i->second;
return;
}
}
if (!in_local_scope_) {
auto i = global_names_.find(identifier_string_);
if (i != global_names_.end()) {
token_ = i->second;
return;
}
}
}
if (preceding_token_ == '.') {
CHECK(global_count_ < kMaxIdentifierCount);
token_ = kGlobalsStart + global_count_++;
property_names_[identifier_string_] = token_;
} else if (in_local_scope_) {
CHECK(local_names_.size() < kMaxIdentifierCount);
token_ = kLocalsStart - static_cast<token_t>(local_names_.size());
local_names_[identifier_string_] = token_;
} else {
CHECK(global_count_ < kMaxIdentifierCount);
token_ = kGlobalsStart + global_count_++;
global_names_[identifier_string_] = token_;
}
}
void AsmJsScanner::ConsumeNumber(uc32 ch) {
std::string number;
number = ch;
bool has_dot = ch == '.';
for (;;) {
ch = stream_->Advance();
if ((ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f') ||
(ch >= 'A' && ch <= 'F') || ch == '.' || ch == 'b' || ch == 'o' ||
ch == 'x' ||
((ch == '-' || ch == '+') && (number[number.size() - 1] == 'e' ||
number[number.size() - 1] == 'E'))) {
// TODO(bradnelson): Test weird cases ending in -.
if (ch == '.') {
has_dot = true;
}
number.push_back(ch);
} else {
break;
}
}
stream_->Back();
// Special case the most common number.
if (number.size() == 1 && number[0] == '0') {
unsigned_value_ = 0;
token_ = kUnsigned;
return;
}
// Pick out dot.
if (number.size() == 1 && number[0] == '.') {
token_ = '.';
return;
}
// Decode numbers.
UnicodeCache cache;
double_value_ = StringToDouble(
&cache,
Vector<uint8_t>(
const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(number.data())),
static_cast<int>(number.size())),
ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY | ALLOW_IMPLICIT_OCTAL);
if (std::isnan(double_value_)) {
// Check if string to number conversion didn't consume all the characters.
// This happens if the character filter let through something invalid
// like 0123ef, for example.
// TODO(bradnelson): Check if this happens often enough to be a perf
// problem.
if (number[0] == '.') {
for (size_t k = 1; k < number.size(); ++k) {
stream_->Back();
}
token_ = '.';
return;
}
// Anything else that doesn't parse is an error.
token_ = kParseError;
return;
}
if (has_dot) {
token_ = kDouble;
} else {
unsigned_value_ = static_cast<uint32_t>(double_value_);
token_ = kUnsigned;
}
}
bool AsmJsScanner::ConsumeCComment() {
for (;;) {
uc32 ch = stream_->Advance();
while (ch == '*') {
ch = stream_->Advance();
if (ch == '/') {
return true;
}
}
if (ch == kEndOfInput) {
return false;
}
}
}
void AsmJsScanner::ConsumeCPPComment() {
for (;;) {
uc32 ch = stream_->Advance();
if (ch == '\n' || ch == kEndOfInput) {
return;
}
}
}
void AsmJsScanner::ConsumeString(uc32 quote) {
// Only string allowed is 'use asm' / "use asm".
const char* expected = "use asm";
for (; *expected != '\0'; ++expected) {
if (stream_->Advance() != *expected) {
token_ = kParseError;
return;
}
}
if (stream_->Advance() != quote) {
token_ = kParseError;
return;
}
token_ = kToken_UseAsm;
}
void AsmJsScanner::ConsumeCompareOrShift(uc32 ch) {
uc32 next_ch = stream_->Advance();
if (next_ch == '=') {
switch (ch) {
case '<':
token_ = kToken_LE;
break;
case '>':
token_ = kToken_GE;
break;
case '=':
token_ = kToken_EQ;
break;
case '!':
token_ = kToken_NE;
break;
default:
UNREACHABLE();
}
} else if (ch == '<' && next_ch == '<') {
token_ = kToken_SHL;
} else if (ch == '>' && next_ch == '>') {
if (stream_->Advance() == '>') {
token_ = kToken_SHR;
} else {
token_ = kToken_SAR;
stream_->Back();
}
} else {
stream_->Back();
token_ = ch;
}
}
bool AsmJsScanner::IsIdentifierStart(uc32 ch) {
return (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z') || ch == '_' ||
ch == '$';
}
bool AsmJsScanner::IsIdentifierPart(uc32 ch) {
return IsIdentifierStart(ch) || (ch >= '0' && ch <= '9');
}
bool AsmJsScanner::IsNumberStart(uc32 ch) {
return ch == '.' || (ch >= '0' && ch <= '9');
}
} // namespace internal
} // namespace v8

165
deps/v8/src/asmjs/asm-scanner.h

@@ -0,0 +1,165 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ASMJS_ASM_SCANNER_H_
#define V8_ASMJS_ASM_SCANNER_H_
#include <memory>
#include <string>
#include <unordered_map>
#include "src/asmjs/asm-names.h"
#include "src/base/logging.h"
#include "src/globals.h"
namespace v8 {
namespace internal {
class Utf16CharacterStream;
// A custom scanner to extract the token stream needed to parse valid
// asm.js: http://asmjs.org/spec/latest/
// This scanner intentionally avoids the portions of JavaScript lexing
// that are not required to determine whether code is valid asm.js code.
// * Strings are disallowed except for 'use asm'.
// * Only the subset of keywords needed to check asm.js invariants are
// included.
// * Identifiers are accumulated into local + global string tables
// (for performance).
class V8_EXPORT_PRIVATE AsmJsScanner {
public:
typedef int32_t token_t;
AsmJsScanner();
// Pick the stream to parse (must be called before anything else).
void SetStream(std::unique_ptr<Utf16CharacterStream> stream);
// Get current token.
token_t Token() const { return token_; }
// Get position of current token.
size_t Position() const { return position_; }
// Advance to the next token.
void Next();
// Back up by one token.
void Rewind();
// Get raw string for current identifier.
const std::string& GetIdentifierString() const {
// Identifier strings don't work after a rewind.
DCHECK(!rewind_);
return identifier_string_;
}
// Check if we just passed a newline.
bool IsPrecededByNewline() const {
// Newline tracking doesn't work if you back up.
DCHECK(!rewind_);
return preceded_by_newline_;
}
#if DEBUG
// Debug only method to go from a token back to its name.
// Slow, only use for debugging.
std::string Name(token_t token) const;
#endif
// Get current position (to use with Seek).
int GetPosition() const;
// Restores old position (token after that position).
void Seek(int pos);
// Select whether identifiers are resolved in global or local scope,
// and which scope new identifiers are added to.
void EnterLocalScope() { in_local_scope_ = true; }
void EnterGlobalScope() { in_local_scope_ = false; }
// Drop all current local identifiers.
void ResetLocals();
// Methods to check if a token is an identifier and which scope.
bool IsLocal() const { return IsLocal(Token()); }
bool IsGlobal() const { return IsGlobal(Token()); }
static bool IsLocal(token_t token) { return token <= kLocalsStart; }
static bool IsGlobal(token_t token) { return token >= kGlobalsStart; }
// Methods to find the index position of an identifier (count starting from
// 0 for each scope separately).
static size_t LocalIndex(token_t token) {
DCHECK(IsLocal(token));
return -(token - kLocalsStart);
}
static size_t GlobalIndex(token_t token) {
DCHECK(IsGlobal(token));
return token - kGlobalsStart;
}
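// NOTE (illustrative): with kLocalsStart == -10000 and kGlobalsStart == 256
// (see the enum below), the first three locals receive tokens -10000,
// -10001, -10002 and the first three globals 256, 257, 258, so
// LocalIndex(-10002) == 2 and GlobalIndex(258) == 2.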
// Methods to check if the current token is an asm.js "number" (contains a
// dot) or an "unsigned" (a number without a dot).
bool IsUnsigned() const { return Token() == kUnsigned; }
uint64_t AsUnsigned() const { return unsigned_value_; }
bool IsDouble() const { return Token() == kDouble; }
double AsDouble() const { return double_value_; }
// clang-format off
enum {
// [-10000-kMaxIdentifierCount, -10000) :: Local identifiers (counting
// backwards)
// [-10000 .. -1) :: Builtin tokens like keywords
// (also includes some special
// ones like end of input)
// 0 .. 255 :: Single char tokens
// 256 .. 256+kMaxIdentifierCount :: Global identifiers
kLocalsStart = -10000,
#define V(name, _junk1, _junk2, _junk3) kToken_##name,
STDLIB_MATH_FUNCTION_LIST(V)
STDLIB_ARRAY_TYPE_LIST(V)
#undef V
#define V(name) kToken_##name,
STDLIB_OTHER_LIST(V)
STDLIB_MATH_VALUE_LIST(V)
KEYWORD_NAME_LIST(V)
#undef V
#define V(rawname, name) kToken_##name,
LONG_SYMBOL_NAME_LIST(V)
#undef V
#define V(name, value, string_name) name = value,
SPECIAL_TOKEN_LIST(V)
#undef V
kGlobalsStart = 256,
};
// clang-format on
private:
std::unique_ptr<Utf16CharacterStream> stream_;
token_t token_;
token_t preceding_token_;
token_t next_token_; // Only set when in {rewind} state.
size_t position_; // Corresponds to {token} position.
size_t preceding_position_; // Corresponds to {preceding_token} position.
size_t next_position_; // Only set when in {rewind} state.
bool rewind_;
std::string identifier_string_;
bool in_local_scope_;
std::unordered_map<std::string, token_t> local_names_;
std::unordered_map<std::string, token_t> global_names_;
std::unordered_map<std::string, token_t> property_names_;
int global_count_;
double double_value_;
uint64_t unsigned_value_;
bool preceded_by_newline_;
// Consume multiple characters.
void ConsumeIdentifier(uc32 ch);
void ConsumeNumber(uc32 ch);
bool ConsumeCComment();
void ConsumeCPPComment();
void ConsumeString(uc32 quote);
void ConsumeCompareOrShift(uc32 ch);
// Classify character categories.
bool IsIdentifierStart(uc32 ch);
bool IsIdentifierPart(uc32 ch);
bool IsNumberStart(uc32 ch);
};
} // namespace internal
} // namespace v8
#endif // V8_ASMJS_ASM_SCANNER_H_

12
deps/v8/src/asmjs/asm-wasm-builder.cc

@@ -91,6 +91,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
FunctionSig::Builder b(zone(), 0, 0);
init_function_ = builder_->AddFunction(b.Build());
builder_->MarkStartFunction(init_function_);
// Record start of the function, used as position for the stack check.
init_function_->SetAsmFunctionStartPosition(literal_->start_position());
}
void BuildForeignInitFunction() {
@@ -170,7 +172,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
new_func_scope = new (info->zone()) DeclarationScope(
info->zone(), decl->fun()->scope()->outer_scope(), FUNCTION_SCOPE);
info->set_asm_function_scope(new_func_scope);
if (!Compiler::ParseAndAnalyze(info.get())) {
if (!Compiler::ParseAndAnalyze(info.get(), info_->isolate())) {
decl->fun()->scope()->outer_scope()->RemoveInnerScope(new_func_scope);
if (isolate_->has_pending_exception()) {
isolate_->clear_pending_exception();
@@ -224,6 +226,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
RECURSE(Visit(stmt));
if (typer_failed_) break;
// Not stopping when a jump statement is found.
}
}
@@ -300,6 +303,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitGetIterator(GetIterator* expr) { UNREACHABLE(); }
void VisitImportCallExpression(ImportCallExpression* expr) { UNREACHABLE(); }
void VisitIfStatement(IfStatement* stmt) {
DCHECK_EQ(kFuncScope, scope_);
RECURSE(Visit(stmt->condition()));
@@ -1066,7 +1071,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (as_init) UnLoadInitFunction();
}
void VisitYield(Yield* expr) { UNREACHABLE(); }
void VisitSuspend(Suspend* expr) { UNREACHABLE(); }
void VisitThrow(Throw* expr) { UNREACHABLE(); }
@@ -2001,6 +2006,9 @@ AsmWasmBuilder::Result AsmWasmBuilder::Run(Handle<FixedArray>* foreign_args) {
info_->parse_info()->ast_value_factory(),
info_->script(), info_->literal(), &typer_);
bool success = impl.Build();
if (!success) {
return {nullptr, nullptr, success};
}
*foreign_args = impl.GetForeignArgs();
ZoneBuffer* module_buffer = new (zone) ZoneBuffer(zone);
impl.builder_->WriteTo(*module_buffer);

120
deps/v8/src/assembler.cc

@@ -138,35 +138,40 @@ const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
// -----------------------------------------------------------------------------
// Implementation of AssemblerBase
AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
: isolate_(isolate),
jit_cookie_(0),
AssemblerBase::IsolateData::IsolateData(Isolate* isolate)
: serializer_enabled_(isolate->serializer_enabled())
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
,
max_old_generation_size_(isolate->heap()->MaxOldGenerationSize())
#endif
#if V8_TARGET_ARCH_X64
,
code_range_start_(
isolate->heap()->memory_allocator()->code_range()->start())
#endif
{
}
AssemblerBase::AssemblerBase(IsolateData isolate_data, void* buffer,
int buffer_size)
: isolate_data_(isolate_data),
enabled_cpu_features_(0),
emit_debug_code_(FLAG_debug_code),
predictable_code_size_(false),
// We may use the assembler without an isolate.
serializer_enabled_(isolate && isolate->serializer_enabled()),
constant_pool_available_(false) {
DCHECK_NOT_NULL(isolate);
if (FLAG_mask_constants_with_cookie) {
jit_cookie_ = isolate->random_number_generator()->NextInt();
}
own_buffer_ = buffer == NULL;
if (buffer_size == 0) buffer_size = kMinimalBufferSize;
DCHECK(buffer_size > 0);
if (own_buffer_) buffer = NewArray<byte>(buffer_size);
buffer_ = static_cast<byte*>(buffer);
buffer_size_ = buffer_size;
pc_ = buffer_;
}
AssemblerBase::~AssemblerBase() {
if (own_buffer_) DeleteArray(buffer_);
}
void AssemblerBase::FlushICache(Isolate* isolate, void* start, size_t size) {
if (size == 0) return;
@@ -178,10 +183,9 @@ void AssemblerBase::FlushICache(Isolate* isolate, void* start, size_t size) {
#endif // USE_SIMULATOR
}
void AssemblerBase::Print() {
void AssemblerBase::Print(Isolate* isolate) {
OFStream os(stdout);
v8::internal::Disassembler::Decode(isolate(), &os, buffer_, pc_, nullptr);
v8::internal::Disassembler::Decode(isolate, &os, buffer_, pc_, nullptr);
}
@@ -308,68 +312,62 @@ const int kCodeWithIdTag = 0;
const int kDeoptReasonTag = 1;
void RelocInfo::update_wasm_memory_reference(
Address old_base, Address new_base, ICacheFlushMode icache_flush_mode) {
Isolate* isolate, Address old_base, Address new_base,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmMemoryReference(rmode_));
DCHECK_GE(wasm_memory_reference(), old_base);
Address updated_reference = new_base + (wasm_memory_reference() - old_base);
// The reference is not checked here but at runtime. Validity of references
// may change over time.
unchecked_update_wasm_memory_reference(updated_reference, icache_flush_mode);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
}
unchecked_update_wasm_memory_reference(isolate, updated_reference,
icache_flush_mode);
}
void RelocInfo::update_wasm_memory_size(uint32_t old_size, uint32_t new_size,
void RelocInfo::update_wasm_memory_size(Isolate* isolate, uint32_t old_size,
uint32_t new_size,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmMemorySizeReference(rmode_));
uint32_t current_size_reference = wasm_memory_size_reference();
uint32_t updated_size_reference =
new_size + (current_size_reference - old_size);
unchecked_update_wasm_size(updated_size_reference, icache_flush_mode);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
}
unchecked_update_wasm_size(isolate, updated_size_reference,
icache_flush_mode);
}
void RelocInfo::update_wasm_global_reference(
Address old_base, Address new_base, ICacheFlushMode icache_flush_mode) {
Isolate* isolate, Address old_base, Address new_base,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmGlobalReference(rmode_));
Address updated_reference;
DCHECK(reinterpret_cast<uintptr_t>(old_base) <=
reinterpret_cast<uintptr_t>(wasm_global_reference()));
DCHECK_LE(old_base, wasm_global_reference());
updated_reference = new_base + (wasm_global_reference() - old_base);
DCHECK(reinterpret_cast<uintptr_t>(new_base) <=
reinterpret_cast<uintptr_t>(updated_reference));
unchecked_update_wasm_memory_reference(updated_reference, icache_flush_mode);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(isolate_, pc_, sizeof(int32_t));
}
DCHECK_LE(new_base, updated_reference);
unchecked_update_wasm_memory_reference(isolate, updated_reference,
icache_flush_mode);
}
void RelocInfo::update_wasm_function_table_size_reference(
uint32_t old_size, uint32_t new_size, ICacheFlushMode icache_flush_mode) {
Isolate* isolate, uint32_t old_size, uint32_t new_size,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmFunctionTableSizeReference(rmode_));
uint32_t current_size_reference = wasm_function_table_size_reference();
uint32_t updated_size_reference =
new_size + (current_size_reference - old_size);
unchecked_update_wasm_size(updated_size_reference, icache_flush_mode);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
}
unchecked_update_wasm_size(isolate, updated_size_reference,
icache_flush_mode);
}
void RelocInfo::set_target_address(Address target,
void RelocInfo::set_target_address(Isolate* isolate, Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
Assembler::set_target_address_at(isolate_, pc_, host_, target,
Assembler::set_target_address_at(isolate, pc_, host_, target,
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
Code* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target_code);
}
}
@@ -652,9 +650,7 @@ void RelocIterator::next() {
done_ = true;
}
RelocIterator::RelocIterator(Code* code, int mode_mask)
: rinfo_(code->map()->GetIsolate()) {
RelocIterator::RelocIterator(Code* code, int mode_mask) {
rinfo_.host_ = code;
rinfo_.pc_ = code->instruction_start();
rinfo_.data_ = 0;
@@ -677,9 +673,7 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
next();
}
RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
: rinfo_(desc.origin->isolate()) {
RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
rinfo_.pc_ = desc.buffer;
rinfo_.data_ = 0;
// Relocation info is read backwards.
@@ -702,7 +696,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
}
#ifdef DEBUG
bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
bool RelocInfo::RequiresRelocation(Isolate* isolate, const CodeDesc& desc) {
// Ensure there are no code targets or embedded objects present in the
// deoptimization entries, they would require relocation after code
// generation.
@@ -1234,6 +1228,11 @@ ExternalReference ExternalReference::address_of_regexp_stack_limit(
return ExternalReference(isolate->regexp_stack()->limit_address());
}
ExternalReference ExternalReference::address_of_regexp_dotall_flag(
Isolate* isolate) {
return ExternalReference(&FLAG_harmony_regexp_dotall);
}
ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
return ExternalReference(isolate->heap()->store_buffer_top_address());
}
@@ -1546,6 +1545,23 @@ ExternalReference ExternalReference::libc_memchr_function(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memchr)));
}
void* libc_memcpy(void* dest, const void* src, size_t n) {
return memcpy(dest, src, n);
}
ExternalReference ExternalReference::libc_memcpy_function(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memcpy)));
}
void* libc_memset(void* dest, int byte, size_t n) {
DCHECK_EQ(static_cast<char>(byte), byte);
return memset(dest, byte, n);
}
ExternalReference ExternalReference::libc_memset_function(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memset)));
}
ExternalReference ExternalReference::page_flags(Page* page) {
return ExternalReference(reinterpret_cast<Address>(page) +
MemoryChunk::kFlagsOffset);
@@ -1902,14 +1918,12 @@ int ConstantPoolBuilder::Emit(Assembler* assm) {
void Assembler::RecordDeoptReason(DeoptimizeReason reason,
SourcePosition position, int id) {
if (FLAG_trace_deopt || isolate()->is_profiling()) {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::DEOPT_SCRIPT_OFFSET, position.ScriptOffset());
RecordRelocInfo(RelocInfo::DEOPT_INLINING_ID, position.InliningId());
RecordRelocInfo(RelocInfo::DEOPT_REASON, static_cast<int>(reason));
RecordRelocInfo(RelocInfo::DEOPT_ID, id);
}
}
void Assembler::RecordComment(const char* msg) {

81
deps/v8/src/assembler.h

@@ -64,18 +64,30 @@ enum class CodeObjectRequired { kNo, kYes };
class AssemblerBase: public Malloced {
public:
AssemblerBase(Isolate* isolate, void* buffer, int buffer_size);
struct IsolateData {
explicit IsolateData(Isolate* isolate);
IsolateData(const IsolateData&) = default;
bool serializer_enabled_;
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
size_t max_old_generation_size_;
#endif
#if V8_TARGET_ARCH_X64
Address code_range_start_;
#endif
};
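// NOTE (illustrative, not part of the commit): IsolateData snapshots the few
// isolate-derived facts an assembler needs, so code generation can proceed
// without a live Isolate* (e.g. on a background thread). A sketch, assuming
// some concrete Assembler subclass:
//
//   AssemblerBase::IsolateData data(isolate);  // snapshot on the main thread
//   Assembler masm(data, nullptr, 0);          // construct/use off-thread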
AssemblerBase(IsolateData isolate_data, void* buffer, int buffer_size);
virtual ~AssemblerBase();
Isolate* isolate() const { return isolate_; }
int jit_cookie() const { return jit_cookie_; }
IsolateData isolate_data() const { return isolate_data_; }
bool serializer_enabled() const { return isolate_data_.serializer_enabled_; }
void enable_serializer() { isolate_data_.serializer_enabled_ = true; }
bool emit_debug_code() const { return emit_debug_code_; }
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
bool serializer_enabled() const { return serializer_enabled_; }
void enable_serializer() { serializer_enabled_ = true; }
bool predictable_code_size() const { return predictable_code_size_; }
void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
@@ -113,7 +125,7 @@ class AssemblerBase: public Malloced {
virtual void AbortedCodeGeneration() { }
// Debugging
void Print();
void Print(Isolate* isolate);
static const int kMinimalBufferSize = 4*KB;
@@ -139,12 +151,10 @@ class AssemblerBase: public Malloced {
byte* pc_;
private:
Isolate* isolate_;
int jit_cookie_;
IsolateData isolate_data_;
uint64_t enabled_cpu_features_;
bool emit_debug_code_;
bool predictable_code_size_;
bool serializer_enabled_;
// Indicates whether the constant pool can be accessed, which is only possible
// if the pp register points to the current code object's constant pool.
@@ -241,7 +251,7 @@ class CpuFeatures : public AllStatic {
static inline bool SupportsCrankshaft();
static inline bool SupportsSimd128();
static inline bool SupportsWasmSimd128();
static inline unsigned icache_line_size() {
DCHECK(icache_line_size_ != 0);
@@ -372,14 +382,10 @@ class RelocInfo {
STATIC_ASSERT(NUMBER_OF_MODES <= kBitsPerInt);
explicit RelocInfo(Isolate* isolate) : isolate_(isolate) {
DCHECK_NOT_NULL(isolate);
}
RelocInfo() = default;
RelocInfo(Isolate* isolate, byte* pc, Mode rmode, intptr_t data, Code* host)
: isolate_(isolate), pc_(pc), rmode_(rmode), data_(data), host_(host) {
DCHECK_NOT_NULL(isolate);
}
RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
: pc_(pc), rmode_(rmode), data_(data), host_(host) {}
static inline bool IsRealRelocMode(Mode mode) {
return mode >= FIRST_REAL_RELOC_MODE && mode <= LAST_REAL_RELOC_MODE;
@@ -478,7 +484,6 @@ class RelocInfo {
static inline int ModeMask(Mode mode) { return 1 << mode; }
// Accessors
Isolate* isolate() const { return isolate_; }
byte* pc() const { return pc_; }
void set_pc(byte* pc) { pc_ = pc; }
Mode rmode() const { return rmode_; }
@@ -506,34 +511,34 @@ class RelocInfo {
uint32_t wasm_function_table_size_reference();
uint32_t wasm_memory_size_reference();
void update_wasm_memory_reference(
Address old_base, Address new_base,
Isolate* isolate, Address old_base, Address new_base,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void update_wasm_memory_size(
uint32_t old_size, uint32_t new_size,
Isolate* isolate, uint32_t old_size, uint32_t new_size,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void update_wasm_global_reference(
Address old_base, Address new_base,
Isolate* isolate, Address old_base, Address new_base,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void update_wasm_function_table_size_reference(
uint32_t old_base, uint32_t new_base,
Isolate* isolate, uint32_t old_base, uint32_t new_base,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_target_address(
Address target,
Isolate* isolate, Address target,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
INLINE(Address target_address());
INLINE(Object* target_object());
INLINE(Handle<Object> target_object_handle(Assembler* origin));
INLINE(HeapObject* target_object());
INLINE(Handle<HeapObject> target_object_handle(Assembler* origin));
INLINE(void set_target_object(
Object* target,
HeapObject* target,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(Address target_runtime_entry(Assembler* origin));
INLINE(void set_target_runtime_entry(
Address target,
Isolate* isolate, Address target,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(Cell* target_cell());
@@ -541,7 +546,7 @@ class RelocInfo {
INLINE(void set_target_cell(
Cell* cell, WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(Handle<Object> code_age_stub_handle(Assembler* origin));
INLINE(Handle<Code> code_age_stub_handle(Assembler* origin));
INLINE(Code* code_age_stub());
INLINE(void set_code_age_stub(
Code* stub, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
@@ -585,11 +590,11 @@ class RelocInfo {
// the break points where straight-line code is patched with a call
// instruction.
INLINE(Address debug_call_address());
INLINE(void set_debug_call_address(Address target));
INLINE(void set_debug_call_address(Isolate*, Address target));
// Wipe out a relocation to a fixed value, used for making snapshots
// reproducible.
INLINE(void WipeOut());
INLINE(void WipeOut(Isolate* isolate));
template<typename StaticVisitor> inline void Visit(Heap* heap);
@ -603,7 +608,7 @@ class RelocInfo {
#ifdef DEBUG
// Check whether the given code contains relocation information that
// either is position-relative or movable by the garbage collector.
static bool RequiresRelocation(const CodeDesc& desc);
static bool RequiresRelocation(Isolate* isolate, const CodeDesc& desc);
#endif
#ifdef ENABLE_DISASSEMBLER
@ -623,11 +628,11 @@ class RelocInfo {
static const int kApplyMask; // Modes affected by apply. Depends on arch.
private:
void unchecked_update_wasm_memory_reference(Address address,
void unchecked_update_wasm_memory_reference(Isolate* isolate, Address address,
ICacheFlushMode flush_mode);
void unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode);
void unchecked_update_wasm_size(uint32_t size, ICacheFlushMode flush_mode);
Isolate* isolate_;
// On ARM, note that pc_ is the address of the constant pool entry
// to be relocated and not the address of the instruction
// referencing the constant pool entry (except when rmode_ ==
@ -918,6 +923,9 @@ class ExternalReference BASE_EMBEDDED {
// Static variable RegExpStack::limit_address()
static ExternalReference address_of_regexp_stack_limit(Isolate* isolate);
// Direct access to FLAG_harmony_regexp_dotall.
static ExternalReference address_of_regexp_dotall_flag(Isolate* isolate);
// Static variables for RegExp.
static ExternalReference address_of_static_offsets_vector(Isolate* isolate);
static ExternalReference address_of_regexp_stack_memory_address(
@ -981,6 +989,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference ieee754_tanh_function(Isolate* isolate);
static ExternalReference libc_memchr_function(Isolate* isolate);
static ExternalReference libc_memcpy_function(Isolate* isolate);
static ExternalReference libc_memset_function(Isolate* isolate);
static ExternalReference page_flags(Page* page);
@ -1076,7 +1086,6 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ExternalReference);
// -----------------------------------------------------------------------------
// Utility functions
void* libc_memchr(void* string, int character, size_t search_length);
inline int NumberOfBitsSet(uint32_t x) {
unsigned int num_bits_set;

9
deps/v8/src/ast/ast-expression-rewriter.cc

@ -265,8 +265,7 @@ void AstExpressionRewriter::VisitAssignment(Assignment* node) {
AST_REWRITE_PROPERTY(Expression, node, value);
}
void AstExpressionRewriter::VisitYield(Yield* node) {
void AstExpressionRewriter::VisitSuspend(Suspend* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, generator_object);
AST_REWRITE_PROPERTY(Expression, node, expression);
@ -377,6 +376,12 @@ void AstExpressionRewriter::VisitGetIterator(GetIterator* node) {
AST_REWRITE_PROPERTY(Expression, node, iterable);
}
void AstExpressionRewriter::VisitImportCallExpression(
ImportCallExpression* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, argument);
}
void AstExpressionRewriter::VisitDoExpression(DoExpression* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Block, node, block);

73
deps/v8/src/ast/ast-numbering.cc

@ -15,17 +15,19 @@ namespace internal {
class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
public:
AstNumberingVisitor(uintptr_t stack_limit, Zone* zone,
Compiler::EagerInnerFunctionLiterals* eager_literals)
Compiler::EagerInnerFunctionLiterals* eager_literals,
bool collect_type_profile = false)
: zone_(zone),
eager_literals_(eager_literals),
next_id_(BailoutId::FirstUsable().ToInt()),
yield_count_(0),
suspend_count_(0),
properties_(zone),
language_mode_(SLOPPY),
slot_cache_(zone),
disable_crankshaft_reason_(kNoReason),
dont_optimize_reason_(kNoReason),
catch_prediction_(HandlerTable::UNCAUGHT) {
catch_prediction_(HandlerTable::UNCAUGHT),
collect_type_profile_(collect_type_profile) {
InitializeAstVisitor(stack_limit);
}
@ -93,7 +95,7 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
Zone* zone_;
Compiler::EagerInnerFunctionLiterals* eager_literals_;
int next_id_;
int yield_count_;
int suspend_count_;
AstProperties properties_;
LanguageMode language_mode_;
// The slot cache allows us to reuse certain feedback slots.
@ -101,6 +103,7 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
BailoutReason disable_crankshaft_reason_;
BailoutReason dont_optimize_reason_;
HandlerTable::CatchPrediction catch_prediction_;
bool collect_type_profile_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(AstNumberingVisitor);
@ -238,12 +241,11 @@ void AstNumberingVisitor::VisitReturnStatement(ReturnStatement* node) {
properties_.flags() & AstProperties::kMustUseIgnitionTurbo);
}
void AstNumberingVisitor::VisitYield(Yield* node) {
node->set_yield_id(yield_count_);
yield_count_++;
void AstNumberingVisitor::VisitSuspend(Suspend* node) {
node->set_suspend_id(suspend_count_);
suspend_count_++;
IncrementNodeCount();
node->set_base_id(ReserveIdRange(Yield::num_ids()));
node->set_base_id(ReserveIdRange(Suspend::num_ids()));
Visit(node->generator_object());
Visit(node->expression());
}
@ -322,10 +324,17 @@ void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
// has to stash it somewhere. Changing the runtime function into another
// one in ast-numbering seemed like a simple and straightforward solution to
// that problem.
if (node->is_jsruntime() &&
node->context_index() == Context::ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX &&
catch_prediction_ == HandlerTable::ASYNC_AWAIT) {
if (node->is_jsruntime() && catch_prediction_ == HandlerTable::ASYNC_AWAIT) {
switch (node->context_index()) {
case Context::ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX:
node->set_context_index(Context::ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX);
break;
case Context::ASYNC_GENERATOR_AWAIT_CAUGHT:
node->set_context_index(Context::ASYNC_GENERATOR_AWAIT_UNCAUGHT);
break;
default:
break;
}
}
}
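
A minimal, self-contained sketch of the rewrite above, using hypothetical enum names in place of V8's Context indices: under an ASYNC_AWAIT catch prediction, the "caught" await intrinsics are swapped for their "uncaught" variants, and every other runtime call is left alone.

#include <cassert>

// Hypothetical stand-ins for the Context indices named in the diff above.
enum ContextIndex {
  kAsyncFunctionAwaitCaught,
  kAsyncFunctionAwaitUncaught,
  kAsyncGeneratorAwaitCaught,
  kAsyncGeneratorAwaitUncaught,
  kSomeOtherIntrinsic,
};

ContextIndex RewriteForAsyncAwaitPrediction(ContextIndex index) {
  switch (index) {
    case kAsyncFunctionAwaitCaught:
      return kAsyncFunctionAwaitUncaught;
    case kAsyncGeneratorAwaitCaught:
      return kAsyncGeneratorAwaitUncaught;
    default:
      return index;  // all other runtime calls are untouched
  }
}

int main() {
  assert(RewriteForAsyncAwaitPrediction(kAsyncFunctionAwaitCaught) ==
         kAsyncFunctionAwaitUncaught);
  assert(RewriteForAsyncAwaitPrediction(kSomeOtherIntrinsic) ==
         kSomeOtherIntrinsic);
  return 0;
}
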
@ -342,10 +351,10 @@ void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
node->set_base_id(ReserveIdRange(DoWhileStatement::num_ids()));
node->set_first_yield_id(yield_count_);
node->set_first_suspend_id(suspend_count_);
Visit(node->body());
Visit(node->cond());
node->set_yield_count(yield_count_ - node->first_yield_id());
node->set_suspend_count(suspend_count_ - node->first_suspend_id());
}
@ -353,10 +362,10 @@ void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
node->set_base_id(ReserveIdRange(WhileStatement::num_ids()));
node->set_first_yield_id(yield_count_);
node->set_first_suspend_id(suspend_count_);
Visit(node->cond());
Visit(node->body());
node->set_yield_count(yield_count_ - node->first_yield_id());
node->set_suspend_count(suspend_count_ - node->first_suspend_id());
}
@ -463,15 +472,22 @@ void AstNumberingVisitor::VisitGetIterator(GetIterator* node) {
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitImportCallExpression(
ImportCallExpression* node) {
IncrementNodeCount();
DisableFullCodegenAndCrankshaft(kDynamicImport);
Visit(node->argument());
}
void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
node->set_base_id(ReserveIdRange(ForInStatement::num_ids()));
Visit(node->enumerable()); // Not part of loop.
node->set_first_yield_id(yield_count_);
node->set_first_suspend_id(suspend_count_);
Visit(node->each());
Visit(node->body());
node->set_yield_count(yield_count_ - node->first_yield_id());
node->set_suspend_count(suspend_count_ - node->first_suspend_id());
ReserveFeedbackSlots(node);
}
@ -481,12 +497,12 @@ void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
DisableFullCodegenAndCrankshaft(kForOfStatement);
node->set_base_id(ReserveIdRange(ForOfStatement::num_ids()));
Visit(node->assign_iterator()); // Not part of loop.
node->set_first_yield_id(yield_count_);
node->set_first_suspend_id(suspend_count_);
Visit(node->next_result());
Visit(node->result_done());
Visit(node->assign_each());
Visit(node->body());
node->set_yield_count(yield_count_ - node->first_yield_id());
node->set_suspend_count(suspend_count_ - node->first_suspend_id());
}
@ -535,11 +551,11 @@ void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
DisableSelfOptimization();
node->set_base_id(ReserveIdRange(ForStatement::num_ids()));
if (node->init() != NULL) Visit(node->init()); // Not part of loop.
node->set_first_yield_id(yield_count_);
node->set_first_suspend_id(suspend_count_);
if (node->cond() != NULL) Visit(node->cond());
if (node->next() != NULL) Visit(node->next());
Visit(node->body());
node->set_yield_count(yield_count_ - node->first_yield_id());
node->set_suspend_count(suspend_count_ - node->first_suspend_id());
}
@ -616,6 +632,7 @@ void AstNumberingVisitor::VisitStatements(ZoneList<Statement*>* statements) {
if (statements == NULL) return;
for (int i = 0; i < statements->length(); i++) {
Visit(statements->at(i));
if (statements->at(i)->IsJump()) break;
}
}
@ -687,12 +704,16 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
LanguageModeScope language_mode_scope(this, node->language_mode());
if (collect_type_profile_) {
properties_.get_spec()->AddTypeProfileSlot();
}
VisitDeclarations(scope->declarations());
VisitStatements(node->body());
node->set_ast_properties(&properties_);
node->set_dont_optimize_reason(dont_optimize_reason());
node->set_yield_count(yield_count_);
node->set_suspend_count(suspend_count_);
if (FLAG_trace_opt) {
if (disable_crankshaft_reason_ != kNoReason) {
@ -714,12 +735,14 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
bool AstNumbering::Renumber(
uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
Compiler::EagerInnerFunctionLiterals* eager_literals) {
Compiler::EagerInnerFunctionLiterals* eager_literals,
bool collect_type_profile) {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
AstNumberingVisitor visitor(stack_limit, zone, eager_literals);
AstNumberingVisitor visitor(stack_limit, zone, eager_literals,
collect_type_profile);
return visitor.Renumber(function);
}
} // namespace internal

30
deps/v8/src/ast/ast-numbering.h

@ -22,29 +22,33 @@ template <typename T>
class ZoneVector;
namespace AstNumbering {
// Assign type feedback IDs, bailout IDs, and generator yield IDs to an AST node
// tree; perform catch prediction for TryStatements. If |eager_literals| is
// Assign type feedback IDs, bailout IDs, and generator suspend IDs to an AST
// node tree; perform catch prediction for TryStatements. If |eager_literals| is
// non-null, adds any eager inner literal functions into it.
bool Renumber(
uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>* eager_literals);
ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>* eager_literals,
bool collect_type_profile = false);
}
// Some details on yield IDs
// Some details on suspend IDs
// -------------------------
//
// In order to assist Ignition in generating bytecode for a generator function,
// we assign a unique number (the yield ID) to each Yield node in its AST. We
// also annotate loops with the number of yields they contain (loop.yield_count)
// and the smallest ID of those (loop.first_yield_id), and we annotate the
// function itself with the number of yields it contains (function.yield_count).
// we assign a unique number (the suspend ID) to each Suspend node in its AST.
// We also annotate loops with the number of suspends they contain
// (loop.suspend_count) and the smallest ID of those (loop.first_suspend_id),
// and we annotate the function itself with the number of suspends it contains
// (function.suspend_count).
//
// The way in which we choose the IDs is simply by enumerating the Yield nodes.
// The way in which we choose the IDs is simply by enumerating the Suspend
// nodes.
// Ignition relies on the following properties:
// - For each loop l and each yield y of l:
// l.first_yield_id <= y.yield_id < l.first_yield_id + l.yield_count
// - For the generator function f itself and each yield y of f:
// 0 <= y.yield_id < f.yield_count
// - For each loop l and each suspend s of l:
// l.first_suspend_id <=
// s.suspend_id < l.first_suspend_id + l.suspend_count
// - For the generator function f itself and each suspend s of f:
// 0 <= s.suspend_id < f.suspend_count
} // namespace internal
} // namespace v8
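
To make the invariants above concrete, here is a minimal, self-contained C++ sketch (not V8 code; the visitor and node types are simplified stand-ins) of suspend-ID assignment by enumeration, with each loop recording the half-open ID range of the suspends it contains:

#include <cassert>

struct Loop {
  int first_suspend_id = 0;
  int suspend_count = 0;
};

struct Numberer {
  int suspend_count = 0;  // function-wide counter (f.suspend_count)

  // Assigns s.suspend_id by simple enumeration, as VisitSuspend does.
  int VisitSuspend() { return suspend_count++; }

  // Brackets the body with first_suspend_id / suspend_count, as the
  // Visit{Do,While,For,ForIn,ForOf}Statement methods do.
  template <typename Body>
  Loop VisitLoop(Body body) {
    Loop l;
    l.first_suspend_id = suspend_count;
    body();  // visits any Suspend nodes nested in the loop
    l.suspend_count = suspend_count - l.first_suspend_id;
    return l;
  }
};

int main() {
  Numberer n;
  int s0 = n.VisitSuspend();  // a suspend before the loop gets ID 0
  Loop l = n.VisitLoop([&] {
    int s1 = n.VisitSuspend();
    // Loop invariant: first_suspend_id <= s1 < first_suspend_id + count.
    assert(s1 == 1);
  });
  assert(l.first_suspend_id == 1 && l.suspend_count == 1);
  assert(0 <= s0 && s0 < n.suspend_count);  // function-level invariant
  return 0;
}
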

9
deps/v8/src/ast/ast-traversal-visitor.h

@ -357,7 +357,7 @@ void AstTraversalVisitor<Subclass>::VisitAssignment(Assignment* expr) {
}
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitYield(Yield* expr) {
void AstTraversalVisitor<Subclass>::VisitSuspend(Suspend* expr) {
PROCESS_EXPRESSION(expr);
RECURSE_EXPRESSION(Visit(expr->generator_object()));
RECURSE_EXPRESSION(Visit(expr->expression()));
@ -476,6 +476,13 @@ void AstTraversalVisitor<Subclass>::VisitGetIterator(GetIterator* expr) {
RECURSE_EXPRESSION(Visit(expr->iterable()));
}
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitImportCallExpression(
ImportCallExpression* expr) {
PROCESS_EXPRESSION(expr);
RECURSE_EXPRESSION(Visit(expr->argument()));
}
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitSuperPropertyReference(
SuperPropertyReference* expr) {

4
deps/v8/src/ast/ast-types.cc

@ -186,7 +186,6 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
if (map == heap->boolean_map()) return kBoolean;
if (map == heap->the_hole_map()) return kHole;
DCHECK(map == heap->uninitialized_map() ||
map == heap->no_interceptor_result_sentinel_map() ||
map == heap->termination_exception_map() ||
map == heap->arguments_marker_map() ||
map == heap->optimized_out_map() ||
@ -209,6 +208,7 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case JS_DATE_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_ARRAY_TYPE:
@ -287,6 +287,7 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case PROPERTY_CELL_TYPE:
case MODULE_TYPE:
case MODULE_INFO_ENTRY_TYPE:
case ASYNC_GENERATOR_REQUEST_TYPE:
return kOtherInternal & kTaggedPointer;
// Remaining instance types are unsupported for now. If any of them do
@ -311,6 +312,7 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case ALIASED_ARGUMENTS_ENTRY_TYPE:
case DEBUG_INFO_TYPE:
case BREAK_POINT_INFO_TYPE:
case STACK_FRAME_INFO_TYPE:
case CELL_TYPE:
case WEAK_CELL_TYPE:
case PROTOTYPE_INFO_TYPE:

84
deps/v8/src/ast/ast-value-factory.cc

@ -84,21 +84,8 @@ class AstRawStringInternalizationKey : public HashTableKey {
const AstRawString* string_;
};
int AstString::length() const {
if (IsRawStringBits::decode(bit_field_)) {
return reinterpret_cast<const AstRawString*>(this)->length();
}
return reinterpret_cast<const AstConsString*>(this)->length();
}
void AstString::Internalize(Isolate* isolate) {
if (IsRawStringBits::decode(bit_field_)) {
return reinterpret_cast<AstRawString*>(this)->Internalize(isolate);
}
return reinterpret_cast<AstConsString*>(this)->Internalize(isolate);
}
void AstRawString::Internalize(Isolate* isolate) {
DCHECK(!has_string_);
if (literal_bytes_.length() == 0) {
set_string(isolate->factory()->empty_string());
} else {
@ -121,18 +108,26 @@ bool AstRawString::AsArrayIndex(uint32_t* index) const {
}
bool AstRawString::IsOneByteEqualTo(const char* data) const {
int length = static_cast<int>(strlen(data));
if (is_one_byte() && literal_bytes_.length() == length) {
const char* token = reinterpret_cast<const char*>(literal_bytes_.start());
return !strncmp(token, data, length);
if (!is_one_byte()) return false;
size_t length = static_cast<size_t>(literal_bytes_.length());
if (length != strlen(data)) return false;
return 0 == strncmp(reinterpret_cast<const char*>(literal_bytes_.start()),
data, length);
}
return false;
uint16_t AstRawString::FirstCharacter() const {
if (is_one_byte()) return literal_bytes_[0];
const uint16_t* c = reinterpret_cast<const uint16_t*>(literal_bytes_.start());
return *c;
}
bool AstRawString::Compare(void* a, void* b) {
const AstRawString* lhs = static_cast<AstRawString*>(a);
const AstRawString* rhs = static_cast<AstRawString*>(b);
DCHECK_EQ(lhs->hash(), rhs->hash());
if (lhs->length() != rhs->length()) return false;
const unsigned char* l = lhs->raw_data();
const unsigned char* r = rhs->raw_data();
@ -161,11 +156,20 @@ bool AstRawString::Compare(void* a, void* b) {
}
void AstConsString::Internalize(Isolate* isolate) {
// AstRawStrings are internalized before AstConsStrings so left and right are
// already internalized.
set_string(isolate->factory()
->NewConsString(left_->string(), right_->string())
.ToHandleChecked());
if (IsEmpty()) {
set_string(isolate->factory()->empty_string());
return;
}
// AstRawStrings are internalized before AstConsStrings, so
// AstRawString::string() will just work.
Handle<String> tmp(segment_.string->string());
for (AstConsString::Segment* current = segment_.next; current != nullptr;
current = current->next) {
tmp = isolate->factory()
->NewConsString(current->string->string(), tmp)
.ToHandleChecked();
}
set_string(tmp);
}
bool AstValue::IsPropertyName() const {
@ -285,22 +289,34 @@ const AstRawString* AstValueFactory::GetString(Handle<String> literal) {
return result;
}
const AstConsString* AstValueFactory::NewConsString(
const AstString* left, const AstString* right) {
// This Vector will be valid as long as the Collector is alive (meaning that
// the AstRawString will not be moved).
AstConsString* new_string = new (zone_) AstConsString(left, right);
CHECK(new_string != nullptr);
AddString(new_string);
AstConsString* AstValueFactory::NewConsString() {
AstConsString* new_string = new (zone_) AstConsString;
DCHECK_NOT_NULL(new_string);
AddConsString(new_string);
return new_string;
}
AstConsString* AstValueFactory::NewConsString(const AstRawString* str) {
return NewConsString()->AddString(zone_, str);
}
AstConsString* AstValueFactory::NewConsString(const AstRawString* str1,
const AstRawString* str2) {
return NewConsString()->AddString(zone_, str1)->AddString(zone_, str2);
}
void AstValueFactory::Internalize(Isolate* isolate) {
// Strings need to be internalized before values, because values refer to
// strings.
for (AstString* current = strings_; current != nullptr;) {
AstString* next = current->next();
for (AstRawString* current = strings_; current != nullptr;) {
AstRawString* next = current->next();
current->Internalize(isolate);
current = next;
}
// AstConsStrings refer to AstRawStrings.
for (AstConsString* current = cons_strings_; current != nullptr;) {
AstConsString* next = current->next();
current->Internalize(isolate);
current = next;
}

211
deps/v8/src/ast/ast-value-factory.h

@ -35,123 +35,144 @@
#include "src/isolate.h"
#include "src/utils.h"
// AstString, AstValue and AstValueFactory are for storing strings and values
// independent of the V8 heap and internalizing them later. During parsing,
// AstStrings and AstValues are created and stored outside the heap, in
// AstValueFactory. After parsing, the strings and values are internalized
// (moved into the V8 heap).
// Ast(Raw|Cons)String, AstValue and AstValueFactory are for storing strings and
// values independent of the V8 heap and internalizing them later. During
// parsing, they are created and stored outside the heap, in AstValueFactory.
// After parsing, the strings and values are internalized (moved into the V8
// heap).
namespace v8 {
namespace internal {
class AstString : public ZoneObject {
public:
explicit AstString(bool is_raw)
: next_(nullptr), bit_field_(IsRawStringBits::encode(is_raw)) {}
int length() const;
bool IsEmpty() const { return length() == 0; }
// Puts the string into the V8 heap.
void Internalize(Isolate* isolate);
// This function can be called after internalizing.
V8_INLINE Handle<String> string() const {
DCHECK_NOT_NULL(string_);
return Handle<String>(string_);
}
AstString* next() { return next_; }
AstString** next_location() { return &next_; }
protected:
void set_string(Handle<String> string) { string_ = string.location(); }
// {string_} is stored as String** instead of a Handle<String> so it can be
// stored in a union with {next_}.
union {
AstString* next_;
String** string_;
};
// Poor-man's virtual dispatch to AstRawString / AstConsString. Takes less
// memory.
class IsRawStringBits : public BitField<bool, 0, 1> {};
int bit_field_;
};
class AstRawString final : public AstString {
class AstRawString final : public ZoneObject {
public:
bool IsEmpty() const { return literal_bytes_.length() == 0; }
int length() const {
if (is_one_byte()) return literal_bytes_.length();
return literal_bytes_.length() / 2;
return is_one_byte() ? literal_bytes_.length()
: literal_bytes_.length() / 2;
}
int byte_length() const { return literal_bytes_.length(); }
bool AsArrayIndex(uint32_t* index) const;
bool IsOneByteEqualTo(const char* data) const;
uint16_t FirstCharacter() const;
void Internalize(Isolate* isolate);
bool AsArrayIndex(uint32_t* index) const;
// The string is not null-terminated; use length() to find out the length.
// Access the physical representation:
bool is_one_byte() const { return is_one_byte_; }
int byte_length() const { return literal_bytes_.length(); }
const unsigned char* raw_data() const {
return literal_bytes_.start();
}
bool is_one_byte() const { return IsOneByteBits::decode(bit_field_); }
bool IsOneByteEqualTo(const char* data) const;
uint16_t FirstCharacter() const {
if (is_one_byte()) return literal_bytes_[0];
const uint16_t* c =
reinterpret_cast<const uint16_t*>(literal_bytes_.start());
return *c;
}
static bool Compare(void* a, void* b);
// For storing AstRawStrings in a hash map.
uint32_t hash() const {
return hash_;
}
// This function can be called after internalizing.
V8_INLINE Handle<String> string() const {
DCHECK_NOT_NULL(string_);
DCHECK(has_string_);
return Handle<String>(string_);
}
private:
friend class AstRawStringInternalizationKey;
friend class AstStringConstants;
friend class AstValueFactory;
// Members accessed only by the AstValueFactory & related classes:
static bool Compare(void* a, void* b);
AstRawString(bool is_one_byte, const Vector<const byte>& literal_bytes,
uint32_t hash)
: AstString(true), hash_(hash), literal_bytes_(literal_bytes) {
bit_field_ |= IsOneByteBits::encode(is_one_byte);
: next_(nullptr),
literal_bytes_(literal_bytes),
hash_(hash),
is_one_byte_(is_one_byte) {}
AstRawString* next() {
DCHECK(!has_string_);
return next_;
}
AstRawString** next_location() {
DCHECK(!has_string_);
return &next_;
}
AstRawString() : AstString(true), hash_(0) {
bit_field_ |= IsOneByteBits::encode(true);
void set_string(Handle<String> string) {
DCHECK(!string.is_null());
DCHECK(!has_string_);
string_ = string.location();
#ifdef DEBUG
has_string_ = true;
#endif
}
class IsOneByteBits : public BitField<bool, IsRawStringBits::kNext, 1> {};
// {string_} is stored as String** instead of a Handle<String> so it can be
// stored in a union with {next_}.
union {
AstRawString* next_;
String** string_;
};
Vector<const byte> literal_bytes_; // Memory owned by Zone.
uint32_t hash_;
// Points to memory owned by Zone.
Vector<const byte> literal_bytes_;
bool is_one_byte_;
#ifdef DEBUG
// (Debug-only:) Verify the object life-cycle: some functions may only be
// called after internalization (that is, after a v8::internal::String has
// been set); some only before.
bool has_string_ = false;
#endif
};
class AstConsString final : public AstString {
class AstConsString final : public ZoneObject {
public:
AstConsString(const AstString* left, const AstString* right)
: AstString(false),
length_(left->length() + right->length()),
left_(left),
right_(right) {}
AstConsString* AddString(Zone* zone, const AstRawString* s) {
if (s->IsEmpty()) return this;
if (!IsEmpty()) {
// We're putting the new string to the head of the list, meaning
// the string segments will be in reverse order.
Segment* tmp = new (zone->New(sizeof(Segment))) Segment;
*tmp = segment_;
segment_.next = tmp;
}
segment_.string = s;
return this;
}
int length() const { return length_; }
bool IsEmpty() const {
DCHECK_IMPLIES(segment_.string == nullptr, segment_.next == nullptr);
DCHECK_IMPLIES(segment_.string != nullptr, !segment_.string->IsEmpty());
return segment_.string == nullptr;
}
void Internalize(Isolate* isolate);
V8_INLINE Handle<String> string() const {
DCHECK_NOT_NULL(string_);
return Handle<String>(string_);
}
private:
const int length_;
const AstString* left_;
const AstString* right_;
friend class AstValueFactory;
AstConsString() : next_(nullptr), segment_({nullptr, nullptr}) {}
AstConsString* next() const { return next_; }
AstConsString** next_location() { return &next_; }
// {string_} is stored as String** instead of a Handle<String> so it can be
// stored in a union with {next_}.
void set_string(Handle<String> string) { string_ = string.location(); }
union {
AstConsString* next_;
String** string_;
};
struct Segment {
const AstRawString* string;
AstConsString::Segment* next;
};
Segment segment_;
};
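
A minimal, self-contained sketch (plain heap strings standing in for Zone-allocated AstRawStrings) of the segment list above: AddString pushes each new piece at the head, so the segments sit in reverse order, and a back-to-front walk like the one in AstConsString::Internalize rebuilds the original left-to-right concatenation:

#include <cassert>
#include <string>

struct MiniConsString {
  struct Segment {
    std::string string;
    Segment* next;
  };

  Segment segment_{std::string(), nullptr};

  MiniConsString* AddString(const std::string& s) {
    if (s.empty()) return this;  // empty pieces are skipped, as in V8
    if (!segment_.string.empty()) {
      // New piece goes to the head, so segments end up in reverse order.
      segment_.next = new Segment(segment_);  // leaked; stands in for Zone
    }
    segment_.string = s;
    return this;
  }

  // Mirrors Internalize(): start from the newest piece and prepend each
  // older one, recovering left-to-right order.
  std::string Flatten() const {
    std::string tmp = segment_.string;
    for (const Segment* cur = segment_.next; cur != nullptr; cur = cur->next) {
      tmp = cur->string + tmp;
    }
    return tmp;
  }
};

int main() {
  MiniConsString s;
  s.AddString("get ")->AddString("")->AddString("foo");
  assert(s.Flatten() == "get foo");
  return 0;
}
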
enum class AstSymbol : uint8_t { kHomeObjectSymbol };
@ -310,6 +331,7 @@ class AstValue : public ZoneObject {
F(arguments, "arguments") \
F(async, "async") \
F(await, "await") \
F(boolean, "boolean") \
F(constructor, "constructor") \
F(default, "default") \
F(done, "done") \
@ -330,11 +352,15 @@ class AstValue : public ZoneObject {
F(native, "native") \
F(new_target, ".new.target") \
F(next, "next") \
F(number, "number") \
F(object, "object") \
F(proto, "__proto__") \
F(prototype, "prototype") \
F(return, "return") \
F(set_space, "set ") \
F(star_default_star, "*default*") \
F(string, "string") \
F(symbol, "symbol") \
F(this, "this") \
F(this_function, ".this_function") \
F(throw, "throw") \
@ -407,7 +433,10 @@ class AstValueFactory {
values_(nullptr),
strings_(nullptr),
strings_end_(&strings_),
cons_strings_(nullptr),
cons_strings_end_(&cons_strings_),
string_constants_(string_constants),
empty_cons_string_(nullptr),
zone_(zone),
hash_seed_(hash_seed) {
#define F(name) name##_ = nullptr;
@ -418,6 +447,7 @@ class AstValueFactory {
std::fill(one_character_strings_,
one_character_strings_ + arraysize(one_character_strings_),
nullptr);
empty_cons_string_ = NewConsString();
}
Zone* zone() const { return zone_; }
@ -433,17 +463,20 @@ class AstValueFactory {
return GetTwoByteStringInternal(literal);
}
const AstRawString* GetString(Handle<String> literal);
const AstConsString* NewConsString(const AstString* left,
const AstString* right);
V8_EXPORT_PRIVATE AstConsString* NewConsString();
AstConsString* NewConsString(const AstRawString* str);
AstConsString* NewConsString(const AstRawString* str1,
const AstRawString* str2);
V8_EXPORT_PRIVATE void Internalize(Isolate* isolate);
#define F(name, str) \
const AstRawString* name##_string() { \
const AstRawString* name##_string() const { \
return string_constants_->name##_string(); \
}
STRING_CONSTANTS(F)
#undef F
const AstConsString* empty_cons_string() const { return empty_cons_string_; }
V8_EXPORT_PRIVATE const AstValue* NewString(const AstRawString* string);
// A JavaScript symbol (ECMA-262 edition 6).
@ -467,14 +500,21 @@ class AstValueFactory {
values_ = value;
return value;
}
AstString* AddString(AstString* string) {
AstRawString* AddString(AstRawString* string) {
*strings_end_ = string;
strings_end_ = string->next_location();
return string;
}
AstConsString* AddConsString(AstConsString* string) {
*cons_strings_end_ = string;
cons_strings_end_ = string->next_location();
return string;
}
void ResetStrings() {
strings_ = nullptr;
strings_end_ = &strings_;
cons_strings_ = nullptr;
cons_strings_end_ = &cons_strings_;
}
V8_EXPORT_PRIVATE AstRawString* GetOneByteStringInternal(
Vector<const uint8_t> literal);
@ -490,11 +530,14 @@ class AstValueFactory {
// We need to keep track of strings_ in order since cons strings require their
// members to be internalized first.
AstString* strings_;
AstString** strings_end_;
AstRawString* strings_;
AstRawString** strings_end_;
AstConsString* cons_strings_;
AstConsString** cons_strings_end_;
// Holds constant string values which are shared across the isolate.
const AstStringConstants* string_constants_;
const AstConsString* empty_cons_string_;
// Caches for faster access: small numbers, one character lowercase strings
// (for minified code).

64
deps/v8/src/ast/ast.cc

@ -51,6 +51,7 @@ static const char* NameForNativeContextIntrinsicIndex(uint32_t idx) {
void AstNode::Print() { Print(Isolate::Current()); }
void AstNode::Print(Isolate* isolate) {
AllowHandleDereference allow_deref;
AstPrinter::PrintOut(isolate, this);
}
@ -163,7 +164,7 @@ void Expression::MarkTail() {
bool DoExpression::IsAnonymousFunctionDefinition() const {
// This is specifically to allow DoExpressions to represent ClassLiterals.
return represented_function_ != nullptr &&
represented_function_->raw_name()->length() == 0;
represented_function_->raw_name()->IsEmpty();
}
bool Statement::IsJump() const {
@ -249,16 +250,16 @@ static void AssignVectorSlots(Expression* expr, FeedbackVectorSpec* spec,
FeedbackSlot* out_slot) {
Property* property = expr->AsProperty();
LhsKind assign_type = Property::GetAssignType(property);
if ((assign_type == VARIABLE &&
expr->AsVariableProxy()->var()->IsUnallocated()) ||
assign_type == NAMED_PROPERTY || assign_type == KEYED_PROPERTY) {
// TODO(ishell): consider using ICSlotCache for variables here.
if (assign_type == KEYED_PROPERTY) {
*out_slot = spec->AddKeyedStoreICSlot(language_mode);
if (assign_type == VARIABLE &&
expr->AsVariableProxy()->var()->IsUnallocated()) {
*out_slot = spec->AddStoreGlobalICSlot(language_mode);
} else {
} else if (assign_type == NAMED_PROPERTY) {
*out_slot = spec->AddStoreICSlot(language_mode);
}
} else if (assign_type == KEYED_PROPERTY) {
*out_slot = spec->AddKeyedStoreICSlot(language_mode);
}
}
@ -681,8 +682,8 @@ bool ObjectLiteral::IsFastCloningSupported() const {
// literals don't support copy-on-write (COW) elements for now.
// TODO(mvstanton): make object literals support COW elements.
return fast_elements() && has_shallow_properties() &&
properties_count() <= ConstructorBuiltinsAssembler::
kMaximumClonedShallowObjectProperties;
properties_count() <=
ConstructorBuiltins::kMaximumClonedShallowObjectProperties;
}
ElementsKind ArrayLiteral::constant_elements_kind() const {
@ -786,7 +787,7 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
bool ArrayLiteral::IsFastCloningSupported() const {
return depth() <= 1 &&
values()->length() <=
ConstructorBuiltinsAssembler::kMaximumClonedShallowArrayElements;
ConstructorBuiltins::kMaximumClonedShallowArrayElements;
}
void ArrayLiteral::RewindSpreads() {
@ -883,6 +884,30 @@ void BinaryOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
}
}
static bool IsCommutativeOperationWithSmiLiteral(Token::Value op) {
// Add is not commutative due to potential for string addition.
return op == Token::MUL || op == Token::BIT_AND || op == Token::BIT_OR ||
op == Token::BIT_XOR;
}
// Check for the pattern: x + 1.
static bool MatchSmiLiteralOperation(Expression* left, Expression* right,
Expression** expr, Smi** literal) {
if (right->IsSmiLiteral()) {
*expr = left;
*literal = right->AsLiteral()->AsSmiLiteral();
return true;
}
return false;
}
bool BinaryOperation::IsSmiLiteralOperation(Expression** subexpr,
Smi** literal) {
return MatchSmiLiteralOperation(left_, right_, subexpr, literal) ||
(IsCommutativeOperationWithSmiLiteral(op()) &&
MatchSmiLiteralOperation(right_, left_, subexpr, literal));
}
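
A minimal, self-contained sketch (simplified expression type, not V8's AST) of the matching logic above: the Smi literal must be on the right unless the operator commutes for Smis, and ADD never commutes here because of possible string concatenation:

#include <cassert>
#include <optional>

enum class Op { kAdd, kSub, kMul, kBitAnd, kBitOr, kBitXor };

struct Expr {
  bool is_smi_literal;
  int smi_value;  // meaningful only when is_smi_literal
};

static bool IsCommutativeWithSmiLiteral(Op op) {
  return op == Op::kMul || op == Op::kBitAnd || op == Op::kBitOr ||
         op == Op::kBitXor;
}

// Returns the literal if <expr> op <smi> (or its commutative mirror)
// matches, loosely following BinaryOperation::IsSmiLiteralOperation.
std::optional<int> MatchSmiLiteralOperation(Op op, const Expr& left,
                                            const Expr& right) {
  if (right.is_smi_literal) return right.smi_value;
  if (IsCommutativeWithSmiLiteral(op) && left.is_smi_literal)
    return left.smi_value;
  return std::nullopt;
}

int main() {
  Expr x{false, 0}, one{true, 1};
  assert(MatchSmiLiteralOperation(Op::kAdd, x, one));   // x + 1 matches
  assert(!MatchSmiLiteralOperation(Op::kAdd, one, x));  // 1 + x does not
  assert(MatchSmiLiteralOperation(Op::kMul, one, x));   // 1 * x matches
  return 0;
}
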
static bool IsTypeof(Expression* expr) {
UnaryOperation* maybe_unary = expr->AsUnaryOperation();
return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF;
@ -904,24 +929,21 @@ void CompareOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
}
// Check for the pattern: typeof <expression> equals <string literal>.
static bool MatchLiteralCompareTypeof(Expression* left,
Token::Value op,
Expression* right,
Expression** expr,
Handle<String>* check) {
static bool MatchLiteralCompareTypeof(Expression* left, Token::Value op,
Expression* right, Expression** expr,
Literal** literal) {
if (IsTypeof(left) && right->IsStringLiteral() && Token::IsEqualityOp(op)) {
*expr = left->AsUnaryOperation()->expression();
*check = Handle<String>::cast(right->AsLiteral()->value());
*literal = right->AsLiteral();
return true;
}
return false;
}
bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
Handle<String>* check) {
return MatchLiteralCompareTypeof(left_, op(), right_, expr, check) ||
MatchLiteralCompareTypeof(right_, op(), left_, expr, check);
Literal** literal) {
return MatchLiteralCompareTypeof(left_, op(), right_, expr, literal) ||
MatchLiteralCompareTypeof(right_, op(), left_, expr, literal);
}

173
deps/v8/src/ast/ast.h

@ -91,7 +91,7 @@ namespace internal {
V(Conditional) \
V(VariableProxy) \
V(Literal) \
V(Yield) \
V(Suspend) \
V(Throw) \
V(CallRuntime) \
V(UnaryOperation) \
@ -105,7 +105,8 @@ namespace internal {
V(EmptyParentheses) \
V(GetIterator) \
V(DoExpression) \
V(RewritableExpression)
V(RewritableExpression) \
V(ImportCallExpression)
#define AST_NODE_LIST(V) \
DECLARATION_NODE_LIST(V) \
@ -563,11 +564,11 @@ class IterationStatement : public BreakableStatement {
Statement* body() const { return body_; }
void set_body(Statement* s) { body_ = s; }
int yield_count() const { return yield_count_; }
int first_yield_id() const { return first_yield_id_; }
void set_yield_count(int yield_count) { yield_count_ = yield_count; }
void set_first_yield_id(int first_yield_id) {
first_yield_id_ = first_yield_id;
int suspend_count() const { return suspend_count_; }
int first_suspend_id() const { return first_suspend_id_; }
void set_suspend_count(int suspend_count) { suspend_count_ = suspend_count; }
void set_first_suspend_id(int first_suspend_id) {
first_suspend_id_ = first_suspend_id;
}
static int num_ids() { return parent_num_ids() + 1; }
@ -581,8 +582,8 @@ class IterationStatement : public BreakableStatement {
NodeType type)
: BreakableStatement(labels, TARGET_FOR_ANONYMOUS, pos, type),
body_(NULL),
yield_count_(0),
first_yield_id_(0) {}
suspend_count_(0),
first_suspend_id_(0) {}
static int parent_num_ids() { return BreakableStatement::num_ids(); }
void Initialize(Statement* body) { body_ = body; }
@ -594,8 +595,8 @@ class IterationStatement : public BreakableStatement {
Statement* body_;
Label continue_target_;
int yield_count_;
int first_yield_id_;
int suspend_count_;
int first_suspend_id_;
};
@ -1101,7 +1102,6 @@ class TryStatement : public Statement {
class TryCatchStatement final : public TryStatement {
public:
Scope* scope() { return scope_; }
Variable* variable() { return variable_; }
Block* catch_block() const { return catch_block_; }
void set_catch_block(Block* b) { catch_block_ = b; }
@ -1122,18 +1122,15 @@ class TryCatchStatement final : public TryStatement {
private:
friend class AstNodeFactory;
TryCatchStatement(Block* try_block, Scope* scope, Variable* variable,
Block* catch_block,
TryCatchStatement(Block* try_block, Scope* scope, Block* catch_block,
HandlerTable::CatchPrediction catch_prediction, int pos)
: TryStatement(try_block, pos, kTryCatchStatement),
scope_(scope),
variable_(variable),
catch_block_(catch_block) {
catch_prediction_ = catch_prediction;
}
Scope* scope_;
Variable* variable_;
Block* catch_block_;
};
@ -1205,6 +1202,11 @@ class Literal final : public Expression {
return value_->AsString();
}
Smi* AsSmiLiteral() {
DCHECK(IsSmiLiteral());
return raw_value()->AsSmi();
}
bool ToBooleanIsTrue() const { return raw_value()->BooleanValue(); }
bool ToBooleanIsFalse() const { return !raw_value()->BooleanValue(); }
@ -2138,6 +2140,11 @@ class BinaryOperation final : public Expression {
TypeFeedbackId BinaryOperationFeedbackId() const {
return TypeFeedbackId(local_id(1));
}
// Returns true if one side is a Smi literal, returning the other side's
// sub-expression in |subexpr| and the literal Smi in |literal|.
bool IsSmiLiteralOperation(Expression** subexpr, Smi** literal);
Maybe<int> fixed_right_arg() const {
return has_fixed_right_arg_ ? Just(fixed_right_arg_value_) : Nothing<int>();
}
@ -2279,7 +2286,7 @@ class CompareOperation final : public Expression {
FeedbackSlot CompareOperationFeedbackSlot() const { return feedback_slot_; }
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
bool IsLiteralCompareTypeof(Expression** expr, Literal** literal);
bool IsLiteralCompareUndefined(Expression** expr);
bool IsLiteralCompareNull(Expression** expr);
@ -2493,10 +2500,16 @@ class RewritableExpression final : public Expression {
: public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
};
// There are several types of Suspend node:
//
// Yield
// YieldStar
// Await
//
// Our Yield is different from the JS yield in that it "returns" its argument as
// is, without wrapping it in an iterator result object. Such wrapping, if
// desired, must be done beforehand (see the parser).
class Yield final : public Expression {
class Suspend final : public Expression {
public:
enum OnException { kOnExceptionThrow, kOnExceptionRethrow };
@ -2508,30 +2521,59 @@ class Yield final : public Expression {
bool rethrow_on_exception() const {
return on_exception() == kOnExceptionRethrow;
}
int yield_id() const { return yield_id_; }
int suspend_id() const { return suspend_id_; }
SuspendFlags flags() const { return FlagsField::decode(bit_field_); }
SuspendFlags suspend_type() const {
return flags() & SuspendFlags::kSuspendTypeMask;
}
SuspendFlags generator_type() const {
return flags() & SuspendFlags::kGeneratorTypeMask;
}
bool is_yield() const { return suspend_type() == SuspendFlags::kYield; }
bool is_yield_star() const {
return suspend_type() == SuspendFlags::kYieldStar;
}
bool is_await() const { return suspend_type() == SuspendFlags::kAwait; }
bool is_async_generator() const {
return generator_type() == SuspendFlags::kAsyncGenerator;
}
inline bool IsNonInitialAsyncGeneratorYield() const {
// Return true if is_async_generator() && !is_await() && suspend_id() > 0
return suspend_id() > 0 && (flags() & SuspendFlags::kAsyncGeneratorAwait) ==
SuspendFlags::kAsyncGenerator;
}
void set_generator_object(Expression* e) { generator_object_ = e; }
void set_expression(Expression* e) { expression_ = e; }
void set_yield_id(int yield_id) { yield_id_ = yield_id; }
void set_suspend_id(int id) { suspend_id_ = id; }
void set_suspend_type(SuspendFlags type) {
DCHECK_EQ(0, static_cast<int>(type & ~SuspendFlags::kSuspendTypeMask));
bit_field_ = FlagsField::update(bit_field_, type);
}
private:
friend class AstNodeFactory;
Yield(Expression* generator_object, Expression* expression, int pos,
OnException on_exception)
: Expression(pos, kYield),
yield_id_(-1),
Suspend(Expression* generator_object, Expression* expression, int pos,
OnException on_exception, SuspendFlags flags)
: Expression(pos, kSuspend),
suspend_id_(-1),
generator_object_(generator_object),
expression_(expression) {
bit_field_ |= OnExceptionField::encode(on_exception);
bit_field_ |=
OnExceptionField::encode(on_exception) | FlagsField::encode(flags);
}
int yield_id_;
int suspend_id_;
Expression* generator_object_;
Expression* expression_;
class OnExceptionField
: public BitField<OnException, Expression::kNextBitFieldIndex, 1> {};
class FlagsField
: public BitField<SuspendFlags, OnExceptionField::kNext,
static_cast<int>(SuspendFlags::kBitWidth)> {};
};
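
A minimal sketch of the flags-word decoding above, with made-up bit values rather than V8's actual SuspendFlags encoding: one small integer carries both the suspend type and the generator type, each recovered with a mask:

#include <cassert>
#include <cstdint>

// Hypothetical bit layout: low two bits = suspend type, third bit =
// generator type. V8's real SuspendFlags encoding may differ.
enum MiniSuspendFlags : uint8_t {
  kYield = 0x0,
  kYieldStar = 0x1,
  kAwait = 0x2,
  kSuspendTypeMask = 0x3,

  kGenerator = 0x0,
  kAsyncGenerator = 0x4,
  kGeneratorTypeMask = 0x4,
};

int main() {
  uint8_t flags = kAwait | kAsyncGenerator;
  assert((flags & kSuspendTypeMask) == kAwait);             // is_await()
  assert((flags & kGeneratorTypeMask) == kAsyncGenerator);  // is_async_generator()
  return 0;
}
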
@ -2566,8 +2608,8 @@ class FunctionLiteral final : public Expression {
enum EagerCompileHint { kShouldEagerCompile, kShouldLazyCompile };
Handle<String> name() const { return raw_name_->string(); }
const AstString* raw_name() const { return raw_name_; }
void set_raw_name(const AstString* name) { raw_name_ = name; }
const AstConsString* raw_name() const { return raw_name_; }
void set_raw_name(const AstConsString* name) { raw_name_ = name; }
DeclarationScope* scope() const { return scope_; }
ZoneList<Statement*>* body() const { return body_; }
void set_function_token_position(int pos) { function_token_position_ = pos; }
@ -2593,7 +2635,11 @@ class FunctionLiteral final : public Expression {
static bool NeedsHomeObject(Expression* expr);
int expected_property_count() { return expected_property_count_; }
int expected_property_count() {
// Not valid for lazy functions.
DCHECK_NOT_NULL(body_);
return expected_property_count_;
}
int parameter_count() { return parameter_count_; }
int function_length() { return function_length_; }
@ -2626,7 +2672,7 @@ class FunctionLiteral final : public Expression {
raw_inferred_name_ = NULL;
}
void set_raw_inferred_name(const AstString* raw_inferred_name) {
void set_raw_inferred_name(const AstConsString* raw_inferred_name) {
DCHECK(raw_inferred_name != NULL);
raw_inferred_name_ = raw_inferred_name;
DCHECK(inferred_name_.is_null());
@ -2637,6 +2683,8 @@ class FunctionLiteral final : public Expression {
void set_pretenure() { bit_field_ = Pretenure::update(bit_field_, true); }
bool has_duplicate_parameters() const {
// Not valid for lazy functions.
DCHECK_NOT_NULL(body_);
return HasDuplicateParameters::decode(bit_field_);
}
@ -2682,8 +2730,8 @@ class FunctionLiteral final : public Expression {
return is_anonymous_expression();
}
int yield_count() { return yield_count_; }
void set_yield_count(int yield_count) { yield_count_ = yield_count; }
int suspend_count() { return suspend_count_; }
void set_suspend_count(int suspend_count) { suspend_count_ = suspend_count; }
int return_position() {
return std::max(start_position(), end_position() - (has_braces_ ? 1 : 0));
@ -2697,7 +2745,7 @@ class FunctionLiteral final : public Expression {
private:
friend class AstNodeFactory;
FunctionLiteral(Zone* zone, const AstString* name,
FunctionLiteral(Zone* zone, const AstRawString* name,
AstValueFactory* ast_value_factory, DeclarationScope* scope,
ZoneList<Statement*>* body, int expected_property_count,
int parameter_count, int function_length,
@ -2710,12 +2758,12 @@ class FunctionLiteral final : public Expression {
parameter_count_(parameter_count),
function_length_(function_length),
function_token_position_(kNoSourcePosition),
yield_count_(0),
suspend_count_(0),
has_braces_(has_braces),
raw_name_(name),
raw_name_(ast_value_factory->NewConsString(name)),
scope_(scope),
body_(body),
raw_inferred_name_(ast_value_factory->empty_string()),
raw_inferred_name_(ast_value_factory->empty_cons_string()),
ast_properties_(zone),
function_literal_id_(function_literal_id) {
bit_field_ |= FunctionTypeBits::encode(function_type) |
@ -2725,6 +2773,7 @@ class FunctionLiteral final : public Expression {
ShouldNotBeUsedOnceHintField::encode(false) |
DontOptimizeReasonField::encode(kNoReason);
if (eager_compile_hint == kShouldEagerCompile) SetShouldEagerCompile();
DCHECK_EQ(body == nullptr, expected_property_count < 0);
}
class FunctionTypeBits
@ -2741,13 +2790,13 @@ class FunctionLiteral final : public Expression {
int parameter_count_;
int function_length_;
int function_token_position_;
int yield_count_;
int suspend_count_;
bool has_braces_;
const AstString* raw_name_;
const AstConsString* raw_name_;
DeclarationScope* scope_;
ZoneList<Statement*>* body_;
const AstString* raw_inferred_name_;
const AstConsString* raw_inferred_name_;
Handle<String> inferred_name_;
AstProperties ast_properties_;
int function_literal_id_;
@ -2925,6 +2974,21 @@ class SuperCallReference final : public Expression {
VariableProxy* this_function_var_;
};
// This AST Node is used to represent a dynamic import call --
// import(argument).
class ImportCallExpression final : public Expression {
public:
Expression* argument() const { return argument_; }
void set_argument(Expression* argument) { argument_ = argument; }
private:
friend class AstNodeFactory;
ImportCallExpression(Expression* argument, int pos)
: Expression(pos, kImportCallExpression), argument_(argument) {}
Expression* argument_;
};
// This class is produced when parsing the () in arrow functions without any
// arguments and is not actually a valid expression.
@ -3245,37 +3309,32 @@ class AstNodeFactory final BASE_EMBEDDED {
}
TryCatchStatement* NewTryCatchStatement(Block* try_block, Scope* scope,
Variable* variable,
Block* catch_block, int pos) {
return new (zone_) TryCatchStatement(
try_block, scope, variable, catch_block, HandlerTable::CAUGHT, pos);
return new (zone_) TryCatchStatement(try_block, scope, catch_block,
HandlerTable::CAUGHT, pos);
}
TryCatchStatement* NewTryCatchStatementForReThrow(Block* try_block,
Scope* scope,
Variable* variable,
Block* catch_block,
int pos) {
return new (zone_) TryCatchStatement(
try_block, scope, variable, catch_block, HandlerTable::UNCAUGHT, pos);
return new (zone_) TryCatchStatement(try_block, scope, catch_block,
HandlerTable::UNCAUGHT, pos);
}
TryCatchStatement* NewTryCatchStatementForDesugaring(Block* try_block,
Scope* scope,
Variable* variable,
Block* catch_block,
int pos) {
return new (zone_) TryCatchStatement(
try_block, scope, variable, catch_block, HandlerTable::DESUGARING, pos);
return new (zone_) TryCatchStatement(try_block, scope, catch_block,
HandlerTable::DESUGARING, pos);
}
TryCatchStatement* NewTryCatchStatementForAsyncAwait(Block* try_block,
Scope* scope,
Variable* variable,
Block* catch_block,
int pos) {
return new (zone_)
TryCatchStatement(try_block, scope, variable, catch_block,
return new (zone_) TryCatchStatement(try_block, scope, catch_block,
HandlerTable::ASYNC_AWAIT, pos);
}
@ -3481,10 +3540,12 @@ class AstNodeFactory final BASE_EMBEDDED {
return assign;
}
Yield* NewYield(Expression* generator_object, Expression* expression, int pos,
Yield::OnException on_exception) {
Suspend* NewSuspend(Expression* generator_object, Expression* expression,
int pos, Suspend::OnException on_exception,
SuspendFlags flags) {
if (!expression) expression = NewUndefinedLiteral(pos);
return new (zone_) Yield(generator_object, expression, pos, on_exception);
return new (zone_)
Suspend(generator_object, expression, pos, on_exception, flags);
}
Throw* NewThrow(Expression* exception, int pos) {
@ -3578,6 +3639,10 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) GetIterator(iterable, hint, pos);
}
ImportCallExpression* NewImportCallExpression(Expression* args, int pos) {
return new (zone_) ImportCallExpression(args, pos);
}
Zone* zone() const { return zone_; }
void set_zone(Zone* zone) { zone_ = zone; }

2
deps/v8/src/ast/context-slot-cache.h

@ -38,7 +38,7 @@ class ContextSlotCache {
for (int i = 0; i < kLength; ++i) {
keys_[i].data = NULL;
keys_[i].name = NULL;
values_[i] = kNotFound;
values_[i] = static_cast<uint32_t>(kNotFound);
}
}

3
deps/v8/src/ast/modules.h

@ -214,8 +214,9 @@ class ModuleDescriptor : public ZoneObject {
int AddModuleRequest(const AstRawString* specifier) {
DCHECK_NOT_NULL(specifier);
int module_requests_count = static_cast<int>(module_requests_.size());
auto it = module_requests_
.insert(std::make_pair(specifier, module_requests_.size()))
.insert(std::make_pair(specifier, module_requests_count))
.first;
return it->second;
}

73
deps/v8/src/ast/prettyprinter.cc

@ -254,9 +254,7 @@ void CallPrinter::VisitAssignment(Assignment* node) {
Find(node->value());
}
void CallPrinter::VisitYield(Yield* node) { Find(node->expression()); }
void CallPrinter::VisitSuspend(Suspend* node) { Find(node->expression()); }
void CallPrinter::VisitThrow(Throw* node) { Find(node->exception()); }
@ -372,8 +370,23 @@ void CallPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
}
void CallPrinter::VisitGetIterator(GetIterator* node) {
Print("GetIterator(");
// CallPrinter is used by RenderCallSite() in runtime-internal.cc, and the
// GetIterator node results in a Call to either [@@iterator] or
// [@@asyncIterator]. It's unknown which call this error refers to, so we
// assume it's the first call.
bool was_found = !found_ && node->position() == position_;
if (was_found) {
found_ = true;
}
Find(node->iterable(), true);
Print(node->hint() == IteratorType::kNormal ? "[Symbol.iterator]"
: "[Symbol.asyncIterator]");
if (was_found) done_ = true;
}
void CallPrinter::VisitImportCallExpression(ImportCallExpression* node) {
Print("ImportCall(");
Find(node->argument(), true);
Print(")");
}
@ -623,7 +636,8 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info,
} else {
EmbeddedVector<char, 256> buf;
int pos =
SNPrintF(buf, "%s (mode = %s", info, VariableMode2String(var->mode()));
SNPrintF(buf, "%s (%p) (mode = %s", info, reinterpret_cast<void*>(var),
VariableMode2String(var->mode()));
SNPrintF(buf + pos, ")");
PrintLiteralIndented(buf.start(), value, true);
}
@ -649,8 +663,8 @@ const char* AstPrinter::PrintProgram(FunctionLiteral* program) {
{ IndentedScope indent(this, "FUNC", program->position());
PrintIndented("KIND");
Print(" %d\n", program->kind());
PrintIndented("YIELD COUNT");
Print(" %d\n", program->yield_count());
PrintIndented("SUSPEND COUNT");
Print(" %d\n", program->suspend_count());
PrintLiteralIndented("NAME", program->name(), true);
PrintLiteralIndented("INFERRED NAME", program->inferred_name(), true);
PrintParameters(program->scope());
@ -801,8 +815,8 @@ void AstPrinter::VisitCaseClause(CaseClause* clause) {
void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
IndentedScope indent(this, "DO", node->position());
PrintIndented("YIELD COUNT");
Print(" %d\n", node->yield_count());
PrintIndented("SUSPEND COUNT");
Print(" %d\n", node->suspend_count());
PrintLabelsIndented(node->labels());
PrintIndentedVisit("BODY", node->body());
PrintIndentedVisit("COND", node->cond());
@ -811,8 +825,8 @@ void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
void AstPrinter::VisitWhileStatement(WhileStatement* node) {
IndentedScope indent(this, "WHILE", node->position());
PrintIndented("YIELD COUNT");
Print(" %d\n", node->yield_count());
PrintIndented("SUSPEND COUNT");
Print(" %d\n", node->suspend_count());
PrintLabelsIndented(node->labels());
PrintIndentedVisit("COND", node->cond());
PrintIndentedVisit("BODY", node->body());
@ -821,8 +835,8 @@ void AstPrinter::VisitWhileStatement(WhileStatement* node) {
void AstPrinter::VisitForStatement(ForStatement* node) {
IndentedScope indent(this, "FOR", node->position());
PrintIndented("YIELD COUNT");
Print(" %d\n", node->yield_count());
PrintIndented("SUSPEND COUNT");
Print(" %d\n", node->suspend_count());
PrintLabelsIndented(node->labels());
if (node->init()) PrintIndentedVisit("INIT", node->init());
if (node->cond()) PrintIndentedVisit("COND", node->cond());
@ -833,8 +847,8 @@ void AstPrinter::VisitForStatement(ForStatement* node) {
void AstPrinter::VisitForInStatement(ForInStatement* node) {
IndentedScope indent(this, "FOR IN", node->position());
PrintIndented("YIELD COUNT");
Print(" %d\n", node->yield_count());
PrintIndented("SUSPEND COUNT");
Print(" %d\n", node->suspend_count());
PrintIndentedVisit("FOR", node->each());
PrintIndentedVisit("IN", node->enumerable());
PrintIndentedVisit("BODY", node->body());
@ -843,8 +857,8 @@ void AstPrinter::VisitForInStatement(ForInStatement* node) {
void AstPrinter::VisitForOfStatement(ForOfStatement* node) {
IndentedScope indent(this, "FOR OF", node->position());
PrintIndented("YIELD COUNT");
Print(" %d\n", node->yield_count());
PrintIndented("SUSPEND COUNT");
Print(" %d\n", node->suspend_count());
PrintIndentedVisit("INIT", node->assign_iterator());
PrintIndentedVisit("NEXT", node->next_result());
PrintIndentedVisit("DONE", node->result_done());
@ -856,9 +870,8 @@ void AstPrinter::VisitForOfStatement(ForOfStatement* node) {
void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
IndentedScope indent(this, "TRY CATCH", node->position());
PrintTryStatement(node);
PrintLiteralWithModeIndented("CATCHVAR",
node->variable(),
node->variable()->name());
PrintLiteralWithModeIndented("CATCHVAR", node->scope()->catch_variable(),
node->scope()->catch_variable()->name());
PrintIndentedVisit("CATCH", node->catch_block());
}
@ -1095,10 +1108,9 @@ void AstPrinter::VisitAssignment(Assignment* node) {
Visit(node->value());
}
void AstPrinter::VisitYield(Yield* node) {
void AstPrinter::VisitSuspend(Suspend* node) {
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "YIELD id %d", node->yield_id());
SNPrintF(buf, "SUSPEND id %d", node->suspend_id());
IndentedScope indent(this, buf.start(), node->position());
Visit(node->expression());
}
@ -1146,14 +1158,8 @@ void AstPrinter::VisitCallNew(CallNew* node) {
void AstPrinter::VisitCallRuntime(CallRuntime* node) {
EmbeddedVector<char, 128> buf;
if (node->is_jsruntime()) {
SNPrintF(
buf, "CALL RUNTIME %s code = %p", node->debug_name(),
static_cast<void*>(isolate_->context()->get(node->context_index())));
} else {
SNPrintF(buf, "CALL RUNTIME %s", node->debug_name());
}
SNPrintF(buf, "CALL RUNTIME %s%s", node->debug_name(),
node->is_jsruntime() ? " (JS function)" : "");
IndentedScope indent(this, buf.start(), node->position());
PrintArguments(node->arguments());
}
@ -1203,6 +1209,11 @@ void AstPrinter::VisitGetIterator(GetIterator* node) {
Visit(node->iterable());
}
void AstPrinter::VisitImportCallExpression(ImportCallExpression* node) {
IndentedScope indent(this, "IMPORT-CALL", node->position());
Visit(node->argument());
}
void AstPrinter::VisitThisFunction(ThisFunction* node) {
IndentedScope indent(this, "THIS-FUNCTION", node->position());
}

252
deps/v8/src/ast/scopes.cc

@ -112,11 +112,12 @@ void SloppyBlockFunctionMap::Delegate::set_statement(Statement* statement) {
}
SloppyBlockFunctionMap::SloppyBlockFunctionMap(Zone* zone)
: ZoneHashMap(8, ZoneAllocationPolicy(zone)) {}
: ZoneHashMap(8, ZoneAllocationPolicy(zone)), count_(0) {}
void SloppyBlockFunctionMap::Declare(
Zone* zone, const AstRawString* name,
SloppyBlockFunctionMap::Delegate* delegate) {
void SloppyBlockFunctionMap::Declare(Zone* zone, const AstRawString* name,
Scope* scope,
SloppyBlockFunctionStatement* statement) {
auto* delegate = new (zone) Delegate(scope, statement, count_++);
// AstRawStrings are unambiguous, i.e., the same string is always represented
// by the same AstRawString*.
Entry* p =
@ -155,14 +156,22 @@ Scope::Snapshot::Snapshot(Scope* scope)
top_inner_scope_(scope->inner_scope_),
top_unresolved_(scope->unresolved_),
top_local_(scope->GetClosureScope()->locals_.end()),
top_decl_(scope->GetClosureScope()->decls_.end()) {}
top_decl_(scope->GetClosureScope()->decls_.end()),
outer_scope_calls_eval_(scope->scope_calls_eval_) {
// Reset in order to record eval calls during this Snapshot's lifetime.
outer_scope_->scope_calls_eval_ = false;
}
Scope::Snapshot::~Snapshot() {
// Restore previous calls_eval bit if needed.
if (outer_scope_calls_eval_) {
outer_scope_->scope_calls_eval_ = true;
}
}
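
A minimal sketch (a generic RAII helper, not V8's Scope machinery) of the save/reset/restore pattern above: the constructor stashes and clears the calls_eval bit so the snapshot records only eval calls made during its lifetime, and the destructor restores the outer bit if it was set before:

#include <cassert>

class EvalSnapshot {
 public:
  explicit EvalSnapshot(bool* calls_eval)
      : calls_eval_(calls_eval), outer_calls_eval_(*calls_eval) {
    *calls_eval_ = false;  // reset in order to record fresh eval calls
  }
  ~EvalSnapshot() {
    // Restore the previous bit if it was set before the snapshot.
    if (outer_calls_eval_) *calls_eval_ = true;
  }
  bool SawEval() const { return *calls_eval_; }

 private:
  bool* calls_eval_;
  bool outer_calls_eval_;
};

int main() {
  bool scope_calls_eval = true;  // outer scope already saw an eval
  {
    EvalSnapshot snap(&scope_calls_eval);
    assert(!scope_calls_eval);  // cleared for recording
    scope_calls_eval = true;    // an eval occurs during the snapshot
    assert(snap.SawEval());
  }
  assert(scope_calls_eval);  // outer bit restored on destruction
  return 0;
}
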
DeclarationScope::DeclarationScope(Zone* zone,
AstValueFactory* ast_value_factory)
: Scope(zone),
function_kind_(kNormalFunction),
params_(4, zone),
sloppy_block_function_map_(zone) {
: Scope(zone), function_kind_(kNormalFunction), params_(4, zone) {
DCHECK_EQ(scope_type_, SCRIPT_SCOPE);
SetDefaults();
@ -176,8 +185,7 @@ DeclarationScope::DeclarationScope(Zone* zone, Scope* outer_scope,
FunctionKind function_kind)
: Scope(zone, outer_scope, scope_type),
function_kind_(function_kind),
params_(4, zone),
sloppy_block_function_map_(zone) {
params_(4, zone) {
DCHECK_NE(scope_type, SCRIPT_SCOPE);
SetDefaults();
asm_function_ = outer_scope_->IsAsmModule();
@ -193,10 +201,11 @@ ModuleScope::ModuleScope(DeclarationScope* script_scope,
DeclareThis(ast_value_factory);
}
ModuleScope::ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
ModuleScope::ModuleScope(Handle<ScopeInfo> scope_info,
AstValueFactory* avfactory)
: DeclarationScope(avfactory->zone(), MODULE_SCOPE, scope_info) {
Zone* zone = avfactory->zone();
Isolate* isolate = scope_info->GetIsolate();
Handle<ModuleInfo> module_info(scope_info->ModuleDescriptorInfo(), isolate);
set_language_mode(STRICT);
@ -254,20 +263,22 @@ Scope::Scope(Zone* zone, ScopeType scope_type, Handle<ScopeInfo> scope_info)
set_language_mode(scope_info->language_mode());
num_heap_slots_ = scope_info->ContextLength();
DCHECK_LE(Context::MIN_CONTEXT_SLOTS, num_heap_slots_);
// We don't really need to use the preparsed scope data; this is just to
// shorten the recursion in SetMustUsePreParsedScopeData.
must_use_preparsed_scope_data_ = true;
}
DeclarationScope::DeclarationScope(Zone* zone, ScopeType scope_type,
Handle<ScopeInfo> scope_info)
: Scope(zone, scope_type, scope_info),
function_kind_(scope_info->function_kind()),
params_(0, zone),
sloppy_block_function_map_(zone) {
params_(0, zone) {
DCHECK_NE(scope_type, SCRIPT_SCOPE);
SetDefaults();
}
Scope::Scope(Zone* zone, const AstRawString* catch_variable_name,
Handle<ScopeInfo> scope_info)
MaybeAssignedFlag maybe_assigned, Handle<ScopeInfo> scope_info)
: zone_(zone),
outer_scope_(nullptr),
variables_(zone),
@ -280,7 +291,8 @@ Scope::Scope(Zone* zone, const AstRawString* catch_variable_name,
// Cache the catch variable, even though it's also available via the
// scope_info, as the parser expects that a catch scope always has the catch
// variable as first and only variable.
Variable* variable = Declare(zone, catch_variable_name, VAR);
Variable* variable = Declare(zone, catch_variable_name, VAR, NORMAL_VARIABLE,
kCreatedInitialized, maybe_assigned);
AllocateHeapSlot(variable);
}
@ -293,6 +305,7 @@ void DeclarationScope::SetDefaults() {
has_arguments_parameter_ = false;
scope_uses_super_property_ = false;
has_rest_ = false;
sloppy_block_function_map_ = nullptr;
receiver_ = nullptr;
new_target_ = nullptr;
function_ = nullptr;
@ -300,6 +313,7 @@ void DeclarationScope::SetDefaults() {
rare_data_ = nullptr;
should_eager_compile_ = false;
was_lazily_parsed_ = false;
is_skipped_function_ = false;
#ifdef DEBUG
DeclarationScope* outer_declaration_scope =
outer_scope_ ? outer_scope_->GetDeclarationScope() : nullptr;
@ -336,6 +350,8 @@ void Scope::SetDefaults() {
force_context_allocation_ = false;
is_declaration_scope_ = false;
must_use_preparsed_scope_data_ = false;
}
bool Scope::HasSimpleParameters() {
@ -369,8 +385,7 @@ bool Scope::IsAsmFunction() const {
return is_function_scope() && AsDeclarationScope()->asm_function();
}
Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
ScopeInfo* scope_info,
Scope* Scope::DeserializeScopeChain(Zone* zone, ScopeInfo* scope_info,
DeclarationScope* script_scope,
AstValueFactory* ast_value_factory,
DeserializationMode deserialization_mode) {
@ -415,15 +430,20 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
outer_scope = new (zone) Scope(zone, BLOCK_SCOPE, handle(scope_info));
}
} else if (scope_info->scope_type() == MODULE_SCOPE) {
outer_scope = new (zone)
ModuleScope(isolate, handle(scope_info), ast_value_factory);
outer_scope =
new (zone) ModuleScope(handle(scope_info), ast_value_factory);
} else {
DCHECK_EQ(scope_info->scope_type(), CATCH_SCOPE);
DCHECK_EQ(scope_info->LocalCount(), 1);
String* name = scope_info->LocalName(0);
outer_scope = new (zone)
Scope(zone, ast_value_factory->GetString(handle(name, isolate)),
handle(scope_info));
DCHECK_EQ(scope_info->ContextLocalCount(), 1);
DCHECK_EQ(scope_info->ContextLocalMode(0), VAR);
DCHECK_EQ(scope_info->ContextLocalInitFlag(0), kCreatedInitialized);
String* name = scope_info->ContextLocalName(0);
MaybeAssignedFlag maybe_assigned =
scope_info->ContextLocalMaybeAssignedFlag(0);
outer_scope =
new (zone) Scope(zone, ast_value_factory->GetString(handle(name)),
maybe_assigned, handle(scope_info));
}
if (deserialization_mode == DeserializationMode::kScopesOnly) {
outer_scope->scope_info_ = Handle<ScopeInfo>::null();
@ -469,9 +489,12 @@ int Scope::num_parameters() const {
void DeclarationScope::DeclareSloppyBlockFunction(
const AstRawString* name, Scope* scope,
SloppyBlockFunctionStatement* statement) {
auto* delegate =
new (zone()) SloppyBlockFunctionMap::Delegate(scope, statement);
sloppy_block_function_map_.Declare(zone(), name, delegate);
if (sloppy_block_function_map_ == nullptr) {
sloppy_block_function_map_ =
new (zone()->New(sizeof(SloppyBlockFunctionMap)))
SloppyBlockFunctionMap(zone());
}
sloppy_block_function_map_->Declare(zone(), name, scope, statement);
}
void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
@ -481,12 +504,19 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
DCHECK(HasSimpleParameters() || is_block_scope() || is_being_lazily_parsed_);
DCHECK_EQ(factory == nullptr, is_being_lazily_parsed_);
bool has_simple_parameters = HasSimpleParameters();
SloppyBlockFunctionMap* map = sloppy_block_function_map();
if (map == nullptr) return;
const bool has_simple_parameters = HasSimpleParameters();
// The declarations need to be added in the order they were seen,
// so accumulate declared names sorted by index.
ZoneMap<int, const AstRawString*> names_to_declare(zone());
// For each variable which is used as a function declaration in a sloppy
// block,
SloppyBlockFunctionMap* map = sloppy_block_function_map();
for (ZoneHashMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) {
AstRawString* name = static_cast<AstRawString*>(p->key);
const AstRawString* name = static_cast<AstRawString*>(p->key);
// If the variable wouldn't conflict with a lexical declaration
// or parameter,
@ -509,7 +539,7 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
}
}
Variable* created_variable = nullptr;
bool declaration_queued = false;
// Write in assignments to var for each block-scoped function declaration
auto delegates = static_cast<SloppyBlockFunctionMap::Delegate*>(p->value);
@ -543,50 +573,59 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
if (!should_hoist) continue;
// Declare a var-style binding for the function in the outer scope
if (factory) {
DCHECK(!is_being_lazily_parsed_);
if (created_variable == nullptr) {
VariableProxy* proxy =
factory->NewVariableProxy(name, NORMAL_VARIABLE);
auto declaration =
factory->NewVariableDeclaration(proxy, this, kNoSourcePosition);
// Based on the preceding check, it doesn't matter what we pass as
// allow_harmony_restrictive_generators and
// sloppy_mode_block_scope_function_redefinition.
bool ok = true;
created_variable = DeclareVariable(
declaration, VAR, Variable::DefaultInitializationFlag(VAR), false,
nullptr, &ok);
CHECK(ok); // Based on the preceding check, this should not fail
if (!declaration_queued) {
declaration_queued = true;
names_to_declare.insert({delegate->index(), name});
}
if (factory) {
DCHECK(!is_being_lazily_parsed_);
Expression* assignment = factory->NewAssignment(
Token::ASSIGN, NewUnresolved(factory, name),
delegate->scope()->NewUnresolved(factory, name), kNoSourcePosition);
Statement* statement =
factory->NewExpressionStatement(assignment, kNoSourcePosition);
delegate->set_statement(statement);
}
}
}
if (names_to_declare.empty()) return;
for (const auto& index_and_name : names_to_declare) {
const AstRawString* name = index_and_name.second;
if (factory) {
DCHECK(!is_being_lazily_parsed_);
VariableProxy* proxy = factory->NewVariableProxy(name, NORMAL_VARIABLE);
auto declaration =
factory->NewVariableDeclaration(proxy, this, kNoSourcePosition);
// Based on the preceding checks, it doesn't matter what we pass as
// allow_harmony_restrictive_generators and
// sloppy_mode_block_scope_function_redefinition.
bool ok = true;
DeclareVariable(declaration, VAR,
Variable::DefaultInitializationFlag(VAR), false, nullptr,
&ok);
DCHECK(ok);
} else {
DCHECK(is_being_lazily_parsed_);
if (created_variable == nullptr) {
created_variable = DeclareVariableName(name, VAR);
if (created_variable != kDummyPreParserVariable &&
created_variable != kDummyPreParserLexicalVariable) {
Variable* var = DeclareVariableName(name, VAR);
if (var != kDummyPreParserVariable &&
var != kDummyPreParserLexicalVariable) {
DCHECK(FLAG_preparser_scope_analysis);
created_variable->set_maybe_assigned();
}
}
var->set_maybe_assigned();
}
}
}
}
void DeclarationScope::Analyze(ParseInfo* info, AnalyzeMode mode) {
RuntimeCallTimerScope runtimeTimer(info->isolate(),
void DeclarationScope::Analyze(ParseInfo* info, Isolate* isolate,
AnalyzeMode mode) {
RuntimeCallTimerScope runtimeTimer(isolate,
&RuntimeCallStats::CompileScopeAnalysis);
DCHECK(info->literal() != NULL);
DeclarationScope* scope = info->literal()->scope();
DCHECK(scope->scope_info_.is_null());
Handle<ScopeInfo> outer_scope_info;
if (info->maybe_outer_scope_info().ToHandle(&outer_scope_info)) {
@ -595,7 +634,7 @@ void DeclarationScope::Analyze(ParseInfo* info, AnalyzeMode mode) {
DeclarationScope(info->zone(), info->ast_value_factory());
info->set_script_scope(script_scope);
scope->ReplaceOuterScope(Scope::DeserializeScopeChain(
info->isolate(), info->zone(), *outer_scope_info, script_scope,
info->zone(), *outer_scope_info, script_scope,
info->ast_value_factory(),
Scope::DeserializationMode::kIncludingVariables));
} else {
@ -622,13 +661,19 @@ void DeclarationScope::Analyze(ParseInfo* info, AnalyzeMode mode) {
// The outer scope is never lazy.
scope->set_should_eager_compile();
scope->AllocateVariables(info, mode);
if (scope->must_use_preparsed_scope_data_) {
DCHECK(FLAG_preparser_scope_analysis);
DCHECK_NOT_NULL(info->preparsed_scope_data());
DCHECK_EQ(scope->scope_type_, ScopeType::FUNCTION_SCOPE);
info->preparsed_scope_data()->RestoreData(scope);
}
scope->AllocateVariables(info, isolate, mode);
// Ensuring that the outer script scope has a scope info avoids having
// a special case for native contexts vs other contexts.
if (info->script_scope()->scope_info_.is_null()) {
info->script_scope()->scope_info_ =
handle(ScopeInfo::Empty(info->isolate()));
info->script_scope()->scope_info_ = handle(ScopeInfo::Empty(isolate));
}
#ifdef DEBUG
@ -722,6 +767,16 @@ Variable* DeclarationScope::DeclarePromiseVar(const AstRawString* name) {
return result;
}
Variable* DeclarationScope::DeclareAsyncGeneratorAwaitVar(
const AstRawString* name) {
DCHECK(is_function_scope());
DCHECK_NULL(async_generator_await_var());
Variable* result = EnsureRareData()->promise = NewTemporary(name);
DCHECK_NULL(promise_var()); // promise is alias for generator await var
result->set_is_used();
return result;
}
bool Scope::HasBeenRemoved() const {
if (sibling() == this) {
DCHECK_NULL(inner_scope_);
@ -778,7 +833,9 @@ Scope* Scope::FinalizeBlockScope() {
unresolved_ = nullptr;
}
PropagateUsageFlagsToScope(outer_scope_);
if (scope_calls_eval_) outer_scope()->scope_calls_eval_ = true;
if (inner_scope_calls_eval_) outer_scope()->inner_scope_calls_eval_ = true;
// This block does not need a context.
num_heap_slots_ = 0;
@ -820,10 +877,15 @@ void Scope::Snapshot::Reparent(DeclarationScope* new_parent) const {
for (; inner_scope->sibling() != top_inner_scope_;
inner_scope = inner_scope->sibling()) {
inner_scope->outer_scope_ = new_parent;
if (inner_scope->inner_scope_calls_eval_) {
new_parent->inner_scope_calls_eval_ = true;
}
DCHECK_NE(inner_scope, new_parent);
}
inner_scope->outer_scope_ = new_parent;
if (inner_scope->inner_scope_calls_eval_) {
new_parent->inner_scope_calls_eval_ = true;
}
new_parent->inner_scope_ = new_parent->sibling_;
inner_scope->sibling_ = nullptr;
// Reset the sibling rather than the inner_scope_ since we
@ -860,6 +922,15 @@ void Scope::Snapshot::Reparent(DeclarationScope* new_parent) const {
}
outer_closure->locals_.Rewind(top_local_);
outer_closure->decls_.Rewind(top_decl_);
// Move eval calls since Snapshot's creation into new_parent.
if (outer_scope_->scope_calls_eval_) {
new_parent->scope_calls_eval_ = true;
new_parent->inner_scope_calls_eval_ = true;
}
// Reset the outer_scope's eval state. It will be restored to its
// original value as necessary in the destructor of this class.
outer_scope_->scope_calls_eval_ = false;
}
void Scope::ReplaceOuterScope(Scope* outer) {
@ -871,15 +942,6 @@ void Scope::ReplaceOuterScope(Scope* outer) {
outer_scope_ = outer;
}
void Scope::PropagateUsageFlagsToScope(Scope* other) {
DCHECK_NOT_NULL(other);
DCHECK(!already_resolved_);
DCHECK(!other->already_resolved_);
if (calls_eval()) other->RecordEvalCall();
if (inner_scope_calls_eval_) other->inner_scope_calls_eval_ = true;
}
Variable* Scope::LookupInScopeInfo(const AstRawString* name) {
Handle<String> name_handle = name->string();
// The Scope is backed up by ScopeInfo. This means it cannot operate in a
@ -946,7 +1008,7 @@ Variable* Scope::Lookup(const AstRawString* name) {
Variable* DeclarationScope::DeclareParameter(
const AstRawString* name, VariableMode mode, bool is_optional, bool is_rest,
bool* is_duplicate, AstValueFactory* ast_value_factory) {
bool* is_duplicate, AstValueFactory* ast_value_factory, int position) {
DCHECK(!already_resolved_);
DCHECK(is_function_scope() || is_module_scope());
DCHECK(!has_rest_);
@ -963,6 +1025,7 @@ Variable* DeclarationScope::DeclareParameter(
*is_duplicate = IsDeclaredParameter(name);
}
has_rest_ = is_rest;
var->set_initializer_position(position);
params_.Add(var, zone());
if (name == ast_value_factory->arguments_string()) {
has_arguments_parameter_ = true;
@ -1071,9 +1134,11 @@ Variable* Scope::DeclareVariable(
// will be a permitted duplicate.
FunctionKind function_kind =
declaration->AsFunctionDeclaration()->fun()->kind();
duplicate_allowed =
GetDeclarationScope()->sloppy_block_function_map()->Lookup(
const_cast<AstRawString*>(name), name->hash()) != nullptr &&
SloppyBlockFunctionMap* map =
GetDeclarationScope()->sloppy_block_function_map();
duplicate_allowed = map != nullptr &&
map->Lookup(const_cast<AstRawString*>(name),
name->hash()) != nullptr &&
!IsAsyncFunction(function_kind) &&
!(allow_harmony_restrictive_generators &&
IsGeneratorFunction(function_kind));
@ -1264,7 +1329,8 @@ Declaration* Scope::CheckLexDeclarationsConflictingWith(
return nullptr;
}
void DeclarationScope::AllocateVariables(ParseInfo* info, AnalyzeMode mode) {
void DeclarationScope::AllocateVariables(ParseInfo* info, Isolate* isolate,
AnalyzeMode mode) {
// Module variables must be allocated before variable resolution
// to ensure that AccessNeedsHoleCheck() can detect import variables.
if (is_module_scope()) AsModuleScope()->AllocateModuleVariables();
@ -1275,16 +1341,16 @@ void DeclarationScope::AllocateVariables(ParseInfo* info, AnalyzeMode mode) {
MaybeHandle<ScopeInfo> outer_scope;
if (outer_scope_ != nullptr) outer_scope = outer_scope_->scope_info_;
AllocateScopeInfosRecursively(info->isolate(), outer_scope);
AllocateScopeInfosRecursively(isolate, outer_scope);
if (mode == AnalyzeMode::kDebugger) {
AllocateDebuggerScopeInfos(info->isolate(), outer_scope);
AllocateDebuggerScopeInfos(isolate, outer_scope);
}
// The debugger expects all shared function infos to contain a scope info.
// Since the top-most scope will end up in a shared function info, make sure
// it has one, even if it doesn't need a scope info.
// TODO(jochen|yangguo): Remove this requirement.
if (scope_info_.is_null()) {
scope_info_ = ScopeInfo::Create(info->isolate(), zone(), this, outer_scope);
scope_info_ = ScopeInfo::Create(isolate, zone(), this, outer_scope);
}
}
@ -1439,12 +1505,12 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
locals_.Clear();
inner_scope_ = nullptr;
unresolved_ = nullptr;
sloppy_block_function_map_ = nullptr;
if (aborted) {
// Prepare scope for use in the outer zone.
zone_ = ast_value_factory->zone();
variables_.Reset(ZoneAllocationPolicy(zone_));
sloppy_block_function_map_.Reset(ZoneAllocationPolicy(zone_));
if (!IsArrowFunction(function_kind_)) {
DeclareDefaultFunctionVariables(ast_value_factory);
}
@ -1452,7 +1518,6 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
// Make sure this scope isn't used for allocation anymore.
zone_ = nullptr;
variables_.Invalidate();
sloppy_block_function_map_.Invalidate();
}
#ifdef DEBUG
@ -1487,11 +1552,10 @@ void DeclarationScope::AnalyzePartially(
arguments_ = nullptr;
}
if (FLAG_preparser_scope_analysis) {
// Decide context allocation for the locals and parameters and store the
// info away.
AllocateVariablesRecursively();
CollectVariableData(preparsed_scope_data);
if (FLAG_preparser_scope_analysis && preparsed_scope_data->Producing()) {
// Store the information needed for allocating the locals of this scope
// and its inner scopes.
preparsed_scope_data->SaveData(this);
}
}
#ifdef DEBUG
@ -1564,7 +1628,7 @@ void PrintVar(int indent, Variable* var) {
PrintF(".%p", reinterpret_cast<void*>(var));
else
PrintName(var->raw_name());
PrintF("; // ");
PrintF("; // (%p) ", reinterpret_cast<void*>(var));
PrintLocation(var);
bool comma = !var->IsUnallocated();
if (var->has_forced_context_allocation()) {
@ -1637,7 +1701,8 @@ void Scope::Print(int n) {
function = AsDeclarationScope()->function_var();
}
PrintF(" { // (%d, %d)\n", start_position(), end_position());
PrintF(" { // (%p) (%d, %d)\n", reinterpret_cast<void*>(this),
start_position(), end_position());
if (is_hidden()) {
Indent(n1, "// is hidden\n");
}
@ -2269,17 +2334,6 @@ void Scope::AllocateDebuggerScopeInfos(Isolate* isolate,
}
}
void Scope::CollectVariableData(PreParsedScopeData* data) {
PreParsedScopeData::ScopeScope scope_scope(data, scope_type(),
start_position(), end_position());
for (Variable* local : locals_) {
scope_scope.MaybeAddVariable(local);
}
for (Scope* inner = inner_scope_; inner != nullptr; inner = inner->sibling_) {
inner->CollectVariableData(data);
}
}
int Scope::StackLocalCount() const {
Variable* function =
is_function_scope() ? AsDeclarationScope()->function_var() : nullptr;
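
The Snapshot changes in the hunks above follow a save/clear/restore (RAII) pattern: the constructor stashes the outer scope's calls_eval bit and clears it so that only eval calls made during the snapshot's lifetime are observed, Reparent moves those recorded calls into the new parent, and the destructor restores the previous bit. A minimal sketch of that pattern, using hypothetical stand-in types rather than V8's real Scope:

#include <cassert>

struct Scope {  // hypothetical stand-in, not v8::internal::Scope
  bool calls_eval = false;
};

class EvalSnapshot {  // plays the role of Scope::Snapshot
 public:
  explicit EvalSnapshot(Scope* scope)
      : scope_(scope), outer_calls_eval_(scope->calls_eval) {
    // Clear so we can tell whether an eval call happens while we are alive.
    scope_->calls_eval = false;
  }
  ~EvalSnapshot() {
    // Restore the previous bit if it was set before the snapshot.
    if (outer_calls_eval_) scope_->calls_eval = true;
  }
  bool SawEvalCall() const { return scope_->calls_eval; }

 private:
  Scope* scope_;
  const bool outer_calls_eval_;
};

int main() {
  Scope s;
  s.calls_eval = true;
  {
    EvalSnapshot snapshot(&s);
    assert(!snapshot.SawEvalCall());  // earlier eval calls are masked
  }
  assert(s.calls_eval);  // previous state restored by the destructor
}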

77
deps/v8/src/ast/scopes.h

@ -53,22 +53,27 @@ class SloppyBlockFunctionMap : public ZoneHashMap {
public:
class Delegate : public ZoneObject {
public:
explicit Delegate(Scope* scope,
SloppyBlockFunctionStatement* statement = nullptr)
: scope_(scope), statement_(statement), next_(nullptr) {}
Delegate(Scope* scope, SloppyBlockFunctionStatement* statement, int index)
: scope_(scope), statement_(statement), next_(nullptr), index_(index) {}
void set_statement(Statement* statement);
void set_next(Delegate* next) { next_ = next; }
Delegate* next() const { return next_; }
Scope* scope() const { return scope_; }
int index() const { return index_; }
private:
Scope* scope_;
SloppyBlockFunctionStatement* statement_;
Delegate* next_;
int index_;
};
explicit SloppyBlockFunctionMap(Zone* zone);
void Declare(Zone* zone, const AstRawString* name, Delegate* delegate);
void Declare(Zone* zone, const AstRawString* name, Scope* scope,
SloppyBlockFunctionStatement* statement);
private:
int count_;
};
enum class AnalyzeMode { kRegular, kDebugger };
@ -112,6 +117,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
class Snapshot final BASE_EMBEDDED {
public:
explicit Snapshot(Scope* scope);
~Snapshot();
void Reparent(DeclarationScope* new_parent) const;
@ -121,12 +127,12 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
VariableProxy* top_unresolved_;
ThreadedList<Variable>::Iterator top_local_;
ThreadedList<Declaration>::Iterator top_decl_;
const bool outer_scope_calls_eval_;
};
enum class DeserializationMode { kIncludingVariables, kScopesOnly };
static Scope* DeserializeScopeChain(Isolate* isolate, Zone* zone,
ScopeInfo* scope_info,
static Scope* DeserializeScopeChain(Zone* zone, ScopeInfo* scope_info,
DeclarationScope* script_scope,
AstValueFactory* ast_value_factory,
DeserializationMode deserialization_mode);
@ -146,12 +152,22 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// Assumes outer_scope_ is non-null.
void ReplaceOuterScope(Scope* outer_scope);
// Propagates any eagerly-gathered scope usage flags (such as calls_eval())
// to the passed-in scope.
void PropagateUsageFlagsToScope(Scope* other);
Zone* zone() const { return zone_; }
void SetMustUsePreParsedScopeData() {
if (must_use_preparsed_scope_data_) {
return;
}
must_use_preparsed_scope_data_ = true;
if (outer_scope_) {
outer_scope_->SetMustUsePreParsedScopeData();
}
}
bool must_use_preparsed_scope_data() const {
return must_use_preparsed_scope_data_;
}
// ---------------------------------------------------------------------------
// Declarations
@ -357,10 +373,10 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// The scope immediately surrounding this scope, or NULL.
Scope* outer_scope() const { return outer_scope_; }
const AstRawString* catch_variable_name() const {
Variable* catch_variable() const {
DCHECK(is_catch_scope());
DCHECK_EQ(1, num_var());
return static_cast<AstRawString*>(variables_.Start()->key);
return static_cast<Variable*>(variables_.Start()->value);
}
// ---------------------------------------------------------------------------
@ -546,12 +562,15 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// Temporary workaround that allows masking of 'this' in debug-evaluate scopes.
bool is_debug_evaluate_scope_ : 1;
// True if one of the inner scopes or the scope itself calls eval.
bool inner_scope_calls_eval_ : 1;
bool force_context_allocation_ : 1;
// True if it holds 'var' declarations.
bool is_declaration_scope_ : 1;
bool must_use_preparsed_scope_data_ : 1;
// Create a non-local variable with a given name.
// These variables are looked up dynamically at runtime.
Variable* NonLocal(const AstRawString* name, VariableMode mode);
@ -590,14 +609,12 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
void AllocateDebuggerScopeInfos(Isolate* isolate,
MaybeHandle<ScopeInfo> outer_scope);
void CollectVariableData(PreParsedScopeData* data);
// Construct a scope based on the scope info.
Scope(Zone* zone, ScopeType type, Handle<ScopeInfo> scope_info);
// Construct a catch scope with a binding for the name.
Scope(Zone* zone, const AstRawString* catch_variable_name,
Handle<ScopeInfo> scope_info);
MaybeAssignedFlag maybe_assigned, Handle<ScopeInfo> scope_info);
void AddInnerScope(Scope* inner_scope) {
inner_scope->sibling_ = inner_scope_;
@ -686,13 +703,14 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// Ignition without ScopeInfo.
Variable* DeclareGeneratorObjectVar(const AstRawString* name);
Variable* DeclarePromiseVar(const AstRawString* name);
Variable* DeclareAsyncGeneratorAwaitVar(const AstRawString* name);
// Declare a parameter in this scope. When there are duplicated
// parameters the rightmost one 'wins'. However, the implementation
// expects all parameters to be declared, in order from left to right.
Variable* DeclareParameter(const AstRawString* name, VariableMode mode,
bool is_optional, bool is_rest, bool* is_duplicate,
AstValueFactory* ast_value_factory);
AstValueFactory* ast_value_factory, int position);
// Declares that a parameter with the name exists. Creates a Variable and
// returns it if FLAG_preparser_scope_analysis is on.
@ -738,9 +756,16 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
Variable* promise_var() const {
DCHECK(is_function_scope());
DCHECK(IsAsyncFunction(function_kind_));
if (IsAsyncGeneratorFunction(function_kind_)) return nullptr;
return GetRareVariable(RareVariable::kPromise);
}
Variable* async_generator_await_var() const {
DCHECK(is_function_scope());
DCHECK(IsAsyncGeneratorFunction(function_kind_));
return GetRareVariable(RareVariable::kAsyncGeneratorAwaitResult);
}
// Parameters. The left-most parameter has index 0.
// Only valid for function and module scopes.
Variable* parameter(int index) const {
@ -805,13 +830,13 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
void HoistSloppyBlockFunctions(AstNodeFactory* factory);
SloppyBlockFunctionMap* sloppy_block_function_map() {
return &sloppy_block_function_map_;
return sloppy_block_function_map_;
}
// Compute top scope and allocate variables. For lazy compilation the top
// scope only contains the single lazily compiled function, so this
// doesn't re-allocate variables repeatedly.
static void Analyze(ParseInfo* info, AnalyzeMode mode);
static void Analyze(ParseInfo* info, Isolate* isolate, AnalyzeMode mode);
// To be called during parsing. Do just enough scope analysis that we can
// discard the Scope for lazily compiled functions. In particular, this
@ -848,6 +873,11 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
void ResetAfterPreparsing(AstValueFactory* ast_value_factory, bool aborted);
bool is_skipped_function() const { return is_skipped_function_; }
void set_is_skipped_function(bool is_skipped_function) {
is_skipped_function_ = is_skipped_function;
}
private:
void AllocateParameter(Variable* var, int index);
@ -859,7 +889,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// In the case of code compiled and run using 'eval', the context
// parameter is the context in which eval was called. In all other
// cases the context parameter is an empty handle.
void AllocateVariables(ParseInfo* info, AnalyzeMode mode);
void AllocateVariables(ParseInfo* info, Isolate* isolate, AnalyzeMode mode);
void SetDefaults();
@ -884,11 +914,12 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
#if DEBUG
bool is_being_lazily_parsed_ : 1;
#endif
bool is_skipped_function_ : 1;
// Parameter list in source order.
ZoneList<Variable*> params_;
// Map of function names to lists of functions defined in sloppy blocks
SloppyBlockFunctionMap sloppy_block_function_map_;
SloppyBlockFunctionMap* sloppy_block_function_map_;
// Convenience variable.
Variable* receiver_;
// Function variable, if any; function scopes only.
@ -912,7 +943,8 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
enum class RareVariable {
kThisFunction = offsetof(RareData, this_function),
kGeneratorObject = offsetof(RareData, generator_object),
kPromise = offsetof(RareData, promise)
kPromise = offsetof(RareData, promise),
kAsyncGeneratorAwaitResult = kPromise
};
V8_INLINE RareData* EnsureRareData() {
@ -950,8 +982,7 @@ class ModuleScope final : public DeclarationScope {
// The generated ModuleDescriptor does not preserve all information. In
// particular, its module_requests map will be empty because we no longer need
// the map after parsing.
ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
AstValueFactory* ast_value_factory);
ModuleScope(Handle<ScopeInfo> scope_info, AstValueFactory* ast_value_factory);
ModuleDescriptor* module() const {
DCHECK_NOT_NULL(module_descriptor_);
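
Turning sloppy_block_function_map_ from an embedded SloppyBlockFunctionMap into a pointer, as the hunks above do, is a lazy-allocation pattern: most scopes never declare a sloppy block function, so the map is only created, inside the zone, on first use. A reduced sketch of the idea, using standard containers instead of V8's zone-allocated ZoneHashMap:

#include <memory>
#include <string>
#include <unordered_map>

class DeclScope {  // hypothetical stand-in for DeclarationScope
 public:
  void DeclareSloppyBlockFunction(const std::string& name) {
    if (map_ == nullptr) {
      // Allocate on first use only; untouched scopes pay nothing. V8 instead
      // placement-news the map into its Zone so it dies with the zone.
      map_ = std::make_unique<std::unordered_map<std::string, int>>();
    }
    (*map_)[name]++;
  }
  bool HasSloppyBlockFunctions() const { return map_ != nullptr; }

 private:
  std::unique_ptr<std::unordered_map<std::string, int>> map_;
};

int main() {
  DeclScope scope;
  scope.DeclareSloppyBlockFunction("f");
  return scope.HasSloppyBlockFunctions() ? 0 : 1;
}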

6
deps/v8/src/ast/variables.h

@ -100,6 +100,12 @@ class Variable final : public ZoneObject {
int index() const { return index_; }
bool IsReceiver() const {
DCHECK(IsParameter());
return index_ == -1;
}
bool IsExport() const {
DCHECK_EQ(location(), VariableLocation::MODULE);
DCHECK_NE(index(), 0);

9
deps/v8/src/background-parsing-task.cc

@ -29,12 +29,11 @@ BackgroundParsingTask::BackgroundParsingTask(
// Prepare the data for the internalization phase and compilation phase, which
// will happen in the main thread after parsing.
ParseInfo* info = new ParseInfo(isolate->allocator());
info->InitFromIsolate(isolate);
info->set_toplevel();
source->info.reset(info);
info->set_isolate(isolate);
info->set_source_stream(source->source_stream.get());
info->set_source_stream_encoding(source->encoding);
info->set_hash_seed(isolate->heap()->HashSeed());
info->set_unicode_cache(&source_->unicode_cache);
info->set_compile_options(options);
info->set_allow_lazy_parsing();
@ -58,11 +57,6 @@ void BackgroundParsingTask::Run() {
uintptr_t stack_limit = GetCurrentStackPosition() - stack_size_ * KB;
source_->parser->set_stack_limit(stack_limit);
// Nullify the Isolate temporarily so that the background parser doesn't
// accidentally use it.
Isolate* isolate = source_->info->isolate();
source_->info->set_isolate(nullptr);
source_->parser->ParseOnBackground(source_->info.get());
if (script_data_ != nullptr) {
@ -73,7 +67,6 @@ void BackgroundParsingTask::Run() {
delete script_data_;
script_data_ = nullptr;
}
source_->info->set_isolate(isolate);
}
} // namespace internal
} // namespace v8
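
The change above removes the Isolate pointer from the background task entirely: the values the parser needs off-thread (such as the heap's hash seed) are copied out of the isolate while still on the main thread, which makes the old "temporarily nullify the isolate" dance unnecessary. A sketch of that shape, with illustrative names rather than V8's API:

#include <cstdint>
#include <future>

struct Isolate {  // hypothetical stand-in
  uint64_t hash_seed() const { return 42; }
};

struct ParseJob {
  uint64_t hash_seed;  // plain copied data, safe to read from any thread
};

int ParseOnBackground(const ParseJob& job) {
  // Touches only the copied fields; no Isolate access off the main thread.
  return static_cast<int>(job.hash_seed % 7);
}

int main() {
  Isolate isolate;
  ParseJob job{isolate.hash_seed()};  // capture on the main thread
  auto result = std::async(std::launch::async, ParseOnBackground, job);
  return result.get();
}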

12
deps/v8/src/bailout-reason.h

@ -58,6 +58,7 @@ namespace internal {
"Encountered a do-expression with unmodelable control statements") \
V(kDoPushArgumentNotImplementedForDoubleType, \
"DoPushArgument not implemented for double type") \
V(kDynamicImport, "Dynamic module import") \
V(kEliminatedBoundsCheckFailed, "Eliminated bounds check failed") \
V(kEmitLoadRegisterUnsupportedDoubleImmediate, \
"EmitLoadRegister: Unsupported double immediate") \
@ -119,8 +120,6 @@ namespace internal {
V(kInvalidLhsInCountOperation, "Invalid lhs in count operation") \
V(kInvalidMinLength, "Invalid min_length") \
V(kInvalidRegisterFileInGenerator, "invalid register file in generator") \
V(kJSObjectWithFastElementsMapHasSlowElements, \
"JSObject with fast elements map has slow elements") \
V(kLetBindingReInitialization, "Let binding re-initialization") \
V(kLiveEdit, "LiveEdit") \
V(kLookupVariableInCountOperation, "Lookup variable in count operation") \
@ -142,7 +141,6 @@ namespace internal {
"Not enough virtual registers (regalloc)") \
V(kObjectLiteralWithComplexProperty, "Object literal with complex property") \
V(kOffsetOutOfRange, "Offset out of range") \
V(kOperandIsANumber, "Operand is a number") \
V(kOperandIsASmiAndNotABoundFunction, \
"Operand is a smi and not a bound function") \
V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
@ -155,13 +153,10 @@ namespace internal {
V(kOperandIsNotABoundFunction, "Operand is not a bound function") \
V(kOperandIsNotAFunction, "Operand is not a function") \
V(kOperandIsNotAGeneratorObject, "Operand is not a generator object") \
V(kOperandIsNotAName, "Operand is not a name") \
V(kOperandIsNotANumber, "Operand is not a number") \
V(kOperandIsNotAReceiver, "Operand is not a receiver") \
V(kOperandIsNotASmi, "Operand is not a smi") \
V(kOperandIsNotAString, "Operand is not a string") \
V(kOperandIsNotSmi, "Operand is not smi") \
V(kOperandNotANumber, "Operand not a number") \
V(kObjectTagged, "The object is tagged") \
V(kObjectNotTagged, "The object is not tagged") \
V(kOptimizationDisabled, "Optimization disabled") \
@ -237,8 +232,9 @@ namespace internal {
V(kUnexpectedStackDepth, "Unexpected operand stack depth in full-codegen") \
V(kUnexpectedStackPointer, "The stack pointer is not the expected value") \
V(kUnexpectedStringType, "Unexpected string type") \
V(kUnexpectedTypeForRegExpDataFixedArrayExpected, \
"Unexpected type for RegExp data, FixedArray expected") \
V(kUnexpectedTestTypeofLiteralFlag, \
"Unexpected literal flag for TestTypeof bytecode") \
V(kUnexpectedRegExpExecCall, "Unexpected call to the RegExpExecStub") \
V(kUnexpectedValue, "Unexpected value") \
V(kUnsupportedDoubleImmediate, "Unsupported double immediate") \
V(kUnsupportedLetCompoundAssignment, "Unsupported let compound assignment") \
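
bailout-reason.h is built on an X-macro list: each V(name, message) entry is expanded once into an enum and once into a message table, which is why adding or removing a reason is a one-line diff. A self-contained sketch of the same technique with a made-up list:

#include <cstdio>

#define DEMO_REASON_LIST(V)        \
  V(kOk, "Everything is fine")     \
  V(kOutOfFuel, "Ran out of fuel")

enum DemoReason {
#define DECLARE_ENUM(name, message) name,
  DEMO_REASON_LIST(DECLARE_ENUM)
#undef DECLARE_ENUM
  kLastDemoReason
};

const char* DemoReasonToString(DemoReason reason) {
  static const char* const kMessages[] = {
#define DECLARE_MESSAGE(name, message) message,
      DEMO_REASON_LIST(DECLARE_MESSAGE)
#undef DECLARE_MESSAGE
  };
  return kMessages[reason];
}

int main() { std::printf("%s\n", DemoReasonToString(kOutOfFuel)); }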

3
deps/v8/src/base/cpu.cc

@ -596,7 +596,10 @@ CPU::CPU()
CPUInfo cpu_info;
char* cpu_model = cpu_info.ExtractField("cpu model");
has_fpu_ = HasListItem(cpu_model, "FPU");
char* ASEs = cpu_info.ExtractField("ASEs implemented");
has_msa_ = HasListItem(ASEs, "msa");
delete[] cpu_model;
delete[] ASEs;
#ifdef V8_HOST_ARCH_MIPS
is_fp64_mode_ = __detect_fp64_mode();
architecture_ = __detect_mips_arch_revision();
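
The hunk above extracts the "ASEs implemented" field from /proc/cpuinfo and tests it for the "msa" token with HasListItem. The check is an exact-token match within a whitespace-separated list; a small standalone equivalent of that check, not V8's actual helper:

#include <cassert>
#include <sstream>
#include <string>

bool ListContains(const std::string& list, const std::string& item) {
  std::istringstream stream(list);
  std::string token;
  while (stream >> token) {
    if (token == item) return true;  // whole-token match: "msa2" != "msa"
  }
  return false;
}

int main() {
  assert(ListContains("mips16 dsp msa", "msa"));
  assert(!ListContains("mips16 dsp", "msa"));
}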

2
deps/v8/src/base/cpu.h

@ -113,6 +113,7 @@ class V8_BASE_EXPORT CPU final {
// mips features
bool is_fp64_mode() const { return is_fp64_mode_; }
bool has_msa() const { return has_msa_; }
private:
char vendor_[13];
@ -154,6 +155,7 @@ class V8_BASE_EXPORT CPU final {
bool has_vfp3_d32_;
bool is_fp64_mode_;
bool has_non_stop_time_stamp_counter_;
bool has_msa_;
};
} // namespace base

2
deps/v8/src/base/debug/stack_trace.h

@ -38,7 +38,7 @@ V8_BASE_EXPORT void DisableSignalStackDump();
// A stacktrace can be helpful in debugging. For example, you can include a
// stacktrace member in an object (probably around #ifndef NDEBUG) so that you
// can later see where the given object was created from.
class StackTrace {
class V8_BASE_EXPORT StackTrace {
public:
// Creates a stacktrace from the current location.
StackTrace();
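
The only change here is tagging StackTrace with V8_BASE_EXPORT so the class is visible across the libv8_base shared-library boundary. Such export macros typically expand along these lines (a simplified sketch; the real definition also has to handle the importing side and static builds, and MY_EXPORT is a made-up name):

#include <cstdio>

#if defined(_WIN32)
#define MY_EXPORT __declspec(dllexport)
#else
#define MY_EXPORT __attribute__((visibility("default")))
#endif

class MY_EXPORT Reporter {  // hypothetical example class
 public:
  void Print() { std::puts("visible to users of the shared library"); }
};

int main() { Reporter().Print(); }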

7
deps/v8/src/base/iterator.h

@ -26,9 +26,10 @@ class iterator_range {
typename std::iterator_traits<iterator>::difference_type difference_type;
iterator_range() : begin_(), end_() {}
template <typename ForwardIterator2>
iterator_range(ForwardIterator2 const& begin, ForwardIterator2 const& end)
: begin_(begin), end_(end) {}
template <typename ForwardIterator1, typename ForwardIterator2>
iterator_range(ForwardIterator1&& begin, ForwardIterator2&& end)
: begin_(std::forward<ForwardIterator1>(begin)),
end_(std::forward<ForwardIterator2>(end)) {}
iterator begin() { return begin_; }
iterator end() { return end_; }
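
The constructor change above switches from two const& parameters of a single deduced type to perfect forwarding, so begin and end may have different (convertible) types and rvalue iterators are moved instead of copied. A reduced version of the pattern, with Range standing in for base::iterator_range:

#include <utility>
#include <vector>

template <typename Iterator>
class Range {
 public:
  template <typename It1, typename It2>
  Range(It1&& begin, It2&& end)
      : begin_(std::forward<It1>(begin)), end_(std::forward<It2>(end)) {}
  Iterator begin() const { return begin_; }
  Iterator end() const { return end_; }

 private:
  Iterator begin_, end_;
};

int main() {
  std::vector<int> v = {1, 2, 3};
  Range<std::vector<int>::const_iterator> r(v.cbegin(), v.cend());
  int sum = 0;
  for (int x : r) sum += x;  // ranged-for works via begin()/end()
  return sum == 6 ? 0 : 1;
}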

16
deps/v8/src/base/logging.cc

@ -4,6 +4,7 @@
#include "src/base/logging.h"
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
@ -13,6 +14,16 @@
namespace v8 {
namespace base {
namespace {
void (*g_print_stack_trace)() = nullptr;
} // namespace
void SetPrintStackTrace(void (*print_stack_trace)()) {
g_print_stack_trace = print_stack_trace;
}
// Explicit instantiations for commonly used comparisons.
#define DEFINE_MAKE_CHECK_OP_STRING(type) \
template std::string* MakeCheckOpString<type, type>(type, type, char const*);
@ -57,11 +68,8 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
va_end(arguments);
v8::base::OS::PrintError("\n#\n");
v8::base::debug::StackTrace trace;
trace.Print();
if (v8::base::g_print_stack_trace) v8::base::g_print_stack_trace();
fflush(stderr);
// Avoid dumping stack trace on abort signal.
v8::base::debug::DisableSignalStackDump();
v8::base::OS::Abort();
}

3
deps/v8/src/base/logging.h

@ -37,6 +37,9 @@ extern "C" PRINTF_FORMAT(3, 4) V8_NORETURN V8_BASE_EXPORT
namespace v8 {
namespace base {
// Overwrite the default function that prints a stack trace.
V8_BASE_EXPORT void SetPrintStackTrace(void (*print_stack_trace_)());
// CHECK dies with a fatal error if condition is not true. It is *not*
// controlled by DEBUG, so the check will be executed regardless of
// compilation mode.
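
Together, the two logging changes replace a hard dependency on base::debug::StackTrace inside V8_Fatal with an injectable function pointer: nothing is printed unless an embedder registers a callback via SetPrintStackTrace. The hook pattern in isolation, with Fatal as a simplified stand-in for V8_Fatal:

#include <cstdio>

namespace {
void (*g_print_stack_trace)() = nullptr;  // unset by default
}  // namespace

void SetPrintStackTrace(void (*print_stack_trace)()) {
  g_print_stack_trace = print_stack_trace;
}

void Fatal(const char* message) {
  std::fprintf(stderr, "FATAL: %s\n", message);
  if (g_print_stack_trace) g_print_stack_trace();  // optional hook
}

int main() {
  SetPrintStackTrace([] { std::fprintf(stderr, "<stack trace here>\n"); });
  Fatal("example");
}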

2
deps/v8/src/base/platform/mutex.cc

@ -104,7 +104,7 @@ static V8_INLINE void UnlockNativeHandle(PCRITICAL_SECTION cs) {
static V8_INLINE bool TryLockNativeHandle(PCRITICAL_SECTION cs) {
return TryEnterCriticalSection(cs);
return TryEnterCriticalSection(cs) != FALSE;
}
#endif // V8_OS_POSIX
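
TryEnterCriticalSection returns a Win32 BOOL (an int), and the added "!= FALSE" makes the int-to-bool conversion explicit, most likely to silence MSVC's C4800 "forcing value to bool" warning. The same shape without windows.h, using made-up names:

typedef int BOOL;  // Win32's BOOL is a typedef for int
static const BOOL kFalse = 0;

BOOL TryAcquire() { return 1; }  // hypothetical C-style API returning BOOL

bool TryLock() {
  return TryAcquire() != kFalse;  // explicit int -> bool comparison
}

int main() { return TryLock() ? 0 : 1; }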

15
deps/v8/src/base/platform/platform-aix.cc

@ -29,9 +29,9 @@
#undef MAP_TYPE
#include "src/base/macros.h"
#include "src/base/platform/platform-posix.h"
#include "src/base/platform/platform.h"
namespace v8 {
namespace base {
@ -42,8 +42,15 @@ static inline void* mmapHelper(size_t len, int prot, int flags, int fildes,
return mmap(addr, len, prot, flags, fildes, off);
}
class AIXTimezoneCache : public PosixTimezoneCache {
const char* LocalTimezone(double time) override;
double LocalTimeOffset() override;
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
~AIXTimezoneCache() override {}
};
const char* AIXTimezoneCache::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time / msPerSecond));
struct tm tm;
@ -52,8 +59,7 @@ const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
return tzname[0]; // The location of the timezone string on AIX.
}
double OS::LocalTimeOffset(TimezoneCache* cache) {
double AIXTimezoneCache::LocalTimeOffset() {
// On AIX, struct tm does not contain a tm_gmtoff field.
time_t utc = time(NULL);
DCHECK(utc != -1);
@ -63,6 +69,7 @@ double OS::LocalTimeOffset(TimezoneCache* cache) {
return static_cast<double>((mktime(loc) - utc) * msPerSecond);
}
TimezoneCache* OS::CreateTimezoneCache() { return new AIXTimezoneCache(); }
void* OS::Allocate(const size_t requested, size_t* allocated, bool executable) {
const size_t msize = RoundUp(requested, getpagesize());

13
deps/v8/src/base/platform/platform-cygwin.cc

@ -19,14 +19,22 @@
#undef MAP_TYPE
#include "src/base/macros.h"
#include "src/base/platform/platform-posix.h"
#include "src/base/platform/platform.h"
#include "src/base/win32-headers.h"
namespace v8 {
namespace base {
class CygwinTimezoneCache : public PosixTimezoneCache {
const char* LocalTimezone(double time) override;
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
double LocalTimeOffset() override;
~CygwinTimezoneCache() override {}
};
const char* CygwinTimezoneCache::LocalTimezone(double time) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm tm;
@ -35,8 +43,7 @@ const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
return tzname[0]; // The location of the timezone string on Cygwin.
}
double OS::LocalTimeOffset(TimezoneCache* cache) {
double CygwinTimezoneCache::LocalTimeOffset() {
// On Cygwin, struct tm does not contain a tm_gmtoff field.
time_t utc = time(NULL);
DCHECK(utc != -1);

23
deps/v8/src/base/platform/platform-freebsd.cc

@ -29,32 +29,13 @@
#undef MAP_TYPE
#include "src/base/macros.h"
#include "src/base/platform/platform-posix.h"
#include "src/base/platform/platform.h"
namespace v8 {
namespace base {
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
struct tm tm;
struct tm* t = localtime_r(&tv, &tm);
if (NULL == t) return "";
return t->tm_zone;
}
double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
struct tm tm;
struct tm* t = localtime_r(&tv, &tm);
// tm_gmtoff includes any daylight savings offset, so subtract it.
return static_cast<double>(t->tm_gmtoff * msPerSecond -
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
TimezoneCache* OS::CreateTimezoneCache() { return new PosixTimezoneCache(); }
void* OS::Allocate(const size_t requested,
size_t* allocated,

Some files were not shown because too many files changed in this diff
