
deps: update V8 to 6.3.221

Branch: canary-base
Author: Node.js Jenkins CI (8 years ago)
Parent commit: fb3e365592
Changed files:

   9  deps/v8/.editorconfig
   2  deps/v8/.gitignore
   4  deps/v8/AUTHORS
 238  deps/v8/BUILD.gn
3445  deps/v8/ChangeLog
  20  deps/v8/DEPS
   4  deps/v8/Makefile
   2  deps/v8/OWNERS
  72  deps/v8/PRESUBMIT.py
   2  deps/v8/benchmarks/deltablue.js
  16  deps/v8/gni/v8.gni
  21  deps/v8/gypfiles/features.gypi
   2  deps/v8/gypfiles/landmine_utils.py
   2  deps/v8/gypfiles/standalone.gypi
  92  deps/v8/include/v8-platform.h
   7  deps/v8/include/v8-profiler.h
   6  deps/v8/include/v8-version.h
 237  deps/v8/include/v8.h
  45  deps/v8/infra/mb/mb_config.pyl
  42  deps/v8/samples/hello-world.cc
  53  deps/v8/samples/process.cc
  19  deps/v8/samples/shell.cc
 105  deps/v8/src/accessors.cc
  25  deps/v8/src/address-map.h
 251  deps/v8/src/allocation.cc
 128  deps/v8/src/allocation.h
   5  deps/v8/src/api-arguments-inl.h
  22  deps/v8/src/api-arguments.h
   5  deps/v8/src/api-natives.cc
 549  deps/v8/src/api.cc
  14  deps/v8/src/api.h
 122  deps/v8/src/arm/assembler-arm-inl.h
 287  deps/v8/src/arm/assembler-arm.cc
 514  deps/v8/src/arm/assembler-arm.h
1236  deps/v8/src/arm/code-stubs-arm.cc
  56  deps/v8/src/arm/code-stubs-arm.h
  79  deps/v8/src/arm/codegen-arm.cc
  83  deps/v8/src/arm/deoptimizer-arm.cc
   6  deps/v8/src/arm/disasm-arm.cc
   8  deps/v8/src/arm/eh-frame-arm.cc
  12  deps/v8/src/arm/frame-constants-arm.cc
  48  deps/v8/src/arm/frame-constants-arm.h
 117  deps/v8/src/arm/frames-arm.h
 120  deps/v8/src/arm/interface-descriptors-arm.cc
1014  deps/v8/src/arm/macro-assembler-arm.cc
 392  deps/v8/src/arm/macro-assembler-arm.h
   7  deps/v8/src/arm/simulator-arm.cc
   2  deps/v8/src/arm/simulator-arm.h
 259  deps/v8/src/arm64/assembler-arm64-inl.h
  45  deps/v8/src/arm64/assembler-arm64.cc
 371  deps/v8/src/arm64/assembler-arm64.h
1452  deps/v8/src/arm64/code-stubs-arm64.cc
  99  deps/v8/src/arm64/code-stubs-arm64.h
  71  deps/v8/src/arm64/codegen-arm64.cc
   2  deps/v8/src/arm64/constants-arm64.h
  78  deps/v8/src/arm64/deoptimizer-arm64.cc
  57  deps/v8/src/arm64/disasm-arm64.cc
  10  deps/v8/src/arm64/eh-frame-arm64.cc
  14  deps/v8/src/arm64/frame-constants-arm64.cc
  26  deps/v8/src/arm64/frame-constants-arm64.h
  46  deps/v8/src/arm64/instructions-arm64-constants.cc
   3  deps/v8/src/arm64/instructions-arm64.cc
  44  deps/v8/src/arm64/instructions-arm64.h
 138  deps/v8/src/arm64/interface-descriptors-arm64.cc
  34  deps/v8/src/arm64/macro-assembler-arm64-inl.h
 859  deps/v8/src/arm64/macro-assembler-arm64.cc
 241  deps/v8/src/arm64/macro-assembler-arm64.h
   8  deps/v8/src/arm64/simulator-arm64.cc
   1  deps/v8/src/arm64/utils-arm64.cc
   1  deps/v8/src/asmjs/OWNERS
 347  deps/v8/src/asmjs/asm-js.cc
   7  deps/v8/src/asmjs/asm-js.h
 133  deps/v8/src/asmjs/asm-parser.cc
  13  deps/v8/src/asmjs/asm-parser.h
  14  deps/v8/src/asmjs/asm-scanner.cc
   8  deps/v8/src/asmjs/asm-scanner.h
  45  deps/v8/src/asmjs/asm-types.cc
 108  deps/v8/src/asmjs/asm-types.h
 262  deps/v8/src/assembler.cc
 228  deps/v8/src/assembler.h
   3  deps/v8/src/assert-scope.cc
  24  deps/v8/src/ast/ast-expression-rewriter.cc
 183  deps/v8/src/ast/ast-numbering.cc
  37  deps/v8/src/ast/ast-source-ranges.h
  17  deps/v8/src/ast/ast-traversal-visitor.h
  49  deps/v8/src/ast/ast-value-factory.cc
  58  deps/v8/src/ast/ast-value-factory.h
 202  deps/v8/src/ast/ast.cc
 733  deps/v8/src/ast/ast.h
   2  deps/v8/src/ast/modules.cc
 106  deps/v8/src/ast/prettyprinter.cc
  15  deps/v8/src/ast/prettyprinter.h
 236  deps/v8/src/ast/scopes.cc
  40  deps/v8/src/ast/scopes.h
  25  deps/v8/src/background-parsing-task.cc
 105  deps/v8/src/bailout-reason.h
   8  deps/v8/src/base.isolate
   1  deps/v8/src/base/DEPS
 265  deps/v8/src/base/atomic-utils.h
  21  deps/v8/src/base/atomicops.h

deps/v8/.editorconfig (9)

@@ -0,0 +1,9 @@
root = true
[*]
charset = utf-8
indent_style = space
indent_size = 2
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true

deps/v8/.gitignore (2)

@@ -32,6 +32,7 @@
.project
.pydevproject
.settings
.vscode
/_*
/build
/buildtools
@@ -67,6 +68,7 @@
!/testing/gtest/include/gtest/gtest_prod.h
/third_party/*
!/third_party/binutils
!/third_party/eu-strip
!/third_party/inspector_protocol
/tools/clang
/tools/gcmole/gcmole-tools

deps/v8/AUTHORS (4)

@@ -28,6 +28,8 @@ Amazon, Inc <*@amazon.com>
ST Microelectronics <*@st.com>
Yandex LLC <*@yandex-team.ru>
StrongLoop, Inc. <*@strongloop.com>
Facebook, Inc. <*@fb.com>
Facebook, Inc. <*@oculus.com>
Aaron Bieber <deftly@gmail.com>
Abdulla Kamar <abdulla.kamar@gmail.com>
@@ -88,6 +90,7 @@ Luis Reis <luis.m.reis@gmail.com>
Luke Zarko <lukezarko@gmail.com>
Maciej Małecki <me@mmalecki.com>
Marcin Cieślak <saper@marcincieslak.com>
Mateusz Czeladka <mateusz.szczap@gmail.com>
Mathias Bynens <mathias@qiwi.be>
Matt Hanselman <mjhanselman@gmail.com>
Matthew Sporleder <msporleder@gmail.com>
@@ -106,6 +109,7 @@ Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
Peter Rybin <peter.rybin@gmail.com>
Peter Varga <pvarga@inf.u-szeged.hu>
Peter Wong <peter.wm.wong@gmail.com>
Paul Lind <plind44@gmail.com>
Qiuyi Zhang <qiuyi.zqy@alibaba-inc.com>
Rafal Krypa <rafal@krypa.net>

deps/v8/BUILD.gn (238)

@@ -6,6 +6,7 @@ import("//build/config/android/config.gni")
import("//build/config/arm.gni")
import("//build/config/dcheck_always_on.gni")
import("//build/config/host_byteorder.gni")
import("//build/config/jumbo.gni")
import("//build/config/mips.gni")
import("//build/config/sanitizers/sanitizers.gni")
@@ -52,11 +53,14 @@ declare_args() {
v8_enable_vtunejit = false
# Sets -DENABLE_HANDLE_ZAPPING.
v8_enable_handle_zapping = is_debug
v8_enable_handle_zapping = true
# Enable slow dchecks.
v8_enable_slow_dchecks = false
# Enable fast mksnapshot runs.
v8_enable_fast_mksnapshot = false
# Enable code-generation-time checking of types in the CodeStubAssembler.
v8_enable_verify_csa = false
@@ -77,7 +81,10 @@ declare_args() {
v8_enable_trace_ignition = false
# Sets -DV8_CONCURRENT_MARKING
v8_enable_concurrent_marking = false
v8_enable_concurrent_marking = ""
# Sets -DV8_CSA_WRITE_BARRIER
v8_enable_csa_write_barrier = true
# Build the snapshot with unwinding information for perf.
# Sets -DV8_USE_SNAPSHOT_WITH_UNWINDING_INFO.
@@ -100,6 +107,9 @@ declare_args() {
# Similar to the ARM hard float ABI but on MIPS.
v8_use_mips_abi_hardfloat = true
# Controls the threshold for on-heap/off-heap Typed Arrays.
v8_typed_array_max_size_in_heap = 64
# List of extra files to snapshot. They will be snapshotted in order so
# if files export symbols used by later files, they should go first.
#
@@ -140,6 +150,13 @@ if (v8_enable_v8_checks == "") {
if (v8_check_microtasks_scopes_consistency == "") {
v8_check_microtasks_scopes_consistency = is_debug || dcheck_always_on
}
if (v8_enable_concurrent_marking == "") {
if (target_cpu == "x86" || target_cpu == "x64") {
v8_enable_concurrent_marking = true
} else {
v8_enable_concurrent_marking = false
}
}
# Specifies if the target build is a simulator build. Comparing target cpu
# with v8 target cpu to not affect simulator builds for making cross-compile
@@ -195,8 +212,12 @@ config("libsampler_config") {
# This config should only be applied to code using V8 and not any V8 code
# itself.
config("external_config") {
defines = []
if (is_component_build) {
defines = [ "USING_V8_SHARED" ]
defines += [ "USING_V8_SHARED" ]
}
if (v8_enable_v8_checks) {
defines += [ "V8_ENABLE_CHECKS" ] # Used in "include/v8.h".
}
include_dirs = [
"include",
@@ -224,6 +245,8 @@ config("features") {
defines +=
[ "V8_PROMISE_INTERNAL_FIELD_COUNT=${v8_promise_internal_field_count}" ]
}
defines +=
[ "V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP=${v8_typed_array_max_size_in_heap}" ]
if (v8_enable_future) {
defines += [ "V8_ENABLE_FUTURE" ]
}
@@ -278,6 +301,9 @@ config("features") {
if (v8_enable_concurrent_marking) {
defines += [ "V8_CONCURRENT_MARKING" ]
}
if (v8_enable_csa_write_barrier) {
defines += [ "V8_CSA_WRITE_BARRIER" ]
}
if (v8_check_microtasks_scopes_consistency) {
defines += [ "V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY" ]
}
@@ -541,15 +567,12 @@ action("js2c") {
"src/js/macros.py",
"src/messages.h",
"src/js/prologue.js",
"src/js/max-min.js",
"src/js/v8natives.js",
"src/js/array.js",
"src/js/string.js",
"src/js/typedarray.js",
"src/js/collection.js",
"src/js/weak-collection.js",
"src/js/messages.js",
"src/js/templates.js",
"src/js/spread.js",
"src/js/proxy.js",
"src/debug/mirrors.js",
@@ -795,6 +818,13 @@ action("run_mksnapshot") {
sources += [ v8_embed_script ]
args += [ rebase_path(v8_embed_script, root_build_dir) ]
}
if (v8_enable_fast_mksnapshot) {
args += [
"--no-turbo-rewrite-far-jumps",
"--no-turbo-verify-allocation",
]
}
}
action("v8_dump_build_config") {
@@ -814,6 +844,7 @@ action("v8_dump_build_config") {
"is_gcov_coverage=$is_gcov_coverage",
"is_msan=$is_msan",
"is_tsan=$is_tsan",
"is_ubsan_vptr=$is_ubsan_vptr",
"target_cpu=\"$target_cpu\"",
"v8_current_cpu=\"$v8_current_cpu\"",
"v8_enable_i18n_support=$v8_enable_i18n_support",
@@ -838,7 +869,7 @@ source_set("v8_maybe_snapshot") {
} else {
# Ignore v8_use_external_startup_data setting if no snapshot is used.
public_deps = [
":v8_builtins_setup",
":v8_init",
":v8_nosnapshot",
]
}
@@ -861,6 +892,15 @@ v8_source_set("v8_nosnapshot") {
"src/snapshot/snapshot-empty.cc",
]
if (use_jumbo_build == true) {
jumbo_excluded_sources = [
# TODO(mostynb@opera.com): don't exclude these http://crbug.com/752428
# Generated source, contains same variable names as libraries.cc
"$target_gen_dir/experimental-extras-libraries.cc",
"$target_gen_dir/libraries.cc",
]
}
configs = [ ":internal_config" ]
}
@@ -892,6 +932,15 @@ v8_source_set("v8_snapshot") {
"src/setup-isolate-deserialize.cc",
]
if (use_jumbo_build == true) {
jumbo_excluded_sources = [
# TODO(mostynb@opera.com): don't exclude these http://crbug.com/752428
# Generated source, contains same variable names as libraries.cc
"$target_gen_dir/experimental-extras-libraries.cc",
"$target_gen_dir/libraries.cc",
]
}
configs = [ ":internal_config" ]
}
@@ -920,7 +969,7 @@ if (v8_use_external_startup_data) {
}
}
v8_source_set("v8_builtins_generators") {
v8_source_set("v8_initializers") {
visibility = [
":*",
"test/cctest:*",
@@ -949,11 +998,8 @@ v8_source_set("v8_builtins_generators") {
"src/builtins/builtins-constructor-gen.h",
"src/builtins/builtins-constructor.h",
"src/builtins/builtins-conversion-gen.cc",
"src/builtins/builtins-conversion-gen.h",
"src/builtins/builtins-date-gen.cc",
"src/builtins/builtins-debug-gen.cc",
"src/builtins/builtins-forin-gen.cc",
"src/builtins/builtins-forin-gen.h",
"src/builtins/builtins-function-gen.cc",
"src/builtins/builtins-generator-gen.cc",
"src/builtins/builtins-global-gen.cc",
@@ -970,6 +1016,7 @@ v8_source_set("v8_builtins_generators") {
"src/builtins/builtins-promise-gen.cc",
"src/builtins/builtins-promise-gen.h",
"src/builtins/builtins-proxy-gen.cc",
"src/builtins/builtins-proxy-gen.h",
"src/builtins/builtins-regexp-gen.cc",
"src/builtins/builtins-regexp-gen.h",
"src/builtins/builtins-sharedarraybuffer-gen.cc",
@@ -980,6 +1027,7 @@ v8_source_set("v8_builtins_generators") {
"src/builtins/builtins-utils-gen.h",
"src/builtins/builtins-wasm-gen.cc",
"src/builtins/setup-builtins-internal.cc",
"src/heap/setup-heap-internal.cc",
"src/ic/accessor-assembler.cc",
"src/ic/accessor-assembler.h",
"src/ic/binary-op-assembler.cc",
@@ -996,6 +1044,18 @@ v8_source_set("v8_builtins_generators") {
"src/interpreter/setup-interpreter.h",
]
if (use_jumbo_build == true) {
jumbo_excluded_sources = [
# TODO(mostynb@opera.com): don't exclude these http://crbug.com/752428
"src/builtins/builtins-async-iterator-gen.cc",
"src/builtins/builtins-async-generator-gen.cc",
# This source file takes an unusually large amount of time to
# compile. Build it separately to avoid bottlenecks.
"src/builtins/builtins-regexp-gen.cc",
]
}
if (v8_current_cpu == "x86") {
sources += [
### gcmole(arch:ia32) ###
@@ -1045,11 +1105,11 @@ v8_source_set("v8_builtins_generators") {
configs = [ ":internal_config" ]
}
v8_source_set("v8_builtins_setup") {
v8_source_set("v8_init") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
":v8_builtins_generators",
":v8_initializers",
]
sources = [
@@ -1176,9 +1236,11 @@ v8_source_set("v8_base") {
"src/bit-vector.h",
"src/bootstrapper.cc",
"src/bootstrapper.h",
"src/boxed-float.h",
"src/builtins/builtins-api.cc",
"src/builtins/builtins-array.cc",
"src/builtins/builtins-arraybuffer.cc",
"src/builtins/builtins-bigint.cc",
"src/builtins/builtins-boolean.cc",
"src/builtins/builtins-call.cc",
"src/builtins/builtins-callsite.cc",
@@ -1246,6 +1308,8 @@ v8_source_set("v8_base") {
"src/compiler-dispatcher/compiler-dispatcher.h",
"src/compiler-dispatcher/optimizing-compile-dispatcher.cc",
"src/compiler-dispatcher/optimizing-compile-dispatcher.h",
"src/compiler-dispatcher/unoptimized-compile-job.cc",
"src/compiler-dispatcher/unoptimized-compile-job.h",
"src/compiler.cc",
"src/compiler.h",
"src/compiler/access-builder.cc",
@@ -1254,10 +1318,6 @@ v8_source_set("v8_base") {
"src/compiler/access-info.h",
"src/compiler/all-nodes.cc",
"src/compiler/all-nodes.h",
"src/compiler/ast-graph-builder.cc",
"src/compiler/ast-graph-builder.h",
"src/compiler/ast-loop-assignment-analyzer.cc",
"src/compiler/ast-loop-assignment-analyzer.h",
"src/compiler/basic-block-instrumentor.cc",
"src/compiler/basic-block-instrumentor.h",
"src/compiler/branch-elimination.cc",
@@ -1269,8 +1329,6 @@ v8_source_set("v8_base") {
"src/compiler/bytecode-liveness-map.cc",
"src/compiler/bytecode-liveness-map.h",
"src/compiler/c-linkage.cc",
"src/compiler/check-elimination.cc",
"src/compiler/check-elimination.h",
"src/compiler/checkpoint-elimination.cc",
"src/compiler/checkpoint-elimination.h",
"src/compiler/code-assembler.cc",
@@ -1286,8 +1344,6 @@ v8_source_set("v8_base") {
"src/compiler/common-operator.h",
"src/compiler/compiler-source-position-table.cc",
"src/compiler/compiler-source-position-table.h",
"src/compiler/control-builders.cc",
"src/compiler/control-builders.h",
"src/compiler/control-equivalence.cc",
"src/compiler/control-equivalence.h",
"src/compiler/control-flow-optimizer.cc",
@@ -1337,8 +1393,6 @@ v8_source_set("v8_base") {
"src/compiler/js-context-specialization.h",
"src/compiler/js-create-lowering.cc",
"src/compiler/js-create-lowering.h",
"src/compiler/js-frame-specialization.cc",
"src/compiler/js-frame-specialization.h",
"src/compiler/js-generic-lowering.cc",
"src/compiler/js-generic-lowering.h",
"src/compiler/js-graph.cc",
@@ -1402,6 +1456,7 @@ v8_source_set("v8_base") {
"src/compiler/operator.h",
"src/compiler/osr.cc",
"src/compiler/osr.h",
"src/compiler/persistent-map.h",
"src/compiler/pipeline-statistics.cc",
"src/compiler/pipeline-statistics.h",
"src/compiler/pipeline.cc",
@@ -1475,8 +1530,14 @@ v8_source_set("v8_base") {
"src/debug/debug-frames.cc",
"src/debug/debug-frames.h",
"src/debug/debug-interface.h",
"src/debug/debug-scope-iterator.cc",
"src/debug/debug-scope-iterator.h",
"src/debug/debug-scopes.cc",
"src/debug/debug-scopes.h",
"src/debug/debug-stack-trace-iterator.cc",
"src/debug/debug-stack-trace-iterator.h",
"src/debug/debug-type-profile.cc",
"src/debug/debug-type-profile.h",
"src/debug/debug.cc",
"src/debug/debug.h",
"src/debug/interface-types.h",
@@ -1516,6 +1577,7 @@ v8_source_set("v8_base") {
"src/extensions/trigger-failure-extension.h",
"src/external-reference-table.cc",
"src/external-reference-table.h",
"src/factory-inl.h",
"src/factory.cc",
"src/factory.h",
"src/fast-dtoa.cc",
@@ -1529,18 +1591,15 @@ v8_source_set("v8_base") {
"src/field-index.h",
"src/field-type.cc",
"src/field-type.h",
"src/find-and-replace-pattern.h",
"src/fixed-dtoa.cc",
"src/fixed-dtoa.h",
"src/flag-definitions.h",
"src/flags.cc",
"src/flags.h",
"src/float.h",
"src/frame-constants.h",
"src/frames-inl.h",
"src/frames.cc",
"src/frames.h",
"src/full-codegen/full-codegen.cc",
"src/full-codegen/full-codegen.h",
"src/futex-emulation.cc",
"src/futex-emulation.h",
"src/gdb-jit.cc",
@@ -1555,6 +1614,7 @@ v8_source_set("v8_base") {
"src/heap/array-buffer-tracker-inl.h",
"src/heap/array-buffer-tracker.cc",
"src/heap/array-buffer-tracker.h",
"src/heap/barrier.h",
"src/heap/code-stats.cc",
"src/heap/code-stats.h",
"src/heap/concurrent-marking.cc",
@@ -1573,6 +1633,9 @@ v8_source_set("v8_base") {
"src/heap/incremental-marking-job.h",
"src/heap/incremental-marking.cc",
"src/heap/incremental-marking.h",
"src/heap/invalidated-slots-inl.h",
"src/heap/invalidated-slots.cc",
"src/heap/invalidated-slots.h",
"src/heap/item-parallel-job.h",
"src/heap/local-allocator.h",
"src/heap/mark-compact-inl.h",
@@ -1593,8 +1656,6 @@ v8_source_set("v8_base") {
"src/heap/scavenger-inl.h",
"src/heap/scavenger.cc",
"src/heap/scavenger.h",
"src/heap/sequential-marking-deque.cc",
"src/heap/sequential-marking-deque.h",
"src/heap/slot-set.h",
"src/heap/spaces-inl.h",
"src/heap/spaces.cc",
@@ -1610,10 +1671,9 @@ v8_source_set("v8_base") {
"src/ic/handler-compiler.cc",
"src/ic/handler-compiler.h",
"src/ic/handler-configuration-inl.h",
"src/ic/handler-configuration.cc",
"src/ic/handler-configuration.h",
"src/ic/ic-inl.h",
"src/ic/ic-state.cc",
"src/ic/ic-state.h",
"src/ic/ic-stats.cc",
"src/ic/ic-stats.h",
"src/ic/ic.cc",
@@ -1719,6 +1779,9 @@ v8_source_set("v8_base") {
"src/objects.h",
"src/objects/arguments-inl.h",
"src/objects/arguments.h",
"src/objects/bigint-inl.h",
"src/objects/bigint.cc",
"src/objects/bigint.h",
"src/objects/code-cache-inl.h",
"src/objects/code-cache.h",
"src/objects/compilation-cache-inl.h",
@@ -1738,11 +1801,15 @@ v8_source_set("v8_base") {
"src/objects/literal-objects.h",
"src/objects/map-inl.h",
"src/objects/map.h",
"src/objects/module-info.h",
"src/objects/module-inl.h",
"src/objects/module.cc",
"src/objects/module.h",
"src/objects/name-inl.h",
"src/objects/name.h",
"src/objects/object-macros-undef.h",
"src/objects/object-macros.h",
"src/objects/property-descriptor-object-inl.h",
"src/objects/property-descriptor-object.h",
"src/objects/regexp-match-info.h",
"src/objects/scope-info.cc",
"src/objects/scope-info.h",
@@ -1753,14 +1820,16 @@ v8_source_set("v8_base") {
"src/objects/string-inl.h",
"src/objects/string-table.h",
"src/objects/string.h",
"src/objects/template-objects.cc",
"src/objects/template-objects.h",
"src/ostreams.cc",
"src/ostreams.h",
"src/parsing/duplicate-finder.h",
"src/parsing/expression-classifier.h",
"src/parsing/expression-scope-reparenter.cc",
"src/parsing/expression-scope-reparenter.h",
"src/parsing/func-name-inferrer.cc",
"src/parsing/func-name-inferrer.h",
"src/parsing/parameter-initializer-rewriter.cc",
"src/parsing/parameter-initializer-rewriter.h",
"src/parsing/parse-info.cc",
"src/parsing/parse-info.h",
"src/parsing/parser-base.h",
@@ -1844,10 +1913,12 @@ v8_source_set("v8_base") {
"src/regexp/regexp-utils.h",
"src/register-configuration.cc",
"src/register-configuration.h",
"src/reglist.h",
"src/runtime-profiler.cc",
"src/runtime-profiler.h",
"src/runtime/runtime-array.cc",
"src/runtime/runtime-atomics.cc",
"src/runtime/runtime-bigint.cc",
"src/runtime/runtime-classes.cc",
"src/runtime/runtime-collections.cc",
"src/runtime/runtime-compiler.cc",
@@ -1885,13 +1956,22 @@ v8_source_set("v8_base") {
"src/setup-isolate.h",
"src/signature.h",
"src/simulator.h",
"src/small-pointer-list.h",
"src/snapshot/builtin-deserializer.cc",
"src/snapshot/builtin-deserializer.h",
"src/snapshot/builtin-serializer.cc",
"src/snapshot/builtin-serializer.h",
"src/snapshot/code-serializer.cc",
"src/snapshot/code-serializer.h",
"src/snapshot/default-serializer-allocator.cc",
"src/snapshot/default-serializer-allocator.h",
"src/snapshot/deserializer.cc",
"src/snapshot/deserializer.h",
"src/snapshot/natives-common.cc",
"src/snapshot/natives.h",
"src/snapshot/object-deserializer.cc",
"src/snapshot/object-deserializer.h",
"src/snapshot/partial-deserializer.cc",
"src/snapshot/partial-deserializer.h",
"src/snapshot/partial-serializer.cc",
"src/snapshot/partial-serializer.h",
"src/snapshot/serializer-common.cc",
@@ -1902,6 +1982,8 @@ v8_source_set("v8_base") {
"src/snapshot/snapshot-source-sink.cc",
"src/snapshot/snapshot-source-sink.h",
"src/snapshot/snapshot.h",
"src/snapshot/startup-deserializer.cc",
"src/snapshot/startup-deserializer.h",
"src/snapshot/startup-serializer.cc",
"src/snapshot/startup-serializer.h",
"src/source-position-table.cc",
@@ -1981,11 +2063,15 @@ v8_source_set("v8_base") {
"src/wasm/signature-map.h",
"src/wasm/streaming-decoder.cc",
"src/wasm/streaming-decoder.h",
"src/wasm/wasm-api.cc",
"src/wasm/wasm-api.h",
"src/wasm/wasm-code-specialization.cc",
"src/wasm/wasm-code-specialization.h",
"src/wasm/wasm-debug.cc",
"src/wasm/wasm-external-refs.cc",
"src/wasm/wasm-external-refs.h",
"src/wasm/wasm-heap.cc",
"src/wasm/wasm-heap.h",
"src/wasm/wasm-interpreter.cc",
"src/wasm/wasm-interpreter.h",
"src/wasm/wasm-js.cc",
@ -1995,6 +2081,7 @@ v8_source_set("v8_base") {
"src/wasm/wasm-module-builder.h",
"src/wasm/wasm-module.cc",
"src/wasm/wasm-module.h",
"src/wasm/wasm-objects-inl.h",
"src/wasm/wasm-objects.cc",
"src/wasm/wasm-objects.h",
"src/wasm/wasm-opcodes.cc",
@ -2017,6 +2104,22 @@ v8_source_set("v8_base") {
"src/zone/zone.h",
]
if (use_jumbo_build == true) {
jumbo_excluded_sources = [
# TODO(mostynb@opera.com): don't exclude these http://crbug.com/752428
"src/profiler/heap-snapshot-generator.cc", # Macro clash in mman-linux.h
# These source files take an unusually large amount of time to
# compile. Build them separately to avoid bottlenecks.
"src/api.cc",
"src/code-stub-assembler.cc",
"src/elements.cc",
"src/heap/heap.cc",
"src/objects.cc",
"src/parsing/parser.cc",
]
}
if (v8_current_cpu == "x86") {
sources += [ ### gcmole(arch:ia32) ###
"src/compiler/ia32/code-generator-ia32.cc",
@@ -2024,7 +2127,6 @@ v8_source_set("v8_base") {
"src/compiler/ia32/instruction-scheduler-ia32.cc",
"src/compiler/ia32/instruction-selector-ia32.cc",
"src/debug/ia32/debug-ia32.cc",
"src/full-codegen/ia32/full-codegen-ia32.cc",
"src/ia32/assembler-ia32-inl.h",
"src/ia32/assembler-ia32.cc",
"src/ia32/assembler-ia32.h",
@@ -2035,8 +2137,8 @@ v8_source_set("v8_base") {
"src/ia32/cpu-ia32.cc",
"src/ia32/deoptimizer-ia32.cc",
"src/ia32/disasm-ia32.cc",
"src/ia32/frames-ia32.cc",
"src/ia32/frames-ia32.h",
"src/ia32/frame-constants-ia32.cc",
"src/ia32/frame-constants-ia32.h",
"src/ia32/interface-descriptors-ia32.cc",
"src/ia32/macro-assembler-ia32.cc",
"src/ia32/macro-assembler-ia32.h",
@@ -2045,7 +2147,6 @@ v8_source_set("v8_base") {
"src/ia32/sse-instr.h",
"src/ic/ia32/access-compiler-ia32.cc",
"src/ic/ia32/handler-compiler-ia32.cc",
"src/ic/ia32/ic-ia32.cc",
"src/regexp/ia32/regexp-macro-assembler-ia32.cc",
"src/regexp/ia32/regexp-macro-assembler-ia32.h",
]
@@ -2058,10 +2159,8 @@ v8_source_set("v8_base") {
"src/compiler/x64/unwinding-info-writer-x64.cc",
"src/compiler/x64/unwinding-info-writer-x64.h",
"src/debug/x64/debug-x64.cc",
"src/full-codegen/x64/full-codegen-x64.cc",
"src/ic/x64/access-compiler-x64.cc",
"src/ic/x64/handler-compiler-x64.cc",
"src/ic/x64/ic-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.h",
"src/third_party/valgrind/valgrind.h",
@@ -2076,8 +2175,8 @@ v8_source_set("v8_base") {
"src/x64/deoptimizer-x64.cc",
"src/x64/disasm-x64.cc",
"src/x64/eh-frame-x64.cc",
"src/x64/frames-x64.cc",
"src/x64/frames-x64.h",
"src/x64/frame-constants-x64.cc",
"src/x64/frame-constants-x64.h",
"src/x64/interface-descriptors-x64.cc",
"src/x64/macro-assembler-x64.cc",
"src/x64/macro-assembler-x64.h",
@@ -2103,8 +2202,8 @@ v8_source_set("v8_base") {
"src/arm/deoptimizer-arm.cc",
"src/arm/disasm-arm.cc",
"src/arm/eh-frame-arm.cc",
"src/arm/frames-arm.cc",
"src/arm/frames-arm.h",
"src/arm/frame-constants-arm.cc",
"src/arm/frame-constants-arm.h",
"src/arm/interface-descriptors-arm.cc",
"src/arm/interface-descriptors-arm.h",
"src/arm/macro-assembler-arm.cc",
@@ -2118,10 +2217,8 @@ v8_source_set("v8_base") {
"src/compiler/arm/unwinding-info-writer-arm.cc",
"src/compiler/arm/unwinding-info-writer-arm.h",
"src/debug/arm/debug-arm.cc",
"src/full-codegen/arm/full-codegen-arm.cc",
"src/ic/arm/access-compiler-arm.cc",
"src/ic/arm/handler-compiler-arm.cc",
"src/ic/arm/ic-arm.cc",
"src/regexp/arm/regexp-macro-assembler-arm.cc",
"src/regexp/arm/regexp-macro-assembler-arm.h",
]
@@ -2143,8 +2240,9 @@ v8_source_set("v8_base") {
"src/arm64/disasm-arm64.cc",
"src/arm64/disasm-arm64.h",
"src/arm64/eh-frame-arm64.cc",
"src/arm64/frames-arm64.cc",
"src/arm64/frames-arm64.h",
"src/arm64/frame-constants-arm64.cc",
"src/arm64/frame-constants-arm64.h",
"src/arm64/instructions-arm64-constants.cc",
"src/arm64/instructions-arm64.cc",
"src/arm64/instructions-arm64.h",
"src/arm64/instrument-arm64.cc",
@@ -2166,13 +2264,18 @@ v8_source_set("v8_base") {
"src/compiler/arm64/unwinding-info-writer-arm64.cc",
"src/compiler/arm64/unwinding-info-writer-arm64.h",
"src/debug/arm64/debug-arm64.cc",
"src/full-codegen/arm64/full-codegen-arm64.cc",
"src/ic/arm64/access-compiler-arm64.cc",
"src/ic/arm64/handler-compiler-arm64.cc",
"src/ic/arm64/ic-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.h",
]
if (use_jumbo_build) {
jumbo_excluded_sources += [
# TODO(mostynb@opera.com): fix this code so it doesn't need
# to be excluded, see the comments inside.
"src/arm64/instructions-arm64-constants.cc",
]
}
} else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
sources += [ ### gcmole(arch:mipsel) ###
"src/compiler/mips/code-generator-mips.cc",
@@ -2180,10 +2283,8 @@ v8_source_set("v8_base") {
"src/compiler/mips/instruction-scheduler-mips.cc",
"src/compiler/mips/instruction-selector-mips.cc",
"src/debug/mips/debug-mips.cc",
"src/full-codegen/mips/full-codegen-mips.cc",
"src/ic/mips/access-compiler-mips.cc",
"src/ic/mips/handler-compiler-mips.cc",
"src/ic/mips/ic-mips.cc",
"src/mips/assembler-mips-inl.h",
"src/mips/assembler-mips.cc",
"src/mips/assembler-mips.h",
@@ -2196,8 +2297,8 @@ v8_source_set("v8_base") {
"src/mips/cpu-mips.cc",
"src/mips/deoptimizer-mips.cc",
"src/mips/disasm-mips.cc",
"src/mips/frames-mips.cc",
"src/mips/frames-mips.h",
"src/mips/frame-constants-mips.cc",
"src/mips/frame-constants-mips.h",
"src/mips/interface-descriptors-mips.cc",
"src/mips/macro-assembler-mips.cc",
"src/mips/macro-assembler-mips.h",
@@ -2213,10 +2314,8 @@ v8_source_set("v8_base") {
"src/compiler/mips64/instruction-scheduler-mips64.cc",
"src/compiler/mips64/instruction-selector-mips64.cc",
"src/debug/mips64/debug-mips64.cc",
"src/full-codegen/mips64/full-codegen-mips64.cc",
"src/ic/mips64/access-compiler-mips64.cc",
"src/ic/mips64/handler-compiler-mips64.cc",
"src/ic/mips64/ic-mips64.cc",
"src/mips64/assembler-mips64-inl.h",
"src/mips64/assembler-mips64.cc",
"src/mips64/assembler-mips64.h",
@@ -2229,8 +2328,8 @@ v8_source_set("v8_base") {
"src/mips64/cpu-mips64.cc",
"src/mips64/deoptimizer-mips64.cc",
"src/mips64/disasm-mips64.cc",
"src/mips64/frames-mips64.cc",
"src/mips64/frames-mips64.h",
"src/mips64/frame-constants-mips64.cc",
"src/mips64/frame-constants-mips64.h",
"src/mips64/interface-descriptors-mips64.cc",
"src/mips64/macro-assembler-mips64.cc",
"src/mips64/macro-assembler-mips64.h",
@@ -2246,10 +2345,8 @@ v8_source_set("v8_base") {
"src/compiler/ppc/instruction-scheduler-ppc.cc",
"src/compiler/ppc/instruction-selector-ppc.cc",
"src/debug/ppc/debug-ppc.cc",
"src/full-codegen/ppc/full-codegen-ppc.cc",
"src/ic/ppc/access-compiler-ppc.cc",
"src/ic/ppc/handler-compiler-ppc.cc",
"src/ic/ppc/ic-ppc.cc",
"src/ppc/assembler-ppc-inl.h",
"src/ppc/assembler-ppc.cc",
"src/ppc/assembler-ppc.h",
@@ -2262,8 +2359,8 @@ v8_source_set("v8_base") {
"src/ppc/cpu-ppc.cc",
"src/ppc/deoptimizer-ppc.cc",
"src/ppc/disasm-ppc.cc",
"src/ppc/frames-ppc.cc",
"src/ppc/frames-ppc.h",
"src/ppc/frame-constants-ppc.cc",
"src/ppc/frame-constants-ppc.h",
"src/ppc/interface-descriptors-ppc.cc",
"src/ppc/macro-assembler-ppc.cc",
"src/ppc/macro-assembler-ppc.h",
@@ -2279,10 +2376,8 @@ v8_source_set("v8_base") {
"src/compiler/s390/instruction-scheduler-s390.cc",
"src/compiler/s390/instruction-selector-s390.cc",
"src/debug/s390/debug-s390.cc",
"src/full-codegen/s390/full-codegen-s390.cc",
"src/ic/s390/access-compiler-s390.cc",
"src/ic/s390/handler-compiler-s390.cc",
"src/ic/s390/ic-s390.cc",
"src/regexp/s390/regexp-macro-assembler-s390.cc",
"src/regexp/s390/regexp-macro-assembler-s390.h",
"src/s390/assembler-s390-inl.h",
@@ -2297,8 +2392,8 @@ v8_source_set("v8_base") {
"src/s390/cpu-s390.cc",
"src/s390/deoptimizer-s390.cc",
"src/s390/disasm-s390.cc",
"src/s390/frames-s390.cc",
"src/s390/frames-s390.h",
"src/s390/frame-constants-s390.cc",
"src/s390/frame-constants-s390.h",
"src/s390/interface-descriptors-s390.cc",
"src/s390/macro-assembler-s390.cc",
"src/s390/macro-assembler-s390.h",
@@ -2350,7 +2445,7 @@ v8_component("v8_libbase") {
"src/base/atomicops.h",
"src/base/atomicops_internals_atomicword_compat.h",
"src/base/atomicops_internals_portable.h",
"src/base/atomicops_internals_x86_msvc.h",
"src/base/atomicops_internals_std.h",
"src/base/base-export.h",
"src/base/bits.cc",
"src/base/bits.h",
@@ -2401,6 +2496,7 @@ v8_component("v8_libbase") {
"src/base/sys-info.h",
"src/base/template-utils.h",
"src/base/timezone-cache.h",
"src/base/tsan.h",
"src/base/utils/random-number-generator.cc",
"src/base/utils/random-number-generator.h",
]
@@ -2583,7 +2679,7 @@ if (current_toolchain == v8_snapshot_toolchain) {
deps = [
":v8_base",
":v8_builtins_setup",
":v8_init",
":v8_libbase",
":v8_libplatform",
":v8_nosnapshot",
@@ -2696,7 +2792,7 @@ if (is_component_build) {
]
if (v8_use_snapshot) {
public_deps += [ ":v8_builtins_generators" ]
public_deps += [ ":v8_initializers" ]
}
configs = [ ":internal_config" ]
@@ -2722,7 +2818,7 @@ if (is_component_build) {
]
if (v8_use_snapshot) {
public_deps += [ ":v8_builtins_generators" ]
public_deps += [ ":v8_initializers" ]
}
public_configs = [ ":external_config" ]
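
Taken together, the BUILD.gn changes above add four build knobs (v8_enable_fast_mksnapshot, which passes --no-turbo-rewrite-far-jumps and --no-turbo-verify-allocation to mksnapshot; v8_typed_array_max_size_in_heap, the on-heap/off-heap Typed Array threshold defaulting to 64; v8_enable_csa_write_barrier; and a tri-state v8_enable_concurrent_marking that now defaults to true only on x86/x64), rename the v8_builtins_generators and v8_builtins_setup targets to v8_initializers and v8_init, and wire up jumbo-build support. A hedged usage note: these are ordinary GN arguments, set for example by putting v8_enable_fast_mksnapshot = true in the build directory's args.gn.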

deps/v8/ChangeLog (3445)

File diff suppressed because it is too large

deps/v8/DEPS (20)

@@ -8,15 +8,15 @@ vars = {
deps = {
"v8/build":
Var("chromium_url") + "/chromium/src/build.git" + "@" + "1808a907ce42f13b224c263e9843d718fc6d9c39",
Var("chromium_url") + "/chromium/src/build.git" + "@" + "898597f665820182fc979b13c1b2e6de06a608c4",
"v8/tools/gyp":
Var("chromium_url") + "/external/gyp.git" + "@" + "eb296f67da078ec01f5e3a9ea9cdc6d26d680161",
Var("chromium_url") + "/external/gyp.git" + "@" + "d61a9397e668fa9843c4aa7da9e79460fe590bfb",
"v8/third_party/icu":
Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "dfa798fe694702b43a3debc3290761f22b1acaf8",
Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "08cb956852a5ccdba7f9c941728bb833529ba3c6",
"v8/third_party/instrumented_libraries":
Var("chromium_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "644afd349826cb68204226a16c38bde13abe9c3c",
"v8/buildtools":
Var("chromium_url") + "/chromium/buildtools.git" + "@" + "5ad14542a6a74dd914f067b948c5d3e8d170396b",
Var("chromium_url") + "/chromium/buildtools.git" + "@" + "cbc33b9c0a9d1bb913895a4319a742c504a2d541",
"v8/base/trace_event/common":
Var("chromium_url") + "/chromium/src/base/trace_event/common.git" + "@" + "65d1d42a5df6c0a563a6fdfa58a135679185e5d9",
"v8/third_party/jinja2":
@@ -24,7 +24,7 @@ deps = {
"v8/third_party/markupsafe":
Var("chromium_url") + "/chromium/src/third_party/markupsafe.git" + "@" + "8f45f5cfa0009d2a70589bcda0349b8cb2b72783",
"v8/tools/swarming_client":
Var('chromium_url') + '/external/swarming.client.git' + '@' + "a56c2b39ca23bdf41458421a7f825ddbf3f43f28",
Var('chromium_url') + '/infra/luci/client-py.git' + '@' + "5e8001d9a710121ce7a68efd0804430a34b4f9e4",
"v8/testing/gtest":
Var("chromium_url") + "/external/github.com/google/googletest.git" + "@" + "6f8a66431cb592dad629028a50b3dd418a408c87",
"v8/testing/gmock":
@@ -34,21 +34,21 @@ deps = {
"v8/test/mozilla/data":
Var("chromium_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be",
"v8/test/test262/data":
Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "1b911a8f8abf4cb63882cfbe72dcd4c82bb8ad91",
Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "290799bbeeba86245a355894b6ff2bb33d946d9e",
"v8/test/test262/harness":
Var("chromium_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "0f2acdd882c84cff43b9d60df7574a1901e2cdcd",
"v8/tools/clang":
Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "844603c1fcd47f578931b3ccd583e19f816a3842",
Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "cf5e2ed6a9fe35e792587a111a4b2a515deff772",
"v8/test/wasm-js":
Var("chromium_url") + "/external/github.com/WebAssembly/spec.git" + "@" + "aadd3a340c78e53078a7bb6c17cc30f105c2960c",
Var("chromium_url") + "/external/github.com/WebAssembly/spec.git" + "@" + "4f1d3114157e6459d8a06e7d8fcc8fc90288cd85",
}
deps_os = {
"android": {
"v8/third_party/android_tools":
Var("chromium_url") + "/android_tools.git" + "@" + "e9d4018e149d50172ed462a7c21137aa915940ec",
Var("chromium_url") + "/android_tools.git" + "@" + "aadb2fed04af8606545b0afe4e3060bc1a15fad7",
"v8/third_party/catapult":
Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "44b022b2a09508ec025ae76a26308e89deb2cf69",
Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "e3fe21f5029c7d2cd2a83f012375ea9d877733d5",
},
}

deps/v8/Makefile (4)

@@ -63,6 +63,10 @@ endif
ifeq ($(tracemaps), on)
GYPFLAGS += -Dv8_trace_maps=1
endif
# concurrentmarking=on
ifeq ($(concurrentmarking), on)
GYPFLAGS += -Dv8_enable_concurrent_marking=1
endif
# backtrace=off
ifeq ($(backtrace), off)
GYPFLAGS += -Dv8_enable_backtrace=0
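
A usage note: this gives the GYP/Makefile build the same switch, e.g. building with concurrentmarking=on on the make command line (as in "make x64.release concurrentmarking=on", target name illustrative) passes -Dv8_enable_concurrent_marking=1 through GYPFLAGS, just like the tracemaps and backtrace toggles around it.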

deps/v8/OWNERS (2)

@@ -7,7 +7,9 @@ bradnelson@chromium.org
cbruni@chromium.org
clemensh@chromium.org
danno@chromium.org
eholk@chromium.org
franzih@chromium.org
gdeepti@chromium.org
gsathya@chromium.org
hablich@chromium.org
hpayer@chromium.org

deps/v8/PRESUBMIT.py (72)

@@ -153,16 +153,17 @@ def _CheckUnwantedDependencies(input_api, output_api):
return results
# TODO(mstarzinger): Similar checking should be made available as part of
# tools/presubmit.py (note that tools/check-inline-includes.sh exists).
def _CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api):
"""Attempts to prevent inclusion of inline headers into normal header
files. This tries to establish a layering where inline headers can be
included by other inline headers or compilation units only."""
file_inclusion_pattern = r'(?!.+-inl\.h).+\.h'
include_directive_pattern = input_api.re.compile(r'#include ".+-inl.h"')
include_warning = (
'You might be including an inline header (e.g. foo-inl.h) within a\n'
'normal header (e.g. bar.h) file. Can you avoid introducing the\n'
'#include? The commit queue will not block on this warning.')
include_error = (
'You are including an inline header (e.g. foo-inl.h) within a normal\n'
'header (e.g. bar.h) file. This violates layering of dependencies.')
def FilterFile(affected_file):
black_list = (_EXCLUDED_PATHS +
@@ -181,7 +182,7 @@ def _CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api):
'%s:%d\n %s' % (local_path, line_number, line.strip()))
if problems:
return [output_api.PresubmitPromptOrNotify(include_warning, problems)]
return [output_api.PresubmitError(include_error, problems)]
else:
return []
@@ -279,6 +280,7 @@ def _CommonChecks(input_api, output_api):
_CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api))
results.extend(_CheckMissingFiles(input_api, output_api))
results.extend(_CheckJSONFiles(input_api, output_api))
results.extend(_CheckMacroUndefs(input_api, output_api))
return results
@@ -337,6 +339,66 @@ def _CheckJSONFiles(input_api, output_api):
return [output_api.PresubmitError(r) for r in results]
def _CheckMacroUndefs(input_api, output_api):
"""
Checks that each #define in a .cc file is eventually followed by an #undef.
TODO(clemensh): This check should eventually be enabled for all cc files via
tools/presubmit.py (https://crbug.com/v8/6811).
"""
def FilterFile(affected_file):
# Skip header files, as they often define type lists which are used in
# other files.
white_list = (r'.+\.cc',r'.+\.cpp',r'.+\.c')
return input_api.FilterSourceFile(affected_file, white_list=white_list)
def TouchesMacros(f):
for line in f.GenerateScmDiff().splitlines():
if not line.startswith('+') and not line.startswith('-'):
continue
if define_pattern.match(line[1:]) or undef_pattern.match(line[1:]):
return True
return False
define_pattern = input_api.re.compile(r'#define (\w+)')
undef_pattern = input_api.re.compile(r'#undef (\w+)')
errors = []
for f in input_api.AffectedFiles(
file_filter=FilterFile, include_deletes=False):
if not TouchesMacros(f):
continue
defined_macros = dict()
with open(f.LocalPath()) as fh:
line_nr = 0
for line in fh:
line_nr += 1
define_match = define_pattern.match(line)
if define_match:
name = define_match.group(1)
defined_macros[name] = line_nr
undef_match = undef_pattern.match(line)
if undef_match:
name = undef_match.group(1)
if not name in defined_macros:
errors.append('{}:{}: Macro named \'{}\' was not defined before.'
.format(f.LocalPath(), line_nr, name))
else:
del defined_macros[name]
for name, line_nr in sorted(defined_macros.items(), key=lambda e: e[1]):
errors.append('{}:{}: Macro missing #undef: {}'
.format(f.LocalPath(), line_nr, name))
if errors:
return [output_api.PresubmitPromptOrNotify(
'Detected mismatches in #define / #undef in the file(s) where you '
'modified preprocessor macros.',
errors)]
return []
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
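
To make the new check concrete, here is a minimal, hypothetical C++ fragment (all names invented) of the pairing _CheckMacroUndefs enforces; a #define left without a matching #undef by the end of a .cc file is reported as "Macro missing #undef":

    // example.cc (hypothetical)
    enum { kAdd, kSub, kMul };

    #define CASE_OP(name) case k##name:  // defined in the .cc file...

    int IsArithmetic(int op) {
      switch (op) {
        CASE_OP(Add)
        CASE_OP(Sub)
        CASE_OP(Mul)
          return 1;
        default:
          return 0;
      }
    }

    #undef CASE_OP  // ...and undefined before end of file, so the check passes.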

deps/v8/benchmarks/deltablue.js (2)

@@ -790,7 +790,7 @@ Plan.prototype.execute = function () {
* In case 1, the added constraint is stronger than the stay
* constraint and values must propagate down the entire length of the
* chain. In case 2, the added constraint is weaker than the stay
* constraint so it cannot be accomodated. The cost in this case is,
* constraint so it cannot be accommodated. The cost in this case is,
* of course, very low. Typical situations lie somewhere between these
* two extremes.
*/

deps/v8/gni/v8.gni (16)

@@ -2,6 +2,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//build/config/jumbo.gni")
import("//build/config/sanitizers/sanitizers.gni")
import("//build/config/v8_target_cpu.gni")
import("//build/split_static_library.gni")
@@ -109,7 +110,11 @@ template("v8_source_set") {
} else if (defined(v8_static_library) && v8_static_library) {
link_target_type = "static_library"
} else {
link_target_type = "source_set"
if (use_jumbo_build) {
link_target_type = "jumbo_source_set"
} else {
link_target_type = "source_set"
}
}
target(link_target_type, target_name) {
forward_variables_from(invoker, "*", [ "configs" ])
@@ -120,7 +125,7 @@
}
template("v8_header_set") {
source_set(target_name) {
jumbo_source_set(target_name) {
forward_variables_from(invoker, "*", [ "configs" ])
configs += invoker.configs
configs -= v8_remove_configs
@@ -151,14 +156,13 @@ template("v8_executable") {
# reasons.
if (is_clang) {
configs -= [ "//build/config/sanitizers:default_sanitizer_flags" ]
configs += [ "//build/config/sanitizers:default_sanitizer_flags_but_coverage" ]
configs +=
[ "//build/config/sanitizers:default_sanitizer_flags_but_coverage" ]
} else {
configs -= [ v8_path_prefix + ":v8_gcov_coverage_cflags" ]
}
}
deps += [
v8_path_prefix + ":v8_dump_build_config",
]
deps += [ v8_path_prefix + ":v8_dump_build_config" ]
}
}

deps/v8/gypfiles/features.gypi (21)

@@ -29,6 +29,10 @@
{
'variables': {
'variables': {
'v8_target_arch%': '<(target_arch)',
},
'v8_enable_disassembler%': 0,
'v8_promise_internal_field_count%': 0,
@@ -76,6 +80,17 @@
# Temporary flag to allow embedders to update their microtasks scopes.
'v8_check_microtasks_scopes_consistency%': 'false',
# Enable concurrent marking.
'conditions': [
['target_arch=="x64" or target_arch=="ia32"', {
'v8_enable_concurrent_marking%': 1,
},{
'v8_enable_concurrent_marking%': 0,
}]
],
# Controls the threshold for on-heap/off-heap Typed Arrays.
'v8_typed_array_max_size_in_heap%': 64,
},
'target_defaults': {
'conditions': [
@@ -124,6 +139,9 @@
['v8_check_microtasks_scopes_consistency=="true"', {
'defines': ['V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY',],
}],
['v8_enable_concurrent_marking==1', {
'defines': ['V8_CONCURRENT_MARKING',],
}],
], # conditions
'configurations': {
'DebugBaseCommon': {
@@ -139,7 +157,7 @@
}, # Debug
'Release': {
'variables': {
'v8_enable_handle_zapping%': 0,
'v8_enable_handle_zapping%': 1,
},
'conditions': [
['v8_enable_handle_zapping==1', {
@@ -150,6 +168,7 @@
}, # configurations
'defines': [
'V8_GYP_BUILD',
'V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP=<(v8_typed_array_max_size_in_heap)',
], # defines
}, # target_defaults
}

deps/v8/gypfiles/landmine_utils.py (2)

@@ -76,7 +76,7 @@ def distributor():
@memoize()
def platform():
"""
Returns a string representing the platform this build is targetted for.
Returns a string representing the platform this build is targeted for.
Possible values: 'win', 'mac', 'linux', 'ios', 'android'
"""
if 'OS' in gyp_defines():

deps/v8/gypfiles/standalone.gypi (2)

@@ -754,7 +754,7 @@
'-Wno-unused-parameter',
'-pthread',
'-pedantic',
'-Wmissing-field-initializers',
'-Wno-missing-field-initializers',
'-Wno-gnu-zero-variadic-macro-arguments',
],
'cflags_cc': [

deps/v8/include/v8-platform.h (92)

@@ -132,6 +132,15 @@ class Platform {
virtual ~Platform() = default;
/**
* Enables the embedder to respond in cases where V8 can't allocate large
* blocks of memory. V8 retries the failed allocation once after calling this
* method. On success, execution continues; otherwise V8 exits with a fatal
* error.
* Embedder overrides of this function must NOT call back into V8.
*/
virtual void OnCriticalMemoryPressure() {}
/**
* Gets the number of threads that are used to execute background tasks. Is
* used to estimate the number of tasks a work package should be split into.
@@ -195,6 +204,13 @@ class Platform {
* the epoch.
**/
virtual double MonotonicallyIncreasingTime() = 0;
/**
* Current wall-clock time in milliseconds since epoch.
* This function is expected to return at least millisecond-precision values.
*/
virtual double CurrentClockTimeMillis() = 0;
typedef void (*StackTracePrinter)();
/**
@@ -208,79 +224,13 @@
*/
virtual TracingController* GetTracingController() = 0;
// DEPRECATED methods, use TracingController interface instead.
/**
* Called by TRACE_EVENT* macros, don't call this directly.
* The name parameter is a category group for example:
* TRACE_EVENT0("v8,parse", "V8.Parse")
* The pointer returned points to a value with zero or more of the bits
* defined in CategoryGroupEnabledFlags.
**/
virtual const uint8_t* GetCategoryGroupEnabled(const char* name) {
static uint8_t no = 0;
return &no;
}
protected:
/**
* Gets the category group name of the given category_enabled_flag pointer.
* Usually used while serliazing TRACE_EVENTs.
**/
virtual const char* GetCategoryGroupName(
const uint8_t* category_enabled_flag) {
static const char dummy[] = "dummy";
return dummy;
}
/**
* Adds a trace event to the platform tracing system. This function call is
* usually the result of a TRACE_* macro from trace_event_common.h when
* tracing and the category of the particular trace are enabled. It is not
* advisable to call this function on its own; it is really only meant to be
* used by the trace macros. The returned handle can be used by
* UpdateTraceEventDuration to update the duration of COMPLETE events.
* Default implementation of current wall-clock time in milliseconds
* since epoch. Useful for implementing |CurrentClockTimeMillis| if
* nothing special needed.
*/
virtual uint64_t AddTraceEvent(
char phase, const uint8_t* category_enabled_flag, const char* name,
const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
const char** arg_names, const uint8_t* arg_types,
const uint64_t* arg_values, unsigned int flags) {
return 0;
}
/**
* Adds a trace event to the platform tracing system. This function call is
* usually the result of a TRACE_* macro from trace_event_common.h when
* tracing and the category of the particular trace are enabled. It is not
* advisable to call this function on its own; it is really only meant to be
* used by the trace macros. The returned handle can be used by
* UpdateTraceEventDuration to update the duration of COMPLETE events.
*/
virtual uint64_t AddTraceEvent(
char phase, const uint8_t* category_enabled_flag, const char* name,
const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
const char** arg_names, const uint8_t* arg_types,
const uint64_t* arg_values,
std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
unsigned int flags) {
return AddTraceEvent(phase, category_enabled_flag, name, scope, id, bind_id,
num_args, arg_names, arg_types, arg_values, flags);
}
/**
* Sets the duration field of a COMPLETE trace event. It must be called with
* the handle returned from AddTraceEvent().
**/
virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
const char* name, uint64_t handle) {}
typedef v8::TracingController::TraceStateObserver TraceStateObserver;
/** Adds tracing state change observer. */
virtual void AddTraceStateObserver(TraceStateObserver*) {}
/** Removes tracing state change observer. */
virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
static double SystemClockTimeMillis();
};
} // namespace v8
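
A minimal sketch (an illustration, not part of this commit) of the contract behind the newly added pure-virtual CurrentClockTimeMillis(): wall-clock milliseconds since epoch, at millisecond precision or better. An embedder can compute it as below, or simply delegate to the SystemClockTimeMillis() helper this diff introduces:

    #include <chrono>

    // Wall-clock time in milliseconds since the epoch; a possible embedder
    // implementation of v8::Platform::CurrentClockTimeMillis().
    double CurrentClockTimeMillisImpl() {
      using namespace std::chrono;
      return static_cast<double>(
          duration_cast<milliseconds>(system_clock::now().time_since_epoch())
              .count());
    }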

deps/v8/include/v8-profiler.h (7)

@@ -389,7 +389,7 @@ class V8_EXPORT HeapGraphNode {
kRegExp = 6, // RegExp.
kHeapNumber = 7, // Number stored in the heap.
kNative = 8, // Native object (not from V8 heap).
kSynthetic = 9, // Synthetic object, usualy used for grouping
kSynthetic = 9, // Synthetic object, usually used for grouping
// snapshot items together.
kConsString = 10, // Concatenated string. A pair of pointers to strings.
kSlicedString = 11, // Sliced string. A fragment of another string.
@@ -784,7 +784,7 @@ class V8_EXPORT HeapProfiler {
/**
* Returns the sampled profile of allocations allocated (and still live) since
* StartSamplingHeapProfiler was called. The ownership of the pointer is
* transfered to the caller. Returns nullptr if sampling heap profiler is not
* transferred to the caller. Returns nullptr if sampling heap profiler is not
* active.
*/
AllocationProfile* GetAllocationProfile();
@@ -809,9 +809,6 @@
*/
static const uint16_t kPersistentHandleNoClassId = 0;
/** Returns memory used for profiler internal data and snapshots. */
size_t GetProfilerMemorySize();
private:
HeapProfiler();
~HeapProfiler();

deps/v8/include/v8-version.h (6)

@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 6
#define V8_MINOR_VERSION 1
#define V8_BUILD_NUMBER 534
#define V8_PATCH_LEVEL 38
#define V8_MINOR_VERSION 3
#define V8_BUILD_NUMBER 221
#define V8_PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
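
For reference, these macros now expand to version 6.3.221.0 (previously 6.1.534.38), matching the commit subject.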

deps/v8/include/v8.h (237)

@@ -104,6 +104,7 @@ class String;
class StringObject;
class Symbol;
class SymbolObject;
class PrimitiveArray;
class Private;
class Uint32;
class Utils;
@@ -139,6 +140,7 @@ template<typename T> class ReturnValue;
namespace internal {
class Arguments;
class DeferredHandles;
class Heap;
class HeapObject;
class Isolate;
@@ -148,6 +150,10 @@ template<typename T> class CustomArguments;
class PropertyCallbackArguments;
class FunctionCallbackArguments;
class GlobalHandles;
namespace wasm {
class StreamingDecoder;
} // namespace wasm
} // namespace internal
namespace debug {
@@ -977,6 +983,48 @@ class V8_EXPORT Data {
Data();
};
/**
* This is an unfinished experimental feature, and is only exposed
* here for internal testing purposes. DO NOT USE.
*
* A container type that holds relevant metadata for module loading.
*
* This is passed back to the embedder as part of
* HostImportModuleDynamicallyCallback for module loading.
*/
class V8_EXPORT ScriptOrModule {
public:
/**
* The name that was passed by the embedder as ResourceName to the
* ScriptOrigin. This can be either a v8::String or v8::Undefined.
*/
Local<Value> GetResourceName();
/**
* The options that were passed by the embedder as HostDefinedOptions to
* the ScriptOrigin.
*/
Local<PrimitiveArray> GetHostDefinedOptions();
};
/**
* This is an unfinished experimental feature, and is only exposed
* here for internal testing purposes. DO NOT USE.
*
* An array to hold Primitive values. This is used by the embedder to
* pass host defined options to the ScriptOptions during compilation.
*
* This is passed back to the embedder as part of
* HostImportModuleDynamicallyCallback for module loading.
*
*/
class V8_EXPORT PrimitiveArray {
public:
static Local<PrimitiveArray> New(Isolate* isolate, int length);
int Length() const;
void Set(int index, Local<Primitive> item);
Local<Primitive> Get(int index);
};
/**
* The optional attributes of ScriptOrigin.
@@ -1026,13 +1074,15 @@ class ScriptOrigin {
Local<Value> source_map_url = Local<Value>(),
Local<Boolean> resource_is_opaque = Local<Boolean>(),
Local<Boolean> is_wasm = Local<Boolean>(),
Local<Boolean> is_module = Local<Boolean>());
Local<Boolean> is_module = Local<Boolean>(),
Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>());
V8_INLINE Local<Value> ResourceName() const;
V8_INLINE Local<Integer> ResourceLineOffset() const;
V8_INLINE Local<Integer> ResourceColumnOffset() const;
V8_INLINE Local<Integer> ScriptID() const;
V8_INLINE Local<Value> SourceMapUrl() const;
V8_INLINE Local<PrimitiveArray> HostDefinedOptions() const;
V8_INLINE ScriptOriginOptions Options() const { return options_; }
private:
@@ -1042,6 +1092,7 @@ class ScriptOrigin {
ScriptOriginOptions options_;
Local<Integer> script_id_;
Local<Value> source_map_url_;
Local<PrimitiveArray> host_defined_options_;
};
/**
@@ -1169,8 +1220,8 @@ class V8_EXPORT Module {
V8_WARN_UNUSED_RESULT MaybeLocal<Value> Evaluate(Local<Context> context);
/**
* Returns the namespace object of this module. The module must have
* been successfully instantiated before and must not be errored.
* Returns the namespace object of this module.
* The module's status must be kEvaluated.
*/
Local<Value> GetModuleNamespace();
};
@@ -1288,6 +1339,7 @@ class V8_EXPORT ScriptCompiler {
Local<Integer> resource_column_offset;
ScriptOriginOptions resource_options;
Local<Value> source_map_url;
Local<PrimitiveArray> host_defined_options;
// Cached data from previous compilation (if a kConsume*Cache flag is
// set), or hold newly generated cache data (kProduce*Cache flags) are
@@ -1384,6 +1436,7 @@ class V8_EXPORT ScriptCompiler {
kProduceParserCache,
kConsumeParserCache,
kProduceCodeCache,
kProduceFullCodeCache,
kConsumeCodeCache
};
@@ -1725,7 +1778,16 @@ class V8_EXPORT StackFrame {
// A StateTag represents a possible state of the VM.
enum StateTag { JS, GC, COMPILER, OTHER, EXTERNAL, IDLE };
enum StateTag {
JS,
GC,
PARSER,
BYTECODE_COMPILER,
COMPILER,
OTHER,
EXTERNAL,
IDLE
};
// A RegisterState represents the current state of registers used
// by the sampling profiler API.
@@ -1772,7 +1834,7 @@ class V8_EXPORT JSON {
* \return The corresponding string if successfully stringified.
*/
static V8_WARN_UNUSED_RESULT MaybeLocal<String> Stringify(
Local<Context> context, Local<Object> json_object,
Local<Context> context, Local<Value> json_object,
Local<String> gap = Local<String>());
};
@@ -2442,7 +2504,8 @@ enum class NewStringType {
*/
class V8_EXPORT String : public Name {
public:
static const int kMaxLength = (1 << 28) - 16;
static constexpr int kMaxLength =
sizeof(void*) == 4 ? (1 << 28) - 16 : (1 << 30) - 1 - 24;
enum Encoding {
UNKNOWN_ENCODING = 0x1,
@@ -2761,7 +2824,9 @@ class V8_EXPORT String : public Name {
*/
class V8_EXPORT Utf8Value {
public:
explicit Utf8Value(Local<v8::Value> obj);
V8_DEPRECATE_SOON("Use Isolate version",
explicit Utf8Value(Local<v8::Value> obj));
Utf8Value(Isolate* isolate, Local<v8::Value> obj);
~Utf8Value();
char* operator*() { return str_; }
const char* operator*() const { return str_; }
@@ -2784,7 +2849,9 @@ class V8_EXPORT String : public Name {
*/
class V8_EXPORT Value {
public:
explicit Value(Local<v8::Value> obj);
V8_DEPRECATE_SOON("Use Isolate version",
explicit Value(Local<v8::Value> obj));
Value(Isolate* isolate, Local<v8::Value> obj);
~Value();
uint16_t* operator*() { return str_; }
const uint16_t* operator*() const { return str_; }
@@ -4104,12 +4171,10 @@ class V8_EXPORT WasmCompiledModule : public Object {
// supports move semantics, and does not support copy semantics.
class TransferrableModule final {
public:
TransferrableModule(TransferrableModule&& src)
: compiled_code(std::move(src.compiled_code)),
wire_bytes(std::move(src.wire_bytes)) {}
TransferrableModule(TransferrableModule&& src) = default;
TransferrableModule(const TransferrableModule& src) = delete;
TransferrableModule& operator=(TransferrableModule&& src);
TransferrableModule& operator=(TransferrableModule&& src) = default;
TransferrableModule& operator=(const TransferrableModule& src) = delete;
private:
@@ -4166,6 +4231,47 @@ class V8_EXPORT WasmCompiledModule : public Object {
static void CheckCast(Value* obj);
};
// TODO(mtrofin): when streaming compilation is done, we can rename this
// to simply WasmModuleObjectBuilder
class V8_EXPORT WasmModuleObjectBuilderStreaming final {
public:
WasmModuleObjectBuilderStreaming(Isolate* isolate);
// The buffer passed into OnBytesReceived is owned by the caller.
void OnBytesReceived(const uint8_t*, size_t size);
void Finish();
void Abort(Local<Value> exception);
Local<Promise> GetPromise();
~WasmModuleObjectBuilderStreaming();
private:
typedef std::pair<std::unique_ptr<const uint8_t[]>, size_t> Buffer;
WasmModuleObjectBuilderStreaming(const WasmModuleObjectBuilderStreaming&) =
delete;
WasmModuleObjectBuilderStreaming(WasmModuleObjectBuilderStreaming&&) =
default;
WasmModuleObjectBuilderStreaming& operator=(
const WasmModuleObjectBuilderStreaming&) = delete;
WasmModuleObjectBuilderStreaming& operator=(
WasmModuleObjectBuilderStreaming&&) = default;
Isolate* isolate_ = nullptr;
#if V8_CC_MSVC
// We don't need the static Copy API, so the default
// NonCopyablePersistentTraits would be sufficient, however,
// MSVC eagerly instantiates the Copy.
// We ensure we don't use Copy, however, by compiling with the
// defaults everywhere else.
Persistent<Promise, CopyablePersistentTraits<Promise>> promise_;
#else
Persistent<Promise> promise_;
#endif
std::vector<Buffer> received_buffers_;
size_t total_size_ = 0;
std::shared_ptr<internal::wasm::StreamingDecoder> streaming_decoder_;
};
class V8_EXPORT WasmModuleObjectBuilder final {
public:
WasmModuleObjectBuilder(Isolate* isolate) : isolate_(isolate) {}
@@ -4182,11 +4288,9 @@ class V8_EXPORT WasmModuleObjectBuilder final {
// Disable copy semantics *in this implementation*. We can choose to
// relax this, albeit it's not clear why.
WasmModuleObjectBuilder(const WasmModuleObjectBuilder&) = delete;
WasmModuleObjectBuilder(WasmModuleObjectBuilder&& src)
: received_buffers_(std::move(src.received_buffers_)),
total_size_(src.total_size_) {}
WasmModuleObjectBuilder(WasmModuleObjectBuilder&&) = default;
WasmModuleObjectBuilder& operator=(const WasmModuleObjectBuilder&) = delete;
WasmModuleObjectBuilder& operator=(WasmModuleObjectBuilder&&);
WasmModuleObjectBuilder& operator=(WasmModuleObjectBuilder&&) = default;
std::vector<Buffer> received_buffers_;
size_t total_size_ = 0;
@@ -4459,6 +4563,12 @@ class V8_EXPORT ArrayBufferView : public Object {
*/
class V8_EXPORT TypedArray : public ArrayBufferView {
public:
/*
* The largest typed array size that can be constructed using New.
*/
static constexpr size_t kMaxLength =
sizeof(void*) == 4 ? (1u << 30) - 1 : (1u << 31) - 1;
/**
* Number of elements in this typed array
* (e.g. for Int16Array, |ByteLength|/2).
@@ -5069,10 +5179,11 @@ typedef void (*NamedPropertyDeleterCallback)(
Local<String> property,
const PropertyCallbackInfo<Boolean>& info);
/**
* Returns an array containing the names of the properties the named
* property getter intercepts.
*
* Note: The values in the array must be of type v8::Name.
*/
typedef void (*NamedPropertyEnumeratorCallback)(
const PropertyCallbackInfo<Array>& info);
@ -5193,10 +5304,11 @@ typedef void (*GenericNamedPropertyQueryCallback)(
typedef void (*GenericNamedPropertyDeleterCallback)(
Local<Name> property, const PropertyCallbackInfo<Boolean>& info);
/**
* Returns an array containing the names of the properties the named
* property getter intercepts.
*
* Note: The values in the array must be of type v8::Name.
*/
typedef void (*GenericNamedPropertyEnumeratorCallback)(
const PropertyCallbackInfo<Array>& info);
@ -5277,7 +5389,10 @@ typedef void (*IndexedPropertyDeleterCallback)(
const PropertyCallbackInfo<Boolean>& info);
/**
* See `v8::GenericNamedPropertyEnumeratorCallback`.
* Returns an array containing the indices of the properties the indexed
* property getter intercepts.
*
* Note: The values in the array must be uint32_t.
*/
typedef void (*IndexedPropertyEnumeratorCallback)(
const PropertyCallbackInfo<Array>& info);
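A hedged sketch of an enumerator honoring this contract; the callback name
and the indices it reports are purely illustrative:

void ExampleIndexedEnumerator(
    const v8::PropertyCallbackInfo<v8::Array>& info) {
  v8::Isolate* isolate = info.GetIsolate();
  v8::Local<v8::Context> context = isolate->GetCurrentContext();
  v8::Local<v8::Array> result = v8::Array::New(isolate, 2);
  // The values must be uint32_t, as noted above.
  result->Set(context, 0, v8::Integer::NewFromUnsigned(isolate, 0))
      .FromJust();
  result->Set(context, 1, v8::Integer::NewFromUnsigned(isolate, 1))
      .FromJust();
  info.GetReturnValue().Set(result);
}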
@ -6080,6 +6195,9 @@ typedef void (*FatalErrorCallback)(const char* location, const char* message);
typedef void (*OOMErrorCallback)(const char* location, bool is_heap_oom);
typedef void (*DcheckErrorCallback)(const char* file, int line,
const char* message);
typedef void (*MessageCallback)(Local<Message> message, Local<Value> data);
// --- Tracing ---
@ -6150,12 +6268,12 @@ typedef void (*CallCompletedCallback)(Isolate*);
typedef void (*DeprecatedCallCompletedCallback)();
/**
* HostImportDynamicallyCallback is called when we require the
* HostImportModuleDynamicallyCallback is called when we require the
* embedder to load a module. This is used as part of the dynamic
* import syntax.
*
* The referrer is the name of the file which calls the dynamic
* import. The referrer can be used to resolve the module location.
* The referrer contains metadata about the script/module that calls
* import.
*
* The specifier is the name of the module that should be imported.
*
@ -6165,10 +6283,13 @@ typedef void (*DeprecatedCallCompletedCallback)();
* The Promise returned from this function is forwarded to userland
* JavaScript. The embedder must resolve this promise with the module
* namespace object. In case of an exception, the embedder must reject
* this promise with the exception.
* this promise with the exception. If the promise creation itself
* fails (e.g. due to stack overflow), the embedder must propagate
* that exception by returning an empty MaybeLocal.
*/
typedef MaybeLocal<Promise> (*HostImportModuleDynamicallyCallback)(
Local<Context> context, Local<String> referrer, Local<String> specifier);
Local<Context> context, Local<ScriptOrModule> referrer,
Local<String> specifier);
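A sketch of an embedder callback honoring the contract above.
LoadAndEvaluateModule is a hypothetical embedder helper returning the module
namespace object as a MaybeLocal<Value>; everything else uses the public API:

v8::MaybeLocal<v8::Promise> ImportModuleDynamically(
    v8::Local<v8::Context> context, v8::Local<v8::ScriptOrModule> referrer,
    v8::Local<v8::String> specifier) {
  v8::Isolate* isolate = context->GetIsolate();
  v8::Local<v8::Promise::Resolver> resolver;
  if (!v8::Promise::Resolver::New(context).ToLocal(&resolver)) {
    // Promise creation itself failed (e.g. stack overflow): propagate by
    // returning an empty MaybeLocal, as documented above.
    return v8::MaybeLocal<v8::Promise>();
  }
  v8::TryCatch try_catch(isolate);
  v8::Local<v8::Value> module_namespace;
  if (LoadAndEvaluateModule(context, referrer, specifier)  // hypothetical
          .ToLocal(&module_namespace)) {
    resolver->Resolve(context, module_namespace).FromJust();
  } else {
    resolver->Reject(context, try_catch.Exception()).FromJust();
  }
  return resolver->GetPromise();
}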
/**
* PromiseHook with type kInit is called when a new promise is
@ -6291,8 +6412,6 @@ typedef void (*FailedAccessCheckCallback)(Local<Object> target,
* Callback to check if code generation from strings is allowed. See
* Context::AllowCodeGenerationFromStrings.
*/
typedef bool (*DeprecatedAllowCodeGenerationFromStringsCallback)(
Local<Context> context);
typedef bool (*AllowCodeGenerationFromStringsCallback)(Local<Context> context,
Local<String> source);
@ -6503,7 +6622,7 @@ struct JitCodeEvent {
struct line_info_t {
// PC offset
size_t offset;
// Code postion
// Code position
size_t pos;
// The position type.
PositionType position_type;
@ -6782,7 +6901,7 @@ class V8_EXPORT Isolate {
* deserialization. This array and its content must stay valid for the
* entire lifetime of the isolate.
*/
intptr_t* external_references;
const intptr_t* external_references;
/**
* Whether calling Atomics.wait (a function that may block) is allowed in
@ -6928,6 +7047,7 @@ class V8_EXPORT Isolate {
kAssigmentExpressionLHSIsCallInStrict = 37,
kPromiseConstructorReturnedUndefined = 38,
kConstructorNonUndefinedPrimitiveReturn = 39,
kLabeledExpressionStatement = 40,
// If you add new values here, you'll also need to update Chromium's:
// UseCounter.h, V8PerIsolateData.cpp, histograms.xml
@ -7362,8 +7482,8 @@ class V8_EXPORT Isolate {
DeprecatedCallCompletedCallback callback));
/**
* Experimental: Set the PromiseHook callback for various promise
* lifecycle events.
* Set the PromiseHook callback for various promise lifecycle
* events.
*/
void SetPromiseHook(PromiseHook hook);
@ -7580,9 +7700,6 @@ class V8_EXPORT Isolate {
*/
void SetAllowCodeGenerationFromStringsCallback(
AllowCodeGenerationFromStringsCallback callback);
V8_DEPRECATED("Use callback with source parameter.",
void SetAllowCodeGenerationFromStringsCallback(
DeprecatedAllowCodeGenerationFromStringsCallback callback));
/**
* Embedder over{ride|load} injection points for wasm APIs. The expectation
@ -7720,7 +7837,7 @@ typedef bool (*EntropySource)(unsigned char* buffer, size_t length);
* ReturnAddressLocationResolver is used as a callback function when v8 is
* resolving the location of a return address on the stack. Profilers that
* change the return address on the stack can use this to resolve the stack
* location to whereever the profiler stashed the original return address.
* location to wherever the profiler stashed the original return address.
*
* \param return_addr_location A location on stack where a machine
* return address resides.
@ -7743,15 +7860,6 @@ class V8_EXPORT V8 {
"Use isolate version",
void SetFatalErrorHandler(FatalErrorCallback that));
/**
* Set the callback to invoke to check if code generation from
* strings should be allowed.
*/
V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void SetAllowCodeGenerationFromStringsCallback(
DeprecatedAllowCodeGenerationFromStringsCallback that));
/**
* Check if V8 is dead and therefore unusable. This is the case after
* fatal errors such as out-of-memory situations.
@ -7795,6 +7903,9 @@ class V8_EXPORT V8 {
static StartupData WarmUpSnapshotDataBlob(StartupData cold_startup_blob,
const char* warmup_source);
/** Set the callback to invoke in case of Dcheck failures. */
static void SetDcheckErrorHandler(DcheckErrorCallback that);
/**
* Adds a message listener.
*
@ -8147,7 +8258,7 @@ class V8_EXPORT SnapshotCreator {
* \param external_references a null-terminated array of external references
* that must be equivalent to CreateParams::external_references.
*/
SnapshotCreator(intptr_t* external_references = nullptr,
SnapshotCreator(const intptr_t* external_references = nullptr,
StartupData* existing_blob = nullptr);
~SnapshotCreator();
@ -8161,8 +8272,12 @@ class V8_EXPORT SnapshotCreator {
* Set the default context to be included in the snapshot blob.
* The snapshot will not contain the global proxy. We expect either one, or
* a global object template from which to create one, to be provided upon
* deserialization.
*
* \param callback optional callback to serialize internal fields.
*/
void SetDefaultContext(Local<Context> context);
void SetDefaultContext(Local<Context> context,
SerializeInternalFieldsCallback callback =
SerializeInternalFieldsCallback());
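A hedged sketch of the typical flow with the new optional parameter;
SerializeMyFields is a hypothetical embedder-provided callback function:

v8::SnapshotCreator creator;  // no external references, no existing blob
v8::Isolate* isolate = creator.GetIsolate();
{
  v8::HandleScope handle_scope(isolate);
  v8::Local<v8::Context> context = v8::Context::New(isolate);
  creator.SetDefaultContext(
      context, v8::SerializeInternalFieldsCallback(SerializeMyFields));
}
v8::StartupData blob =
    creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);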
/**
* Add additional context to be included in the snapshot blob.
@ -8514,7 +8629,9 @@ class V8_EXPORT Context {
static Local<Context> New(
Isolate* isolate, ExtensionConfiguration* extensions = NULL,
MaybeLocal<ObjectTemplate> global_template = MaybeLocal<ObjectTemplate>(),
MaybeLocal<Value> global_object = MaybeLocal<Value>());
MaybeLocal<Value> global_object = MaybeLocal<Value>(),
DeserializeInternalFieldsCallback internal_fields_deserializer =
DeserializeInternalFieldsCallback());
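On the deserialization side, a matching sketch; DeserializeMyFields is a
hypothetical callback paired with the serializer used at snapshot time:

v8::Local<v8::Context> context = v8::Context::New(
    isolate, nullptr /* extensions */,
    v8::MaybeLocal<v8::ObjectTemplate>(), v8::MaybeLocal<v8::Value>(),
    v8::DeserializeInternalFieldsCallback(DeserializeMyFields));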
/**
* Create a new context from a (non-default) context snapshot. There
@ -8971,11 +9088,11 @@ class Internals {
static const int kNodeIsIndependentShift = 3;
static const int kNodeIsActiveShift = 4;
static const int kJSApiObjectType = 0xbb;
static const int kJSObjectType = 0xbc;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x82;
static const int kForeignType = 0x86;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
static const int kJSApiObjectType = 0xbf;
static const int kJSObjectType = 0xc0;
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
@ -9495,7 +9612,8 @@ ScriptOrigin::ScriptOrigin(Local<Value> resource_name,
Local<Integer> script_id,
Local<Value> source_map_url,
Local<Boolean> resource_is_opaque,
Local<Boolean> is_wasm, Local<Boolean> is_module)
Local<Boolean> is_wasm, Local<Boolean> is_module,
Local<PrimitiveArray> host_defined_options)
: resource_name_(resource_name),
resource_line_offset_(resource_line_offset),
resource_column_offset_(resource_column_offset),
@ -9505,10 +9623,14 @@ ScriptOrigin::ScriptOrigin(Local<Value> resource_name,
!is_wasm.IsEmpty() && is_wasm->IsTrue(),
!is_module.IsEmpty() && is_module->IsTrue()),
script_id_(script_id),
source_map_url_(source_map_url) {}
source_map_url_(source_map_url),
host_defined_options_(host_defined_options) {}
Local<Value> ScriptOrigin::ResourceName() const { return resource_name_; }
Local<PrimitiveArray> ScriptOrigin::HostDefinedOptions() const {
return host_defined_options_;
}
Local<Integer> ScriptOrigin::ResourceLineOffset() const {
return resource_line_offset_;
@ -9525,7 +9647,6 @@ Local<Integer> ScriptOrigin::ScriptID() const { return script_id_; }
Local<Value> ScriptOrigin::SourceMapUrl() const { return source_map_url_; }
ScriptCompiler::Source::Source(Local<String> string, const ScriptOrigin& origin,
CachedData* data)
: source_string(string),
@ -9534,9 +9655,9 @@ ScriptCompiler::Source::Source(Local<String> string, const ScriptOrigin& origin,
resource_column_offset(origin.ResourceColumnOffset()),
resource_options(origin.Options()),
source_map_url(origin.SourceMapUrl()),
host_defined_options(origin.HostDefinedOptions()),
cached_data(data) {}
ScriptCompiler::Source::Source(Local<String> string,
CachedData* data)
: source_string(string), cached_data(data) {}
@ -10225,14 +10346,6 @@ void* Context::GetAlignedPointerFromEmbedderData(int index) {
#endif
}
void V8::SetAllowCodeGenerationFromStringsCallback(
DeprecatedAllowCodeGenerationFromStringsCallback callback) {
Isolate* isolate = Isolate::GetCurrent();
isolate->SetAllowCodeGenerationFromStringsCallback(
reinterpret_cast<AllowCodeGenerationFromStringsCallback>(callback));
}
bool V8::IsDead() {
Isolate* isolate = Isolate::GetCurrent();
return isolate->IsDead();

45
deps/v8/infra/mb/mb_config.pyl

@ -78,6 +78,7 @@
'V8 Win64 - debug': 'gn_debug_x64_minimal_symbols',
# TODO(machenbach): Switch plugins on when errors are fixed.
'V8 Win64 - clang': 'gn_release_x64_clang',
'V8 Win64 ASAN': 'gn_release_x64_asan_no_lsan',
# Mac.
'V8 Mac': 'gn_release_x86',
'V8 Mac - debug': 'gn_debug_x86',
@ -106,6 +107,10 @@
'V8 Random Deopt Fuzzer - debug': 'gn_debug_x86',
},
'client.v8.clusterfuzz': {
'V8 Mac64 ASAN - release builder':
'gn_release_x64_asan_no_lsan_edge_verify_heap',
'V8 Mac64 ASAN - debug builder':
'gn_debug_x64_asan_no_lsan_static_edge',
'V8 Linux64 - release builder': 'gn_release_x64_correctness_fuzzer',
'V8 Linux64 - debug builder': 'gn_debug_x64',
'V8 Linux64 ASAN no inline - release builder':
@ -122,6 +127,9 @@
'gn_release_simulate_arm64_msan_no_origins_edge',
'V8 Linux MSAN chained origins':
'gn_release_simulate_arm64_msan_edge',
'V8 Linux64 UBSan - release builder': 'gn_release_x64_ubsan_recover',
'V8 Linux64 UBSanVptr - release builder':
'gn_release_x64_ubsan_vptr_recover_edge',
},
'client.v8.ports': {
# Arm.
@ -205,6 +213,7 @@
'v8_win_rel_ng': 'gn_release_x86_trybot',
'v8_win_nosnap_shared_rel_ng':
'gn_release_x86_no_snap_shared_minimal_symbols',
'v8_win64_asan_rel_ng': 'gn_release_x64_asan_no_lsan',
'v8_win64_dbg': 'gn_debug_x64_minimal_symbols',
'v8_win64_rel_ng': 'gn_release_x64_trybot',
'v8_mac_rel_ng': 'gn_release_x86_trybot',
@ -375,6 +384,9 @@
'minimal_symbols', 'swarming'],
'gn_release_x64_asan_no_lsan': [
'gn', 'release_bot', 'x64', 'asan', 'swarming'],
'gn_release_x64_asan_no_lsan_edge_verify_heap': [
'gn', 'release_bot', 'x64', 'asan', 'edge', 'swarming',
'v8_verify_heap'],
'gn_release_x64_asan_symbolized_edge_verify_heap': [
'gn', 'release_bot', 'x64', 'asan', 'edge', 'lsan', 'symbolized',
'v8_verify_heap'],
@ -389,7 +401,7 @@
'gn_release_x64_correctness_fuzzer' : [
'gn', 'release_bot', 'x64', 'v8_correctness_fuzzer'],
'gn_release_x64_gcc_coverage': [
'gn', 'release_bot', 'x64', 'coverage', 'gcc', 'no_custom_libcxx'],
'gn', 'release_bot', 'x64', 'coverage', 'gcc'],
'gn_release_x64_internal': [
'gn', 'release_bot', 'x64', 'swarming', 'v8_snapshot_internal'],
'gn_release_x64_minimal_symbols': [
@ -406,10 +418,14 @@
'minimal_symbols', 'swarming'],
'gn_release_x64_tsan_minimal_symbols': [
'gn', 'release_bot', 'x64', 'tsan', 'minimal_symbols', 'swarming'],
'gn_release_x64_ubsan_recover': [
'gn', 'release_bot', 'x64', 'ubsan_recover', 'swarming'],
'gn_release_x64_ubsan_vptr': [
'gn', 'release_bot', 'x64', 'ubsan_vptr'],
'gn', 'release_bot', 'x64', 'ubsan_vptr', 'swarming'],
'gn_release_x64_ubsan_vptr_recover_edge': [
'gn', 'release_bot', 'x64', 'edge', 'ubsan_vptr_recover', 'swarming'],
'gn_release_x64_ubsan_vptr_minimal_symbols': [
'gn', 'release_bot', 'x64', 'ubsan_vptr', 'minimal_symbols'],
'gn', 'release_bot', 'x64', 'ubsan_vptr', 'minimal_symbols', 'swarming'],
'gn_release_x64_valgrind': [
'gn', 'release_bot', 'x64', 'swarming', 'valgrind',
'no_custom_libcxx'],
@ -425,10 +441,13 @@
'gn', 'debug_bot', 'x64', 'swarming'],
'gn_debug_x64_asan_edge': [
'gn', 'debug_bot', 'x64', 'asan', 'lsan', 'edge'],
'gn_debug_x64_asan_no_lsan_static_edge': [
'gn', 'debug', 'static', 'goma', 'v8_enable_slow_dchecks',
'v8_optimized_debug', 'x64', 'asan', 'edge', 'swarming'],
'gn_debug_x64_custom': [
'gn', 'debug_bot', 'x64', 'swarming', 'v8_snapshot_custom'],
'gn_debug_x64_gcc': [
'gn', 'debug_bot', 'x64', 'gcc', 'no_custom_libcxx'],
'gn', 'debug_bot', 'x64', 'gcc'],
'gn_debug_x64_minimal_symbols': [
'gn', 'debug_bot', 'x64', 'minimal_symbols', 'swarming'],
'gn_debug_x64_trybot': [
@ -462,10 +481,9 @@
'gn_release_x86_disassembler': [
'gn', 'release_bot', 'x86', 'v8_enable_disassembler'],
'gn_release_x86_gcc': [
'gn', 'release_bot', 'x86', 'gcc', 'no_custom_libcxx'],
'gn', 'release_bot', 'x86', 'gcc'],
'gn_release_x86_gcc_minimal_symbols': [
'gn', 'release_bot', 'x86', 'gcc', 'minimal_symbols',
'no_custom_libcxx'],
'gn', 'release_bot', 'x86', 'gcc', 'minimal_symbols'],
'gn_release_x86_gcmole': [
'gn', 'release_bot', 'x86', 'gcmole', 'swarming'],
'gn_release_x86_gcmole_trybot': [
@ -591,7 +609,8 @@
},
'gcc': {
'gn_args': 'is_clang=false use_sysroot=false',
# TODO(machenbach): Remove cxx11 restriction when updating gcc version.
'gn_args': 'is_clang=false use_cxx11=true',
'gyp_defines': 'clang=0',
},
@ -726,6 +745,11 @@
'gyp_defines': 'clang=1 tsan=1',
},
'ubsan_recover': {
# Ubsan with recovery.
'gn_args': 'is_ubsan=true is_ubsan_no_recover=false',
},
'ubsan_vptr': {
# TODO(krasin): Remove is_ubsan_no_recover=true when
# https://llvm.org/bugs/show_bug.cgi?id=25569 is fixed and just use
@ -733,6 +757,11 @@
'gn_args': 'is_ubsan_vptr=true is_ubsan_no_recover=true',
},
'ubsan_vptr_recover': {
# Ubsan vptr with recovery.
'gn_args': 'is_ubsan_vptr=true is_ubsan_no_recover=false',
},
'valgrind': {
'gn_args': 'v8_has_valgrind=true',
'gyp_defines': 'has_valgrind=1',

42
deps/v8/samples/hello-world.cc

@ -9,53 +9,53 @@
#include "include/libplatform/libplatform.h"
#include "include/v8.h"
using namespace v8;
int main(int argc, char* argv[]) {
// Initialize V8.
V8::InitializeICUDefaultLocation(argv[0]);
V8::InitializeExternalStartupData(argv[0]);
Platform* platform = platform::CreateDefaultPlatform();
V8::InitializePlatform(platform);
V8::Initialize();
v8::V8::InitializeICUDefaultLocation(argv[0]);
v8::V8::InitializeExternalStartupData(argv[0]);
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
v8::V8::Initialize();
// Create a new Isolate and make it the current one.
Isolate::CreateParams create_params;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator =
v8::ArrayBuffer::Allocator::NewDefaultAllocator();
Isolate* isolate = Isolate::New(create_params);
v8::Isolate* isolate = v8::Isolate::New(create_params);
{
Isolate::Scope isolate_scope(isolate);
v8::Isolate::Scope isolate_scope(isolate);
// Create a stack-allocated handle scope.
HandleScope handle_scope(isolate);
v8::HandleScope handle_scope(isolate);
// Create a new context.
Local<Context> context = Context::New(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
// Enter the context for compiling and running the hello world script.
Context::Scope context_scope(context);
v8::Context::Scope context_scope(context);
// Create a string containing the JavaScript source code.
Local<String> source =
String::NewFromUtf8(isolate, "'Hello' + ', World!'",
NewStringType::kNormal).ToLocalChecked();
v8::Local<v8::String> source =
v8::String::NewFromUtf8(isolate, "'Hello' + ', World!'",
v8::NewStringType::kNormal)
.ToLocalChecked();
// Compile the source code.
Local<Script> script = Script::Compile(context, source).ToLocalChecked();
v8::Local<v8::Script> script =
v8::Script::Compile(context, source).ToLocalChecked();
// Run the script to get the result.
Local<Value> result = script->Run(context).ToLocalChecked();
v8::Local<v8::Value> result = script->Run(context).ToLocalChecked();
// Convert the result to a UTF-8 string and print it.
String::Utf8Value utf8(result);
v8::String::Utf8Value utf8(isolate, result);
printf("%s\n", *utf8);
}
// Dispose the isolate and tear down V8.
isolate->Dispose();
V8::Dispose();
V8::ShutdownPlatform();
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
delete platform;
delete create_params.array_buffer_allocator;
return 0;

53
deps/v8/samples/process.cc

@ -35,8 +35,30 @@
#include <map>
#include <string>
using namespace std;
using namespace v8;
using std::map;
using std::pair;
using std::string;
using v8::Context;
using v8::EscapableHandleScope;
using v8::External;
using v8::Function;
using v8::FunctionTemplate;
using v8::Global;
using v8::HandleScope;
using v8::Isolate;
using v8::Local;
using v8::MaybeLocal;
using v8::Name;
using v8::NamedPropertyHandlerConfiguration;
using v8::NewStringType;
using v8::Object;
using v8::ObjectTemplate;
using v8::PropertyCallbackInfo;
using v8::Script;
using v8::String;
using v8::TryCatch;
using v8::Value;
// These interfaces represent an existing request processing interface.
// The idea is to imagine a real application that uses these interfaces
@ -144,9 +166,10 @@ class JsHttpRequestProcessor : public HttpRequestProcessor {
static void LogCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() < 1) return;
HandleScope scope(args.GetIsolate());
Isolate* isolate = args.GetIsolate();
HandleScope scope(isolate);
Local<Value> arg = args[0];
String::Utf8Value value(arg);
String::Utf8Value value(isolate, arg);
HttpRequestProcessor::Log(*value);
}
@ -221,7 +244,7 @@ bool JsHttpRequestProcessor::ExecuteScript(Local<String> script) {
// Compile the script and check for errors.
Local<Script> compiled_script;
if (!Script::Compile(context, script).ToLocal(&compiled_script)) {
String::Utf8Value error(try_catch.Exception());
String::Utf8Value error(GetIsolate(), try_catch.Exception());
Log(*error);
// The script failed to compile; bail out.
return false;
@ -231,11 +254,12 @@ bool JsHttpRequestProcessor::ExecuteScript(Local<String> script) {
Local<Value> result;
if (!compiled_script->Run(context).ToLocal(&result)) {
// The TryCatch above is still in effect and will have caught the error.
String::Utf8Value error(try_catch.Exception());
String::Utf8Value error(GetIsolate(), try_catch.Exception());
Log(*error);
// Running the script failed; bail out.
return false;
}
return true;
}
@ -295,17 +319,16 @@ bool JsHttpRequestProcessor::Process(HttpRequest* request) {
v8::Local<v8::Function>::New(GetIsolate(), process_);
Local<Value> result;
if (!process->Call(context, context->Global(), argc, argv).ToLocal(&result)) {
String::Utf8Value error(try_catch.Exception());
String::Utf8Value error(GetIsolate(), try_catch.Exception());
Log(*error);
return false;
} else {
return true;
}
return true;
}
JsHttpRequestProcessor::~JsHttpRequestProcessor() {
// Dispose the persistent handles. When noone else has any
// Dispose the persistent handles. When no one else has any
// references to the objects stored in the handles they will be
// automatically reclaimed.
context_.Reset();
@ -366,8 +389,8 @@ map<string, string>* JsHttpRequestProcessor::UnwrapMap(Local<Object> obj) {
// Convert a JavaScript string to a std::string. To avoid dealing with
// string encodings we just use ASCII.
string ObjectToString(Local<Value> value) {
String::Utf8Value utf8_value(value);
string ObjectToString(v8::Isolate* isolate, Local<Value> value) {
String::Utf8Value utf8_value(isolate, value);
return string(*utf8_value);
}
@ -380,7 +403,7 @@ void JsHttpRequestProcessor::MapGet(Local<Name> name,
map<string, string>* obj = UnwrapMap(info.Holder());
// Convert the JavaScript string to a std::string.
string key = ObjectToString(Local<String>::Cast(name));
string key = ObjectToString(info.GetIsolate(), Local<String>::Cast(name));
// Look up the value if it exists using the standard STL idiom.
map<string, string>::iterator iter = obj->find(key);
@ -405,8 +428,8 @@ void JsHttpRequestProcessor::MapSet(Local<Name> name, Local<Value> value_obj,
map<string, string>* obj = UnwrapMap(info.Holder());
// Convert the key and value to std::strings.
string key = ObjectToString(Local<String>::Cast(name));
string value = ObjectToString(value_obj);
string key = ObjectToString(info.GetIsolate(), Local<String>::Cast(name));
string value = ObjectToString(info.GetIsolate(), value_obj);
// Update the map.
(*obj)[key] = value;

19
deps/v8/samples/shell.cc

@ -147,7 +147,7 @@ void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
} else {
printf(" ");
}
v8::String::Utf8Value str(args[i]);
v8::String::Utf8Value str(args.GetIsolate(), args[i]);
const char* cstr = ToCString(str);
printf("%s", cstr);
}
@ -166,7 +166,7 @@ void Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::NewStringType::kNormal).ToLocalChecked());
return;
}
v8::String::Utf8Value file(args[0]);
v8::String::Utf8Value file(args.GetIsolate(), args[0]);
if (*file == NULL) {
args.GetIsolate()->ThrowException(
v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file",
@ -180,17 +180,17 @@ void Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::NewStringType::kNormal).ToLocalChecked());
return;
}
args.GetReturnValue().Set(source);
}
// The callback that is invoked by v8 whenever the JavaScript 'load'
// function is called. Loads, compiles and executes its argument
// JavaScript file.
void Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
for (int i = 0; i < args.Length(); i++) {
v8::HandleScope handle_scope(args.GetIsolate());
v8::String::Utf8Value file(args[i]);
v8::String::Utf8Value file(args.GetIsolate(), args[i]);
if (*file == NULL) {
args.GetIsolate()->ThrowException(
v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file",
@ -361,7 +361,7 @@ bool ExecuteString(v8::Isolate* isolate, v8::Local<v8::String> source,
if (print_result && !result->IsUndefined()) {
// If all went well and the result wasn't undefined then print
// the returned value.
v8::String::Utf8Value str(result);
v8::String::Utf8Value str(isolate, result);
const char* cstr = ToCString(str);
printf("%s\n", cstr);
}
@ -373,7 +373,7 @@ bool ExecuteString(v8::Isolate* isolate, v8::Local<v8::String> source,
void ReportException(v8::Isolate* isolate, v8::TryCatch* try_catch) {
v8::HandleScope handle_scope(isolate);
v8::String::Utf8Value exception(try_catch->Exception());
v8::String::Utf8Value exception(isolate, try_catch->Exception());
const char* exception_string = ToCString(exception);
v8::Local<v8::Message> message = try_catch->Message();
if (message.IsEmpty()) {
@ -382,14 +382,15 @@ void ReportException(v8::Isolate* isolate, v8::TryCatch* try_catch) {
fprintf(stderr, "%s\n", exception_string);
} else {
// Print (filename):(line number): (message).
v8::String::Utf8Value filename(message->GetScriptOrigin().ResourceName());
v8::String::Utf8Value filename(isolate,
message->GetScriptOrigin().ResourceName());
v8::Local<v8::Context> context(isolate->GetCurrentContext());
const char* filename_string = ToCString(filename);
int linenum = message->GetLineNumber(context).FromJust();
fprintf(stderr, "%s:%i: %s\n", filename_string, linenum, exception_string);
// Print line of source code.
v8::String::Utf8Value sourceline(
message->GetSourceLine(context).ToLocalChecked());
isolate, message->GetSourceLine(context).ToLocalChecked());
const char* sourceline_string = ToCString(sourceline);
fprintf(stderr, "%s\n", sourceline_string);
// Print wavy underline (GetUnderline is deprecated).
@ -406,7 +407,7 @@ void ReportException(v8::Isolate* isolate, v8::TryCatch* try_catch) {
if (try_catch->StackTrace(context).ToLocal(&stack_trace_string) &&
stack_trace_string->IsString() &&
v8::Local<v8::String>::Cast(stack_trace_string)->Length() > 0) {
v8::String::Utf8Value stack_trace(stack_trace_string);
v8::String::Utf8Value stack_trace(isolate, stack_trace_string);
const char* stack_trace_string = ToCString(stack_trace);
fprintf(stderr, "%s\n", stack_trace_string);
}
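The recurring change across these samples is the String::Utf8Value
constructor, which now takes the isolate explicitly. A minimal sketch of the
new form, assuming |isolate| and |value| are in scope:

v8::String::Utf8Value utf8(isolate, value);
if (*utf8 != nullptr) printf("%s\n", *utf8);  // *utf8 is NULL on failure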

105
deps/v8/src/accessors.cc

@ -11,7 +11,6 @@
#include "src/factory.h"
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
#include "src/list-inl.h"
#include "src/messages.h"
#include "src/property-details.h"
#include "src/prototype.h"
@ -105,6 +104,8 @@ void Accessors::ReconfigureToDataProperty(
v8::Local<v8::Name> key, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<v8::Boolean>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope stats_scope(
isolate, &RuntimeCallStats::ReconfigureToDataProperty);
HandleScope scope(isolate);
Handle<Object> receiver = Utils::OpenHandle(*info.This());
Handle<JSObject> holder =
@ -152,8 +153,7 @@ void Accessors::ArrayLengthGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(
isolate, &RuntimeCallStats::AccessorNameGetterCallback_ArrayLength);
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::ArrayLengthGetter);
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
JSArray* holder = JSArray::cast(*Utils::OpenHandle(*info.Holder()));
@ -165,6 +165,7 @@ void Accessors::ArrayLengthSetter(
v8::Local<v8::Name> name, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<v8::Boolean>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::ArrayLengthSetter);
HandleScope scope(isolate);
DCHECK(Utils::OpenHandle(*name)->SameValue(isolate->heap()->length_string()));
@ -282,8 +283,7 @@ void Accessors::StringLengthGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(
isolate, &RuntimeCallStats::AccessorNameGetterCallback_StringLength);
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::StringLengthGetter);
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
@ -681,8 +681,8 @@ void Accessors::FunctionPrototypeGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(
isolate, &RuntimeCallStats::AccessorNameGetterCallback_FunctionPrototype);
RuntimeCallTimerScope timer(isolate,
&RuntimeCallStats::FunctionPrototypeGetter);
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
@ -694,6 +694,8 @@ void Accessors::FunctionPrototypeSetter(
v8::Local<v8::Name> name, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<v8::Boolean>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(isolate,
&RuntimeCallStats::FunctionPrototypeSetter);
HandleScope scope(isolate);
Handle<Object> value = Utils::OpenHandle(*val);
Handle<JSFunction> object =
@ -779,7 +781,7 @@ static Handle<Object> ArgumentsForInlinedFunction(
Factory* factory = isolate->factory();
TranslatedState translated_values(frame);
translated_values.Prepare(false, frame->fp());
translated_values.Prepare(frame->fp());
int argument_count = 0;
TranslatedFrame* translated_frame =
@ -819,11 +821,12 @@ static Handle<Object> ArgumentsForInlinedFunction(
static int FindFunctionInFrame(JavaScriptFrame* frame,
Handle<JSFunction> function) {
DisallowHeapAllocation no_allocation;
List<FrameSummary> frames(2);
std::vector<FrameSummary> frames;
frame->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0; i--) {
if (*frames[i].AsJavaScript().function() == *function) return i;
for (size_t i = frames.size(); i != 0; i--) {
if (*frames[i - 1].AsJavaScript().function() == *function) {
return static_cast<int>(i) - 1;
}
}
return -1;
}
@ -848,7 +851,10 @@ Handle<Object> GetFunctionArguments(Isolate* isolate,
}
// Find the frame that holds the actual arguments passed to the function.
it.AdvanceToArgumentsFrame();
if (it.frame()->has_adapted_arguments()) {
it.AdvanceOneFrame();
DCHECK(it.frame()->is_arguments_adaptor());
}
frame = it.frame();
// Get the number of arguments and construct an arguments object
@ -926,82 +932,78 @@ static inline bool AllowAccessToFunction(Context* current_context,
class FrameFunctionIterator {
public:
FrameFunctionIterator(Isolate* isolate, const DisallowHeapAllocation& promise)
: isolate_(isolate), frame_iterator_(isolate), frames_(2), index_(0) {
explicit FrameFunctionIterator(Isolate* isolate)
: isolate_(isolate), frame_iterator_(isolate) {
GetFrames();
}
JSFunction* next() {
MaybeHandle<JSFunction> next() {
while (true) {
if (frames_.length() == 0) return NULL;
JSFunction* next_function = *frames_[index_].AsJavaScript().function();
index_--;
if (index_ < 0) {
if (frames_.empty()) return MaybeHandle<JSFunction>();
Handle<JSFunction> next_function =
frames_.back().AsJavaScript().function();
frames_.pop_back();
if (frames_.empty()) {
GetFrames();
}
// Skip functions from other origins.
if (!AllowAccessToFunction(isolate_->context(), next_function)) continue;
if (!AllowAccessToFunction(isolate_->context(), *next_function)) continue;
return next_function;
}
}
// Iterate through functions until the first occurence of 'function'.
// Iterate through functions until the first occurrence of 'function'.
// Returns true if 'function' is found, and false if the iterator ends
// without finding it.
bool Find(JSFunction* function) {
JSFunction* next_function;
bool Find(Handle<JSFunction> function) {
Handle<JSFunction> next_function;
do {
next_function = next();
if (next_function == function) return true;
} while (next_function != NULL);
return false;
if (!next().ToHandle(&next_function)) return false;
} while (!next_function.is_identical_to(function));
return true;
}
private:
void GetFrames() {
frames_.Rewind(0);
DCHECK(frames_.empty());
if (frame_iterator_.done()) return;
JavaScriptFrame* frame = frame_iterator_.frame();
frame->Summarize(&frames_);
DCHECK(frames_.length() > 0);
DCHECK(!frames_.empty());
frame_iterator_.Advance();
index_ = frames_.length() - 1;
}
Isolate* isolate_;
JavaScriptFrameIterator frame_iterator_;
List<FrameSummary> frames_;
int index_;
std::vector<FrameSummary> frames_;
};
MaybeHandle<JSFunction> FindCaller(Isolate* isolate,
Handle<JSFunction> function) {
DisallowHeapAllocation no_allocation;
FrameFunctionIterator it(isolate, no_allocation);
FrameFunctionIterator it(isolate);
if (function->shared()->native()) {
return MaybeHandle<JSFunction>();
}
// Find the function from the frames.
if (!it.Find(*function)) {
if (!it.Find(function)) {
// No frame corresponding to the given function found. Return null.
return MaybeHandle<JSFunction>();
}
// Find previously called non-toplevel function.
JSFunction* caller;
Handle<JSFunction> caller;
do {
caller = it.next();
if (caller == NULL) return MaybeHandle<JSFunction>();
if (!it.next().ToHandle(&caller)) return MaybeHandle<JSFunction>();
} while (caller->shared()->is_toplevel());
// If caller is not user code and caller's caller is also not user code,
// use that instead.
JSFunction* potential_caller = caller;
while (potential_caller != NULL &&
!potential_caller->shared()->IsUserJavaScript()) {
caller = potential_caller;
MaybeHandle<JSFunction> potential_caller = caller;
while (!potential_caller.is_null() &&
!potential_caller.ToHandleChecked()->shared()->IsUserJavaScript()) {
caller = potential_caller.ToHandleChecked();
potential_caller = it.next();
}
if (!caller->shared()->native() && potential_caller != NULL) {
caller = potential_caller;
if (!caller->shared()->native() && !potential_caller.is_null()) {
caller = potential_caller.ToHandleChecked();
}
// Censor if the caller is not a sloppy mode function.
// Change from ES5, which used to throw, see:
@ -1010,10 +1012,10 @@ MaybeHandle<JSFunction> FindCaller(Isolate* isolate,
return MaybeHandle<JSFunction>();
}
// Don't return caller from another security context.
if (!AllowAccessToFunction(isolate->context(), caller)) {
if (!AllowAccessToFunction(isolate->context(), *caller)) {
return MaybeHandle<JSFunction>();
}
return Handle<JSFunction>(caller);
return caller;
}
@ -1051,9 +1053,8 @@ Handle<AccessorInfo> Accessors::FunctionCallerInfo(
void Accessors::BoundFunctionLengthGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(
isolate,
&RuntimeCallStats::AccessorNameGetterCallback_BoundFunctionLength);
RuntimeCallTimerScope timer(isolate,
&RuntimeCallStats::BoundFunctionLengthGetter);
HandleScope scope(isolate);
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
@ -1088,8 +1089,8 @@ Handle<AccessorInfo> Accessors::BoundFunctionLengthInfo(
void Accessors::BoundFunctionNameGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(
isolate, &RuntimeCallStats::AccessorNameGetterCallback_BoundFunctionName);
RuntimeCallTimerScope timer(isolate,
&RuntimeCallStats::BoundFunctionNameGetter);
HandleScope scope(isolate);
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));

25
deps/v8/src/address-map.h

@ -84,6 +84,11 @@ class SerializerReference {
ValueIndexBits::encode(index));
}
static SerializerReference OffHeapBackingStoreReference(uint32_t index) {
return SerializerReference(SpaceBits::encode(kExternalSpace) |
ValueIndexBits::encode(index));
}
static SerializerReference LargeObjectReference(uint32_t index) {
return SerializerReference(SpaceBits::encode(LO_SPACE) |
ValueIndexBits::encode(index));
@ -119,6 +124,15 @@ class SerializerReference {
return ValueIndexBits::decode(bitfield_);
}
bool is_off_heap_backing_store_reference() const {
return SpaceBits::decode(bitfield_) == kExternalSpace;
}
uint32_t off_heap_backing_store_index() const {
DCHECK(is_off_heap_backing_store_reference());
return ValueIndexBits::decode(bitfield_);
}
uint32_t large_object_index() const {
DCHECK(is_back_reference());
return ValueIndexBits::decode(bitfield_);
@ -160,6 +174,8 @@ class SerializerReference {
// [ kSpecialValueSpace ] [ Special value index ]
// Attached reference
// [ kAttachedReferenceSpace ] [ Attached reference index ]
// External
// [ kExternalSpace ] [ External reference index ]
static const int kChunkOffsetSize = kPageSizeBits - kObjectAlignmentBits;
static const int kChunkIndexSize = 32 - kChunkOffsetSize - kSpaceTagSize;
@ -167,7 +183,8 @@ class SerializerReference {
static const int kSpecialValueSpace = LAST_SPACE + 1;
static const int kAttachedReferenceSpace = kSpecialValueSpace + 1;
STATIC_ASSERT(kAttachedReferenceSpace < (1 << kSpaceTagSize));
static const int kExternalSpace = kAttachedReferenceSpace + 1;
STATIC_ASSERT(kExternalSpace < (1 << kSpaceTagSize));
static const int kInvalidValue = 0;
static const int kDummyValue = 1;
@ -193,13 +210,13 @@ class SerializerReferenceMap {
SerializerReferenceMap()
: no_allocation_(), map_(), attached_reference_index_(0) {}
SerializerReference Lookup(HeapObject* obj) {
SerializerReference Lookup(void* obj) {
Maybe<uint32_t> maybe_index = map_.Get(obj);
return maybe_index.IsJust() ? SerializerReference(maybe_index.FromJust())
: SerializerReference();
}
void Add(HeapObject* obj, SerializerReference b) {
void Add(void* obj, SerializerReference b) {
DCHECK(b.is_valid());
DCHECK(map_.Get(obj).IsNothing());
map_.Set(obj, b.bitfield_);
@ -214,7 +231,7 @@ class SerializerReferenceMap {
private:
DisallowHeapAllocation no_allocation_;
HeapObjectToIndexHashMap map_;
PointerToIndexHashMap<void*> map_;
int attached_reference_index_;
DISALLOW_COPY_AND_ASSIGN(SerializerReferenceMap);
};

251
deps/v8/src/allocation.cc

@ -6,8 +6,11 @@
#include <stdlib.h> // For free, malloc.
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/base/utils/random-number-generator.h"
#include "src/flags.h"
#include "src/utils.h"
#include "src/v8.h"
@ -18,10 +21,32 @@
namespace v8 {
namespace internal {
namespace {
void* AlignedAllocInternal(size_t size, size_t alignment) {
void* ptr;
#if V8_OS_WIN
ptr = _aligned_malloc(size, alignment);
#elif V8_LIBC_BIONIC
// posix_memalign is not exposed in some Android versions, so we fall back to
// memalign. See http://code.google.com/p/android/issues/detail?id=35391.
ptr = memalign(alignment, size);
#else
if (posix_memalign(&ptr, alignment, size)) ptr = nullptr;
#endif
return ptr;
}
} // namespace
void* Malloced::New(size_t size) {
void* result = malloc(size);
if (result == NULL) {
V8::FatalProcessOutOfMemory("Malloced operator new");
if (result == nullptr) {
V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
result = malloc(size);
if (result == nullptr) {
V8::FatalProcessOutOfMemory("Malloced operator new");
}
}
return result;
}
@ -54,17 +79,14 @@ char* StrNDup(const char* str, int n) {
void* AlignedAlloc(size_t size, size_t alignment) {
DCHECK_LE(V8_ALIGNOF(void*), alignment);
DCHECK(base::bits::IsPowerOfTwo(alignment));
void* ptr;
#if V8_OS_WIN
ptr = _aligned_malloc(size, alignment);
#elif V8_LIBC_BIONIC
// posix_memalign is not exposed in some Android versions, so we fall back to
// memalign. See http://code.google.com/p/android/issues/detail?id=35391.
ptr = memalign(alignment, size);
#else
if (posix_memalign(&ptr, alignment, size)) ptr = NULL;
#endif
if (ptr == NULL) V8::FatalProcessOutOfMemory("AlignedAlloc");
void* ptr = AlignedAllocInternal(size, alignment);
if (ptr == nullptr) {
V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
ptr = AlignedAllocInternal(size, alignment);
if (ptr == nullptr) {
V8::FatalProcessOutOfMemory("AlignedAlloc");
}
}
return ptr;
}
@ -80,5 +102,208 @@ void AlignedFree(void *ptr) {
#endif
}
VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}
VirtualMemory::VirtualMemory(size_t size, void* hint)
: address_(base::OS::ReserveRegion(size, hint)), size_(size) {}
VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
: address_(nullptr), size_(0) {
address_ = base::OS::ReserveAlignedRegion(size, alignment, hint, &size_);
}
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = base::OS::ReleaseRegion(address(), size());
DCHECK(result);
USE(result);
}
}
void VirtualMemory::Reset() {
address_ = nullptr;
size_ = 0;
}
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
CHECK(InVM(address, size));
return base::OS::CommitRegion(address, size, is_executable);
}
bool VirtualMemory::Uncommit(void* address, size_t size) {
CHECK(InVM(address, size));
return base::OS::UncommitRegion(address, size);
}
bool VirtualMemory::Guard(void* address) {
CHECK(InVM(address, base::OS::CommitPageSize()));
base::OS::Guard(address, base::OS::CommitPageSize());
return true;
}
size_t VirtualMemory::ReleasePartial(void* free_start) {
DCHECK(IsReserved());
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
const size_t size = size_ - (reinterpret_cast<size_t>(free_start) -
reinterpret_cast<size_t>(address_));
CHECK(InVM(free_start, size));
DCHECK_LT(address_, free_start);
DCHECK_LT(free_start, reinterpret_cast<void*>(
reinterpret_cast<size_t>(address_) + size_));
const bool result =
base::OS::ReleasePartialRegion(address_, size_, free_start, size);
USE(result);
DCHECK(result);
size_ -= size;
return size;
}
void VirtualMemory::Release() {
DCHECK(IsReserved());
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
void* address = address_;
size_t size = size_;
CHECK(InVM(address, size));
Reset();
bool result = base::OS::ReleaseRegion(address, size);
USE(result);
DCHECK(result);
}
void VirtualMemory::TakeControl(VirtualMemory* from) {
DCHECK(!IsReserved());
address_ = from->address_;
size_ = from->size_;
from->Reset();
}
bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
VirtualMemory first_try(size, hint);
if (first_try.IsReserved()) {
result->TakeControl(&first_try);
return true;
}
V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
VirtualMemory second_try(size, hint);
result->TakeControl(&second_try);
return result->IsReserved();
}
bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
VirtualMemory* result) {
VirtualMemory first_try(size, alignment, hint);
if (first_try.IsReserved()) {
result->TakeControl(&first_try);
return true;
}
V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
VirtualMemory second_try(size, alignment, hint);
result->TakeControl(&second_try);
return result->IsReserved();
}
namespace {
struct RNGInitializer {
static void Construct(void* mem) {
auto rng = new (mem) base::RandomNumberGenerator();
int64_t random_seed = FLAG_random_seed;
if (random_seed) {
rng->SetSeed(random_seed);
}
}
};
} // namespace
static base::LazyInstance<base::RandomNumberGenerator, RNGInitializer>::type
random_number_generator = LAZY_INSTANCE_INITIALIZER;
void* GetRandomMmapAddr() {
#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
defined(THREAD_SANITIZER)
// Dynamic tools do not support custom mmap addresses.
return NULL;
#endif
uintptr_t raw_addr;
random_number_generator.Pointer()->NextBytes(&raw_addr, sizeof(raw_addr));
#if V8_OS_POSIX
#if V8_TARGET_ARCH_X64
// Currently available CPUs have 48 bits of virtual addressing. Truncate
// the hint address to 46 bits to give the kernel a fighting chance of
// fulfilling our placement request.
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#elif V8_TARGET_ARCH_PPC64
#if V8_OS_AIX
// AIX: 64 bits of virtual addressing, but we limit address range to:
// a) minimize Segment Lookaside Buffer (SLB) misses and
raw_addr &= V8_UINT64_C(0x3ffff000);
// b) use extra address space to isolate the mmap regions.
raw_addr += V8_UINT64_C(0x400000000000);
#elif V8_TARGET_BIG_ENDIAN
// Big-endian Linux: 44 bits of virtual addressing.
raw_addr &= V8_UINT64_C(0x03fffffff000);
#else
// Little-endian Linux: 48 bits of virtual addressing.
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#endif
#elif V8_TARGET_ARCH_S390X
// Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
// of virtual addressing. Truncate to 40 bits to give the kernel a chance to
// fulfill the request.
raw_addr &= V8_UINT64_C(0xfffffff000);
#elif V8_TARGET_ARCH_S390
// 31 bits of virtual addressing. Truncate to 29 bits to give the kernel a
// chance to fulfill the request.
raw_addr &= 0x1ffff000;
#else
raw_addr &= 0x3ffff000;
#ifdef __sun
// For our Solaris/illumos mmap hint, we pick a random address in the bottom
// half of the top half of the address space (that is, the third quarter).
// Because we do not MAP_FIXED, this will be treated only as a hint -- the
// system will not fail to mmap() because something else happens to already
// be mapped at our random address. We deliberately set the hint high enough
// to get well above the system's break (that is, the heap); Solaris and
// illumos will try the hint and if that fails allocate as if there were
// no hint at all. The high hint prevents the break from getting hemmed in
// at low values, ceding half of the address space to the system heap.
raw_addr += 0x80000000;
#elif V8_OS_AIX
// The range 0x30000000 - 0xD0000000 is available on AIX;
// choose the upper range.
raw_addr += 0x90000000;
#else
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
// 10.6 and 10.7.
raw_addr += 0x20000000;
#endif
#endif
#else // V8_OS_WIN
// The address range used to randomize RWX allocations in OS::Allocate.
// Try not to map pages into the default range where Windows loads DLLs.
// Use a multiple of 64k to prevent committing unused memory.
// Note: This does not guarantee RWX regions will be within the
// range kAllocationRandomAddressMin to kAllocationRandomAddressMax.
#ifdef V8_HOST_ARCH_64_BIT
static const uintptr_t kAllocationRandomAddressMin = 0x0000000080000000;
static const uintptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
#else
static const uintptr_t kAllocationRandomAddressMin = 0x04000000;
static const uintptr_t kAllocationRandomAddressMax = 0x3FFF0000;
#endif
raw_addr <<= kPageSizeBits;
raw_addr += kAllocationRandomAddressMin;
raw_addr &= kAllocationRandomAddressMax;
#endif // V8_OS_WIN
return reinterpret_cast<void*>(raw_addr);
}
} // namespace internal
} // namespace v8
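Taken together, a sketch of how these internal helpers compose; this is an
internal API, and the 1 MB size and error message are assumptions for
illustration only:

namespace i = v8::internal;
i::VirtualMemory reservation;
void* hint = i::GetRandomMmapAddr();
if (!i::AllocVirtualMemory(1u << 20, hint, &reservation)) {
  // Both attempts failed, even after OnCriticalMemoryPressure().
  i::FatalProcessOutOfMemory("example reservation");  // does not return
}
// reservation.address() and reservation.size() now describe the region;
// the destructor releases it.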

128
deps/v8/src/allocation.h

@ -5,15 +5,21 @@
#ifndef V8_ALLOCATION_H_
#define V8_ALLOCATION_H_
#include "include/v8-platform.h"
#include "src/base/compiler-specific.h"
#include "src/base/platform/platform.h"
#include "src/globals.h"
#include "src/v8.h"
namespace v8 {
namespace internal {
// Called when allocation routines fail to allocate.
// This function should not return, but should terminate the current
// processing.
// This file defines memory allocation functions. If a first attempt at an
// allocation fails, these functions call back into the embedder, then attempt
// the allocation a second time. The embedder callback must not reenter V8.
// Called when allocation routines fail to allocate, even with a possible retry.
// This function should not return, but should terminate the current processing.
V8_EXPORT_PRIVATE void FatalProcessOutOfMemory(const char* message);
// Superclass for classes managed with new & delete.
@ -26,28 +32,24 @@ class V8_EXPORT_PRIVATE Malloced {
static void Delete(void* p);
};
// DEPRECATED
// TODO(leszeks): Delete this during a quiet period
#define BASE_EMBEDDED
// Superclass for classes only using static method functions.
// The subclass of AllStatic cannot be instantiated at all.
class AllStatic {
#ifdef DEBUG
public:
AllStatic() = delete;
#endif
};
template <typename T>
T* NewArray(size_t size) {
T* result = new T[size];
if (result == NULL) FatalProcessOutOfMemory("NewArray");
T* result = new (std::nothrow) T[size];
if (result == nullptr) {
V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
result = new (std::nothrow) T[size];
if (result == nullptr) FatalProcessOutOfMemory("NewArray");
}
return result;
}
template <typename T,
typename = typename std::enable_if<IS_TRIVIALLY_COPYABLE(T)>::type>
T* NewArray(size_t size, T default_val) {
T* result = reinterpret_cast<T*>(NewArray<uint8_t>(sizeof(T) * size));
for (size_t i = 0; i < size; ++i) result[i] = default_val;
return result;
}
template <typename T>
void DeleteArray(T* array) {
@ -74,6 +76,92 @@ class FreeStoreAllocationPolicy {
void* AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(void *ptr);
// Represents and controls an area of reserved memory.
class V8_EXPORT_PRIVATE VirtualMemory {
public:
// Empty VirtualMemory object, controlling no reserved memory.
VirtualMemory();
// Reserves virtual memory with size.
explicit VirtualMemory(size_t size, void* hint);
// Reserves virtual memory containing an area of the given size that is
// aligned to |alignment|. The aligned area may not start at the position
// returned by address().
VirtualMemory(size_t size, size_t alignment, void* hint);
// Construct a virtual memory by assigning it some already mapped address
// and size.
VirtualMemory(void* address, size_t size) : address_(address), size_(size) {}
// Releases the reserved memory, if any, controlled by this VirtualMemory
// object.
~VirtualMemory();
// Returns whether the memory has been reserved.
bool IsReserved() const { return address_ != nullptr; }
// Initializes or resets an embedded VirtualMemory object.
void Reset();
// Returns the start address of the reserved memory.
// If the memory was reserved with an alignment, this address is not
// necessarily aligned. The user might need to round it up to a multiple of
// the alignment to get the start of the aligned block.
void* address() const {
DCHECK(IsReserved());
return address_;
}
void* end() const {
DCHECK(IsReserved());
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address_) +
size_);
}
// Returns the size of the reserved memory. The returned value is only
// meaningful when IsReserved() returns true.
// If the memory was reserved with an alignment, this size may be larger
// than the requested size.
size_t size() const { return size_; }
// Commits real memory. Returns whether the operation succeeded.
bool Commit(void* address, size_t size, bool is_executable);
// Uncommit real memory. Returns whether the operation succeeded.
bool Uncommit(void* address, size_t size);
// Creates a single guard page at the given address.
bool Guard(void* address);
// Releases the memory after |free_start|. Returns the bytes released.
size_t ReleasePartial(void* free_start);
void Release();
// Assign control of the reserved region to a different VirtualMemory object.
// The old object is no longer functional (IsReserved() returns false).
void TakeControl(VirtualMemory* from);
bool InVM(void* address, size_t size) {
return (reinterpret_cast<uintptr_t>(address_) <=
reinterpret_cast<uintptr_t>(address)) &&
((reinterpret_cast<uintptr_t>(address_) + size_) >=
(reinterpret_cast<uintptr_t>(address) + size));
}
private:
void* address_; // Start address of the virtual memory.
size_t size_; // Size of the virtual memory.
};
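A brief lifecycle sketch under assumed |size| and |page_size| values, where
|page_size| is taken to be a multiple of base::OS::CommitPageSize():

v8::internal::VirtualMemory vm(size, nullptr /* hint */);
if (vm.IsReserved()) {
  // Commit one chunk as read/write, then undo both steps explicitly.
  vm.Commit(vm.address(), page_size, false /* is_executable */);
  vm.Uncommit(vm.address(), page_size);
  vm.Release();  // resets the object, so the destructor becomes a no-op
}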
bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result);
bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
VirtualMemory* result);
// Generate a random address to be used for hinting mmap().
V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
} // namespace internal
} // namespace v8

5
deps/v8/src/api-arguments-inl.h

@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_API_ARGUMENTS_INL_H_
#define V8_API_ARGUMENTS_INL_H_
#include "src/api-arguments.h"
#include "src/tracing/trace-event.h"
@ -153,3 +156,5 @@ void PropertyCallbackArguments::Call(AccessorNameSetterCallback f,
} // namespace internal
} // namespace v8
#endif // V8_API_ARGUMENTS_INL_H_

22
deps/v8/src/api-arguments.h

@ -63,6 +63,8 @@ Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
return result;
}
// Note: Calling args.Call() sets the return value on args. For multiple
// calls to Call(), a fresh args object should be used each time.
class PropertyCallbackArguments
: public CustomArguments<PropertyCallbackInfo<Value> > {
public:
@ -97,14 +99,14 @@ class PropertyCallbackArguments
DCHECK(values[T::kIsolateIndex]->IsSmi());
}
/*
* The following Call functions wrap the calling of all callbacks to handle
* calling either the old or the new style callbacks depending on which one
* has been registered.
* For old callbacks which return an empty handle, the ReturnValue is checked
* and used if it's been set to anything inside the callback.
* New style callbacks always use the return value.
*/
/*
* The following Call functions wrap the calling of all callbacks to handle
* calling either the old or the new style callbacks depending on which one
* has been registered.
* For old callbacks which return an empty handle, the ReturnValue is checked
* and used if it's been set to anything inside the callback.
* New style callbacks always use the return value.
*/
Handle<JSObject> Call(IndexedPropertyEnumeratorCallback f);
inline Handle<Object> Call(AccessorNameGetterCallback f, Handle<Name> name);
@ -139,6 +141,10 @@ class PropertyCallbackArguments
}
bool PerformSideEffectCheck(Isolate* isolate, Address function);
// Don't copy PropertyCallbackArguments, because they would both have the
// same prev_ pointer.
DISALLOW_COPY_AND_ASSIGN(PropertyCallbackArguments);
};
class FunctionCallbackArguments

5
deps/v8/src/api-natives.cc

@ -132,6 +132,7 @@ void EnableAccessChecks(Isolate* isolate, Handle<JSObject> object) {
// Copy map so it won't interfere constructor's initial map.
Handle<Map> new_map = Map::Copy(old_map, "EnableAccessChecks");
new_map->set_is_access_check_needed(true);
new_map->set_may_have_interesting_symbols(true);
JSObject::MigrateToMap(object, new_map);
}
@ -558,6 +559,7 @@ MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
HOLEY_SMI_ELEMENTS);
object_map->SetConstructor(*constructor);
object_map->set_is_access_check_needed(true);
object_map->set_may_have_interesting_symbols(true);
Handle<JSObject> object = isolate->factory()->NewJSObjectFromMap(object_map);
JSObject::ForceSetPrototype(object, isolate->factory()->null_value());
@ -687,7 +689,6 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
break;
default:
UNREACHABLE();
type = JS_OBJECT_TYPE; // Keep the compiler happy.
break;
}
@ -709,11 +710,13 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
// Mark as needs_access_check if needed.
if (obj->needs_access_check()) {
map->set_is_access_check_needed(true);
map->set_may_have_interesting_symbols(true);
}
// Set interceptor information in the map.
if (!obj->named_property_handler()->IsUndefined(isolate)) {
map->set_has_named_interceptor();
map->set_may_have_interesting_symbols(true);
}
if (!obj->indexed_property_handler()->IsUndefined(isolate)) {
map->set_has_indexed_interceptor();

549
deps/v8/src/api.cc

File diff suppressed because it is too large

14
deps/v8/src/api.h

@ -111,7 +111,10 @@ class RegisteredExtension {
V(NativeWeakMap, JSWeakMap) \
V(debug::GeneratorObject, JSGeneratorObject) \
V(debug::Script, Script) \
V(Promise, JSPromise)
V(Promise, JSPromise) \
V(Primitive, Object) \
V(PrimitiveArray, FixedArray) \
V(ScriptOrModule, Script)
class Utils {
public:
@ -209,6 +212,12 @@ class Utils {
v8::internal::Handle<v8::internal::JSWeakMap> obj);
static inline Local<Function> CallableToLocal(
v8::internal::Handle<v8::internal::JSReceiver> obj);
static inline Local<Primitive> ToLocalPrimitive(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<PrimitiveArray> ToLocal(
v8::internal::Handle<v8::internal::FixedArray> obj);
static inline Local<ScriptOrModule> ScriptOrModuleToLocal(
v8::internal::Handle<v8::internal::Script> obj);
#define DECLARE_OPEN_HANDLE(From, To) \
static inline v8::internal::Handle<v8::internal::To> \
@ -325,6 +334,9 @@ MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)
MAKE_TO_LOCAL(NativeWeakMapToLocal, JSWeakMap, NativeWeakMap)
MAKE_TO_LOCAL(CallableToLocal, JSReceiver, Function)
MAKE_TO_LOCAL(ToLocalPrimitive, Object, Primitive)
MAKE_TO_LOCAL(ToLocal, FixedArray, PrimitiveArray)
MAKE_TO_LOCAL(ScriptOrModuleToLocal, Script, ScriptOrModule)
#undef MAKE_TO_LOCAL_TYPED_ARRAY
#undef MAKE_TO_LOCAL

122
deps/v8/src/arm/assembler-arm-inl.h

@ -152,73 +152,6 @@ void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
}
Handle<Cell> RelocInfo::target_cell_handle() {
DCHECK(rmode_ == RelocInfo::CELL);
Address address = Memory::Address_at(pc_);
return Handle<Cell>(reinterpret_cast<Cell**>(address));
}
Cell* RelocInfo::target_cell() {
DCHECK(rmode_ == RelocInfo::CELL);
return Cell::FromValueAddress(Memory::Address_at(pc_));
}
void RelocInfo::set_target_cell(Cell* cell,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CELL);
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
cell);
}
}
Handle<Code> RelocInfo::code_age_stub_handle(Assembler* origin) {
UNREACHABLE(); // This should never be reached on Arm.
return Handle<Code>();
}
Code* RelocInfo::code_age_stub() {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
Memory::Address_at(pc_ +
(kNoCodeAgeSequenceLength - Assembler::kInstrSize)));
}
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
Memory::Address_at(pc_ +
(kNoCodeAgeSequenceLength - Assembler::kInstrSize)) =
stub->instruction_start();
}
Address RelocInfo::debug_call_address() {
// The 2 instructions offset assumes patched debug break slot or return
// sequence.
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
return Memory::Address_at(pc_ + Assembler::kPatchDebugBreakSlotAddressOffset);
}
void RelocInfo::set_debug_call_address(Isolate* isolate, Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
Memory::Address_at(pc_ + Assembler::kPatchDebugBreakSlotAddressOffset) =
target;
if (host() != NULL) {
Code* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target_code);
}
}
void RelocInfo::WipeOut(Isolate* isolate) {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
@@ -237,76 +170,31 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitEmbeddedPointer(host(), this);
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::CELL) {
visitor->VisitCellPointer(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(host(), this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(host(), this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
visitor->VisitDebugTarget(host(), this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(host(), this);
}
}
template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::CELL) {
StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
StaticVisitor::VisitDebugTarget(heap, this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
}
Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) : rmode_(rmode) {
value_.immediate = immediate;
rmode_ = rmode;
}
Operand Operand::Zero() { return Operand(static_cast<int32_t>(0)); }
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
Operand::Operand(const ExternalReference& f)
: rmode_(RelocInfo::EXTERNAL_REFERENCE) {
value_.immediate = reinterpret_cast<int32_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Operand::Operand(Smi* value) {
rm_ = no_reg;
Operand::Operand(Smi* value) : rmode_(RelocInfo::NONE32) {
value_.immediate = reinterpret_cast<intptr_t>(value);
rmode_ = RelocInfo::NONE32;
}
Operand::Operand(Register rm) {
rm_ = rm;
rs_ = no_reg;
shift_op_ = LSL;
shift_imm_ = 0;
}
Operand::Operand(Register rm) : rm_(rm), shift_op_(LSL), shift_imm_(0) {}
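The constructor rewrites in this hunk all follow one pattern: assignments in the constructor body become member initializer lists, which initializes each member exactly once and allows constexpr construction. A generic sketch of the before/after (not V8 source):
// Before: default-initialize members, then assign in the body.
//   struct OperandSketch { int imm; OperandSketch(int i) { imm = i; } };
// After: initialize directly; the constructor can now be constexpr.
struct OperandSketch {
  int imm;
  constexpr explicit OperandSketch(int i) : imm(i) {}
};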
void Assembler::CheckBuffer() {
if (buffer_space() <= kGap) {

287
deps/v8/src/arm/assembler-arm.cc

@@ -338,33 +338,21 @@ bool RelocInfo::IsInConstantPool() {
return Assembler::is_constant_pool_load(pc_);
}
Address RelocInfo::wasm_memory_reference() {
DCHECK(IsWasmMemoryReference(rmode_));
Address RelocInfo::embedded_address() const {
return Assembler::target_address_at(pc_, host_);
}
uint32_t RelocInfo::wasm_memory_size_reference() {
DCHECK(IsWasmMemorySizeReference(rmode_));
uint32_t RelocInfo::embedded_size() const {
return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
}
Address RelocInfo::wasm_global_reference() {
DCHECK(IsWasmGlobalReference(rmode_));
return Assembler::target_address_at(pc_, host_);
}
uint32_t RelocInfo::wasm_function_table_size_reference() {
DCHECK(IsWasmFunctionTableSizeReference(rmode_));
return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
}
void RelocInfo::unchecked_update_wasm_memory_reference(
Isolate* isolate, Address address, ICacheFlushMode flush_mode) {
void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
}
void RelocInfo::unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode) {
void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate, pc_, host_,
reinterpret_cast<Address>(size), flush_mode);
}
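The four wasm-specific accessors collapse into two generic embedded-constant accessors, so call sites rename mechanically. The mapping implied by this hunk:
// Old RelocInfo API                                  -> New RelocInfo API
// wasm_memory_reference() / wasm_global_reference()  -> embedded_address()
// wasm_memory_size_reference() /
//     wasm_function_table_size_reference()           -> embedded_size()
// unchecked_update_wasm_memory_reference(...)        -> set_embedded_address(...)
// unchecked_update_wasm_size(...)                    -> set_embedded_size(...)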
@@ -425,52 +413,37 @@ Operand Operand::EmbeddedCode(CodeStub* stub) {
return result;
}
MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
rn_ = rn;
rm_ = no_reg;
offset_ = offset;
am_ = am;
MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am)
: rn_(rn), rm_(no_reg), offset_(offset), am_(am) {
// Accesses below the stack pointer are not safe, and are prohibited by the
// ABI. We can check obvious violations here.
if (rn.is(sp)) {
if (rn == sp) {
if (am == Offset) DCHECK_LE(0, offset);
if (am == NegOffset) DCHECK_GE(0, offset);
}
}
MemOperand::MemOperand(Register rn, Register rm, AddrMode am)
: rn_(rn), rm_(rm), shift_op_(LSL), shift_imm_(0), am_(am) {}
MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
rn_ = rn;
rm_ = rm;
shift_op_ = LSL;
shift_imm_ = 0;
am_ = am;
}
MemOperand::MemOperand(Register rn, Register rm,
ShiftOp shift_op, int shift_imm, AddrMode am) {
MemOperand::MemOperand(Register rn, Register rm, ShiftOp shift_op,
int shift_imm, AddrMode am)
: rn_(rn),
rm_(rm),
shift_op_(shift_op),
shift_imm_(shift_imm & 31),
am_(am) {
DCHECK(is_uint5(shift_imm));
rn_ = rn;
rm_ = rm;
shift_op_ = shift_op;
shift_imm_ = shift_imm & 31;
am_ = am;
}
NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) {
NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align)
: rn_(rn), rm_(am == Offset ? pc : sp) {
DCHECK((am == Offset) || (am == PostIndex));
rn_ = rn;
rm_ = (am == Offset) ? pc : sp;
SetAlignment(align);
}
NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {
rn_ = rn;
rm_ = rm;
NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align)
: rn_(rn), rm_(rm) {
SetAlignment(align);
}
@@ -519,18 +492,16 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
const Instr kPushRegPattern =
al | B26 | 4 | NegPreIndex | Register::kCode_sp * B16;
const Instr kPushRegPattern = al | B26 | 4 | NegPreIndex | sp.code() * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
const Instr kPopRegPattern =
al | B26 | L | 4 | PostIndex | Register::kCode_sp * B16;
const Instr kPopRegPattern = al | B26 | L | 4 | PostIndex | sp.code() * B16;
// ldr rd, [pc, #offset]
const Instr kLdrPCImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCImmedPattern = 5 * B24 | L | Register::kCode_pc * B16;
const Instr kLdrPCImmedPattern = 5 * B24 | L | pc.code() * B16;
// vldr dd, [pc, #offset]
const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPCPattern = 13 * B24 | L | Register::kCode_pc * B16 | 11 * B8;
const Instr kVldrDPCPattern = 13 * B24 | L | pc.code() * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
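Each pattern constant above is a complete ARM encoding with the register field left at zero, so a matcher only has to mask out the Rd bits (12-15) before comparing. A hypothetical matcher in the shape V8's Is* helpers take (illustrative only; the real matchers live elsewhere in this file):
static bool LooksLikePush(Instr instr) {
  // Clear the unencoded Rd field (bits 12-15), then compare the rest.
  return (instr & ~(15 * B12)) == kPushRegPattern;
}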
@@ -556,14 +527,11 @@ const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;
// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kLdrRegFpOffsetPattern =
al | B26 | L | Offset | Register::kCode_fp * B16;
const Instr kStrRegFpOffsetPattern =
al | B26 | Offset | Register::kCode_fp * B16;
const Instr kLdrRegFpOffsetPattern = al | B26 | L | Offset | fp.code() * B16;
const Instr kStrRegFpOffsetPattern = al | B26 | Offset | fp.code() * B16;
const Instr kLdrRegFpNegOffsetPattern =
al | B26 | L | NegOffset | Register::kCode_fp * B16;
const Instr kStrRegFpNegOffsetPattern =
al | B26 | NegOffset | Register::kCode_fp * B16;
al | B26 | L | NegOffset | fp.code() * B16;
const Instr kStrRegFpNegOffsetPattern = al | B26 | NegOffset | fp.code() * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
@@ -734,23 +702,17 @@ Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
Register Assembler::GetRd(Instr instr) {
Register reg;
reg.reg_code = Instruction::RdValue(instr);
return reg;
return Register::from_code(Instruction::RdValue(instr));
}
Register Assembler::GetRn(Instr instr) {
Register reg;
reg.reg_code = Instruction::RnValue(instr);
return reg;
return Register::from_code(Instruction::RnValue(instr));
}
Register Assembler::GetRm(Instr instr) {
Register reg;
reg.reg_code = Instruction::RmValue(instr);
return reg;
return Register::from_code(Instruction::RmValue(instr));
}
@@ -1053,7 +1015,7 @@ void Assembler::next(Label* L) {
DCHECK(L->is_linked());
int link = target_at(L->pos());
if (link == L->pos()) {
// Branch target points to the same instuction. This is the end of the link
// Branch target points to the same instruction. This is the end of the link
// chain.
L->Unuse();
} else {
@@ -1194,7 +1156,7 @@ void Assembler::Move32BitImmediate(Register rd, const Operand& x,
DCHECK(!x.MustOutputRelocInfo(this));
UseScratchRegisterScope temps(this);
// Re-use the destination register as a scratch if possible.
Register target = !rd.is(pc) ? rd : temps.Acquire();
Register target = rd != pc ? rd : temps.Acquire();
if (CpuFeatures::IsSupported(ARMv7)) {
uint32_t imm32 = static_cast<uint32_t>(x.immediate());
CpuFeatureScope scope(this, ARMv7);
@@ -1252,7 +1214,7 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
UseScratchRegisterScope temps(this);
// Re-use the destination register if possible.
Register scratch =
(rd.is_valid() && !rd.is(rn) && !rd.is(pc)) ? rd : temps.Acquire();
(rd.is_valid() && rd != rn && rd != pc) ? rd : temps.Acquire();
mov(scratch, x, LeaveCC, cond);
AddrMode1(instr, rd, rn, Operand(scratch));
}
@@ -1264,12 +1226,12 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
} else if (!rn.is_valid()) {
// Emit a move instruction. If the operand is a register-shifted register,
// then prevent the destination from being PC as this is unpredictable.
DCHECK(!x.IsRegisterShiftedRegister() || !rd.is(pc));
DCHECK(!x.IsRegisterShiftedRegister() || rd != pc);
emit(instr | rd.code() * B12);
} else {
emit(instr | rn.code() * B16 | rd.code() * B12);
}
if (rn.is(pc) || x.rm_.is(pc)) {
if (rn == pc || x.rm_ == pc) {
// Block constant pool emission for one instruction after reading pc.
BlockConstPoolFor(1);
}
@@ -1291,7 +1253,7 @@ bool Assembler::AddrMode1TryEncodeOperand(Instr* instr, const Operand& x) {
} else {
DCHECK(x.IsRegisterShiftedRegister());
// It is unpredictable to use the PC in this case.
DCHECK(!x.rm_.is(pc) && !x.rs_.is(pc));
DCHECK(x.rm_ != pc && x.rs_ != pc);
*instr |= x.rs_.code() * B8 | x.shift_op_ | B4 | x.rm_.code();
}
@@ -1315,7 +1277,7 @@ void Assembler::AddrMode2(Instr instr, Register rd, const MemOperand& x) {
// Allow re-using rd for load instructions if possible.
bool is_load = (instr & L) == L;
Register scratch =
(is_load && !rd.is(x.rn_) && !rd.is(pc)) ? rd : temps.Acquire();
(is_load && rd != x.rn_ && rd != pc) ? rd : temps.Acquire();
mov(scratch, Operand(x.offset_), LeaveCC,
Instruction::ConditionField(instr));
AddrMode2(instr, rd, MemOperand(x.rn_, scratch, x.am_));
@@ -1327,10 +1289,10 @@ void Assembler::AddrMode2(Instr instr, Register rd, const MemOperand& x) {
// Register offset (shift_imm_ and shift_op_ are 0) or scaled
// register offset; the constructors make sure that both shift_imm_
// and shift_op_ are initialized.
DCHECK(!x.rm_.is(pc));
DCHECK(x.rm_ != pc);
instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
}
DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
DCHECK((am & (P | W)) == P || x.rn_ != pc); // no pc base with writeback
emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
@@ -1352,7 +1314,7 @@ void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
UseScratchRegisterScope temps(this);
// Allow re-using rd for load instructions if possible.
Register scratch =
(is_load && !rd.is(x.rn_) && !rd.is(pc)) ? rd : temps.Acquire();
(is_load && rd != x.rn_ && rd != pc) ? rd : temps.Acquire();
mov(scratch, Operand(x.offset_), LeaveCC,
Instruction::ConditionField(instr));
AddrMode3(instr, rd, MemOperand(x.rn_, scratch, x.am_));
@@ -1361,29 +1323,29 @@ void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
DCHECK(offset_8 >= 0); // no masking needed
instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
} else if (x.shift_imm_ != 0) {
// Scaled register offsets are not supported, compute the offset seperately
// Scaled register offsets are not supported, compute the offset separately
// to a scratch register.
UseScratchRegisterScope temps(this);
// Allow re-using rd for load instructions if possible.
Register scratch =
(is_load && !rd.is(x.rn_) && !rd.is(pc)) ? rd : temps.Acquire();
(is_load && rd != x.rn_ && rd != pc) ? rd : temps.Acquire();
mov(scratch, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
Instruction::ConditionField(instr));
AddrMode3(instr, rd, MemOperand(x.rn_, scratch, x.am_));
return;
} else {
// Register offset.
DCHECK((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
DCHECK((am & (P | W)) == P || x.rm_ != pc); // no pc index with writeback
instr |= x.rm_.code();
}
DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
DCHECK((am & (P | W)) == P || x.rn_ != pc); // no pc base with writeback
emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
void Assembler::AddrMode4(Instr instr, Register rn, RegList rl) {
DCHECK((instr & ~(kCondMask | P | U | W | L)) == B27);
DCHECK(rl != 0);
DCHECK(!rn.is(pc));
DCHECK(rn != pc);
emit(instr | rn.code()*B16 | rl);
}
@@ -1401,7 +1363,7 @@ void Assembler::AddrMode5(Instr instr, CRegister crd, const MemOperand& x) {
am ^= U;
}
DCHECK(is_uint8(offset_8)); // unsigned word offset must fit in a byte
DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
DCHECK((am & (P | W)) == P || x.rn_ != pc); // no pc base with writeback
// Post-indexed addressing requires W == 1; different than in AddrMode2/3.
if ((am & P) == 0)
@@ -1465,12 +1427,12 @@ void Assembler::blx(int branch_offset) {
}
void Assembler::blx(Register target, Condition cond) {
DCHECK(!target.is(pc));
DCHECK(target != pc);
emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
}
void Assembler::bx(Register target, Condition cond) {
DCHECK(!target.is(pc)); // use of pc is actually allowed, but discouraged
DCHECK(target != pc); // use of pc is actually allowed, but discouraged
emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
}
@@ -1596,9 +1558,8 @@ void Assembler::orr(Register dst, Register src1, Register src2, SBit s,
void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
// Don't allow nop instructions in the form mov rn, rn to be generated using
// the mov instruction. They must be generated using nop(int/NopMarkerTypes)
// or MarkCode(int/NopMarkerTypes) pseudo instructions.
DCHECK(!(src.IsRegister() && src.rm().is(dst) && s == LeaveCC && cond == al));
// the mov instruction. They must be generated using nop(int/NopMarkerTypes).
DCHECK(!(src.IsRegister() && src.rm() == dst && s == LeaveCC && cond == al));
AddrMode1(cond | MOV | s, dst, no_reg, src);
}
@@ -1697,7 +1658,7 @@ void Assembler::lsr(Register dst, Register src1, const Operand& src2, SBit s,
// Multiply instructions.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
SBit s, Condition cond) {
DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
DCHECK(dst != pc && src1 != pc && src2 != pc && srcA != pc);
emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1705,7 +1666,7 @@ void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
Condition cond) {
DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
DCHECK(dst != pc && src1 != pc && src2 != pc && srcA != pc);
DCHECK(IsEnabled(ARMv7));
emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
@@ -1714,7 +1675,7 @@ void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
void Assembler::sdiv(Register dst, Register src1, Register src2,
Condition cond) {
DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
DCHECK(dst != pc && src1 != pc && src2 != pc);
DCHECK(IsEnabled(SUDIV));
emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
src2.code()*B8 | B4 | src1.code());
@@ -1723,7 +1684,7 @@ void Assembler::sdiv(Register dst, Register src1, Register src2,
void Assembler::udiv(Register dst, Register src1, Register src2,
Condition cond) {
DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
DCHECK(dst != pc && src1 != pc && src2 != pc);
DCHECK(IsEnabled(SUDIV));
emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xf * B12 |
src2.code() * B8 | B4 | src1.code());
@@ -1732,7 +1693,7 @@ void Assembler::udiv(Register dst, Register src1, Register src2,
void Assembler::mul(Register dst, Register src1, Register src2, SBit s,
Condition cond) {
DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
DCHECK(dst != pc && src1 != pc && src2 != pc);
// dst goes in bits 16-19 for this instruction!
emit(cond | s | dst.code() * B16 | src2.code() * B8 | B7 | B4 | src1.code());
}
@@ -1740,7 +1701,7 @@ void Assembler::mul(Register dst, Register src1, Register src2, SBit s,
void Assembler::smmla(Register dst, Register src1, Register src2, Register srcA,
Condition cond) {
DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
DCHECK(dst != pc && src1 != pc && src2 != pc && srcA != pc);
emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 |
srcA.code() * B12 | src2.code() * B8 | B4 | src1.code());
}
@@ -1748,7 +1709,7 @@ void Assembler::smmla(Register dst, Register src1, Register src2, Register srcA,
void Assembler::smmul(Register dst, Register src1, Register src2,
Condition cond) {
DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
DCHECK(dst != pc && src1 != pc && src2 != pc);
emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 | 0xf * B12 |
src2.code() * B8 | B4 | src1.code());
}
@@ -1760,8 +1721,8 @@ void Assembler::smlal(Register dstL,
Register src2,
SBit s,
Condition cond) {
DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
DCHECK(!dstL.is(dstH));
DCHECK(dstL != pc && dstH != pc && src1 != pc && src2 != pc);
DCHECK(dstL != dstH);
emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1773,8 +1734,8 @@ void Assembler::smull(Register dstL,
Register src2,
SBit s,
Condition cond) {
DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
DCHECK(!dstL.is(dstH));
DCHECK(dstL != pc && dstH != pc && src1 != pc && src2 != pc);
DCHECK(dstL != dstH);
emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1786,8 +1747,8 @@ void Assembler::umlal(Register dstL,
Register src2,
SBit s,
Condition cond) {
DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
DCHECK(!dstL.is(dstH));
DCHECK(dstL != pc && dstH != pc && src1 != pc && src2 != pc);
DCHECK(dstL != dstH);
emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1799,8 +1760,8 @@ void Assembler::umull(Register dstL,
Register src2,
SBit s,
Condition cond) {
DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
DCHECK(!dstL.is(dstH));
DCHECK(dstL != pc && dstH != pc && src1 != pc && src2 != pc);
DCHECK(dstL != dstH);
emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1808,7 +1769,7 @@ void Assembler::umull(Register dstL,
// Miscellaneous arithmetic instructions.
void Assembler::clz(Register dst, Register src, Condition cond) {
DCHECK(!dst.is(pc) && !src.is(pc));
DCHECK(dst != pc && src != pc);
emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
15*B8 | CLZ | src.code());
}
@@ -1821,7 +1782,7 @@ void Assembler::usat(Register dst,
int satpos,
const Operand& src,
Condition cond) {
DCHECK(!dst.is(pc) && !src.rm_.is(pc));
DCHECK(dst != pc && src.rm_ != pc);
DCHECK((satpos >= 0) && (satpos <= 31));
DCHECK(src.IsImmediateShiftedRegister());
DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
@@ -1848,7 +1809,7 @@ void Assembler::ubfx(Register dst,
int width,
Condition cond) {
DCHECK(IsEnabled(ARMv7));
DCHECK(!dst.is(pc) && !src.is(pc));
DCHECK(dst != pc && src != pc);
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
@@ -1867,7 +1828,7 @@ void Assembler::sbfx(Register dst,
int width,
Condition cond) {
DCHECK(IsEnabled(ARMv7));
DCHECK(!dst.is(pc) && !src.is(pc));
DCHECK(dst != pc && src != pc);
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
@@ -1881,7 +1842,7 @@ void Assembler::sbfx(Register dst,
// bfc dst, #lsb, #width
void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
DCHECK(IsEnabled(ARMv7));
DCHECK(!dst.is(pc));
DCHECK(dst != pc);
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
int msb = lsb + width - 1;
@@ -1899,7 +1860,7 @@ void Assembler::bfi(Register dst,
int width,
Condition cond) {
DCHECK(IsEnabled(ARMv7));
DCHECK(!dst.is(pc) && !src.is(pc));
DCHECK(dst != pc && src != pc);
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
int msb = lsb + width - 1;
@@ -1915,10 +1876,10 @@ void Assembler::pkhbt(Register dst,
// Instruction details available in ARM DDI 0406C.b, A8.8.125.
// cond(31-28) | 01101000(27-20) | Rn(19-16) |
// Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src1.is(pc));
DCHECK(dst != pc);
DCHECK(src1 != pc);
DCHECK(src2.IsImmediateShiftedRegister());
DCHECK(!src2.rm().is(pc));
DCHECK(src2.rm() != pc);
DCHECK((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
DCHECK(src2.shift_op() == LSL);
emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
@@ -1933,10 +1894,10 @@ void Assembler::pkhtb(Register dst,
// Instruction details available in ARM DDI 0406C.b, A8.8.125.
// cond(31-28) | 01101000(27-20) | Rn(19-16) |
// Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src1.is(pc));
DCHECK(dst != pc);
DCHECK(src1 != pc);
DCHECK(src2.IsImmediateShiftedRegister());
DCHECK(!src2.rm().is(pc));
DCHECK(src2.rm() != pc);
DCHECK((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
DCHECK(src2.shift_op() == ASR);
int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
@@ -1949,8 +1910,8 @@ void Assembler::sxtb(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.233.
// cond(31-28) | 01101010(27-20) | 1111(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src.is(pc));
DCHECK(dst != pc);
DCHECK(src != pc);
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6A * B20 | 0xF * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
@@ -1962,9 +1923,9 @@ void Assembler::sxtab(Register dst, Register src1, Register src2, int rotate,
// Instruction details available in ARM DDI 0406C.b, A8.8.233.
// cond(31-28) | 01101010(27-20) | Rn(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src1.is(pc));
DCHECK(!src2.is(pc));
DCHECK(dst != pc);
DCHECK(src1 != pc);
DCHECK(src2 != pc);
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6A * B20 | src1.code() * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
@@ -1975,8 +1936,8 @@ void Assembler::sxth(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.235.
// cond(31-28) | 01101011(27-20) | 1111(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src.is(pc));
DCHECK(dst != pc);
DCHECK(src != pc);
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6B * B20 | 0xF * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
@@ -1988,9 +1949,9 @@ void Assembler::sxtah(Register dst, Register src1, Register src2, int rotate,
// Instruction details available in ARM DDI 0406C.b, A8.8.235.
// cond(31-28) | 01101011(27-20) | Rn(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src1.is(pc));
DCHECK(!src2.is(pc));
DCHECK(dst != pc);
DCHECK(src1 != pc);
DCHECK(src2 != pc);
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6B * B20 | src1.code() * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
@@ -2001,8 +1962,8 @@ void Assembler::uxtb(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.274.
// cond(31-28) | 01101110(27-20) | 1111(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src.is(pc));
DCHECK(dst != pc);
DCHECK(src != pc);
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6E * B20 | 0xF * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
@@ -2014,9 +1975,9 @@ void Assembler::uxtab(Register dst, Register src1, Register src2, int rotate,
// Instruction details available in ARM DDI 0406C.b, A8.8.271.
// cond(31-28) | 01101110(27-20) | Rn(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src1.is(pc));
DCHECK(!src2.is(pc));
DCHECK(dst != pc);
DCHECK(src1 != pc);
DCHECK(src2 != pc);
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6E * B20 | src1.code() * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
@@ -2027,8 +1988,8 @@ void Assembler::uxtb16(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.275.
// cond(31-28) | 01101100(27-20) | 1111(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src.is(pc));
DCHECK(dst != pc);
DCHECK(src != pc);
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6C * B20 | 0xF * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
@@ -2039,8 +2000,8 @@ void Assembler::uxth(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.276.
// cond(31-28) | 01101111(27-20) | 1111(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src.is(pc));
DCHECK(dst != pc);
DCHECK(src != pc);
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6F * B20 | 0xF * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
@@ -2052,9 +2013,9 @@ void Assembler::uxtah(Register dst, Register src1, Register src2, int rotate,
// Instruction details available in ARM DDI 0406C.b, A8.8.273.
// cond(31-28) | 01101111(27-20) | Rn(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src1.is(pc));
DCHECK(!src2.is(pc));
DCHECK(dst != pc);
DCHECK(src1 != pc);
DCHECK(src2 != pc);
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6F * B20 | src1.code() * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
@@ -2065,15 +2026,15 @@ void Assembler::rbit(Register dst, Register src, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.144.
// cond(31-28) | 011011111111(27-16) | Rd(15-12) | 11110011(11-4) | Rm(3-0)
DCHECK(IsEnabled(ARMv7));
DCHECK(!dst.is(pc));
DCHECK(!src.is(pc));
DCHECK(dst != pc);
DCHECK(src != pc);
emit(cond | 0x6FF * B16 | dst.code() * B12 | 0xF3 * B4 | src.code());
}
// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
DCHECK(!dst.is(pc));
DCHECK(dst != pc);
emit(cond | B24 | s | 15*B16 | dst.code()*B12);
}
@@ -2149,8 +2110,8 @@ void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
void Assembler::ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
DCHECK(src.rm().is(no_reg));
DCHECK(!dst1.is(lr)); // r14.
DCHECK(src.rm() == no_reg);
DCHECK(dst1 != lr); // r14.
DCHECK_EQ(0, dst1.code() % 2);
DCHECK_EQ(dst1.code() + 1, dst2.code());
AddrMode3(cond | B7 | B6 | B4, dst1, src);
@@ -2159,8 +2120,8 @@ void Assembler::ldrd(Register dst1, Register dst2,
void Assembler::strd(Register src1, Register src2,
const MemOperand& dst, Condition cond) {
DCHECK(dst.rm().is(no_reg));
DCHECK(!src1.is(lr)); // r14.
DCHECK(dst.rm() == no_reg);
DCHECK(src1 != lr); // r14.
DCHECK_EQ(0, src1.code() % 2);
DCHECK_EQ(src1.code() + 1, src2.code());
AddrMode3(cond | B7 | B6 | B5 | B4, src1, dst);
@@ -2219,7 +2180,7 @@ void Assembler::pld(const MemOperand& address) {
// Instruction details available in ARM DDI 0406C.b, A8.8.128.
// 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) |
// 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0) |
DCHECK(address.rm().is(no_reg));
DCHECK(address.rm() == no_reg);
DCHECK(address.am() == Offset);
int U = B23;
int offset = address.offset();
@@ -2239,7 +2200,7 @@ void Assembler::ldm(BlockAddrMode am,
RegList dst,
Condition cond) {
// ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
DCHECK(base.is(sp) || (dst & sp.bit()) == 0);
DCHECK(base == sp || (dst & sp.bit()) == 0);
AddrMode4(cond | B27 | am | L, base, dst);
@@ -2453,7 +2414,7 @@ void Assembler::vldr(const DwVfpRegister dst,
Register scratch = temps.Acquire();
// Larger offsets must be handled by computing the correct address in a
// scratch register.
DCHECK(!base.is(scratch));
DCHECK(base != scratch);
if (u == 1) {
add(scratch, base, Operand(offset));
} else {
@@ -2507,7 +2468,7 @@ void Assembler::vldr(const SwVfpRegister dst,
// scratch register.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(!base.is(scratch));
DCHECK(base != scratch);
if (u == 1) {
add(scratch, base, Operand(offset));
} else {
@@ -2562,7 +2523,7 @@ void Assembler::vstr(const DwVfpRegister src,
// scratch register.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(!base.is(scratch));
DCHECK(base != scratch);
if (u == 1) {
add(scratch, base, Operand(offset));
} else {
@@ -2616,7 +2577,7 @@ void Assembler::vstr(const SwVfpRegister src,
// scratch register.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(!base.is(scratch));
DCHECK(base != scratch);
if (u == 1) {
add(scratch, base, Operand(offset));
} else {
@@ -2651,7 +2612,7 @@ void Assembler::vldm(BlockAddrMode am, Register base, DwVfpRegister first,
DCHECK_LE(first.code(), last.code());
DCHECK(VfpRegisterIsAvailable(last));
DCHECK(am == ia || am == ia_w || am == db_w);
DCHECK(!base.is(pc));
DCHECK(base != pc);
int sd, d;
first.split_code(&sd, &d);
@@ -2669,7 +2630,7 @@ void Assembler::vstm(BlockAddrMode am, Register base, DwVfpRegister first,
DCHECK_LE(first.code(), last.code());
DCHECK(VfpRegisterIsAvailable(last));
DCHECK(am == ia || am == ia_w || am == db_w);
DCHECK(!base.is(pc));
DCHECK(base != pc);
int sd, d;
first.split_code(&sd, &d);
@@ -2686,7 +2647,7 @@ void Assembler::vldm(BlockAddrMode am, Register base, SwVfpRegister first,
// first(15-12) | 1010(11-8) | (count/2)
DCHECK_LE(first.code(), last.code());
DCHECK(am == ia || am == ia_w || am == db_w);
DCHECK(!base.is(pc));
DCHECK(base != pc);
int sd, d;
first.split_code(&sd, &d);
@@ -2702,7 +2663,7 @@ void Assembler::vstm(BlockAddrMode am, Register base, SwVfpRegister first,
// first(15-12) | 1011(11-8) | (count/2)
DCHECK_LE(first.code(), last.code());
DCHECK(am == ia || am == ia_w || am == db_w);
DCHECK(!base.is(pc));
DCHECK(base != pc);
int sd, d;
first.split_code(&sd, &d);
@@ -2834,7 +2795,7 @@ void Assembler::vmov(const DwVfpRegister dst, Double imm,
// instruction.
mov(scratch, Operand(lo));
vmov(dst, scratch, scratch);
} else if (extra_scratch.is(no_reg)) {
} else if (extra_scratch == no_reg) {
// We only have one spare scratch register.
mov(scratch, Operand(lo));
vmov(dst, VmovIndexLo, scratch);
@@ -2928,7 +2889,7 @@ void Assembler::vmov(const DwVfpRegister dst,
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(!src1.is(pc) && !src2.is(pc));
DCHECK(src1 != pc && src2 != pc);
int vm, m;
dst.split_code(&vm, &m);
emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
@@ -2945,7 +2906,7 @@ void Assembler::vmov(const Register dst1,
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
DCHECK(VfpRegisterIsAvailable(src));
DCHECK(!dst1.is(pc) && !dst2.is(pc));
DCHECK(dst1 != pc && dst2 != pc);
int vm, m;
src.split_code(&vm, &m);
emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
@@ -2960,7 +2921,7 @@ void Assembler::vmov(const SwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
DCHECK(!src.is(pc));
DCHECK(src != pc);
int sn, n;
dst.split_code(&sn, &n);
emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
@@ -2974,7 +2935,7 @@ void Assembler::vmov(const Register dst,
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
DCHECK(!dst.is(pc));
DCHECK(dst != pc);
int sn, n;
src.split_code(&sn, &n);
emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);

514
deps/v8/src/arm/assembler-arm.h

@@ -45,8 +45,8 @@
#include "src/arm/constants-arm.h"
#include "src/assembler.h"
#include "src/boxed-float.h"
#include "src/double.h"
#include "src/float.h"
namespace v8 {
namespace internal {
@@ -65,12 +65,17 @@ namespace internal {
V(s16) V(s17) V(s18) V(s19) V(s20) V(s21) V(s22) V(s23) \
V(s24) V(s25) V(s26) V(s27) V(s28) V(s29) V(s30) V(s31)
#define DOUBLE_REGISTERS(V) \
#define LOW_DOUBLE_REGISTERS(V) \
V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15) \
V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
#define NON_LOW_DOUBLE_REGISTERS(V) \
V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
#define DOUBLE_REGISTERS(V) \
LOW_DOUBLE_REGISTERS(V) NON_LOW_DOUBLE_REGISTERS(V)
#define SIMD128_REGISTERS(V) \
V(q0) V(q1) V(q2) V(q3) V(q4) V(q5) V(q6) V(q7) \
V(q8) V(q9) V(q10) V(q11) V(q12) V(q13) V(q14) V(q15)
@@ -83,251 +88,203 @@ namespace internal {
#define ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(V) \
V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
V(d8) V(d9) V(d10) V(d11) V(d12) V(d15) \
V(d8) V(d9) V(d10) V(d11) V(d12) V(d15)
#define C_REGISTERS(V) \
V(cr0) V(cr1) V(cr2) V(cr3) V(cr4) V(cr5) V(cr6) V(cr7) \
V(cr8) V(cr9) V(cr10) V(cr11) V(cr12) V(cr15)
// clang-format on
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
// compatible with int, which has caused code-generation bugs.
//
// 2) We would prefer to use a class instead of a struct but we don't like
// the register initialization to depend on the particular initialization
// order (which appears to be different on OS X, Linux, and Windows for the
// installed versions of C++ we tried). Using a struct permits C-style
// "initialization". Also, the Register objects cannot be const as this
// forces initialization stubs in MSVC, making us dependent on initialization
// order.
//
// 3) By not using an enum, we are possibly preventing the compiler from
// doing certain constant folds, which may significantly reduce the
// code generated for some assembly instructions (because they boil down
// to a few constants). If this is a problem, we could change the code
// such that we use an enum in optimized mode, and the struct in debug
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
struct Register {
enum Code {
#define REGISTER_CODE(R) kCode_##R,
GENERAL_REGISTERS(REGISTER_CODE)
// The ARM ABI does not specify the usage of register r9, which may be reserved
// as the static base or thread register on some platforms, in which case we
// leave it alone. Adjust the value of kR9Available accordingly:
const int kR9Available = 1; // 1 if available to us, 0 if reserved
// Register list in load/store instructions
// Note that the bit values must match those used in actual instruction encoding
const int kNumRegs = 16;
// Caller-saved/arguments registers
const RegList kJSCallerSaved =
1 << 0 | // r0 a1
1 << 1 | // r1 a2
1 << 2 | // r2 a3
1 << 3; // r3 a4
const int kNumJSCallerSaved = 4;
// Callee-saved registers preserved when switching from C to JavaScript
const RegList kCalleeSaved =
1 << 4 | // r4 v1
1 << 5 | // r5 v2
1 << 6 | // r6 v3
1 << 7 | // r7 v4 (cp in JavaScript code)
1 << 8 | // r8 v5 (pp in JavaScript code)
kR9Available << 9 | // r9 v6
1 << 10 | // r10 v7
1 << 11; // r11 v8 (fp in JavaScript code)
// When calling into C++ (only for C++ calls that can't cause a GC).
// The call code will take care of lr, fp, etc.
const RegList kCallerSaved =
1 << 0 | // r0
1 << 1 | // r1
1 << 2 | // r2
1 << 3 | // r3
1 << 9; // r9
const int kNumCalleeSaved = 7 + kR9Available;
// Double registers d8 to d15 are callee-saved.
const int kNumDoubleCalleeSaved = 8;
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of 8.
// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
const int kNumSafepointRegisters = 16;
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
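RegList is a plain bitmask with one bit per general register, which is why the lists above are built from shifted ones and compose with bitwise OR; kSafepointSavedRegisters is literally the union kJSCallerSaved | kCalleeSaved. An illustrative membership check:
// Illustrative helper: test whether a register code is JS caller-saved.
inline bool IsJSCallerSaved(int reg_code) {
  return (kJSCallerSaved & (1 << reg_code)) != 0;
}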
enum RegisterCode {
#define REGISTER_CODE(R) kRegCode_##R,
GENERAL_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
kAfterLast,
kCode_no_reg = -1
};
static constexpr int kNumRegisters = Code::kAfterLast;
static Register from_code(int code) {
DCHECK(code >= 0);
DCHECK(code < kNumRegisters);
Register r = {code};
return r;
}
bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
return reg_code;
}
int bit() const {
DCHECK(is_valid());
return 1 << reg_code;
}
void set_code(int code) {
reg_code = code;
DCHECK(is_valid());
}
kRegAfterLast
};
// Unfortunately we can't make this private in a struct.
int reg_code;
class Register : public RegisterBase<Register, kRegAfterLast> {
friend class RegisterBase;
explicit constexpr Register(int code) : RegisterBase(code) {}
};
static_assert(IS_TRIVIALLY_COPYABLE(Register) &&
sizeof(Register) == sizeof(int),
"Register can efficiently be passed by value");
// r7: context register
// r9: lithium scratch
#define DECLARE_REGISTER(R) constexpr Register R = {Register::kCode_##R};
#define DECLARE_REGISTER(R) \
constexpr Register R = Register::from_code<kRegCode_##R>();
GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
constexpr Register no_reg = {Register::kCode_no_reg};
constexpr Register no_reg = Register::no_reg();
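Side by side, the DECLARE_REGISTER expansion for one entry shows the shape change:
// Old: aggregate-initialize the struct's public code field.
//   constexpr Register r0 = {Register::kCode_r0};
// New: go through the bounds-checked constexpr factory on RegisterBase.
//   constexpr Register r0 = Register::from_code<kRegCode_r0>();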
constexpr bool kSimpleFPAliasing = false;
constexpr bool kSimdMaskRegisters = false;
// Single word VFP register.
struct SwVfpRegister {
enum Code {
#define REGISTER_CODE(R) kCode_##R,
FLOAT_REGISTERS(REGISTER_CODE)
enum SwVfpRegisterCode {
#define REGISTER_CODE(R) kSwVfpCode_##R,
FLOAT_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
kAfterLast,
kCode_no_reg = -1
};
static constexpr int kMaxNumRegisters = Code::kAfterLast;
kSwVfpAfterLast
};
// Single word VFP register.
class SwVfpRegister : public RegisterBase<SwVfpRegister, kSwVfpAfterLast> {
public:
static constexpr int kSizeInBytes = 4;
bool is_valid() const { return 0 <= reg_code && reg_code < 32; }
bool is(SwVfpRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
return reg_code;
}
int bit() const {
DCHECK(is_valid());
return 1 << reg_code;
}
static SwVfpRegister from_code(int code) {
SwVfpRegister r = {code};
return r;
}
static void split_code(int reg_code, int* vm, int* m) {
DCHECK(from_code(reg_code).is_valid());
*m = reg_code & 0x1;
*vm = reg_code >> 1;
}
void split_code(int* vm, int* m) const {
split_code(reg_code, vm, m);
}
void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
int reg_code;
private:
friend class RegisterBase;
explicit constexpr SwVfpRegister(int code) : RegisterBase(code) {}
};
static_assert(IS_TRIVIALLY_COPYABLE(SwVfpRegister) &&
sizeof(SwVfpRegister) == sizeof(int),
"SwVfpRegister can efficiently be passed by value");
typedef SwVfpRegister FloatRegister;
// Double word VFP register.
struct DwVfpRegister {
enum Code {
#define REGISTER_CODE(R) kCode_##R,
DOUBLE_REGISTERS(REGISTER_CODE)
enum DoubleRegisterCode {
#define REGISTER_CODE(R) kDoubleCode_##R,
DOUBLE_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
kAfterLast,
kCode_no_reg = -1
};
static constexpr int kMaxNumRegisters = Code::kAfterLast;
inline static int NumRegisters();
kDoubleAfterLast
};
// Double word VFP register.
class DwVfpRegister : public RegisterBase<DwVfpRegister, kDoubleAfterLast> {
public:
// A few double registers are reserved: one as a scratch register and one to
// hold 0.0, which does not fit in the immediate field of vmov instructions.
// d14: 0.0
// d15: scratch register.
static constexpr int kSizeInBytes = 8;
bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
bool is(DwVfpRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
return reg_code;
}
int bit() const {
DCHECK(is_valid());
return 1 << reg_code;
}
inline static int NumRegisters();
static DwVfpRegister from_code(int code) {
DwVfpRegister r = {code};
return r;
}
static void split_code(int reg_code, int* vm, int* m) {
DCHECK(from_code(reg_code).is_valid());
*m = (reg_code & 0x10) >> 4;
*vm = reg_code & 0x0F;
}
void split_code(int* vm, int* m) const {
split_code(reg_code, vm, m);
}
void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
int reg_code;
private:
friend class RegisterBase;
friend class LowDwVfpRegister;
explicit constexpr DwVfpRegister(int code) : RegisterBase(code) {}
};
static_assert(IS_TRIVIALLY_COPYABLE(DwVfpRegister) &&
sizeof(DwVfpRegister) == sizeof(int),
"DwVfpRegister can efficiently be passed by value");
typedef DwVfpRegister DoubleRegister;
// Double word VFP register d0-15.
struct LowDwVfpRegister {
class LowDwVfpRegister
: public RegisterBase<LowDwVfpRegister, kDoubleCode_d16> {
public:
static constexpr int kMaxNumLowRegisters = 16;
constexpr operator DwVfpRegister() const {
return DwVfpRegister { reg_code };
}
static LowDwVfpRegister from_code(int code) {
LowDwVfpRegister r = { code };
return r;
}
constexpr operator DwVfpRegister() const { return DwVfpRegister(reg_code_); }
bool is_valid() const {
return 0 <= reg_code && reg_code < kMaxNumLowRegisters;
}
bool is(DwVfpRegister reg) const { return reg_code == reg.reg_code; }
bool is(LowDwVfpRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
return reg_code;
}
SwVfpRegister low() const {
SwVfpRegister reg;
reg.reg_code = reg_code * 2;
DCHECK(reg.is_valid());
return reg;
}
SwVfpRegister low() const { return SwVfpRegister::from_code(code() * 2); }
SwVfpRegister high() const {
SwVfpRegister reg;
reg.reg_code = (reg_code * 2) + 1;
DCHECK(reg.is_valid());
return reg;
return SwVfpRegister::from_code(code() * 2 + 1);
}
int reg_code;
private:
friend class RegisterBase;
explicit constexpr LowDwVfpRegister(int code) : RegisterBase(code) {}
};
enum Simd128RegisterCode {
#define REGISTER_CODE(R) kSimd128Code_##R,
SIMD128_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
kSimd128AfterLast
};
// Quad word NEON register.
struct QwNeonRegister {
static constexpr int kMaxNumRegisters = 16;
static QwNeonRegister from_code(int code) {
QwNeonRegister r = { code };
return r;
}
bool is_valid() const {
return (0 <= reg_code) && (reg_code < kMaxNumRegisters);
}
bool is(QwNeonRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
return reg_code;
}
class QwNeonRegister : public RegisterBase<QwNeonRegister, kSimd128AfterLast> {
public:
static void split_code(int reg_code, int* vm, int* m) {
DCHECK(from_code(reg_code).is_valid());
int encoded_code = reg_code << 1;
*m = (encoded_code & 0x10) >> 4;
*vm = encoded_code & 0x0F;
}
void split_code(int* vm, int* m) const {
split_code(reg_code, vm, m);
}
DwVfpRegister low() const {
DwVfpRegister reg;
reg.reg_code = reg_code * 2;
DCHECK(reg.is_valid());
return reg;
}
void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
DwVfpRegister low() const { return DwVfpRegister::from_code(code() * 2); }
DwVfpRegister high() const {
DwVfpRegister reg;
reg.reg_code = reg_code * 2 + 1;
DCHECK(reg.is_valid());
return reg;
return DwVfpRegister::from_code(code() * 2 + 1);
}
int reg_code;
private:
friend class RegisterBase;
explicit constexpr QwNeonRegister(int code) : RegisterBase(code) {}
};
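The rewritten low()/high() helpers make the NEON aliasing explicit: each quad register overlays two consecutive double registers.
// Example of the aliasing computed by low()/high():
//   q3.low()  == d6   // DwVfpRegister::from_code(3 * 2)
//   q3.high() == d7   // DwVfpRegister::from_code(3 * 2 + 1)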
@@ -335,92 +292,42 @@ typedef QwNeonRegister QuadRegister;
typedef QwNeonRegister Simd128Register;
enum CRegisterCode {
#define REGISTER_CODE(R) kCCode_##R,
C_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
kCAfterLast
};
// Coprocessor register
class CRegister : public RegisterBase<CRegister, kCAfterLast> {
friend class RegisterBase;
explicit constexpr CRegister(int code) : RegisterBase(code) {}
};
// Support for the VFP registers s0 to s31 (d0 to d15).
// Note that "s(N):s(N+1)" is the same as "d(N/2)".
constexpr SwVfpRegister s0 = { 0 };
constexpr SwVfpRegister s1 = { 1 };
constexpr SwVfpRegister s2 = { 2 };
constexpr SwVfpRegister s3 = { 3 };
constexpr SwVfpRegister s4 = { 4 };
constexpr SwVfpRegister s5 = { 5 };
constexpr SwVfpRegister s6 = { 6 };
constexpr SwVfpRegister s7 = { 7 };
constexpr SwVfpRegister s8 = { 8 };
constexpr SwVfpRegister s9 = { 9 };
constexpr SwVfpRegister s10 = { 10 };
constexpr SwVfpRegister s11 = { 11 };
constexpr SwVfpRegister s12 = { 12 };
constexpr SwVfpRegister s13 = { 13 };
constexpr SwVfpRegister s14 = { 14 };
constexpr SwVfpRegister s15 = { 15 };
constexpr SwVfpRegister s16 = { 16 };
constexpr SwVfpRegister s17 = { 17 };
constexpr SwVfpRegister s18 = { 18 };
constexpr SwVfpRegister s19 = { 19 };
constexpr SwVfpRegister s20 = { 20 };
constexpr SwVfpRegister s21 = { 21 };
constexpr SwVfpRegister s22 = { 22 };
constexpr SwVfpRegister s23 = { 23 };
constexpr SwVfpRegister s24 = { 24 };
constexpr SwVfpRegister s25 = { 25 };
constexpr SwVfpRegister s26 = { 26 };
constexpr SwVfpRegister s27 = { 27 };
constexpr SwVfpRegister s28 = { 28 };
constexpr SwVfpRegister s29 = { 29 };
constexpr SwVfpRegister s30 = { 30 };
constexpr SwVfpRegister s31 = { 31 };
constexpr DwVfpRegister no_dreg = { -1 };
constexpr LowDwVfpRegister d0 = { 0 };
constexpr LowDwVfpRegister d1 = { 1 };
constexpr LowDwVfpRegister d2 = { 2 };
constexpr LowDwVfpRegister d3 = { 3 };
constexpr LowDwVfpRegister d4 = { 4 };
constexpr LowDwVfpRegister d5 = { 5 };
constexpr LowDwVfpRegister d6 = { 6 };
constexpr LowDwVfpRegister d7 = { 7 };
constexpr LowDwVfpRegister d8 = { 8 };
constexpr LowDwVfpRegister d9 = { 9 };
constexpr LowDwVfpRegister d10 = { 10 };
constexpr LowDwVfpRegister d11 = { 11 };
constexpr LowDwVfpRegister d12 = { 12 };
constexpr LowDwVfpRegister d13 = { 13 };
constexpr LowDwVfpRegister d14 = { 14 };
constexpr LowDwVfpRegister d15 = { 15 };
constexpr DwVfpRegister d16 = { 16 };
constexpr DwVfpRegister d17 = { 17 };
constexpr DwVfpRegister d18 = { 18 };
constexpr DwVfpRegister d19 = { 19 };
constexpr DwVfpRegister d20 = { 20 };
constexpr DwVfpRegister d21 = { 21 };
constexpr DwVfpRegister d22 = { 22 };
constexpr DwVfpRegister d23 = { 23 };
constexpr DwVfpRegister d24 = { 24 };
constexpr DwVfpRegister d25 = { 25 };
constexpr DwVfpRegister d26 = { 26 };
constexpr DwVfpRegister d27 = { 27 };
constexpr DwVfpRegister d28 = { 28 };
constexpr DwVfpRegister d29 = { 29 };
constexpr DwVfpRegister d30 = { 30 };
constexpr DwVfpRegister d31 = { 31 };
constexpr QwNeonRegister q0 = { 0 };
constexpr QwNeonRegister q1 = { 1 };
constexpr QwNeonRegister q2 = { 2 };
constexpr QwNeonRegister q3 = { 3 };
constexpr QwNeonRegister q4 = { 4 };
constexpr QwNeonRegister q5 = { 5 };
constexpr QwNeonRegister q6 = { 6 };
constexpr QwNeonRegister q7 = { 7 };
constexpr QwNeonRegister q8 = { 8 };
constexpr QwNeonRegister q9 = { 9 };
constexpr QwNeonRegister q10 = { 10 };
constexpr QwNeonRegister q11 = { 11 };
constexpr QwNeonRegister q12 = { 12 };
constexpr QwNeonRegister q13 = { 13 };
constexpr QwNeonRegister q14 = { 14 };
constexpr QwNeonRegister q15 = { 15 };
#define DECLARE_FLOAT_REGISTER(R) \
constexpr SwVfpRegister R = SwVfpRegister::from_code<kSwVfpCode_##R>();
FLOAT_REGISTERS(DECLARE_FLOAT_REGISTER)
#undef DECLARE_FLOAT_REGISTER
#define DECLARE_LOW_DOUBLE_REGISTER(R) \
constexpr LowDwVfpRegister R = LowDwVfpRegister::from_code<kDoubleCode_##R>();
LOW_DOUBLE_REGISTERS(DECLARE_LOW_DOUBLE_REGISTER)
#undef DECLARE_LOW_DOUBLE_REGISTER
#define DECLARE_DOUBLE_REGISTER(R) \
constexpr DwVfpRegister R = DwVfpRegister::from_code<kDoubleCode_##R>();
NON_LOW_DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
#undef DECLARE_DOUBLE_REGISTER
constexpr DwVfpRegister no_dreg = DwVfpRegister::no_reg();
#define DECLARE_SIMD128_REGISTER(R) \
constexpr Simd128Register R = Simd128Register::from_code<kSimd128Code_##R>();
SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
#undef DECLARE_SIMD128_REGISTER
// Aliases for double registers.
constexpr LowDwVfpRegister kFirstCalleeSavedDoubleReg = d8;
@@ -433,43 +340,12 @@ constexpr LowDwVfpRegister kScratchDoubleReg = d14;
constexpr QwNeonRegister kScratchQuadReg = q7;
constexpr LowDwVfpRegister kScratchDoubleReg2 = d15;
// Coprocessor register
struct CRegister {
bool is_valid() const { return 0 <= reg_code && reg_code < 16; }
bool is(CRegister creg) const { return reg_code == creg.reg_code; }
int code() const {
DCHECK(is_valid());
return reg_code;
}
int bit() const {
DCHECK(is_valid());
return 1 << reg_code;
}
// Unfortunately we can't make this private in a struct.
int reg_code;
};
constexpr CRegister no_creg = { -1 };
constexpr CRegister cr0 = { 0 };
constexpr CRegister cr1 = { 1 };
constexpr CRegister cr2 = { 2 };
constexpr CRegister cr3 = { 3 };
constexpr CRegister cr4 = { 4 };
constexpr CRegister cr5 = { 5 };
constexpr CRegister cr6 = { 6 };
constexpr CRegister cr7 = { 7 };
constexpr CRegister cr8 = { 8 };
constexpr CRegister cr9 = { 9 };
constexpr CRegister cr10 = { 10 };
constexpr CRegister cr11 = { 11 };
constexpr CRegister cr12 = { 12 };
constexpr CRegister cr13 = { 13 };
constexpr CRegister cr14 = { 14 };
constexpr CRegister cr15 = { 15 };
constexpr CRegister no_creg = CRegister::no_reg();
#define DECLARE_C_REGISTER(R) \
constexpr CRegister R = CRegister::from_code<kCCode_##R>();
C_REGISTERS(DECLARE_C_REGISTER)
#undef DECLARE_C_REGISTER
// Coprocessor number
enum Coprocessor {
@@ -491,7 +367,6 @@ enum Coprocessor {
p15 = 15
};
// -----------------------------------------------------------------------------
// Machine instruction Operands
@@ -531,10 +406,8 @@ class Operand BASE_EMBEDDED {
// Return true if this is a register operand.
bool IsRegister() const {
return rm_.is_valid() &&
rs_.is(no_reg) &&
shift_op_ == LSL &&
shift_imm_ == 0;
return rm_.is_valid() && rs_ == no_reg && shift_op_ == LSL &&
shift_imm_ == 0;
}
// Return true if this is a register operand shifted with an immediate.
bool IsImmediateShiftedRegister() const {
@@ -586,8 +459,8 @@ class Operand BASE_EMBEDDED {
private:
Register rm_;
Register rs_;
Register rm_ = no_reg;
Register rs_ = no_reg;
ShiftOp shift_op_;
int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
union Value {
@@ -631,13 +504,13 @@ class MemOperand BASE_EMBEDDED {
}
void set_offset(int32_t offset) {
DCHECK(rm_.is(no_reg));
offset_ = offset;
DCHECK(rm_ == no_reg);
offset_ = offset;
}
uint32_t offset() const {
DCHECK(rm_.is(no_reg));
return offset_;
DCHECK(rm_ == no_reg);
return offset_;
}
Register rn() const { return rn_; }
@@ -810,21 +683,9 @@ class Assembler : public AssemblerBase {
// Size of an instruction.
static constexpr int kInstrSize = sizeof(Instr);
// Distance between start of patched debug break slot and the emitted address
// to jump to.
// Patched debug break slot code is:
// ldr ip, [pc, #0] @ emitted address and start
// blx ip
static constexpr int kPatchDebugBreakSlotAddressOffset = 2 * kInstrSize;
// Difference between address of current opcode and value read from pc
// register.
static constexpr int kPcLoadDelta = 8;
static constexpr int kDebugBreakSlotInstructions = 4;
static constexpr int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
// ---------------------------------------------------------------------------
@@ -1559,11 +1420,6 @@
DISALLOW_COPY_AND_ASSIGN(BlockCodeTargetSharingScope);
};
// Debugging
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);
@@ -1755,13 +1611,13 @@
bool VfpRegisterIsAvailable(DwVfpRegister reg) {
DCHECK(reg.is_valid());
return IsEnabled(VFP32DREGS) ||
(reg.reg_code < LowDwVfpRegister::kMaxNumLowRegisters);
(reg.code() < LowDwVfpRegister::kNumRegisters);
}
bool VfpRegisterIsAvailable(QwNeonRegister reg) {
DCHECK(reg.is_valid());
return IsEnabled(VFP32DREGS) ||
(reg.reg_code < LowDwVfpRegister::kMaxNumLowRegisters / 2);
(reg.code() < LowDwVfpRegister::kNumRegisters / 2);
}
inline void emit(Instr x);

1236
deps/v8/src/arm/code-stubs-arm.cc

File diff suppressed because it is too large

56
deps/v8/src/arm/code-stubs-arm.h

@@ -5,15 +5,10 @@
#ifndef V8_ARM_CODE_STUBS_ARM_H_
#define V8_ARM_CODE_STUBS_ARM_H_
#include "src/arm/frames-arm.h"
namespace v8 {
namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StringHelper : public AllStatic {
public:
// Compares two flat one-byte strings and returns result in r0.
@@ -77,49 +72,9 @@ class RecordWriteStub: public PlatformCodeStub {
DCHECK(Assembler::IsBranch(masm->instr_at(pos)));
}
static Mode GetMode(Code* stub) {
Instr first_instruction = Assembler::instr_at(stub->instruction_start());
Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
Assembler::kInstrSize);
static Mode GetMode(Code* stub);
if (Assembler::IsBranch(first_instruction)) {
return INCREMENTAL;
}
DCHECK(Assembler::IsTstImmediate(first_instruction));
if (Assembler::IsBranch(second_instruction)) {
return INCREMENTAL_COMPACTION;
}
DCHECK(Assembler::IsTstImmediate(second_instruction));
return STORE_BUFFER_ONLY;
}
static void Patch(Code* stub, Mode mode) {
MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
stub->instruction_size(), CodeObjectRequired::kNo);
switch (mode) {
case STORE_BUFFER_ONLY:
DCHECK(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
PatchBranchIntoNop(&masm, 0);
PatchBranchIntoNop(&masm, Assembler::kInstrSize);
break;
case INCREMENTAL:
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
PatchNopIntoBranch(&masm, 0);
break;
case INCREMENTAL_COMPACTION:
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
PatchNopIntoBranch(&masm, Assembler::kInstrSize);
break;
}
DCHECK(GetMode(stub) == mode);
Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
2 * Assembler::kInstrSize);
}
static void Patch(Code* stub, Mode mode);
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
@@ -129,12 +84,11 @@ class RecordWriteStub: public PlatformCodeStub {
// the caller.
class RegisterAllocation {
public:
RegisterAllocation(Register object,
Register address,
Register scratch0)
RegisterAllocation(Register object, Register address, Register scratch0)
: object_(object),
address_(address),
scratch0_(scratch0) {
scratch0_(scratch0),
scratch1_(no_reg) {
DCHECK(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
}
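GetMode and Patch keep their old semantics; they are just defined out of line now. A usage sketch grounded in the enum values shown above — EnableIncrementalMarking is a hypothetical caller:

void EnableIncrementalMarking(Code* stub) {
  if (RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
    DCHECK_EQ(RecordWriteStub::INCREMENTAL, RecordWriteStub::GetMode(stub));
  }
}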

79
deps/v8/src/arm/codegen-arm.cc

@@ -298,24 +298,6 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#undef __
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterFrame(StackFrame::INTERNAL);
DCHECK(!masm->has_frame());
masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveFrame(StackFrame::INTERNAL);
DCHECK(masm->has_frame());
masm->set_has_frame(false);
}
// -------------------------------------------------------------------------
// Code generators
@@ -416,67 +398,6 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
#undef __
#ifdef DEBUG
// add(r0, pc, Operand(-8))
static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
#endif
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// Since patcher is a large object, allocate it dynamically when needed,
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before ARM simulator ICache is setup.
std::unique_ptr<CodePatcher> patcher(
new CodePatcher(isolate, young_sequence_.start(),
young_sequence_.length() / Assembler::kInstrSize,
CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->PushStandardFrame(r1);
patcher->masm()->nop(ip.code());
}
#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
}
#endif
bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
bool result = isolate->code_aging_helper()->IsYoung(sequence);
DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
return result;
}
Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
Address target_address = Memory::Address_at(
sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
Code* stub = GetCodeFromTargetAddress(target_address);
return GetAgeOfCodeAgeStub(stub);
}
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
Code::Age age) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age);
PatchingAssembler patcher(Assembler::IsolateData(isolate), sequence,
young_length / Assembler::kInstrSize);
patcher.add(r0, pc, Operand(-8));
patcher.ldr(pc, MemOperand(pc, -4));
patcher.emit_code_stub_address(stub);
patcher.FlushICache(isolate);
}
}
} // namespace internal
} // namespace v8
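The deleted aging sequence only works because of kPcLoadDelta: add r0, pc, #-8 materializes the sequence's own start address, and ldr pc, [pc, #-4] jumps through the stub address word emitted right after it. A sketch of where the removed GetCodeAge read that word from — the helper name is hypothetical:

static Address CodeAgeStubAddress(byte* sequence) {
  // The stub pointer is the last word of the age sequence.
  return Memory::Address_at(
      sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
}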

83
deps/v8/src/arm/deoptimizer-arm.cc

@@ -5,7 +5,6 @@
#include "src/assembler-inl.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
@@ -15,78 +14,6 @@ namespace internal {
const int Deoptimizer::table_entry_size_ = 8;
int Deoptimizer::patch_size() {
const int kCallInstructionSizeInWords = 3;
return kCallInstructionSizeInWords * Assembler::kInstrSize;
}
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
// Empty because there is no need for relocation information for the code
// patching in Deoptimizer::PatchCodeForDeoptimization below.
}
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
// Fail hard and early if we enter this code object again.
byte* pointer = code->FindCodeAgeSequence();
if (pointer != NULL) {
pointer += kNoCodeAgeSequenceLength;
} else {
pointer = code->instruction_start();
}
{
PatchingAssembler patcher(Assembler::IsolateData(isolate), pointer, 1);
patcher.bkpt(0);
patcher.FlushICache(isolate);
}
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
PatchingAssembler patcher(Assembler::IsolateData(isolate),
code_start_address + osr_offset, 1);
patcher.bkpt(0);
patcher.FlushICache(isolate);
}
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
// For each LLazyBailout instruction insert a call to the corresponding
// deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
// We need calls to have a predictable size in the unoptimized code, but
// this is optimized code, so we don't have to have a predictable size.
int call_size_in_bytes = MacroAssembler::CallDeoptimizerSize();
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
DCHECK(call_size_in_bytes <= patch_size());
CodePatcher patcher(isolate, call_address, call_size_in_words);
patcher.masm()->CallDeoptimizer(deopt_entry);
DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
DCHECK(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
prev_call_address = call_address;
#endif
}
}
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
@@ -100,8 +27,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Everything but pc, lr and ip which will be saved but not restored.
RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
const int kDoubleRegsSize = kDoubleSize * DwVfpRegister::kMaxNumRegisters;
const int kFloatRegsSize = kFloatSize * SwVfpRegister::kMaxNumRegisters;
const int kDoubleRegsSize = kDoubleSize * DwVfpRegister::kNumRegisters;
const int kFloatRegsSize = kFloatSize * SwVfpRegister::kNumRegisters;
// Save all allocatable VFP registers before messing with them.
DCHECK(kDoubleRegZero.code() == 13);
@@ -188,9 +115,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
// Copy VFP registers to
// double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
// double_registers_[DoubleRegister::kNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
@@ -201,7 +128,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
// Copy VFP registers to
// float_registers_[FloatRegister::kMaxNumAllocatableRegisters]
// float_registers_[FloatRegister::kNumAllocatableRegisters]
int float_regs_offset = FrameDescription::float_registers_offset();
for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
int code = config->GetAllocatableFloatCode(i);
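RegisterConfiguration::Crankshaft() is renamed to Default() throughout this file. The iteration pattern used in both copy loops above, as a standalone sketch:

const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
  int code = config->GetAllocatableDoubleCode(i);
  // 'code' indexes the FrameDescription double_registers_ storage.
}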

6
deps/v8/src/arm/disasm-arm.cc

@@ -40,7 +40,7 @@
namespace v8 {
namespace internal {
const auto GetRegConfig = RegisterConfiguration::Crankshaft;
const auto GetRegConfig = RegisterConfiguration::Default;
//------------------------------------------------------------------------------
@@ -726,7 +726,7 @@ void Decoder::Format(Instruction* instr, const char* format) {
// The disassembler may end up decoding data inlined in the code. We do not want
// it to crash if the data does not ressemble any known instruction.
// it to crash if the data does not resemble any known instruction.
#define VERIFY(condition) \
if(!(condition)) { \
Unknown(instr); \
@@ -2602,7 +2602,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
DecodeConstantPoolLength(instruction_bits));
return Instruction::kInstrSize;
} else if (instruction_bits == kCodeAgeJumpInstruction) {
// The code age prologue has a constant immediatly following the jump
// The code age prologue has a constant immediately following the jump
// instruction.
Instruction* target = Instruction::At(instr_ptr + Instruction::kInstrSize);
DecodeType2(instr);

8
deps/v8/src/arm/eh-frame-arm.cc

@@ -27,13 +27,13 @@ void EhFrameWriter::WriteInitialStateInCie() {
// static
int EhFrameWriter::RegisterToDwarfCode(Register name) {
switch (name.code()) {
case Register::kCode_fp:
case kRegCode_fp:
return kFpDwarfCode;
case Register::kCode_sp:
case kRegCode_sp:
return kSpDwarfCode;
case Register::kCode_lr:
case kRegCode_lr:
return kLrDwarfCode;
case Register::kCode_r0:
case kRegCode_r0:
return kR0DwarfCode;
default:
UNIMPLEMENTED();

12
deps/v8/src/arm/frames-arm.cc → deps/v8/src/arm/frame-constants-arm.cc

@@ -5,24 +5,24 @@
#if V8_TARGET_ARCH_ARM
#include "src/assembler.h"
#include "src/frames.h"
#include "src/frame-constants.h"
#include "src/macro-assembler.h"
#include "src/arm/assembler-arm-inl.h"
#include "src/arm/assembler-arm.h"
#include "src/arm/frames-arm.h"
#include "src/arm/frame-constants-arm.h"
#include "src/arm/macro-assembler-arm.h"
namespace v8 {
namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
UNREACHABLE();
}
Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
return register_count;
}
} // namespace internal
} // namespace v8

48
deps/v8/src/arm/frame-constants-arm.h

@@ -0,0 +1,48 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM_FRAMES_ARM_H_
#define V8_ARM_FRAMES_ARM_H_
namespace v8 {
namespace internal {
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset =
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
class ExitFrameConstants : public TypedFrameConstants {
public:
static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
DEFINE_TYPED_FRAME_SIZES(2);
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = 0 * kPointerSize;
// The calling JS function is below FP.
static const int kCallerPCOffset = 1 * kPointerSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
static const int kCallerSPDisplacement = 2 * kPointerSize;
};
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kLastParameterOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
static const int kReceiverOffset = -1 * kPointerSize;
};
} // namespace internal
} // namespace v8
#endif // V8_ARM_FRAMES_ARM_H_
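These constants are consumed as fp-relative offsets; a minimal sketch, assuming a live MacroAssembler — LoadCallerPC is a hypothetical helper:

void LoadCallerPC(MacroAssembler* masm, Register dst) {
  // The caller's pc sits one word above the saved fp (kCallerPCOffset).
  masm->ldr(dst, MemOperand(fp, ExitFrameConstants::kCallerPCOffset));
}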

117
deps/v8/src/arm/frames-arm.h

@@ -1,117 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM_FRAMES_ARM_H_
#define V8_ARM_FRAMES_ARM_H_
namespace v8 {
namespace internal {
// The ARM ABI does not specify the usage of register r9, which may be reserved
// as the static base or thread register on some platforms, in which case we
// leave it alone. Adjust the value of kR9Available accordingly:
const int kR9Available = 1; // 1 if available to us, 0 if reserved
// Register list in load/store instructions
// Note that the bit values must match those used in actual instruction encoding
const int kNumRegs = 16;
// Caller-saved/arguments registers
const RegList kJSCallerSaved =
1 << 0 | // r0 a1
1 << 1 | // r1 a2
1 << 2 | // r2 a3
1 << 3; // r3 a4
const int kNumJSCallerSaved = 4;
// Return the code of the n-th caller-saved register available to JavaScript
// e.g. JSCallerSavedReg(0) returns r0.code() == 0
int JSCallerSavedCode(int n);
// Callee-saved registers preserved when switching from C to JavaScript
const RegList kCalleeSaved =
1 << 4 | // r4 v1
1 << 5 | // r5 v2
1 << 6 | // r6 v3
1 << 7 | // r7 v4 (cp in JavaScript code)
1 << 8 | // r8 v5 (pp in JavaScript code)
kR9Available << 9 | // r9 v6
1 << 10 | // r10 v7
1 << 11; // r11 v8 (fp in JavaScript code)
// When calling into C++ (only for C++ calls that can't cause a GC).
// The call code will take care of lr, fp, etc.
const RegList kCallerSaved =
1 << 0 | // r0
1 << 1 | // r1
1 << 2 | // r2
1 << 3 | // r3
1 << 9; // r9
const int kNumCalleeSaved = 7 + kR9Available;
// Double registers d8 to d15 are callee-saved.
const int kNumDoubleCalleeSaved = 8;
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of 8.
// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
const int kNumSafepointRegisters = 16;
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
// ----------------------------------------------------
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset =
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
class ExitFrameConstants : public TypedFrameConstants {
public:
static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
DEFINE_TYPED_FRAME_SIZES(2);
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = 0 * kPointerSize;
// The calling JS function is below FP.
static const int kCallerPCOffset = 1 * kPointerSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
static const int kCallerSPDisplacement = 2 * kPointerSize;
};
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kLastParameterOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
static const int kReceiverOffset = -1 * kPointerSize;
};
} // namespace internal
} // namespace v8
#endif // V8_ARM_FRAMES_ARM_H_

120
deps/v8/src/arm/interface-descriptors-arm.cc

@@ -22,6 +22,18 @@ void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
default_stub_registers);
}
void RecordWriteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
const Register default_stub_registers[] = {r0, r1, r2, r3, r4};
data->RestrictAllocatableRegisters(default_stub_registers,
arraysize(default_stub_registers));
CHECK_LE(static_cast<size_t>(kParameterCount),
arraysize(default_stub_registers));
data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
const Register FastNewFunctionContextDescriptor::FunctionRegister() {
return r1;
}
@@ -49,8 +61,6 @@ const Register StoreTransitionDescriptor::MapRegister() { return r5; }
const Register StringCompareDescriptor::LeftRegister() { return r1; }
const Register StringCompareDescriptor::RightRegister() { return r0; }
const Register StringConcatDescriptor::ArgumentsCountRegister() { return r0; }
const Register ApiGetterDescriptor::HolderRegister() { return r0; }
const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
@@ -80,75 +90,12 @@ void TypeofDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneRegExpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2, r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2, r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2, r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2, r3, r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0, r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi, for RecordCallTarget)
// r4 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {r0, r1, r4, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments
@@ -300,14 +247,6 @@ void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void VarArgFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (arg count)
Register registers[] = {r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0};
@@ -321,30 +260,6 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2, r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpWithVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// r1 -- lhs
// r0 -- rhs
// r4 -- slot id
// r3 -- vector
Register registers[] = {r1, r0, r4, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CountOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0};
@@ -411,17 +326,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
r0, // argument count (not including receiver)
r1, // target to call checked to be Array function
r2, // allocation site feedback if available, undefined otherwise
r3 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {

1014
deps/v8/src/arm/macro-assembler-arm.cc

File diff suppressed because it is too large

392
deps/v8/src/arm/macro-assembler-arm.h

@@ -8,27 +8,26 @@
#include "src/arm/assembler-arm.h"
#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/frames.h"
#include "src/globals.h"
namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {Register::kCode_r0};
const Register kReturnRegister1 = {Register::kCode_r1};
const Register kReturnRegister2 = {Register::kCode_r2};
const Register kJSFunctionRegister = {Register::kCode_r1};
const Register kContextRegister = {Register::kCode_r7};
const Register kAllocateSizeRegister = {Register::kCode_r1};
const Register kInterpreterAccumulatorRegister = {Register::kCode_r0};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r5};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r6};
const Register kInterpreterDispatchTableRegister = {Register::kCode_r8};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_r0};
const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r3};
const Register kRuntimeCallFunctionRegister = {Register::kCode_r1};
const Register kRuntimeCallArgCountRegister = {Register::kCode_r0};
constexpr Register kReturnRegister0 = r0;
constexpr Register kReturnRegister1 = r1;
constexpr Register kReturnRegister2 = r2;
constexpr Register kJSFunctionRegister = r1;
constexpr Register kContextRegister = r7;
constexpr Register kAllocateSizeRegister = r1;
constexpr Register kInterpreterAccumulatorRegister = r0;
constexpr Register kInterpreterBytecodeOffsetRegister = r5;
constexpr Register kInterpreterBytecodeArrayRegister = r6;
constexpr Register kInterpreterDispatchTableRegister = r8;
constexpr Register kJavaScriptCallArgCountRegister = r0;
constexpr Register kJavaScriptCallNewTargetRegister = r3;
constexpr Register kRuntimeCallFunctionRegister = r1;
constexpr Register kRuntimeCallArgCountRegister = r0;
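With the aliases now constexpr, they can participate in other constant expressions. A sketch, assuming Register::bit() is itself constexpr in the new register design:

constexpr RegList kAllReturnRegisters = kReturnRegister0.bit() |
                                        kReturnRegister1.bit() |
                                        kReturnRegister2.bit();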
// ----------------------------------------------------------------------------
// Static helper functions
@@ -40,8 +39,8 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
// Give alias names to registers
const Register cp = {Register::kCode_r7}; // JavaScript context pointer.
const Register kRootRegister = {Register::kCode_r10}; // Roots array pointer.
constexpr Register cp = r7; // JavaScript context pointer.
constexpr Register kRootRegister = r10; // Roots array pointer.
// Flags used for AllocateHeapNumber
enum TaggingMode {
@@ -89,13 +88,7 @@ enum TargetAddressStorageMode {
class TurboAssembler : public Assembler {
public:
TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
CodeObjectRequired create_code_object)
: Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
}
}
CodeObjectRequired create_code_object);
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() const { return has_frame_; }
@@ -118,7 +111,7 @@ class TurboAssembler : public Assembler {
// Generates function and stub prologue code.
void StubPrologue(StackFrame::Type type);
void Prologue(bool code_pre_aging);
void Prologue();
// Push a standard frame, consisting of lr, fp, context and JS function
void PushStandardFrame(Register function_reg);
@@ -210,7 +203,7 @@ class TurboAssembler : public Assembler {
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Condition cond = al) {
DCHECK(!src1.is(src2));
DCHECK(src1 != src2);
if (src1.code() > src2.code()) {
ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
} else {
@@ -353,6 +346,13 @@ class TurboAssembler : public Assembler {
bool check_constant_pool = true);
void Call(Label* target);
// This should only be used when assembling a deoptimizer call because of
// the CheckConstPool invocation, which is only needed for deoptimization.
void CallForDeoptimization(Address target, RelocInfo::Mode rmode) {
Call(target, rmode);
CheckConstPool(false, false);
}
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count, Condition cond = al);
@@ -389,6 +389,42 @@ class TurboAssembler : public Assembler {
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met);
// Check whether d16-d31 are available on the CPU. The result is given by the
// Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
void CheckFor32DRegs(Register scratch);
void SaveRegisters(RegList registers);
void RestoreRegisters(RegList registers);
void CallRecordWriteStub(Register object, Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode);
// Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
// values to location, saving [d0..(d15|d31)].
void SaveFPRegs(Register location, Register scratch);
// Does a runtime check for 16/32 FP registers. Either way, pops 32 double
// values to location, restoring [d0..(d15|d31)].
void RestoreFPRegs(Register location, Register scratch);
// Calculate how much stack space (in bytes) is required to store caller
// registers excluding those specified in the arguments.
int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg) const;
// Push caller saved registers on the stack, and return the number of bytes
// stack pointer is adjusted.
int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
// Restore caller saved registers from the stack, and return the number of
// bytes stack pointer is adjusted.
int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
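The three new caller-saved helpers are meant to bracket calls into C++. A usage sketch that keeps the return value alive; the surrounding C call is elided:

// Preserve everything caller-saved except r0, which will carry the result.
masm->PushCallerSaved(kSaveFPRegs, kReturnRegister0);
// ... call out to C++ here ...
masm->PopCallerSaved(kSaveFPRegs, kReturnRegister0);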
@@ -398,7 +434,7 @@ class TurboAssembler : public Assembler {
// NaNs or +/-0.0, are expected to be rare and are handled in out-of-line
// code. The specific behaviour depends on supported instructions.
//
// These functions assume (and assert) that !left.is(right). It is permitted
// These functions assume (and assert) that left!=right. It is permitted
// for the result to alias either input register.
void FloatMax(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right,
Label* out_of_line);
@@ -433,7 +469,7 @@ class TurboAssembler : public Assembler {
void Move(Register dst, Register src, Condition cond = al);
void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
Condition cond = al) {
if (!src.IsRegister() || !src.rm().is(dst) || sbit != LeaveCC) {
if (!src.IsRegister() || src.rm() != dst || sbit != LeaveCC) {
mov(dst, src, sbit, cond);
}
}
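Move only emits an instruction when it would actually change state. A sketch of the paths through the guard above:

masm->Move(r0, Operand(r0));         // no-op: src.rm() == dst and sbit == LeaveCC
masm->Move(r0, Operand(r1));         // emits: mov r0, r1
masm->Move(r0, Operand(r0), SetCC);  // still emitted, because flags are wanted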
@@ -547,8 +583,6 @@ class MacroAssembler : public TurboAssembler {
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
int jit_cookie() const { return jit_cookie_; }
// Used for patching in calls to the deoptimizer.
void CallDeoptimizer(Address target);
static int CallDeoptimizerSize();
@@ -578,27 +612,13 @@ class MacroAssembler : public TurboAssembler {
Condition cond = al);
void Sbfx(Register dst, Register src, int lsb, int width,
Condition cond = al);
// The scratch register is not used for ARMv7.
// scratch can be the same register as src (in which case it is trashed), but
// not the same as dst.
void Bfi(Register dst, Register src, Register scratch, int lsb, int width,
Condition cond = al);
void PushObject(Handle<Object> object);
void Load(Register dst, const MemOperand& src, Representation r);
void Store(Register src, const MemOperand& dst, Representation r);
// Store an object to the root table.
void StoreRoot(Register source, Heap::RootListIndex index,
Condition cond = al);
// ---------------------------------------------------------------------------
// GC Support
void IncrementalMarkingRecordWriteHelper(Register object, Register value,
Register address);
enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
// Record in the remembered set the fact that we have a pointer to new space
@@ -660,11 +680,6 @@ class MacroAssembler : public TurboAssembler {
pointers_to_here_check_for_value);
}
// Notify the garbage collector that we wrote a code entry into a
// JSFunction. Only scratch is clobbered by the operation.
void RecordWriteCodeEntryField(Register js_function, Register code_entry,
Register scratch);
void RecordWriteForMap(Register object, Register map, Register dst,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
@@ -679,67 +694,10 @@ class MacroAssembler : public TurboAssembler {
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
void PopCommonFrame(Register marker_reg = no_reg);
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
void PopSafepointRegisters();
// Store value in register src in the safepoint stack slot for
// register dst.
void StoreToSafepointRegisterSlot(Register src, Register dst);
// Load the value of the src register from its safepoint stack slot
// into register dst.
void LoadFromSafepointRegisterSlot(Register dst, Register src);
// Load two consecutive registers with two consecutive memory locations.
void Ldrd(Register dst1,
Register dst2,
const MemOperand& src,
Condition cond = al);
// Store two consecutive registers to two consecutive memory locations.
void Strd(Register src1,
Register src2,
const MemOperand& dst,
Condition cond = al);
void Vmov(const DwVfpRegister dst, Double imm,
const Register scratch = no_reg);
// Loads the number from object into dst register.
// If |object| is neither smi nor heap number, |not_number| is jumped to
// with |object| still intact.
void LoadNumber(Register object,
LowDwVfpRegister dst,
Register heap_number_map,
Register scratch,
Label* not_number);
// Loads the number from object into double_dst in the double format.
// Control will jump to not_int32 if the value cannot be exactly represented
// by a 32-bit integer.
// Floating point values in the 32-bit integer range that are not exact
// integers won't be loaded.
void LoadNumberAsInt32Double(Register object,
DwVfpRegister double_dst,
Register heap_number_map,
Register scratch,
LowDwVfpRegister double_scratch,
Label* not_int32);
// Loads the number from object into dst as a 32-bit integer.
// Control will jump to not_int32 if the object cannot be exactly represented
// by a 32-bit integer.
// Floating point values in the 32-bit integer range that are not exact
// integers won't be converted.
void LoadNumberAsInt32(Register object,
Register dst,
Register heap_number_map,
Register scratch,
DwVfpRegister double_scratch0,
LowDwVfpRegister double_scratch1,
Label* not_int32);
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
@@ -753,8 +711,6 @@ class MacroAssembler : public TurboAssembler {
bool restore_context,
bool argument_count_is_length = false);
void LoadContext(Register dst, int context_chain_length);
// Load the global object from the current context.
void LoadGlobalObject(Register dst) {
LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
@@ -779,8 +735,7 @@ class MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
const ParameterCount& actual, InvokeFlag flag);
// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
@@ -789,27 +744,15 @@ class MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper);
void InvokeFunction(Register function, Register new_target,
const ParameterCount& actual, InvokeFlag flag);
void InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper);
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper);
void IsObjectJSStringType(Register object,
Register scratch,
Label* fail);
const ParameterCount& actual, InvokeFlag flag);
// Frame restart support
void MaybeDropFrames();
@@ -823,46 +766,6 @@ class MacroAssembler : public TurboAssembler {
// Must preserve the result register.
void PopStackHandler();
// ---------------------------------------------------------------------------
// Inline caching support
void GetNumberHash(Register t0, Register scratch);
inline void MarkCode(NopMarkerTypes type) {
nop(type);
}
// Check if the given instruction is a 'type' marker.
// i.e. check if it is a mov r<type>, r<type> (referenced as nop(type))
// These instructions are generated to mark special locations in the code,
// like some special IC code.
static inline bool IsMarkedCode(Instr instr, int type) {
DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
return IsNop(instr, type);
}
static inline int GetCodeMarker(Instr instr) {
int dst_reg_offset = 12;
int dst_mask = 0xf << dst_reg_offset;
int src_mask = 0xf;
int dst_reg = (instr & dst_mask) >> dst_reg_offset;
int src_reg = instr & src_mask;
uint32_t non_register_mask = ~(dst_mask | src_mask);
uint32_t mov_mask = al | 13 << 21;
// Return <n> if we have a mov rn rn, else return -1.
int type = ((instr & non_register_mask) == mov_mask) &&
(dst_reg == src_reg) &&
(FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
? src_reg
: -1;
DCHECK((type == -1) ||
((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
return type;
}
// ---------------------------------------------------------------------------
// Allocation support
@@ -880,25 +783,6 @@ class MacroAssembler : public TurboAssembler {
Label* gc_required,
AllocationFlags flags);
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
// when control continues at the gc_required label.
void AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
Register heap_number_map,
Label* gc_required,
MutableMode mode = IMMUTABLE);
void AllocateHeapNumberWithValue(Register result,
DwVfpRegister value,
Register scratch1,
Register scratch2,
Register heap_number_map,
Label* gc_required);
// Allocate and initialize a JSValue wrapper with the specified {constructor}
// and {value}.
void AllocateJSValue(Register result, Register constructor, Register value,
@@ -994,56 +878,16 @@ class MacroAssembler : public TurboAssembler {
b(ne, if_not_equal);
}
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
// Returns a condition that will be enabled if the object was a string
// and the passed-in condition passed. If the passed-in condition failed
// then flags remain unchanged.
Condition IsObjectStringType(Register obj, Register type,
Condition cond = al);
// Get the number of least significant bits from a register
void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
// Load the value of a smi object into a double register.
// The register value must be between d0 and d15.
void SmiToDouble(LowDwVfpRegister value, Register smi);
// Check if a double can be exactly represented as a signed 32-bit integer.
// Z flag set to one if true.
void TestDoubleIsInt32(DwVfpRegister double_input,
LowDwVfpRegister double_scratch);
// Try to convert a double to a signed 32-bit integer.
// Z flag set to one and result assigned if the conversion is exact.
void TryDoubleToInt32Exact(Register result,
DwVfpRegister double_input,
LowDwVfpRegister double_scratch);
// Floor a double and writes the value to the result register.
// Go to exact if the conversion is exact (to be able to test -0),
// fall through calling code if an overflow occurred, else go to done.
// In return, input_high is loaded with high bits of input.
void TryInt32Floor(Register result,
DwVfpRegister double_input,
Register input_high,
LowDwVfpRegister double_scratch,
Label* done,
Label* exact);
// Check whether d16-d31 are available on the CPU. The result is given by the
// Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
void CheckFor32DRegs(Register scratch);
// Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
// values to location, saving [d0..(d15|d31)].
void SaveFPRegs(Register location, Register scratch);
// Does a runtime check for 16/32 FP registers. Either way, pops 32 double
// values to location, restoring [d0..(d15|d31)].
void RestoreFPRegs(Register location, Register scratch);
// ---------------------------------------------------------------------------
// Runtime calls
@@ -1058,10 +902,6 @@ class MacroAssembler : public TurboAssembler {
void CallRuntime(const Runtime::Function* f,
int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
@@ -1076,10 +916,6 @@ class MacroAssembler : public TurboAssembler {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
int num_arguments);
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid);
@@ -1095,59 +931,20 @@ class MacroAssembler : public TurboAssembler {
void DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2);
// ---------------------------------------------------------------------------
// Number utilities
// Check whether the value of reg is a power of two and not zero. If not
// control continues at the label not_power_of_two. If reg is a power of two
// the register scratch contains the value of (reg - 1) when control falls
// through.
void JumpIfNotPowerOfTwoOrZero(Register reg,
Register scratch,
Label* not_power_of_two_or_zero);
// Check whether the value of reg is a power of two and not zero.
// Control falls through if it is, with scratch containing the mask
// value (reg - 1).
// Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
// zero or negative, or jumps to the 'not_power_of_two' label if the value is
// strictly positive but not a power of two.
void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
Register scratch,
Label* zero_and_neg,
Label* not_power_of_two);
// ---------------------------------------------------------------------------
// Smi utilities
void SmiTag(Register reg, SBit s = LeaveCC);
void SmiTag(Register dst, Register src, SBit s = LeaveCC);
// Try to convert int32 to smi. If the value is too large, preserve
// the original value and jump to not_a_smi. Destroys scratch and
// sets flags.
void TrySmiTag(Register reg, Label* not_a_smi) {
TrySmiTag(reg, reg, not_a_smi);
}
void TrySmiTag(Register reg, Register src, Label* not_a_smi) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
SmiTag(scratch, src, SetCC);
b(vs, not_a_smi);
mov(reg, scratch);
}
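TrySmiTag leans on SmiTag's SetCC variant: on this 32-bit port, tagging doubles the value (smis are value << 1), so the V (overflow) flag fires exactly for values outside the 31-bit smi range, which b(vs, not_a_smi) then catches. A usage sketch:

Label not_a_smi;
masm->TrySmiTag(r0, r1, &not_a_smi);  // r0 <- r1 tagged, or branch taken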
// Untag the source value into destination and jump if source is a smi.
// Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
// Test if the register contains a smi (Z == 0 (eq) if true).
void SmiTst(Register value);
void NonNegativeSmiTst(Register value);
// Jump if either of the registers contain a non-smi.
void JumpIfNotSmi(Register value, Label* not_smi_label);
// Jump if either of the registers contain a non-smi.
void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
// Jump if either of the registers contain a smi.
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
@@ -1173,18 +970,6 @@ class MacroAssembler : public TurboAssembler {
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
// Abort execution if reg is not the root value with the given index,
// enabled via --debug-code.
void AssertIsRoot(Register reg, Heap::RootListIndex index);
// ---------------------------------------------------------------------------
// HeapNumber utilities
void JumpIfNotHeapNumber(Register object,
Register heap_number_map,
Register scratch,
Label* on_not_heap_number);
// ---------------------------------------------------------------------------
// String utilities
@@ -1196,13 +981,6 @@ class MacroAssembler : public TurboAssembler {
Register scratch2,
Label* failure);
// Checks if both objects are sequential one-byte strings and jumps to label
// if either is not.
void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
Register scratch1,
Register scratch2,
Label* not_flat_one_byte_strings);
// Checks if both instance types are sequential one-byte strings and jumps to
// label if either is not.
void JumpIfBothInstanceTypesAreNotSequentialOneByte(
@@ -1211,11 +989,7 @@ class MacroAssembler : public TurboAssembler {
void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
void ClampUint8(Register output_reg, Register input_reg);
void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
void LoadAccessor(Register dst, Register holder, int accessor_index,
AccessorComponent accessor);
@@ -1229,32 +1003,14 @@ class MacroAssembler : public TurboAssembler {
DecodeField<Field>(reg, reg);
}
template <typename Field>
void DecodeFieldToSmi(Register dst, Register src);
template<typename Field>
void DecodeFieldToSmi(Register reg) {
DecodeField<Field>(reg, reg);
}
// Load the type feedback vector from a JavaScript frame.
void EmitLoadFeedbackVector(Register vector);
void EnterBuiltinFrame(Register context, Register target, Register argc);
void LeaveBuiltinFrame(Register context, Register target, Register argc);
// Expects object in r0 and returns map with validated enum cache
// in r0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
private:
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
const CallWrapper& call_wrapper);
const ParameterCount& actual, Label* done,
bool* definitely_mismatches, InvokeFlag flag);
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object,
@@ -1271,10 +1027,6 @@ class MacroAssembler : public TurboAssembler {
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
int jit_cookie_;
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.

7
deps/v8/src/arm/simulator-arm.cc

@@ -244,8 +244,7 @@ void ArmDebugger::Debug() {
value = GetRegisterValue(i);
PrintF(
"%3s: 0x%08x %10d",
RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
i),
RegisterConfiguration::Default()->GetGeneralRegisterName(i),
value, value);
if ((argc == 3 && strcmp(arg2, "fp") == 0) &&
i < 8 &&
@@ -5220,7 +5219,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
case Neon16: {
uint16_t src[8];
get_neon_register(Vm, src);
for (int i = 0; i < 4; i++) {
for (int i = 0; i < 2; i++) {
std::swap(src[i * 4], src[i * 4 + 3]);
std::swap(src[i * 4 + 1], src[i * 4 + 2]);
}
@@ -5833,7 +5832,7 @@ void Simulator::Execute() {
}
} else {
// FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
// we reach the particular instuction count.
// we reach the particular instruction count.
while (program_counter != end_sim_pc) {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
icount_++;

2
deps/v8/src/arm/simulator-arm.h

@@ -444,7 +444,7 @@ class Simulator {
};
StopCountAndDesc watched_stops_[kNumOfWatchedStops];
// Syncronization primitives. See ARM DDI 0406C.b, A2.9.
// Synchronization primitives. See ARM DDI 0406C.b, A2.9.
enum class MonitorAccess {
Open,
Exclusive,

259
deps/v8/src/arm64/assembler-arm64-inl.h

@@ -27,133 +27,21 @@ void RelocInfo::apply(intptr_t delta) {
*p += delta; // Relocate entry.
}
inline int CPURegister::code() const {
DCHECK(IsValid());
return reg_code;
}
inline CPURegister::RegisterType CPURegister::type() const {
DCHECK(IsValidOrNone());
return reg_type;
}
inline RegList CPURegister::Bit() const {
DCHECK(static_cast<size_t>(reg_code) < (sizeof(RegList) * kBitsPerByte));
return IsValid() ? 1UL << reg_code : 0;
}
inline int CPURegister::SizeInBits() const {
DCHECK(IsValid());
return reg_size;
}
inline int CPURegister::SizeInBytes() const {
DCHECK(IsValid());
DCHECK(SizeInBits() % 8 == 0);
return reg_size / 8;
}
inline bool CPURegister::Is8Bits() const {
DCHECK(IsValid());
return reg_size == 8;
}
inline bool CPURegister::Is16Bits() const {
DCHECK(IsValid());
return reg_size == 16;
}
inline bool CPURegister::Is32Bits() const {
DCHECK(IsValid());
return reg_size == 32;
}
inline bool CPURegister::Is64Bits() const {
DCHECK(IsValid());
return reg_size == 64;
}
inline bool CPURegister::Is128Bits() const {
DCHECK(IsValid());
return reg_size == 128;
}
inline bool CPURegister::IsValid() const {
if (IsValidRegister() || IsValidVRegister()) {
DCHECK(!IsNone());
return true;
} else {
DCHECK(IsNone());
return false;
}
}
inline bool CPURegister::IsValidRegister() const {
return IsRegister() &&
((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits)) &&
((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode));
}
inline bool CPURegister::IsValidVRegister() const {
return IsVRegister() &&
((reg_size == kBRegSizeInBits) || (reg_size == kHRegSizeInBits) ||
(reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits) ||
(reg_size == kQRegSizeInBits)) &&
(reg_code < kNumberOfVRegisters);
}
inline bool CPURegister::IsNone() const {
// kNoRegister types should always have size 0 and code 0.
DCHECK((reg_type != kNoRegister) || (reg_code == 0));
DCHECK((reg_type != kNoRegister) || (reg_size == 0));
return reg_type == kNoRegister;
}
inline bool CPURegister::Is(const CPURegister& other) const {
DCHECK(IsValidOrNone() && other.IsValidOrNone());
return Aliases(other) && (reg_size == other.reg_size);
}
inline bool CPURegister::Aliases(const CPURegister& other) const {
DCHECK(IsValidOrNone() && other.IsValidOrNone());
return (reg_code == other.reg_code) && (reg_type == other.reg_type);
}
inline bool CPURegister::IsRegister() const {
return reg_type == kRegister;
}
inline bool CPURegister::IsVRegister() const { return reg_type == kVRegister; }
inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
return (reg_size == other.reg_size) && (reg_type == other.reg_type);
}
inline bool CPURegister::IsValidOrNone() const {
return IsValid() || IsNone();
return (reg_size_ == other.reg_size_) && (reg_type_ == other.reg_type_);
}
inline bool CPURegister::IsZero() const {
DCHECK(IsValid());
return IsRegister() && (reg_code == kZeroRegCode);
return IsRegister() && (reg_code_ == kZeroRegCode);
}
inline bool CPURegister::IsSP() const {
DCHECK(IsValid());
return IsRegister() && (reg_code == kSPRegInternalCode);
return IsRegister() && (reg_code_ == kSPRegInternalCode);
}
@@ -255,44 +143,53 @@ inline VRegister VRegister::VRegFromCode(unsigned code) {
}
inline Register CPURegister::W() const {
DCHECK(IsValidRegister());
return Register::WRegFromCode(reg_code);
DCHECK(IsRegister());
return Register::WRegFromCode(reg_code_);
}
inline Register CPURegister::Reg() const {
DCHECK(IsRegister());
return Register::Create(reg_code_, reg_size_);
}
inline VRegister CPURegister::VReg() const {
DCHECK(IsVRegister());
return VRegister::Create(reg_code_, reg_size_);
}
inline Register CPURegister::X() const {
DCHECK(IsValidRegister());
return Register::XRegFromCode(reg_code);
DCHECK(IsRegister());
return Register::XRegFromCode(reg_code_);
}
inline VRegister CPURegister::V() const {
DCHECK(IsValidVRegister());
return VRegister::VRegFromCode(reg_code);
DCHECK(IsVRegister());
return VRegister::VRegFromCode(reg_code_);
}
inline VRegister CPURegister::B() const {
DCHECK(IsValidVRegister());
return VRegister::BRegFromCode(reg_code);
DCHECK(IsVRegister());
return VRegister::BRegFromCode(reg_code_);
}
inline VRegister CPURegister::H() const {
DCHECK(IsValidVRegister());
return VRegister::HRegFromCode(reg_code);
DCHECK(IsVRegister());
return VRegister::HRegFromCode(reg_code_);
}
inline VRegister CPURegister::S() const {
DCHECK(IsValidVRegister());
return VRegister::SRegFromCode(reg_code);
DCHECK(IsVRegister());
return VRegister::SRegFromCode(reg_code_);
}
inline VRegister CPURegister::D() const {
DCHECK(IsValidVRegister());
return VRegister::DRegFromCode(reg_code);
DCHECK(IsVRegister());
return VRegister::DRegFromCode(reg_code_);
}
inline VRegister CPURegister::Q() const {
DCHECK(IsValidVRegister());
return VRegister::QRegFromCode(reg_code);
DCHECK(IsVRegister());
return VRegister::QRegFromCode(reg_code_);
}
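The size/type conversions survive the field renames (reg_code becomes reg_code_, and the validity DCHECKs relax to plain type checks). A round-trip sketch using the accessors kept above:

Register x = Register::XRegFromCode(0);  // x0
Register w = x.W();                      // w0: same code, 32-bit view
DCHECK(w.X().Is(x));                     // widening restores x0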
@@ -542,15 +439,12 @@ MemOperand::MemOperand(Register base,
DCHECK(shift == LSL);
}
MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
: base_(base), addrmode_(addrmode) {
: base_(base), regoffset_(NoReg), addrmode_(addrmode) {
DCHECK(base.Is64Bits() && !base.IsZero());
if (offset.IsImmediate()) {
offset_ = offset.ImmediateValue();
regoffset_ = NoReg;
} else if (offset.IsShiftedRegister()) {
DCHECK((addrmode == Offset) || (addrmode == PostIndex));
@@ -811,73 +705,6 @@ void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
}
}
Handle<Cell> RelocInfo::target_cell_handle() {
UNIMPLEMENTED();
Cell *null_cell = NULL;
return Handle<Cell>(null_cell);
}
Cell* RelocInfo::target_cell() {
DCHECK(rmode_ == RelocInfo::CELL);
return Cell::FromValueAddress(Memory::Address_at(pc_));
}
void RelocInfo::set_target_cell(Cell* cell,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
UNIMPLEMENTED();
}
static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize;
Handle<Code> RelocInfo::code_age_stub_handle(Assembler* origin) {
UNREACHABLE(); // This should never be reached on ARM64.
return Handle<Code>();
}
Code* RelocInfo::code_age_stub() {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
// Read the stub entry point from the code age sequence.
Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address));
}
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
DCHECK(!Code::IsYoungSequence(stub->GetIsolate(), pc_));
// Overwrite the stub entry point in the code age sequence. This is loaded as
// a literal so there is no need to call FlushICache here.
Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
Memory::Address_at(stub_entry_address) = stub->instruction_start();
}
Address RelocInfo::debug_call_address() {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
// For the above sequences the Relocinfo points to the load literal loading
// the call address.
STATIC_ASSERT(Assembler::kPatchDebugBreakSlotAddressOffset == 0);
return Assembler::target_address_at(pc_, host_);
}
void RelocInfo::set_debug_call_address(Isolate* isolate, Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
STATIC_ASSERT(Assembler::kPatchDebugBreakSlotAddressOffset == 0);
Assembler::set_target_address_at(isolate, pc_, host_, target);
if (host() != NULL) {
Code* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target_code);
}
}
void RelocInfo::WipeOut(Isolate* isolate) {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
@@ -896,43 +723,15 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitEmbeddedPointer(host(), this);
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::CELL) {
visitor->VisitCellPointer(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(host(), this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
visitor->VisitDebugTarget(host(), this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(host(), this);
}
}
template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::CELL) {
StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
StaticVisitor::VisitDebugTarget(heap, this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
}
LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
DCHECK(rt.IsValid());
if (rt.IsRegister()) {

45
deps/v8/src/arm64/assembler-arm64.cc

@@ -31,10 +31,10 @@
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/frames-arm64.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/code-stubs.h"
#include "src/frame-constants.h"
#include "src/register-configuration.h"
namespace v8 {
@ -176,33 +176,21 @@ bool RelocInfo::IsInConstantPool() {
return instr->IsLdrLiteralX();
}
Address RelocInfo::wasm_memory_reference() {
DCHECK(IsWasmMemoryReference(rmode_));
Address RelocInfo::embedded_address() const {
return Memory::Address_at(Assembler::target_pointer_address_at(pc_));
}
uint32_t RelocInfo::wasm_memory_size_reference() {
DCHECK(IsWasmMemorySizeReference(rmode_));
uint32_t RelocInfo::embedded_size() const {
return Memory::uint32_at(Assembler::target_pointer_address_at(pc_));
}
Address RelocInfo::wasm_global_reference() {
DCHECK(IsWasmGlobalReference(rmode_));
return Memory::Address_at(Assembler::target_pointer_address_at(pc_));
}
uint32_t RelocInfo::wasm_function_table_size_reference() {
DCHECK(IsWasmFunctionTableSizeReference(rmode_));
return Memory::uint32_at(Assembler::target_pointer_address_at(pc_));
}
void RelocInfo::unchecked_update_wasm_memory_reference(
Isolate* isolate, Address address, ICacheFlushMode flush_mode) {
void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
}
void RelocInfo::unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode) {
void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode) {
Memory::uint32_at(Assembler::target_pointer_address_at(pc_)) = size;
// No icache flushing needed, see comment in set_target_address_at.
}
@ -210,7 +198,7 @@ void RelocInfo::unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
Register reg3, Register reg4) {
CPURegList regs(reg1, reg2, reg3, reg4);
const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
Register candidate = Register::from_code(code);
@ -236,10 +224,10 @@ bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
for (unsigned i = 0; i < arraysize(regs); i++) {
if (regs[i].IsRegister()) {
number_of_valid_regs++;
unique_regs |= regs[i].Bit();
unique_regs |= regs[i].bit();
} else if (regs[i].IsVRegister()) {
number_of_valid_fpregs++;
unique_fpregs |= regs[i].Bit();
unique_fpregs |= regs[i].bit();
} else {
DCHECK(!regs[i].IsValid());
}
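The loop above detects aliasing by folding each valid register's bit into a RegList mask; a duplicated register sets an already-set bit, so the mask's popcount falls short of the number of valid registers. The same check in isolation, with plain integer codes standing in for CPURegister:
#include <bitset>
#include <cstdint>
#include <initializer_list>
using RegList = uint64_t;
bool AnyAliased(std::initializer_list<int> reg_codes) {
  RegList mask = 0;
  int valid = 0;
  for (int code : reg_codes) {
    if (code < 0) continue;  // a negative code stands in for "no register"
    mask |= RegList{1} << code;
    ++valid;
  }
  // A repeated code sets a bit that is already set, shrinking the popcount.
  return static_cast<int>(std::bitset<64>(mask).count()) < valid;
}
// AnyAliased({0, 1, 2}) == false; AnyAliased({0, 1, 0}) == true.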
@ -341,7 +329,6 @@ bool ConstPool::AddSharedEntry(SharedEntryMap& entry_map, uint64_t data,
bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) {
DCHECK(mode != RelocInfo::COMMENT && mode != RelocInfo::CONST_POOL &&
mode != RelocInfo::VENEER_POOL &&
mode != RelocInfo::CODE_AGE_SEQUENCE &&
mode != RelocInfo::DEOPT_SCRIPT_OFFSET &&
mode != RelocInfo::DEOPT_INLINING_ID &&
mode != RelocInfo::DEOPT_REASON && mode != RelocInfo::DEOPT_ID);
@ -3511,7 +3498,7 @@ void Assembler::cmlt(const VRegister& vd, const VRegister& vn, int value) {
NEON_3SAME_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC
#define NEON_FP3SAME_LIST(V) \
#define NEON_FP3SAME_LIST_V2(V) \
V(fadd, NEON_FADD, FADD) \
V(fsub, NEON_FSUB, FSUB) \
V(fmul, NEON_FMUL, FMUL) \
@ -3551,7 +3538,7 @@ NEON_3SAME_LIST(DEFINE_ASM_FUNC)
} \
NEONFP3Same(vd, vn, vm, op); \
}
NEON_FP3SAME_LIST(DEFINE_ASM_FUNC)
NEON_FP3SAME_LIST_V2(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC
void Assembler::addp(const VRegister& vd, const VRegister& vn) {
@ -4764,17 +4751,15 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
bool write_reloc_info = true;
if (((rmode >= RelocInfo::COMMENT) &&
(rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL)) ||
if ((rmode == RelocInfo::COMMENT) ||
(rmode == RelocInfo::INTERNAL_REFERENCE) ||
(rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
(rmode == RelocInfo::DEOPT_SCRIPT_OFFSET) ||
(rmode == RelocInfo::DEOPT_INLINING_ID) ||
(rmode == RelocInfo::DEOPT_REASON) || (rmode == RelocInfo::DEOPT_ID)) {
// Adjust code for new modes.
DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsComment(rmode) ||
RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsDeoptId(rmode) ||
RelocInfo::IsDeoptPosition(rmode) ||
DCHECK(RelocInfo::IsComment(rmode) || RelocInfo::IsDeoptReason(rmode) ||
RelocInfo::IsDeoptId(rmode) || RelocInfo::IsDeoptPosition(rmode) ||
RelocInfo::IsInternalReference(rmode) ||
RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode));
// These modes do not need an entry in the constant pool.

371
deps/v8/src/arm64/assembler-arm64.h

@ -21,7 +21,6 @@
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// Registers.
// clang-format off
@ -70,72 +69,111 @@ namespace internal {
constexpr int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
static const int kNoCodeAgeSequenceLength = 5 * kInstructionSize;
const int kNumRegs = kNumberOfRegisters;
// Registers x0-x17 are caller-saved.
const int kNumJSCallerSaved = 18;
const RegList kJSCallerSaved = 0x3ffff;
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of eight.
// TODO(all): Refine this number.
const int kNumSafepointRegisters = 32;
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
#define kSafepointSavedRegisters CPURegList::GetSafepointSavedRegisters().list()
#define kNumSafepointSavedRegisters \
CPURegList::GetSafepointSavedRegisters().Count()
// Some CPURegister methods can return Register and VRegister types, so we
// need to declare them in advance.
struct Register;
struct VRegister;
class Register;
class VRegister;
struct CPURegister {
enum Code {
#define REGISTER_CODE(R) kCode_##R,
GENERAL_REGISTERS(REGISTER_CODE)
enum RegisterCode {
#define REGISTER_CODE(R) kRegCode_##R,
GENERAL_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
kAfterLast,
kCode_no_reg = -1
};
kRegAfterLast
};
class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
public:
enum RegisterType {
// The kInvalid value is used to detect uninitialized static instances,
// which are always zero-initialized before any constructors are called.
kInvalid = 0,
kRegister,
kVRegister,
kNoRegister
};
constexpr CPURegister() : CPURegister(0, 0, CPURegister::kNoRegister) {}
constexpr CPURegister(int reg_code, int reg_size, RegisterType reg_type,
int lane_count = 1)
: reg_code(reg_code),
reg_size(reg_size),
reg_type(reg_type),
lane_count(lane_count) {}
static CPURegister Create(int reg_code, int reg_size, RegisterType reg_type,
int lane_count = 1) {
CPURegister r = {reg_code, reg_size, reg_type, lane_count};
return r;
}
int code() const;
RegisterType type() const;
RegList Bit() const;
int SizeInBits() const;
int SizeInBytes() const;
bool Is8Bits() const;
bool Is16Bits() const;
bool Is32Bits() const;
bool Is64Bits() const;
bool Is128Bits() const;
bool IsValid() const;
bool IsValidOrNone() const;
bool IsValidRegister() const;
bool IsValidVRegister() const;
bool IsNone() const;
bool Is(const CPURegister& other) const;
bool Aliases(const CPURegister& other) const;
static constexpr CPURegister no_reg() {
return CPURegister{0, 0, kNoRegister};
}
template <int code, int size, RegisterType type>
static constexpr CPURegister Create() {
static_assert(IsValid(code, size, type), "Cannot create invalid registers");
return CPURegister{code, size, type};
}
static CPURegister Create(int code, int size, RegisterType type) {
DCHECK(IsValid(code, size, type));
return CPURegister{code, size, type};
}
RegisterType type() const { return reg_type_; }
RegList bit() const {
DCHECK(static_cast<size_t>(reg_code_) < (sizeof(RegList) * kBitsPerByte));
return IsValid() ? 1UL << reg_code_ : 0;
}
int SizeInBits() const {
DCHECK(IsValid());
return reg_size_;
}
int SizeInBytes() const {
DCHECK(IsValid());
DCHECK(SizeInBits() % 8 == 0);
return reg_size_ / 8;
}
bool Is8Bits() const {
DCHECK(IsValid());
return reg_size_ == 8;
}
bool Is16Bits() const {
DCHECK(IsValid());
return reg_size_ == 16;
}
bool Is32Bits() const {
DCHECK(IsValid());
return reg_size_ == 32;
}
bool Is64Bits() const {
DCHECK(IsValid());
return reg_size_ == 64;
}
bool Is128Bits() const {
DCHECK(IsValid());
return reg_size_ == 128;
}
bool IsValid() const { return reg_type_ != kNoRegister; }
bool IsNone() const { return reg_type_ == kNoRegister; }
bool Is(const CPURegister& other) const {
return Aliases(other) && (reg_size_ == other.reg_size_);
}
bool Aliases(const CPURegister& other) const {
return (reg_code_ == other.reg_code_) && (reg_type_ == other.reg_type_);
}
bool IsZero() const;
bool IsSP() const;
bool IsRegister() const;
bool IsVRegister() const;
bool IsRegister() const { return reg_type_ == kRegister; }
bool IsVRegister() const { return reg_type_ == kVRegister; }
bool IsFPRegister() const { return IsS() || IsD(); }
bool IsW() const { return IsValidRegister() && Is32Bits(); }
bool IsX() const { return IsValidRegister() && Is64Bits(); }
bool IsW() const { return IsRegister() && Is32Bits(); }
bool IsX() const { return IsRegister() && Is64Bits(); }
// These assertions ensure that the size and type of the register are as
// described. They do not consider the number of lanes that make up a vector.
@ -150,6 +188,9 @@ struct CPURegister {
bool IsD() const { return IsV() && Is64Bits(); }
bool IsQ() const { return IsV() && Is128Bits(); }
Register Reg() const;
VRegister VReg() const;
Register X() const;
Register W() const;
VRegister V() const;
@ -165,25 +206,51 @@ struct CPURegister {
bool is(const CPURegister& other) const { return Is(other); }
bool is_valid() const { return IsValid(); }
int reg_code;
int reg_size;
RegisterType reg_type;
int lane_count;
};
protected:
int reg_size_;
RegisterType reg_type_;
friend class RegisterBase;
struct Register : public CPURegister {
static Register Create(int code, int size) {
return Register(CPURegister::Create(code, size, CPURegister::kRegister));
constexpr CPURegister(int code, int size, RegisterType type)
: RegisterBase(code), reg_size_(size), reg_type_(type) {}
static constexpr bool IsValidRegister(int code, int size) {
return (size == kWRegSizeInBits || size == kXRegSizeInBits) &&
(code < kNumberOfRegisters || code == kSPRegInternalCode);
}
constexpr Register() : CPURegister() {}
static constexpr bool IsValidVRegister(int code, int size) {
return (size == kBRegSizeInBits || size == kHRegSizeInBits ||
size == kSRegSizeInBits || size == kDRegSizeInBits ||
size == kQRegSizeInBits) &&
code < kNumberOfVRegisters;
}
constexpr explicit Register(const CPURegister& r) : CPURegister(r) {}
static constexpr bool IsValid(int code, int size, RegisterType type) {
return (type == kRegister && IsValidRegister(code, size)) ||
(type == kVRegister && IsValidVRegister(code, size));
}
bool IsValid() const {
DCHECK(IsRegister() || IsNone());
return IsValidRegister();
static constexpr bool IsNone(int code, int size, RegisterType type) {
return type == kNoRegister && code == 0 && size == 0;
}
};
static_assert(IS_TRIVIALLY_COPYABLE(CPURegister),
"CPURegister can efficiently be passed by value");
class Register : public CPURegister {
public:
static constexpr Register no_reg() { return Register(CPURegister::no_reg()); }
template <int code, int size>
static constexpr Register Create() {
return Register(CPURegister::Create<code, size, CPURegister::kRegister>());
}
static Register Create(int code, int size) {
return Register(CPURegister::Create(code, size, CPURegister::kRegister));
}
static Register XRegFromCode(unsigned code);
@ -193,10 +260,6 @@ struct Register : public CPURegister {
// These members are necessary for compilation.
// A few of them may be unused for now.
static constexpr int kNumRegisters = kNumberOfRegisters;
STATIC_ASSERT(kNumRegisters == Code::kAfterLast);
static int NumRegisters() { return kNumRegisters; }
// We allow crankshaft to use the following registers:
// - x0 to x15
// - x18 to x24
@ -218,26 +281,40 @@ struct Register : public CPURegister {
}
// End of V8 compatibility section -----------------------
//
private:
constexpr explicit Register(const CPURegister& r) : CPURegister(r) {}
};
static_assert(IS_TRIVIALLY_COPYABLE(Register),
"Register can efficiently be passed by value");
constexpr bool kSimpleFPAliasing = true;
constexpr bool kSimdMaskRegisters = false;
struct VRegister : public CPURegister {
enum Code {
#define REGISTER_CODE(R) kCode_##R,
DOUBLE_REGISTERS(REGISTER_CODE)
enum DoubleRegisterCode {
#define REGISTER_CODE(R) kDoubleCode_##R,
DOUBLE_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
kAfterLast,
kCode_no_reg = -1
};
kDoubleAfterLast
};
class VRegister : public CPURegister {
public:
static constexpr VRegister no_reg() {
return VRegister(CPURegister::no_reg(), 0);
}
static VRegister Create(int reg_code, int reg_size, int lane_count = 1) {
DCHECK(base::bits::IsPowerOfTwo(lane_count) && (lane_count <= 16));
VRegister v(CPURegister::Create(reg_code, reg_size, CPURegister::kVRegister,
lane_count));
DCHECK(v.IsValidVRegister());
return v;
template <int code, int size, int lane_count = 1>
static constexpr VRegister Create() {
static_assert(IsValidLaneCount(lane_count), "Invalid lane count");
return VRegister(CPURegister::Create<code, size, kVRegister>(), lane_count);
}
static VRegister Create(int code, int size, int lane_count = 1) {
DCHECK(IsValidLaneCount(lane_count));
return VRegister(CPURegister::Create(code, size, CPURegister::kVRegister),
lane_count);
}
static VRegister Create(int reg_code, VectorFormat format) {
@ -246,15 +323,6 @@ struct VRegister : public CPURegister {
return VRegister::Create(reg_code, reg_size, reg_count);
}
constexpr VRegister() : CPURegister() {}
constexpr explicit VRegister(const CPURegister& r) : CPURegister(r) {}
bool IsValid() const {
DCHECK(IsVRegister() || IsNone());
return IsValidVRegister();
}
static VRegister BRegFromCode(unsigned code);
static VRegister HRegFromCode(unsigned code);
static VRegister SRegFromCode(unsigned code);
@ -287,14 +355,14 @@ struct VRegister : public CPURegister {
return VRegister::Create(code(), kDRegSizeInBits, 1);
}
bool Is8B() const { return (Is64Bits() && (lane_count == 8)); }
bool Is16B() const { return (Is128Bits() && (lane_count == 16)); }
bool Is4H() const { return (Is64Bits() && (lane_count == 4)); }
bool Is8H() const { return (Is128Bits() && (lane_count == 8)); }
bool Is2S() const { return (Is64Bits() && (lane_count == 2)); }
bool Is4S() const { return (Is128Bits() && (lane_count == 4)); }
bool Is1D() const { return (Is64Bits() && (lane_count == 1)); }
bool Is2D() const { return (Is128Bits() && (lane_count == 2)); }
bool Is8B() const { return (Is64Bits() && (lane_count_ == 8)); }
bool Is16B() const { return (Is128Bits() && (lane_count_ == 16)); }
bool Is4H() const { return (Is64Bits() && (lane_count_ == 4)); }
bool Is8H() const { return (Is128Bits() && (lane_count_ == 8)); }
bool Is2S() const { return (Is64Bits() && (lane_count_ == 2)); }
bool Is4S() const { return (Is128Bits() && (lane_count_ == 4)); }
bool Is1D() const { return (Is64Bits() && (lane_count_ == 1)); }
bool Is2D() const { return (Is128Bits() && (lane_count_ == 2)); }
// For consistency, we assert the number of lanes of these scalar registers,
// even though there are no vectors of equivalent total size with which they
@ -317,22 +385,22 @@ struct VRegister : public CPURegister {
bool IsLaneSizeS() const { return LaneSizeInBits() == kSRegSizeInBits; }
bool IsLaneSizeD() const { return LaneSizeInBits() == kDRegSizeInBits; }
bool IsScalar() const { return lane_count == 1; }
bool IsVector() const { return lane_count > 1; }
bool IsScalar() const { return lane_count_ == 1; }
bool IsVector() const { return lane_count_ > 1; }
bool IsSameFormat(const VRegister& other) const {
return (reg_size == other.reg_size) && (lane_count == other.lane_count);
return (reg_size_ == other.reg_size_) && (lane_count_ == other.lane_count_);
}
int LaneCount() const { return lane_count; }
int LaneCount() const { return lane_count_; }
unsigned LaneSizeInBytes() const { return SizeInBytes() / lane_count; }
unsigned LaneSizeInBytes() const { return SizeInBytes() / lane_count_; }
unsigned LaneSizeInBits() const { return LaneSizeInBytes() * 8; }
// Start of V8 compatibility section ---------------------
static constexpr int kMaxNumRegisters = kNumberOfVRegisters;
STATIC_ASSERT(kMaxNumRegisters == Code::kAfterLast);
STATIC_ASSERT(kMaxNumRegisters == kDoubleAfterLast);
// Crankshaft can use all the V registers except:
// - d15 which is used to keep the 0 double value
@ -343,51 +411,52 @@ struct VRegister : public CPURegister {
return VRegister::Create(code, kDRegSizeInBits);
}
// End of V8 compatibility section -----------------------
};
static_assert(sizeof(CPURegister) == sizeof(Register),
"CPURegister must be same size as Register");
static_assert(sizeof(CPURegister) == sizeof(VRegister),
"CPURegister must be same size as VRegister");
private:
int lane_count_;
#define DEFINE_REGISTER(register_class, name, code, size, type) \
constexpr register_class name { CPURegister(code, size, type) }
#define ALIAS_REGISTER(register_class, alias, name) \
constexpr register_class alias = name
constexpr explicit VRegister(const CPURegister& r, int lane_count)
: CPURegister(r), lane_count_(lane_count) {}
static constexpr bool IsValidLaneCount(int lane_count) {
return base::bits::IsPowerOfTwo(lane_count) && lane_count <= 16;
}
};
static_assert(IS_TRIVIALLY_COPYABLE(VRegister),
"VRegister can efficiently be passed by value");
// No*Reg is used to indicate an unused argument, or an error case. Note that
// these all compare equal (using the Is() method). The Register and VRegister
// variants are provided for convenience.
DEFINE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
DEFINE_REGISTER(VRegister, NoVReg, 0, 0, CPURegister::kNoRegister);
DEFINE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);
constexpr Register NoReg = Register::no_reg();
constexpr VRegister NoVReg = VRegister::no_reg();
constexpr CPURegister NoCPUReg = CPURegister::no_reg();
// v8 compatibility.
DEFINE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);
constexpr Register no_reg = NoReg;
#define DEFINE_REGISTER(register_class, name, ...) \
constexpr register_class name = register_class::Create<__VA_ARGS__>()
#define ALIAS_REGISTER(register_class, alias, name) \
constexpr register_class alias = name
#define DEFINE_REGISTERS(N) \
DEFINE_REGISTER(Register, w##N, N, kWRegSizeInBits, CPURegister::kRegister); \
DEFINE_REGISTER(Register, x##N, N, kXRegSizeInBits, CPURegister::kRegister);
#define DEFINE_REGISTERS(N) \
DEFINE_REGISTER(Register, w##N, N, kWRegSizeInBits); \
DEFINE_REGISTER(Register, x##N, N, kXRegSizeInBits);
GENERAL_REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS
DEFINE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
CPURegister::kRegister);
DEFINE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
CPURegister::kRegister);
#define DEFINE_VREGISTERS(N) \
DEFINE_REGISTER(VRegister, b##N, N, kBRegSizeInBits, \
CPURegister::kVRegister); \
DEFINE_REGISTER(VRegister, h##N, N, kHRegSizeInBits, \
CPURegister::kVRegister); \
DEFINE_REGISTER(VRegister, s##N, N, kSRegSizeInBits, \
CPURegister::kVRegister); \
DEFINE_REGISTER(VRegister, d##N, N, kDRegSizeInBits, \
CPURegister::kVRegister); \
DEFINE_REGISTER(VRegister, q##N, N, kQRegSizeInBits, \
CPURegister::kVRegister); \
DEFINE_REGISTER(VRegister, v##N, N, kQRegSizeInBits, CPURegister::kVRegister);
DEFINE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits);
DEFINE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits);
#define DEFINE_VREGISTERS(N) \
DEFINE_REGISTER(VRegister, b##N, N, kBRegSizeInBits); \
DEFINE_REGISTER(VRegister, h##N, N, kHRegSizeInBits); \
DEFINE_REGISTER(VRegister, s##N, N, kSRegSizeInBits); \
DEFINE_REGISTER(VRegister, d##N, N, kDRegSizeInBits); \
DEFINE_REGISTER(VRegister, q##N, N, kQRegSizeInBits); \
DEFINE_REGISTER(VRegister, v##N, N, kQRegSizeInBits);
GENERAL_REGISTER_CODE_LIST(DEFINE_VREGISTERS)
#undef DEFINE_VREGISTERS
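DEFINE_REGISTERS and DEFINE_VREGISTERS are X-macros: GENERAL_REGISTER_CODE_LIST expands its argument once per register code, so every register name is stamped out from a single list. A minimal illustration with made-up names:
#define DEMO_CODE_LIST(V) V(0) V(1) V(2) V(3)
// The list macro calls V once per code; the client macro decides what to
// emit for each one.
#define DEMO_DEFINE_REGISTERS(N) \
  constexpr int w##N = N;        \
  constexpr int x##N = N;
DEMO_CODE_LIST(DEMO_DEFINE_REGISTERS)  // defines w0..w3 and x0..x3
#undef DEMO_DEFINE_REGISTERS
#undef DEMO_CODE_LIST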
@ -416,6 +485,9 @@ ALIAS_REGISTER(Register, lr, x30);
ALIAS_REGISTER(Register, xzr, x31);
ALIAS_REGISTER(Register, wzr, w31);
// Register used for padding stack slots.
ALIAS_REGISTER(Register, padreg, x31);
// Keeps the 0 double value.
ALIAS_REGISTER(VRegister, fp_zero, d15);
// MacroAssembler fixed V Registers.
@ -485,12 +557,11 @@ typedef VRegister Simd128Register;
// Lists of registers.
class CPURegList {
public:
explicit CPURegList(CPURegister reg1,
CPURegister reg2 = NoCPUReg,
CPURegister reg3 = NoCPUReg,
CPURegister reg4 = NoCPUReg)
: list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
size_(reg1.SizeInBits()), type_(reg1.type()) {
explicit CPURegList(CPURegister reg1, CPURegister reg2 = NoCPUReg,
CPURegister reg3 = NoCPUReg, CPURegister reg4 = NoCPUReg)
: list_(reg1.bit() | reg2.bit() | reg3.bit() | reg4.bit()),
size_(reg1.SizeInBits()),
type_(reg1.type()) {
DCHECK(AreSameSizeAndType(reg1, reg2, reg3, reg4));
DCHECK(IsValid());
}
@ -581,10 +652,10 @@ class CPURegList {
const CPURegister& other4 = NoCPUReg) const {
DCHECK(IsValid());
RegList list = 0;
if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit();
if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
if (!other3.IsNone() && (other3.type() == type_)) list |= other3.Bit();
if (!other4.IsNone() && (other4.type() == type_)) list |= other4.Bit();
if (!other1.IsNone() && (other1.type() == type_)) list |= other1.bit();
if (!other2.IsNone() && (other2.type() == type_)) list |= other2.bit();
if (!other3.IsNone() && (other3.type() == type_)) list |= other3.bit();
if (!other4.IsNone() && (other4.type() == type_)) list |= other4.bit();
return (list_ & list) != 0;
}
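The membership test above reduces to a single AND: every candidate of the matching type contributes its bit to a probe mask. The same idea in isolation:
#include <cstdint>
#include <initializer_list>
using RegList = uint64_t;
bool IncludesAnyOf(RegList list, std::initializer_list<int> codes) {
  RegList probe = 0;
  for (int code : codes) probe |= RegList{1} << code;
  return (list & probe) != 0;  // one AND answers the whole query
}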
@ -1003,13 +1074,6 @@ class Assembler : public AssemblerBase {
return SizeOfCodeGeneratedSince(label) / kInstructionSize;
}
static constexpr int kPatchDebugBreakSlotAddressOffset = 0;
// Number of instructions necessary to be able to later patch it to a call.
static constexpr int kDebugBreakSlotInstructions = 5;
static constexpr int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstructionSize;
// Prevent constant pool emission until EndBlockConstPool is called.
// Calls to this function can be nested but must be followed by an equal
// number of calls to EndBlockConstPool.
@ -1058,9 +1122,6 @@ class Assembler : public AssemblerBase {
int buffer_space() const;
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record the emission of a constant pool.
//
// The emission of constant and veneer pools depends on the size of the code

1452
deps/v8/src/arm64/code-stubs-arm64.cc

File diff suppressed because it is too large

99
deps/v8/src/arm64/code-stubs-arm64.h

@ -9,9 +9,6 @@ namespace v8 {
namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StringHelper : public AllStatic {
public:
// Compares two flat one-byte strings and returns result in x0.
@ -35,34 +32,6 @@ class StringHelper : public AllStatic {
};
class StoreRegistersStateStub: public PlatformCodeStub {
public:
explicit StoreRegistersStateStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
static Register to_be_pushed_lr() { return ip0; }
static void GenerateAheadOfTime(Isolate* isolate);
private:
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
};
class RestoreRegistersStateStub: public PlatformCodeStub {
public:
explicit RestoreRegistersStateStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
static void GenerateAheadOfTime(Isolate* isolate);
private:
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
};
class RecordWriteStub: public PlatformCodeStub {
public:
// Stub to record the write of 'value' at 'address' in 'object'.
@ -99,69 +68,9 @@ class RecordWriteStub: public PlatformCodeStub {
bool SometimesSetsUpAFrame() override { return false; }
static Mode GetMode(Code* stub) {
// Find the mode depending on the first two instructions.
Instruction* instr1 =
reinterpret_cast<Instruction*>(stub->instruction_start());
Instruction* instr2 = instr1->following();
if (instr1->IsUncondBranchImm()) {
DCHECK(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
return INCREMENTAL;
}
DCHECK(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
static Mode GetMode(Code* stub);
if (instr2->IsUncondBranchImm()) {
return INCREMENTAL_COMPACTION;
}
DCHECK(instr2->IsPCRelAddressing());
return STORE_BUFFER_ONLY;
}
// We patch the first two instructions of the stub back and forth between an
// adr and branch when we start and stop incremental heap marking.
// The branch is
// b label
// The adr is
// adr xzr label
// so effectively a nop.
static void Patch(Code* stub, Mode mode) {
// We are going to patch the first two instructions of the stub.
PatchingAssembler patcher(stub->GetIsolate(), stub->instruction_start(), 2);
Instruction* instr1 = patcher.InstructionAt(0);
Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
// Instructions must be either 'adr' or 'b'.
DCHECK(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
DCHECK(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
// Retrieve the offsets to the labels.
auto offset_to_incremental_noncompacting =
static_cast<int32_t>(instr1->ImmPCOffset());
auto offset_to_incremental_compacting =
static_cast<int32_t>(instr2->ImmPCOffset());
switch (mode) {
case STORE_BUFFER_ONLY:
DCHECK(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
patcher.adr(xzr, offset_to_incremental_noncompacting);
patcher.adr(xzr, offset_to_incremental_compacting);
break;
case INCREMENTAL:
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
patcher.adr(xzr, offset_to_incremental_compacting);
break;
case INCREMENTAL_COMPACTION:
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
patcher.adr(xzr, offset_to_incremental_noncompacting);
patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
break;
}
DCHECK(GetMode(stub) == mode);
}
static void Patch(Code* stub, Mode mode);
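The patching described above works because adr and b are both single PC-relative instructions, so a slot can be flipped in place between "fall through" (adr into the zero register, in effect a nop) and "branch to the marking path". A hedged sketch of the two A64 encodings, with the offset fixed at zero for simplicity; the real Patch() preserves the original offsets when it flips a slot:
#include <cstdint>
constexpr uint32_t kAdrXzrZero = 0x10000000u | 31u;  // adr xzr, #0
constexpr uint32_t kBranchZero = 0x14000000u;        // b #0 (26-bit offset field)
uint32_t ToggleSlot(bool take_branch) {
  return take_branch ? kBranchZero : kAdrXzrZero;
}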
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
@ -212,8 +121,8 @@ class RecordWriteStub: public PlatformCodeStub {
Register object_;
Register address_;
Register scratch0_;
Register scratch1_;
Register scratch2_;
Register scratch1_ = NoReg;
Register scratch2_ = NoReg;
CPURegList saved_regs_;
CPURegList saved_fp_regs_;

71
deps/v8/src/arm64/codegen-arm64.cc

@ -21,80 +21,9 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
}
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterFrame(StackFrame::INTERNAL);
DCHECK(!masm->has_frame());
masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveFrame(StackFrame::INTERNAL);
DCHECK(masm->has_frame());
masm->set_has_frame(false);
}
// -------------------------------------------------------------------------
// Code generators
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// The sequence of instructions that is patched out for aging code is the
// following boilerplate stack-building prologue that is found both in
// FUNCTION and OPTIMIZED_FUNCTION code:
PatchingAssembler patcher(isolate, young_sequence_.start(),
young_sequence_.length() / kInstructionSize);
// The young sequence is the frame setup code for FUNCTION code types. It is
// generated by FullCodeGenerator::Generate.
MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
#ifdef DEBUG
const int length = kCodeAgeStubEntryOffset / kInstructionSize;
DCHECK(old_sequence_.length() >= kCodeAgeStubEntryOffset);
PatchingAssembler patcher_old(isolate, old_sequence_.start(), length);
MacroAssembler::EmitCodeAgeSequence(&patcher_old, NULL);
#endif
}
#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
return memcmp(candidate, old_sequence_.start(), kCodeAgeStubEntryOffset) == 0;
}
#endif
bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
return MacroAssembler::IsYoungSequence(isolate, sequence);
}
Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
byte* target = sequence + kCodeAgeStubEntryOffset;
Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
return GetAgeOfCodeAgeStub(stub);
}
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
Code::Age age) {
PatchingAssembler patcher(isolate, sequence,
kNoCodeAgeSequenceLength / kInstructionSize);
if (age == kNoAgeCodeAge) {
MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
} else {
Code* stub = GetCodeAgeStub(isolate, age);
MacroAssembler::EmitCodeAgeSequence(&patcher, stub);
}
}
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register string,
Register index,

2
deps/v8/src/arm64/constants-arm64.h

@ -171,7 +171,7 @@ typedef uint16_t float16;
V_(ImmAddSub, 21, 10, Bits) \
V_(ShiftAddSub, 23, 22, Bits) \
\
/* Add/substract extend */ \
/* Add/subtract extend */ \
V_(ImmExtendShift, 12, 10, Bits) \
V_(ExtendMode, 15, 13, Bits) \
\

78
deps/v8/src/arm64/deoptimizer-arm64.cc

@ -4,11 +4,10 @@
#include "src/api.h"
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/frames-arm64.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
#include "src/frame-constants.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
@ -16,77 +15,6 @@
namespace v8 {
namespace internal {
int Deoptimizer::patch_size() {
// Size of the code used to patch lazy bailout points.
// Patching is done by Deoptimizer::DeoptimizeFunction.
return 4 * kInstructionSize;
}
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
// Empty because there is no need for relocation information for the code
// patching in Deoptimizer::PatchCodeForDeoptimization below.
}
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
// Invalidate the relocation information, as the code patching below will
// make it invalid, and it is no longer needed.
code->InvalidateRelocation();
// Fail hard and early if we enter this code object again.
byte* pointer = code->FindCodeAgeSequence();
if (pointer != NULL) {
pointer += kNoCodeAgeSequenceLength;
} else {
pointer = code->instruction_start();
}
{
PatchingAssembler patcher(Assembler::IsolateData(isolate), pointer, 1);
patcher.brk(0);
}
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
PatchingAssembler patcher(Assembler::IsolateData(isolate),
code_start_address + osr_offset, 1);
patcher.brk(0);
}
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
// For each LLazyBailout instruction insert a call to the corresponding
// deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
PatchingAssembler patcher(isolate, call_address,
patch_size() / kInstructionSize);
patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
patcher.blr(ip0);
patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));
DCHECK((prev_call_address == NULL) ||
(call_address >= prev_call_address + patch_size()));
DCHECK(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
prev_call_address = call_address;
#endif
}
}
#define __ masm()->
void Deoptimizer::TableEntryGenerator::Generate() {
@ -99,13 +27,13 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Save all allocatable double registers.
CPURegList saved_double_registers(
CPURegister::kVRegister, kDRegSizeInBits,
RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask());
RegisterConfiguration::Default()->allocatable_double_codes_mask());
__ PushCPURegList(saved_double_registers);
// Save all allocatable float registers.
CPURegList saved_float_registers(
CPURegister::kVRegister, kSRegSizeInBits,
RegisterConfiguration::Crankshaft()->allocatable_float_codes_mask());
RegisterConfiguration::Default()->allocatable_float_codes_mask());
__ PushCPURegList(saved_float_registers);
// We save all the registers except jssp, sp and lr.

57
deps/v8/src/arm64/disasm-arm64.cc

@ -823,38 +823,47 @@ void DisassemblingDecoder::VisitLoadStoreRegisterOffset(Instruction* instr) {
Format(instr, mnemonic, form);
}
#define LOAD_STORE_UNSCALED_LIST(V) \
V(STURB_w, "sturb", "'Wt") \
V(STURH_w, "sturh", "'Wt") \
V(STUR_w, "stur", "'Wt") \
V(STUR_x, "stur", "'Xt") \
V(LDURB_w, "ldurb", "'Wt") \
V(LDURH_w, "ldurh", "'Wt") \
V(LDUR_w, "ldur", "'Wt") \
V(LDUR_x, "ldur", "'Xt") \
V(LDURSB_x, "ldursb", "'Xt") \
V(LDURSH_x, "ldursh", "'Xt") \
V(LDURSW_x, "ldursw", "'Xt") \
V(LDURSB_w, "ldursb", "'Wt") \
V(LDURSH_w, "ldursh", "'Wt") \
V(STUR_b, "stur", "'Bt") \
V(STUR_h, "stur", "'Ht") \
V(STUR_s, "stur", "'St") \
V(STUR_d, "stur", "'Dt") \
V(LDUR_b, "ldur", "'Bt") \
V(LDUR_h, "ldur", "'Ht") \
V(LDUR_s, "ldur", "'St") \
V(LDUR_d, "ldur", "'Dt") \
V(STUR_q, "stur", "'Qt") \
V(LDUR_q, "ldur", "'Qt")
void DisassemblingDecoder::VisitLoadStoreUnscaledOffset(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "'Wt, ['Xns'ILS]";
const char *form_x = "'Xt, ['Xns'ILS]";
const char *form_s = "'St, ['Xns'ILS]";
const char *form_d = "'Dt, ['Xns'ILS]";
const char* mnemonic = "unimplemented";
const char* form = "(LoadStoreUnscaledOffset)";
switch (instr->Mask(LoadStoreUnscaledOffsetMask)) {
case STURB_w: mnemonic = "sturb"; break;
case STURH_w: mnemonic = "sturh"; break;
case STUR_w: mnemonic = "stur"; break;
case STUR_x: mnemonic = "stur"; form = form_x; break;
case STUR_s: mnemonic = "stur"; form = form_s; break;
case STUR_d: mnemonic = "stur"; form = form_d; break;
case LDURB_w: mnemonic = "ldurb"; break;
case LDURH_w: mnemonic = "ldurh"; break;
case LDUR_w: mnemonic = "ldur"; break;
case LDUR_x: mnemonic = "ldur"; form = form_x; break;
case LDUR_s: mnemonic = "ldur"; form = form_s; break;
case LDUR_d: mnemonic = "ldur"; form = form_d; break;
case LDURSB_x: form = form_x; // Fall through.
case LDURSB_w: mnemonic = "ldursb"; break;
case LDURSH_x: form = form_x; // Fall through.
case LDURSH_w: mnemonic = "ldursh"; break;
case LDURSW_x: mnemonic = "ldursw"; form = form_x; break;
default: form = "(LoadStoreUnscaledOffset)";
#define LS_UNSCALEDOFFSET(A, B, C) \
case A: \
mnemonic = B; \
form = C ", ['Xns'ILS]"; \
break;
LOAD_STORE_UNSCALED_LIST(LS_UNSCALEDOFFSET)
#undef LS_UNSCALEDOFFSET
}
Format(instr, mnemonic, form);
}
void DisassemblingDecoder::VisitLoadLiteral(Instruction* instr) {
const char *mnemonic = "ldr";
const char *form = "(LoadLiteral)";

10
deps/v8/src/arm64/eh-frame-arm64.cc

@ -29,15 +29,15 @@ void EhFrameWriter::WriteInitialStateInCie() {
// static
int EhFrameWriter::RegisterToDwarfCode(Register name) {
switch (name.code()) {
case Register::kCode_x28:
case kRegCode_x28:
return kJsSpDwarfCode;
case Register::kCode_x29:
case kRegCode_x29:
return kFpDwarfCode;
case Register::kCode_x30:
case kRegCode_x30:
return kLrDwarfCode;
case Register::kCode_x31:
case kRegCode_x31:
return kCSpDwarfCode;
case Register::kCode_x0:
case kRegCode_x0:
return kX0DwarfCode;
default:
UNIMPLEMENTED();

14
deps/v8/src/arm64/frames-arm64.cc → deps/v8/src/arm64/frame-constants-arm64.cc

@ -2,25 +2,27 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/arm64/frames-arm64.h"
#include "src/frame-constants.h"
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/assembler-arm64.h"
#include "src/assembler.h"
#include "src/frames.h"
#include "src/arm64/frame-constants-arm64.h"
namespace v8 {
namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
UNREACHABLE();
}
Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
// Round up to a multiple of two, to make the frame a multiple of 16 bytes.
return RoundUp(register_count, 2);
}
} // namespace internal
} // namespace v8
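RegisterStackSlotCount above preserves arm64's 16-byte stack alignment by rounding the count of 8-byte register slots up to an even number. The same rounding in isolation:
// With 8-byte slots, an even slot count keeps the frame size a multiple of
// 16 bytes.
int RoundUpToEven(int n) { return (n + 1) & ~1; }
// RoundUpToEven(3) == 4, and 4 * 8 = 32 bytes, which is 16-byte aligned.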

26
deps/v8/src/arm64/frames-arm64.h → deps/v8/src/arm64/frame-constants-arm64.h

@ -2,32 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/constants-arm64.h"
#ifndef V8_ARM64_FRAMES_ARM64_H_
#define V8_ARM64_FRAMES_ARM64_H_
namespace v8 {
namespace internal {
const int kNumRegs = kNumberOfRegisters;
// Registers x0-x17 are caller-saved.
const int kNumJSCallerSaved = 18;
const RegList kJSCallerSaved = 0x3ffff;
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of eight.
// TODO(all): Refine this number.
const int kNumSafepointRegisters = 32;
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
#define kSafepointSavedRegisters CPURegList::GetSafepointSavedRegisters().list()
#define kNumSafepointSavedRegisters \
CPURegList::GetSafepointSavedRegisters().Count();
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset =
@ -39,12 +19,11 @@ class ExitFrameConstants : public TypedFrameConstants {
static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
DEFINE_TYPED_FRAME_SIZES(2);
static const int kLastExitFrameField = kCodeOffset;
static const int kLastExitFrameField = kCodeOffset;
static const int kConstantPoolOffset = 0; // Not used
static const int kConstantPoolOffset = 0; // Not used
};
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
@ -57,7 +36,6 @@ class JavaScriptFrameConstants : public AllStatic {
static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
};
} // namespace internal
} // namespace v8

46
deps/v8/src/arm64/instructions-arm64-constants.cc

@ -0,0 +1,46 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <cstdint>
namespace v8 {
namespace internal {
// ISA constants. --------------------------------------------------------------
// The following code initializes float/double variables with bit patterns
// without using static initializers (which is surprisingly difficult in
// C++). These variables are used by client code as extern float16,
// extern float and extern double types, which works because (I think) the
// linker ignores the types. This is kept in a separate source file to
// avoid breaking jumbo builds.
//
// TODO(mostynb): replace these with std::numeric_limits constexpr's where
// possible, and figure out how to replace *DefaultNaN with something clean,
// then move this code back into instructions-arm64.cc with the same types
// that client code uses.
extern const uint16_t kFP16PositiveInfinity = 0x7c00;
extern const uint16_t kFP16NegativeInfinity = 0xfc00;
extern const uint32_t kFP32PositiveInfinity = 0x7f800000;
extern const uint32_t kFP32NegativeInfinity = 0xff800000;
extern const uint64_t kFP64PositiveInfinity = 0x7ff0000000000000UL;
extern const uint64_t kFP64NegativeInfinity = 0xfff0000000000000UL;
// This value is a signalling NaN as both a double and as a float (taking the
// least-significant word).
extern const uint64_t kFP64SignallingNaN = 0x7ff000007f800001;
extern const uint32_t kFP32SignallingNaN = 0x7f800001;
// A similar value, but as a quiet NaN.
extern const uint64_t kFP64QuietNaN = 0x7ff800007fc00001;
extern const uint32_t kFP32QuietNaN = 0x7fc00001;
// The default NaN values (for FPCR.DN=1).
extern const uint64_t kFP64DefaultNaN = 0x7ff8000000000000UL;
extern const uint32_t kFP32DefaultNaN = 0x7fc00000;
extern const uint16_t kFP16DefaultNaN = 0x7e00;
} // namespace internal
} // namespace v8
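A minimal standalone sketch, outside V8, of the technique this file uses: publish the bit pattern as an integer constant (no floating-point static initializer involved) and let readers reinterpret the bits when a float is needed. The names here are illustrative:
#include <cstdint>
#include <cstring>
extern const uint32_t kDemoQuietNaNBits;
const uint32_t kDemoQuietNaNBits = 0x7fc00001;  // initialized as an integer
// Readers reinterpret the bits on demand; memcpy is the well-defined way to
// type-pun in C++.
float AsFloat(uint32_t bits) {
  float f;
  std::memcpy(&f, &bits, sizeof(f));
  return f;
}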

3
deps/v8/src/arm64/instructions-arm64.cc

@ -4,15 +4,12 @@
#if V8_TARGET_ARCH_ARM64
#define ARM64_DEFINE_FP_STATICS
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/instructions-arm64.h"
namespace v8 {
namespace internal {
bool Instruction::IsLoad() const {
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
return false;

44
deps/v8/src/arm64/instructions-arm64.h

@ -18,44 +18,26 @@ namespace internal {
typedef uint32_t Instr;
// The following macros initialize a float/double variable with a bit pattern
// without using static initializers: If ARM64_DEFINE_FP_STATICS is defined, the
// symbol is defined as uint32_t/uint64_t initialized with the desired bit
// pattern. Otherwise, the same symbol is declared as an external float/double.
#if defined(ARM64_DEFINE_FP_STATICS)
#define DEFINE_FLOAT16(name, value) extern const uint16_t name = value
#define DEFINE_FLOAT(name, value) extern const uint32_t name = value
#define DEFINE_DOUBLE(name, value) extern const uint64_t name = value
#else
#define DEFINE_FLOAT16(name, value) extern const float16 name
#define DEFINE_FLOAT(name, value) extern const float name
#define DEFINE_DOUBLE(name, value) extern const double name
#endif // defined(ARM64_DEFINE_FP_STATICS)
DEFINE_FLOAT16(kFP16PositiveInfinity, 0x7c00);
DEFINE_FLOAT16(kFP16NegativeInfinity, 0xfc00);
DEFINE_FLOAT(kFP32PositiveInfinity, 0x7f800000);
DEFINE_FLOAT(kFP32NegativeInfinity, 0xff800000);
DEFINE_DOUBLE(kFP64PositiveInfinity, 0x7ff0000000000000UL);
DEFINE_DOUBLE(kFP64NegativeInfinity, 0xfff0000000000000UL);
extern const float16 kFP16PositiveInfinity;
extern const float16 kFP16NegativeInfinity;
extern const float kFP32PositiveInfinity;
extern const float kFP32NegativeInfinity;
extern const double kFP64PositiveInfinity;
extern const double kFP64NegativeInfinity;
// This value is a signalling NaN as both a double and as a float (taking the
// least-significant word).
DEFINE_DOUBLE(kFP64SignallingNaN, 0x7ff000007f800001);
DEFINE_FLOAT(kFP32SignallingNaN, 0x7f800001);
extern const double kFP64SignallingNaN;
extern const float kFP32SignallingNaN;
// A similar value, but as a quiet NaN.
DEFINE_DOUBLE(kFP64QuietNaN, 0x7ff800007fc00001);
DEFINE_FLOAT(kFP32QuietNaN, 0x7fc00001);
extern const double kFP64QuietNaN;
extern const float kFP32QuietNaN;
// The default NaN values (for FPCR.DN=1).
DEFINE_DOUBLE(kFP64DefaultNaN, 0x7ff8000000000000UL);
DEFINE_FLOAT(kFP32DefaultNaN, 0x7fc00000);
DEFINE_FLOAT16(kFP16DefaultNaN, 0x7e00);
#undef DEFINE_FLOAT16
#undef DEFINE_FLOAT
#undef DEFINE_DOUBLE
extern const double kFP64DefaultNaN;
extern const float kFP32DefaultNaN;
extern const float16 kFP16DefaultNaN;
unsigned CalcLSDataSize(LoadStoreOp op);
unsigned CalcLSPairDataSize(LoadStorePairOp op);

138
deps/v8/src/arm64/interface-descriptors-arm64.cc

@ -22,6 +22,18 @@ void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
default_stub_registers);
}
void RecordWriteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
const Register default_stub_registers[] = {x0, x1, x2, x3, x4};
data->RestrictAllocatableRegisters(default_stub_registers,
arraysize(default_stub_registers));
CHECK_LE(static_cast<size_t>(kParameterCount),
arraysize(default_stub_registers));
data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
const Register FastNewFunctionContextDescriptor::FunctionRegister() {
return x1;
}
@ -49,8 +61,6 @@ const Register StoreTransitionDescriptor::MapRegister() { return x5; }
const Register StringCompareDescriptor::LeftRegister() { return x1; }
const Register StringCompareDescriptor::RightRegister() { return x0; }
const Register StringConcatDescriptor::ArgumentsCountRegister() { return x0; }
const Register ApiGetterDescriptor::HolderRegister() { return x0; }
const Register ApiGetterDescriptor::CallbackRegister() { return x3; }
@ -82,58 +92,6 @@ void TypeofDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneRegExpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x3: closure
// x2: object literal index
// x1: constant properties
// x0: object literal flags
Register registers[] = {x3, x2, x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x3: closure
// x2: array literal index
// x1: constant elements
Register registers[] = {x3, x2, x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x3: closure
// x2: object literal index
// x1: constant properties
// x0: object literal flags
Register registers[] = {x3, x2, x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: feedback vector
// x3: call feedback slot
Register registers[] = {x2, x3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: feedback vector
// x3: call feedback slot
// x1: tagged value to put in the weak cell
Register registers[] = {x2, x3, x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1 function the function to call
@ -141,33 +99,6 @@ void CallFunctionDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {x1, x0, x3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {x1, x0, x3, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0 : number of arguments
// x1 : the function to call
// x2 : feedback vector
// x3 : slot in feedback vector (Smi, for RecordCallTarget)
// x4 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {x0, x1, x4, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: target
@ -321,13 +252,6 @@ void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void VarArgFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (arg count)
Register registers[] = {x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: left operand
@ -345,33 +269,6 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: allocation site
// x1: left operand
// x0: right operand
Register registers[] = {x2, x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpWithVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// x1 -- lhs
// x0 -- rhs
// x4 -- slot id
// x3 -- vector
Register registers[] = {x1, x0, x4, x3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CountOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: left operand
@ -440,17 +337,6 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsThenConstructArrayDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
x0, // argument count (not including receiver)
x1, // target to call checked to be Array function
x2, // allocation site feedback if available, undefined otherwise
x3 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {

34
deps/v8/src/arm64/macro-assembler-arm64-inl.h

@ -1244,25 +1244,6 @@ void MacroAssembler::ObjectUntag(Register untagged_obj, Register obj) {
void TurboAssembler::jmp(Label* L) { B(L); }
void MacroAssembler::IsObjectJSStringType(Register object,
Register type,
Label* not_string,
Label* string) {
Ldr(type, FieldMemOperand(object, HeapObject::kMapOffset));
Ldrb(type.W(), FieldMemOperand(type, Map::kInstanceTypeOffset));
STATIC_ASSERT(kStringTag == 0);
DCHECK((string != NULL) || (not_string != NULL));
if (string == NULL) {
TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
} else if (not_string == NULL) {
TestAndBranchIfAllClear(type.W(), kIsNotStringMask, string);
} else {
TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
B(string);
}
}
void TurboAssembler::Push(Handle<HeapObject> handle) {
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireX();
@ -1277,14 +1258,6 @@ void TurboAssembler::Push(Smi* smi) {
Push(tmp);
}
void MacroAssembler::PushObject(Handle<Object> handle) {
if (handle->IsHeapObject()) {
Push(Handle<HeapObject>::cast(handle));
} else {
Push(Smi::cast(*handle));
}
}
void TurboAssembler::Claim(int64_t count, uint64_t unit_size) {
DCHECK(count >= 0);
uint64_t size = count * unit_size;
@ -1382,6 +1355,13 @@ void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
}
}
void TurboAssembler::DropArguments(const Register& count, uint64_t unit_size) {
Drop(count, unit_size);
}
void TurboAssembler::DropSlots(int64_t count, uint64_t unit_size) {
Drop(count, unit_size);
}
void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo(unit_size));

859
deps/v8/src/arm64/macro-assembler-arm64.cc

File diff suppressed because it is too large

241
deps/v8/src/arm64/macro-assembler-arm64.h

@ -23,13 +23,17 @@
FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
#if DEBUG
#define ASM_LOCATION(message) __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
#define ASM_LOCATION_IN_ASSEMBLER(message) \
Debug("LOCATION: " message, __LINE__, NO_PARAM)
#else
#define ASM_LOCATION(message)
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif
#else
#define ASM_UNIMPLEMENTED(message)
#define ASM_UNIMPLEMENTED_BREAK(message)
#define ASM_LOCATION(message)
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif
@ -180,21 +184,7 @@ enum PreShiftImmMode {
class TurboAssembler : public Assembler {
public:
TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
CodeObjectRequired create_code_object)
: Assembler(isolate, buffer, buffer_size),
isolate_(isolate),
#if DEBUG
allow_macro_instructions_(true),
#endif
tmp_list_(DefaultTmpList()),
fptmp_list_(DefaultFPTmpList()),
sp_(jssp),
use_real_aborts_(true) {
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
}
}
CodeObjectRequired create_code_object);
// The Abort method should call a V8 runtime function, but the CallRuntime
// mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
@ -677,6 +667,20 @@ class TurboAssembler : public Assembler {
// Emits a runtime assert that the CSP is aligned.
void AssertCspAligned();
// Copy slot_count stack slots from the stack offset specified by src to
// the stack offset specified by dst. The offsets and count are expressed in
// slot-sized units. Offset dst must be less than src, or the gap between
// them must be greater than or equal to slot_count, otherwise the result is
// unpredictable. The function may corrupt its register arguments.
void CopySlots(int dst, Register src, Register slot_count);
void CopySlots(Register dst, Register src, Register slot_count);
// Copy count double words from the address in register src to the address
// in register dst. Address dst must be less than src, or the gap between
// them must be greater than or equal to count double words, otherwise the
// result is unpredictable. The function may corrupt its register arguments.
void CopyDoubleWords(Register dst, Register src, Register count);
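The overlap rule in the two comments above is the classic forward-copy precondition: copying slot by slot in increasing address order is only safe when dst is below src or the regions are at least count slots apart. A plain C++ illustration (the real helpers do this with register moves):
#include <cstddef>
#include <cstdint>
void CopySlotsForward(uint64_t* dst, const uint64_t* src, size_t count) {
  // Precondition mirrors the comments above; otherwise a slot is overwritten
  // before it has been read and the result is unpredictable.
  for (size_t i = 0; i < count; ++i) dst[i] = src[i];
}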
// Load a literal from the inline constant pool.
inline void Ldr(const CPURegister& rt, const Operand& imm);
// Helper function for double immediate.
@ -698,6 +702,20 @@ class TurboAssembler : public Assembler {
inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
inline void Drop(const Register& count, uint64_t unit_size = kXRegSize);
// Drop arguments from stack without actually accessing memory.
// This will currently drop 'count' arguments of the given size from the
// stack.
// TODO(arm64): Update this to round up the number of bytes dropped to
// a multiple of 16, so that we can remove jssp.
inline void DropArguments(const Register& count,
uint64_t unit_size = kXRegSize);
// Drop slots from stack without actually accessing memory.
// This will currently drop 'count' slots of the given size from the stack.
// TODO(arm64): Update this to round up the number of bytes dropped to
// a multiple of 16, so that we can remove jssp.
inline void DropSlots(int64_t count, uint64_t unit_size = kXRegSize);
// Re-synchronizes the system stack pointer (csp) with the current stack
// pointer (according to StackPointer()).
//
@ -783,6 +801,13 @@ class TurboAssembler : public Assembler {
inline void push(Register src) { Push(src); }
inline void pop(Register dst) { Pop(dst); }
void SaveRegisters(RegList registers);
void RestoreRegisters(RegList registers);
void CallRecordWriteStub(Register object, Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode);
// Alternative forms of Push and Pop, taking a RegList or CPURegList that
// specifies the registers that are to be pushed or popped. Higher-numbered
// registers are associated with higher memory addresses (as in the A32 push
@ -796,6 +821,24 @@ class TurboAssembler : public Assembler {
void PushCPURegList(CPURegList registers);
void PopCPURegList(CPURegList registers);
// Calculate how much stack space (in bytes) is required to store caller
// registers, excluding those specified in the arguments.
int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg) const;
// Push caller-saved registers on the stack, and return the number of bytes
// by which the stack pointer is adjusted.
int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
// Restore caller-saved registers from the stack, and return the number of
// bytes by which the stack pointer is adjusted.
int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
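A toy model of the bookkeeping behind the three declarations above: mask out the registers the caller wants kept live, count what remains, and report how far the stack pointer moves. The even-count padding reflects arm64's 16-byte stack alignment; the exact policy of the real helpers may differ:
#include <bitset>
#include <cstdint>
int PushCallerSavedBytes(uint64_t caller_saved_mask, uint64_t exclusions) {
  uint64_t to_save = caller_saved_mask & ~exclusions;
  int regs = static_cast<int>(std::bitset<64>(to_save).count());
  regs += regs & 1;  // pad to an even count to keep 16-byte alignment
  return regs * 8;   // 8 bytes per X register
}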
// Move an immediate into register dst, and return an Operand object for use
// with a subsequent instruction that accepts a shift. The value moved into
// dst is not necessarily equal to imm; it may have had a shifting operation
@ -864,6 +907,10 @@ class TurboAssembler : public Assembler {
void Call(Address target, RelocInfo::Mode rmode);
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
void CallForDeoptimization(Address target, RelocInfo::Mode rmode) {
Call(target, rmode);
}
// For every Call variant, there is a matching CallSize function that returns
// the size (in bytes) of the call sequence.
static int CallSize(Register target);
@ -1147,41 +1194,7 @@ class TurboAssembler : public Assembler {
inline void Mrs(const Register& rt, SystemRegister sysreg);
// Generates function prologue code.
void Prologue(bool code_pre_aging);
// Code ageing support functions.
// Code ageing on ARM64 works much as it does on ARM. When V8 wants to mark a
// function as old, it replaces some of the function prologue (generated by
// FullCodeGenerator::Generate) with a call to a special stub (ultimately
// generated by GenerateMakeCodeYoungAgainCommon). The stub restores the
// function prologue to its initial young state (indicating that it has been
// recently run) and continues. A young function is therefore one which has a
// normal frame setup sequence, and an old function has a code age sequence
// which calls a code ageing stub.
// Set up a basic stack frame for young code (or code exempt from ageing) with
// type FUNCTION. It may be patched later for code ageing support. This is
// done by Code::PatchPlatformCodeAge and EmitCodeAgeSequence.
//
// This function takes an Assembler so it can be called from either a
// MacroAssembler or a PatchingAssembler context.
static void EmitFrameSetupForCodeAgePatching(Assembler* assm);
// Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context.
void EmitFrameSetupForCodeAgePatching();
// Emit a code age sequence that calls the relevant code age stub. The code
// generated by this sequence is expected to replace the code generated by
// EmitFrameSetupForCodeAgePatching, and represents an old function.
//
// If stub is NULL, this function generates the code age sequence but omits
// the stub address that is normally embedded in the instruction stream. This
// can be used by debug code to verify code age sequences.
static void EmitCodeAgeSequence(Assembler* assm, Code* stub);
// Call EmitCodeAgeSequence from a MacroAssembler context.
void EmitCodeAgeSequence(Code* stub);
void Prologue();
void Cmgt(const VRegister& vd, const VRegister& vn, int imm) {
DCHECK(allow_macro_instructions());
@ -1323,10 +1336,6 @@ class MacroAssembler : public TurboAssembler {
STLX_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION
// V8-specific load/store helpers.
void Load(const Register& rt, const MemOperand& addr, Representation r);
void Store(const Register& rt, const MemOperand& addr, Representation r);
// Branch type inversion relies on these relations.
STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
(reg_bit_clear == (reg_bit_set ^ 1)) &&
@ -1601,8 +1610,6 @@ class MacroAssembler : public TurboAssembler {
void PushMultipleTimes(CPURegister src, Register count);
void PushMultipleTimes(CPURegister src, int count);
inline void PushObject(Handle<Object> handle);
// Sometimes callers need to push or pop multiple registers in a way that is
// difficult to structure efficiently for fixed Push or Pop calls. This scope
// allows push requests to be queued up, then flushed at once. The
@ -1726,16 +1733,10 @@ class MacroAssembler : public TurboAssembler {
// Helpers ------------------------------------------------------------------
// Store an object to the root table.
void StoreRoot(Register source,
Heap::RootListIndex index);
static int SafepointRegisterStackIndex(int reg_code);
void LoadInstanceDescriptors(Register map,
Register descriptors);
void EnumLengthUntagged(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
void LoadAccessor(Register dst, Register holder, int accessor_index,
AccessorComponent accessor);
@ -1855,22 +1856,12 @@ class MacroAssembler : public TurboAssembler {
CallRuntime(function, function->nargs, save_doubles);
}
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
void TailCallRuntime(Runtime::FunctionId fid);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
int num_arguments);
// Registers used through the invocation chain are hard-coded.
// We force passing the parameters to ensure the contracts are correctly
// honoured by the caller.
@ -1879,11 +1870,8 @@ class MacroAssembler : public TurboAssembler {
// 'expected' must use an immediate or x2.
// 'call_kind' must be x5.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Label* done,
InvokeFlag flag,
bool* definitely_mismatches,
const CallWrapper& call_wrapper);
const ParameterCount& actual, Label* done,
InvokeFlag flag, bool* definitely_mismatches);
// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
@ -1891,41 +1879,22 @@ class MacroAssembler : public TurboAssembler {
const ParameterCount& actual);
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
const ParameterCount& actual, InvokeFlag flag);
// Invoke the JavaScript function in the given register.
// Changes the current context to the context in the function before invoking.
void InvokeFunction(Register function,
Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper);
void InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper);
void InvokeFunction(Register function, Register new_target,
const ParameterCount& actual, InvokeFlag flag);
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper);
const ParameterCount& actual, InvokeFlag flag);
// ---- Code generation helpers ----
// Frame restart support
void MaybeDropFrames();
// Exception handling
// Push a new stack handler and link into stack handler chain.
void PushStackHandler();
// Unlink the stack handler on top of the stack from the stack handler chain.
// Must preserve the result register.
void PopStackHandler();
// ---------------------------------------------------------------------------
// Allocation support
@ -1935,9 +1904,6 @@ class MacroAssembler : public TurboAssembler {
//
// If the new space is exhausted control continues at the gc_required label.
// In this case, the result and scratch registers may still be clobbered.
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
void Allocate(int object_size,
Register result,
Register scratch1,
@ -1945,17 +1911,6 @@ class MacroAssembler : public TurboAssembler {
Label* gc_required,
AllocationFlags flags);
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed.
// All registers are clobbered.
// If no heap_number_map register is provided, the function will take care of
// loading it.
void AllocateHeapNumber(Register result, Label* gc_required,
Register scratch1, Register scratch2,
CPURegister value = NoVReg,
CPURegister heap_number_map = NoReg,
MutableMode mode = IMMUTABLE);
// Allocate and initialize a JSValue wrapper with the specified {constructor}
// and {value}.
void AllocateJSValue(Register result, Register constructor, Register value,
@ -2057,9 +2012,6 @@ class MacroAssembler : public TurboAssembler {
// register.
void LoadElementsKindFromMap(Register result, Register map);
// Load the value from the root list and push it onto the stack.
void PushRoot(Heap::RootListIndex index);
// Compare the object in a register to a value from the root list.
void CompareRoot(const Register& obj, Heap::RootListIndex index);
@ -2073,15 +2025,6 @@ class MacroAssembler : public TurboAssembler {
Heap::RootListIndex index,
Label* if_not_equal);
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
// The object and type arguments can be the same register; in that case it
// will be overwritten with the type.
// Jumps to not_string or string, as appropriate. If the appropriate label is
// NULL, fall through.
inline void IsObjectJSStringType(Register object, Register type,
Label* not_string, Label* string = NULL);
// Compare the contents of a register with an operand, and branch to true,
// false or fall through, depending on condition.
void CompareAndSplit(const Register& lhs,
@ -2099,27 +2042,12 @@ class MacroAssembler : public TurboAssembler {
Label* if_any_set,
Label* fall_through);
// ---------------------------------------------------------------------------
// Inline caching support.
// Hash the integer value in the 'key' register.
// It uses the same algorithm as ComputeIntegerHash in utils.h.
void GetNumberHash(Register key, Register scratch);
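// For reference, the algorithm this mirrors (ComputeIntegerHash) is, to the
// best of my knowledge, the Thomas Wang 32-bit mix; a hedged sketch, not
// copied verbatim from utils.h:
uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);  // i.e. (hash << 15) - hash - 1
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // i.e. hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;     // keep the result Smi-sized
}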
// ---------------------------------------------------------------------------
// Frames.
// Load the type feedback vector from a JavaScript frame.
void EmitLoadFeedbackVector(Register vector);
void EnterBuiltinFrame(Register context, Register target, Register argc);
void LeaveBuiltinFrame(Register context, Register target, Register argc);
// Returns map with validated enum cache in object register.
void CheckEnumCache(Register object, Register scratch0, Register scratch1,
Register scratch2, Register scratch3, Register scratch4,
Label* call_runtime);
// The stack pointer has to switch between csp and jssp when setting up and
// destroying the exit frame. Hence preserving/restoring the registers is
// slightly more complicated than simple push/pop operations.
@ -2168,8 +2096,6 @@ class MacroAssembler : public TurboAssembler {
const Register& scratch,
bool restore_context);
void LoadContext(Register dst, int context_chain_length);
// Load the global object from the current context.
void LoadGlobalObject(Register dst) {
LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
@ -2210,13 +2136,6 @@ class MacroAssembler : public TurboAssembler {
void PushSafepointRegisters();
void PopSafepointRegisters();
// Store value in register src in the safepoint stack slot for register dst.
void StoreToSafepointRegisterSlot(Register src, Register dst);
// Load the value of the src register from its safepoint stack slot
// into register dst.
void LoadFromSafepointRegisterSlot(Register dst, Register src);
void CheckPageFlag(const Register& object, const Register& scratch, int mask,
Condition cc, Label* condition_met);
@ -2273,11 +2192,6 @@ class MacroAssembler : public TurboAssembler {
pointers_to_here_check_for_value);
}
// Notify the garbage collector that we wrote a code entry into a
// JSFunction. Only scratch is clobbered by the operation.
void RecordWriteCodeEntryField(Register js_function, Register code_entry,
Register scratch);
void RecordWriteForMap(
Register object,
Register map,
@ -2386,11 +2300,6 @@ class MacroAssembler : public TurboAssembler {
const CPURegister& arg2 = NoCPUReg,
const CPURegister& arg3 = NoCPUReg);
// Return true if the sequence is a young sequence generated by
// EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the
// sequence is a code age sequence (emitted by EmitCodeAgeSequence).
static bool IsYoungSequence(Isolate* isolate, byte* sequence);
private:
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object,
@ -2427,7 +2336,7 @@ class MacroAssembler : public TurboAssembler {
};
// Use this scope when you need a one-to-one mapping bewteen methods and
// Use this scope when you need a one-to-one mapping between methods and
// instructions. This scope prevents the MacroAssembler from being called and
// literal pools from being emitted. It also asserts the number of instructions
// emitted is what you specified when creating the scope.
@ -2505,17 +2414,11 @@ class UseScratchRegisterScope {
return VRegister::Create(AcquireNextAvailable(availablefp_).code(), format);
}
Register UnsafeAcquire(const Register& reg) {
return Register(UnsafeAcquire(available_, reg));
}
Register AcquireSameSizeAs(const Register& reg);
VRegister AcquireSameSizeAs(const VRegister& reg);
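// Typical use, sketched (assuming the scope is constructed from the active
// assembler, as elsewhere in this file; x0 stands in for any live register):
{
  UseScratchRegisterScope temps(this);
  Register scratch = temps.AcquireSameSizeAs(x0);  // a free same-width reg
  // ... use `scratch`; it returns to the available pool at end of scope ...
}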
private:
static CPURegister AcquireNextAvailable(CPURegList* available);
static CPURegister UnsafeAcquire(CPURegList* available,
const CPURegister& reg);
// Available scratch registers.
CPURegList* available_; // kRegister

8
deps/v8/src/arm64/simulator-arm64.cc

@ -231,8 +231,8 @@ void Simulator::CheckPCSComplianceAndRun() {
isolate_->stack_guard()->AdjustStackLimitForSimulator();
#ifdef DEBUG
CHECK_EQ(kNumberOfCalleeSavedRegisters, kCalleeSaved.Count());
CHECK_EQ(kNumberOfCalleeSavedVRegisters, kCalleeSavedV.Count());
DCHECK_EQ(kNumberOfCalleeSavedRegisters, kCalleeSaved.Count());
DCHECK_EQ(kNumberOfCalleeSavedVRegisters, kCalleeSavedV.Count());
int64_t saved_registers[kNumberOfCalleeSavedRegisters];
uint64_t saved_fpregisters[kNumberOfCalleeSavedVRegisters];
@ -254,12 +254,12 @@ void Simulator::CheckPCSComplianceAndRun() {
// Start the simulation!
Run();
#ifdef DEBUG
CHECK_EQ(original_stack, sp());
DCHECK_EQ(original_stack, sp());
// Check that callee-saved registers have been preserved.
register_list = kCalleeSaved;
fpregister_list = kCalleeSavedV;
for (int i = 0; i < kNumberOfCalleeSavedRegisters; i++) {
CHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code()));
DCHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code()));
}
for (int i = 0; i < kNumberOfCalleeSavedVRegisters; i++) {
DCHECK(saved_fpregisters[i] ==

1
deps/v8/src/arm64/utils-arm64.cc

@ -132,6 +132,7 @@ int MaskToBit(uint64_t mask) {
return CountTrailingZeros(mask, 64);
}
#undef __
} // namespace internal
} // namespace v8

1
deps/v8/src/asmjs/OWNERS

@ -4,6 +4,7 @@ ahaas@chromium.org
bradnelson@chromium.org
clemensh@chromium.org
mtrofin@chromium.org
mstarzinger@chromium.org
rossberg@chromium.org
titzer@chromium.org

347
deps/v8/src/asmjs/asm-js.cc

@ -8,13 +8,16 @@
#include "src/asmjs/asm-parser.h"
#include "src/assert-scope.h"
#include "src/ast/ast.h"
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/execution.h"
#include "src/factory.h"
#include "src/handles.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/parsing/scanner.h"
@ -22,7 +25,7 @@
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
namespace v8 {
@ -33,7 +36,7 @@ const char* const AsmJs::kSingleFunctionName = "__single_function__";
namespace {
enum WasmDataEntries {
kWasmDataCompiledModule,
kWasmDataUsesArray,
kWasmDataUsesBitSet,
kWasmDataEntryCount,
};
@ -48,62 +51,69 @@ Handle<Object> StdlibMathMember(Isolate* isolate, Handle<JSReceiver> stdlib,
return value;
}
bool IsStdlibMemberValid(Isolate* isolate, Handle<JSReceiver> stdlib,
wasm::AsmJsParser::StandardMember member,
bool* is_typed_array) {
switch (member) {
case wasm::AsmJsParser::StandardMember::kInfinity: {
Handle<Name> name = isolate->factory()->Infinity_string();
Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name);
return value->IsNumber() && std::isinf(value->Number());
}
case wasm::AsmJsParser::StandardMember::kNaN: {
Handle<Name> name = isolate->factory()->NaN_string();
Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name);
return value->IsNaN();
}
#define STDLIB_MATH_FUNC(fname, FName, ignore1, ignore2) \
case wasm::AsmJsParser::StandardMember::kMath##FName: { \
Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
STATIC_CHAR_VECTOR(#fname))); \
Handle<Object> value = StdlibMathMember(isolate, stdlib, name); \
if (!value->IsJSFunction()) return false; \
Handle<JSFunction> func = Handle<JSFunction>::cast(value); \
return func->shared()->code() == \
isolate->builtins()->builtin(Builtins::kMath##FName); \
bool AreStdlibMembersValid(Isolate* isolate, Handle<JSReceiver> stdlib,
wasm::AsmJsParser::StdlibSet members,
bool* is_typed_array) {
if (members.Contains(wasm::AsmJsParser::StandardMember::kInfinity)) {
members.Remove(wasm::AsmJsParser::StandardMember::kInfinity);
Handle<Name> name = isolate->factory()->Infinity_string();
Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name);
if (!value->IsNumber() || !std::isinf(value->Number())) return false;
}
if (members.Contains(wasm::AsmJsParser::StandardMember::kNaN)) {
members.Remove(wasm::AsmJsParser::StandardMember::kNaN);
Handle<Name> name = isolate->factory()->NaN_string();
Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name);
if (!value->IsNaN()) return false;
}
#define STDLIB_MATH_FUNC(fname, FName, ignore1, ignore2) \
if (members.Contains(wasm::AsmJsParser::StandardMember::kMath##FName)) { \
members.Remove(wasm::AsmJsParser::StandardMember::kMath##FName); \
Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
STATIC_CHAR_VECTOR(#fname))); \
Handle<Object> value = StdlibMathMember(isolate, stdlib, name); \
if (!value->IsJSFunction()) return false; \
Handle<JSFunction> func = Handle<JSFunction>::cast(value); \
if (func->shared()->code() != \
isolate->builtins()->builtin(Builtins::kMath##FName)) { \
return false; \
} \
}
STDLIB_MATH_FUNCTION_LIST(STDLIB_MATH_FUNC)
STDLIB_MATH_FUNCTION_LIST(STDLIB_MATH_FUNC)
#undef STDLIB_MATH_FUNC
#define STDLIB_MATH_CONST(cname, const_value) \
case wasm::AsmJsParser::StandardMember::kMath##cname: { \
Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
STATIC_CHAR_VECTOR(#cname))); \
Handle<Object> value = StdlibMathMember(isolate, stdlib, name); \
return value->IsNumber() && value->Number() == const_value; \
#define STDLIB_MATH_CONST(cname, const_value) \
if (members.Contains(wasm::AsmJsParser::StandardMember::kMath##cname)) { \
members.Remove(wasm::AsmJsParser::StandardMember::kMath##cname); \
Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
STATIC_CHAR_VECTOR(#cname))); \
Handle<Object> value = StdlibMathMember(isolate, stdlib, name); \
if (!value->IsNumber() || value->Number() != const_value) return false; \
}
STDLIB_MATH_VALUE_LIST(STDLIB_MATH_CONST)
STDLIB_MATH_VALUE_LIST(STDLIB_MATH_CONST)
#undef STDLIB_MATH_CONST
#define STDLIB_ARRAY_TYPE(fname, FName) \
case wasm::AsmJsParser::StandardMember::k##FName: { \
*is_typed_array = true; \
Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
STATIC_CHAR_VECTOR(#FName))); \
Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name); \
if (!value->IsJSFunction()) return false; \
Handle<JSFunction> func = Handle<JSFunction>::cast(value); \
return func.is_identical_to(isolate->fname()); \
#define STDLIB_ARRAY_TYPE(fname, FName) \
if (members.Contains(wasm::AsmJsParser::StandardMember::k##FName)) { \
members.Remove(wasm::AsmJsParser::StandardMember::k##FName); \
*is_typed_array = true; \
Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
STATIC_CHAR_VECTOR(#FName))); \
Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name); \
if (!value->IsJSFunction()) return false; \
Handle<JSFunction> func = Handle<JSFunction>::cast(value); \
if (!func.is_identical_to(isolate->fname())) return false; \
}
STDLIB_ARRAY_TYPE(int8_array_fun, Int8Array)
STDLIB_ARRAY_TYPE(uint8_array_fun, Uint8Array)
STDLIB_ARRAY_TYPE(int16_array_fun, Int16Array)
STDLIB_ARRAY_TYPE(uint16_array_fun, Uint16Array)
STDLIB_ARRAY_TYPE(int32_array_fun, Int32Array)
STDLIB_ARRAY_TYPE(uint32_array_fun, Uint32Array)
STDLIB_ARRAY_TYPE(float32_array_fun, Float32Array)
STDLIB_ARRAY_TYPE(float64_array_fun, Float64Array)
STDLIB_ARRAY_TYPE(int8_array_fun, Int8Array)
STDLIB_ARRAY_TYPE(uint8_array_fun, Uint8Array)
STDLIB_ARRAY_TYPE(int16_array_fun, Int16Array)
STDLIB_ARRAY_TYPE(uint16_array_fun, Uint16Array)
STDLIB_ARRAY_TYPE(int32_array_fun, Int32Array)
STDLIB_ARRAY_TYPE(uint32_array_fun, Uint32Array)
STDLIB_ARRAY_TYPE(float32_array_fun, Float32Array)
STDLIB_ARRAY_TYPE(float64_array_fun, Float64Array)
#undef STDLIB_ARRAY_TYPE
}
UNREACHABLE();
// All members accounted for.
DCHECK(members.IsEmpty());
return true;
}
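// The switch over individual members is gone: validation now drains a bit
// set. A small sketch of the StdlibSet API as used above and in the parser
// (EnumSet is assumed to provide exactly these members):
wasm::AsmJsParser::StdlibSet uses;
uses.Add(wasm::AsmJsParser::StandardMember::kNaN);
if (uses.Contains(wasm::AsmJsParser::StandardMember::kNaN)) {
  uses.Remove(wasm::AsmJsParser::StandardMember::kNaN);
}
DCHECK(uses.IsEmpty());
uint64_t bits = uses.ToIntegral();  // round-trips through a HeapNumber
USE(bits);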
void Report(Handle<Script> script, int position, Vector<const char> text,
@ -166,91 +176,147 @@ void ReportInstantiationFailure(Handle<Script> script, int position,
} // namespace
MaybeHandle<FixedArray> AsmJs::CompileAsmViaWasm(CompilationInfo* info) {
wasm::ZoneBuffer* module = nullptr;
wasm::ZoneBuffer* asm_offsets = nullptr;
Handle<FixedArray> uses_array;
Handle<WasmModuleObject> compiled;
// The compilation of asm.js modules is split into two distinct steps:
// [1] The asm.js module source is parsed, validated, and translated to a
// valid WebAssembly module. The results are two vectors representing the
// encoded module as well as encoded source position information.
// [2] The module is handed to WebAssembly which decodes it into an internal
// representation and eventually compiles it to machine code.
double translate_time; // Time (milliseconds) taken to execute step [1].
double compile_time; // Time (milliseconds) taken to execute step [2].
// The compilation of asm.js modules is split into two distinct steps:
// [1] ExecuteJobImpl: The asm.js module source is parsed, validated, and
// translated to a valid WebAssembly module. The results are two vectors
// representing the encoded module as well as encoded source position
// information and a StdlibSet bit set.
// [2] FinalizeJobImpl: The module is handed to WebAssembly which decodes it
// into an internal representation and eventually compiles it to machine
// code.
class AsmJsCompilationJob final : public CompilationJob {
public:
explicit AsmJsCompilationJob(ParseInfo* parse_info, FunctionLiteral* literal,
Isolate* isolate)
: CompilationJob(isolate, parse_info, &compilation_info_, "AsmJs"),
zone_(isolate->allocator(), ZONE_NAME),
compilation_info_(&zone_, isolate, parse_info, literal),
module_(nullptr),
asm_offsets_(nullptr),
translate_time_(0),
compile_time_(0) {}
protected:
Status PrepareJobImpl() final;
Status ExecuteJobImpl() final;
Status FinalizeJobImpl() final;
private:
Zone zone_;
CompilationInfo compilation_info_;
wasm::ZoneBuffer* module_;
wasm::ZoneBuffer* asm_offsets_;
wasm::AsmJsParser::StdlibSet stdlib_uses_;
double translate_time_; // Time (milliseconds) taken to execute step [1].
double compile_time_; // Time (milliseconds) taken to execute step [2].
DISALLOW_COPY_AND_ASSIGN(AsmJsCompilationJob);
};
CompilationJob::Status AsmJsCompilationJob::PrepareJobImpl() {
return SUCCEEDED;
}
CompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() {
// Step 1: Translate asm.js module to WebAssembly module.
{
HistogramTimerScope translate_time_scope(
info->isolate()->counters()->asm_wasm_translation_time());
size_t compile_zone_start = info->zone()->allocation_size();
base::ElapsedTimer translate_timer;
translate_timer.Start();
Zone* compile_zone = info->zone();
Zone translate_zone(info->isolate()->allocator(), ZONE_NAME);
std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(
handle(String::cast(info->script()->source())),
info->literal()->start_position(), info->literal()->end_position()));
uintptr_t stack_limit = info->isolate()->stack_guard()->real_climit();
wasm::AsmJsParser parser(&translate_zone, stack_limit, std::move(stream));
if (!parser.Run()) {
DCHECK(!info->isolate()->has_pending_exception());
ReportCompilationFailure(info->script(), parser.failure_location(),
parser.failure_message());
return MaybeHandle<FixedArray>();
}
module = new (compile_zone) wasm::ZoneBuffer(compile_zone);
parser.module_builder()->WriteTo(*module);
asm_offsets = new (compile_zone) wasm::ZoneBuffer(compile_zone);
parser.module_builder()->WriteAsmJsOffsetTable(*asm_offsets);
uses_array = info->isolate()->factory()->NewFixedArray(
static_cast<int>(parser.stdlib_uses()->size()));
int count = 0;
for (auto i : *parser.stdlib_uses()) {
uses_array->set(count++, Smi::FromInt(i));
}
size_t compile_zone_size =
info->zone()->allocation_size() - compile_zone_start;
size_t translate_zone_size = translate_zone.allocation_size();
info->isolate()
->counters()
->asm_wasm_translation_peak_memory_bytes()
->AddSample(static_cast<int>(translate_zone_size));
translate_time = translate_timer.Elapsed().InMillisecondsF();
if (FLAG_trace_asm_parser) {
PrintF(
"[asm.js translation successful: time=%0.3fms, "
"translate_zone=%" PRIuS "KB, compile_zone+=%" PRIuS "KB]\n",
translate_time, translate_zone_size / KB, compile_zone_size / KB);
}
HistogramTimerScope translate_time_scope(
compilation_info()->isolate()->counters()->asm_wasm_translation_time());
size_t compile_zone_start = compilation_info()->zone()->allocation_size();
base::ElapsedTimer translate_timer;
translate_timer.Start();
Zone* compile_zone = compilation_info()->zone();
Zone translate_zone(compilation_info()->isolate()->allocator(), ZONE_NAME);
Utf16CharacterStream* stream = parse_info()->character_stream();
base::Optional<AllowHandleDereference> allow_deref;
if (stream->can_access_heap()) {
DCHECK(
ThreadId::Current().Equals(compilation_info()->isolate()->thread_id()));
allow_deref.emplace();
}
stream->Seek(compilation_info()->literal()->start_position());
wasm::AsmJsParser parser(&translate_zone, stack_limit(), stream);
if (!parser.Run()) {
// TODO(rmcilroy): Temporarily allow heap access here until we have a
// mechanism for delaying pending messages.
DCHECK(
ThreadId::Current().Equals(compilation_info()->isolate()->thread_id()));
AllowHeapAllocation allow_allocation;
AllowHandleAllocation allow_handles;
allow_deref.emplace();
// Step 2: Compile and decode the WebAssembly module.
{
base::ElapsedTimer compile_timer;
compile_timer.Start();
wasm::ErrorThrower thrower(info->isolate(), "AsmJs::Compile");
MaybeHandle<WasmModuleObject> maybe_compiled = SyncCompileTranslatedAsmJs(
info->isolate(), &thrower,
wasm::ModuleWireBytes(module->begin(), module->end()), info->script(),
Vector<const byte>(asm_offsets->begin(), asm_offsets->size()));
DCHECK(!maybe_compiled.is_null());
DCHECK(!thrower.error());
compile_time = compile_timer.Elapsed().InMillisecondsF();
compiled = maybe_compiled.ToHandleChecked();
DCHECK(!compilation_info()->isolate()->has_pending_exception());
ReportCompilationFailure(compilation_info()->script(),
parser.failure_location(),
parser.failure_message());
return FAILED;
}
module_ = new (compile_zone) wasm::ZoneBuffer(compile_zone);
parser.module_builder()->WriteTo(*module_);
asm_offsets_ = new (compile_zone) wasm::ZoneBuffer(compile_zone);
parser.module_builder()->WriteAsmJsOffsetTable(*asm_offsets_);
stdlib_uses_ = *parser.stdlib_uses();
size_t compile_zone_size =
compilation_info()->zone()->allocation_size() - compile_zone_start;
size_t translate_zone_size = translate_zone.allocation_size();
compilation_info()
->isolate()
->counters()
->asm_wasm_translation_peak_memory_bytes()
->AddSample(static_cast<int>(translate_zone_size));
translate_time_ = translate_timer.Elapsed().InMillisecondsF();
if (FLAG_trace_asm_parser) {
PrintF(
"[asm.js translation successful: time=%0.3fms, "
"translate_zone=%" PRIuS "KB, compile_zone+=%" PRIuS "KB]\n",
translate_time_, translate_zone_size / KB, compile_zone_size / KB);
}
return SUCCEEDED;
}
CompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl() {
// Step 2: Compile and decode the WebAssembly module.
base::ElapsedTimer compile_timer;
compile_timer.Start();
Handle<HeapNumber> uses_bitset =
compilation_info()->isolate()->factory()->NewHeapNumberFromBits(
stdlib_uses_.ToIntegral());
wasm::ErrorThrower thrower(compilation_info()->isolate(), "AsmJs::Compile");
Handle<WasmModuleObject> compiled =
SyncCompileTranslatedAsmJs(
compilation_info()->isolate(), &thrower,
wasm::ModuleWireBytes(module_->begin(), module_->end()),
compilation_info()->script(),
Vector<const byte>(asm_offsets_->begin(), asm_offsets_->size()))
.ToHandleChecked();
DCHECK(!thrower.error());
compile_time_ = compile_timer.Elapsed().InMillisecondsF();
// The result is a compiled module and serialized standard library uses.
Handle<FixedArray> result =
info->isolate()->factory()->NewFixedArray(kWasmDataEntryCount);
compilation_info()->isolate()->factory()->NewFixedArray(
kWasmDataEntryCount);
result->set(kWasmDataCompiledModule, *compiled);
result->set(kWasmDataUsesArray, *uses_array);
ReportCompilationSuccess(info->script(), info->literal()->position(),
translate_time, compile_time, module->size());
return result;
result->set(kWasmDataUsesBitSet, *uses_bitset);
compilation_info()->SetAsmWasmData(result);
compilation_info()->SetCode(
BUILTIN_CODE(compilation_info()->isolate(), InstantiateAsmJs));
ReportCompilationSuccess(compilation_info()->script(),
compilation_info()->literal()->position(),
translate_time_, compile_time_, module_->size());
return SUCCEEDED;
}
CompilationJob* AsmJs::NewCompilationJob(ParseInfo* parse_info,
FunctionLiteral* literal,
Isolate* isolate) {
return new AsmJsCompilationJob(parse_info, literal, isolate);
}
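// A hypothetical driver for the job above, using the standard CompilationJob
// phases (PrepareJob/ExecuteJob/FinalizeJob are assumed from the base class):
CompilationJob* job = AsmJs::NewCompilationJob(parse_info, literal, isolate);
if (job->PrepareJob() == CompilationJob::SUCCEEDED &&
    job->ExecuteJob() == CompilationJob::SUCCEEDED &&
    job->FinalizeJob() == CompilationJob::SUCCEEDED) {
  // Code and asm-wasm data have been installed on the CompilationInfo.
}
delete job;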
MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
@ -261,8 +327,8 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
Handle<JSArrayBuffer> memory) {
base::ElapsedTimer instantiate_timer;
instantiate_timer.Start();
Handle<FixedArray> stdlib_uses(
FixedArray::cast(wasm_data->get(kWasmDataUsesArray)));
Handle<HeapNumber> uses_bitset(
HeapNumber::cast(wasm_data->get(kWasmDataUsesBitSet)));
Handle<WasmModuleObject> module(
WasmModuleObject::cast(wasm_data->get(kWasmDataCompiledModule)));
Handle<Script> script(Script::cast(shared->script()));
@ -272,16 +338,14 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
// Check that all used stdlib members are valid.
bool stdlib_use_of_typed_array_present = false;
for (int i = 0; i < stdlib_uses->length(); ++i) {
wasm::AsmJsParser::StdlibSet stdlib_uses(uses_bitset->value_as_bits());
if (!stdlib_uses.IsEmpty()) { // No checking needed if no uses.
if (stdlib.is_null()) {
ReportInstantiationFailure(script, position, "Requires standard library");
return MaybeHandle<Object>();
}
int member_id = Smi::ToInt(stdlib_uses->get(i));
wasm::AsmJsParser::StandardMember member =
static_cast<wasm::AsmJsParser::StandardMember>(member_id);
if (!IsStdlibMemberValid(isolate, stdlib, member,
&stdlib_use_of_typed_array_present)) {
if (!AreStdlibMembersValid(isolate, stdlib, stdlib_uses,
&stdlib_use_of_typed_array_present)) {
ReportInstantiationFailure(script, position, "Unexpected stdlib member");
return MaybeHandle<Object>();
}
@ -300,13 +364,22 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
ReportInstantiationFailure(script, position, "Unexpected heap size");
return MaybeHandle<Object>();
}
// Currently WebAssembly only supports heap sizes within the uint32_t range.
if (size > std::numeric_limits<uint32_t>::max()) {
ReportInstantiationFailure(script, position, "Unexpected heap size");
return MaybeHandle<Object>();
}
} else {
memory = Handle<JSArrayBuffer>::null();
}
wasm::ErrorThrower thrower(isolate, "AsmJs::Instantiate");
MaybeHandle<Object> maybe_module_object =
wasm::SyncInstantiate(isolate, &thrower, module, foreign, memory);
if (maybe_module_object.is_null()) {
DCHECK(!isolate->has_pending_exception());
// An exception caused by the module start function will be set as pending
// and bypass the {ErrorThrower}; this happens in the case of a stack overflow.
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
thrower.Reset(); // Ensure exceptions do not propagate.
ReportInstantiationFailure(script, position, "Internal wasm failure");
return MaybeHandle<Object>();

7
deps/v8/src/asmjs/asm-js.h

@ -13,13 +13,18 @@ namespace v8 {
namespace internal {
class CompilationInfo;
class CompilationJob;
class FunctionLiteral;
class JSArrayBuffer;
class ParseInfo;
class SharedFunctionInfo;
// Interface to compile and instantiate for asm.js modules.
class AsmJs {
public:
static MaybeHandle<FixedArray> CompileAsmViaWasm(CompilationInfo* info);
static CompilationJob* NewCompilationJob(ParseInfo* parse_info,
FunctionLiteral* literal,
Isolate* isolate);
static MaybeHandle<Object> InstantiateAsmWasm(Isolate* isolate,
Handle<SharedFunctionInfo>,
Handle<FixedArray> wasm_data,

133
deps/v8/src/asmjs/asm-parser.cc

@ -12,7 +12,7 @@
#include "src/asmjs/asm-js.h"
#include "src/asmjs/asm-types.h"
#include "src/base/optional.h"
#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker.
#include "src/flags.h"
#include "src/parsing/scanner.h"
#include "src/wasm/wasm-opcodes.h"
@ -68,8 +68,9 @@ namespace wasm {
#define TOK(name) AsmJsScanner::kToken_##name
AsmJsParser::AsmJsParser(Zone* zone, uintptr_t stack_limit,
std::unique_ptr<Utf16CharacterStream> stream)
Utf16CharacterStream* stream)
: zone_(zone),
scanner_(stream),
module_builder_(new (zone) WasmModuleBuilder(zone)),
return_type_(nullptr),
stack_limit_(stack_limit),
@ -87,8 +88,8 @@ AsmJsParser::AsmJsParser(Zone* zone, uintptr_t stack_limit,
call_coercion_deferred_(nullptr),
pending_label_(0),
global_imports_(zone) {
module_builder_->SetMinMemorySize(0);
InitializeStdlibTypes();
scanner_.SetStream(std::move(stream));
}
void AsmJsParser::InitializeStdlibTypes() {
@ -102,13 +103,15 @@ void AsmJsParser::InitializeStdlibTypes() {
stdlib_dqdq2d_->AsFunctionType()->AddArgument(dq);
auto* f = AsmType::Float();
auto* fh = AsmType::Floatish();
auto* fq = AsmType::FloatQ();
stdlib_fq2f_ = AsmType::Function(zone(), f);
stdlib_fq2f_->AsFunctionType()->AddArgument(fq);
auto* fq2fh = AsmType::Function(zone(), fh);
fq2fh->AsFunctionType()->AddArgument(fq);
auto* s = AsmType::Signed();
auto* s2s = AsmType::Function(zone(), s);
s2s->AsFunctionType()->AddArgument(s);
auto* u = AsmType::Unsigned();
auto* s2u = AsmType::Function(zone(), u);
s2u->AsFunctionType()->AddArgument(s);
auto* i = AsmType::Int();
stdlib_i2s_ = AsmType::Function(zone_, s);
@ -118,24 +121,36 @@ void AsmJsParser::InitializeStdlibTypes() {
stdlib_ii2s_->AsFunctionType()->AddArgument(i);
stdlib_ii2s_->AsFunctionType()->AddArgument(i);
// The signatures in "9 Standard Library" of the spec draft are outdated and
// have been superseded by the following, per an erratum:
// - Math.min/max : (signed, signed...) -> signed
// (double, double...) -> double
// (float, float...) -> float
auto* minmax_d = AsmType::MinMaxType(zone(), d, d);
// *VIOLATION* The float variant is not part of the spec, but firefox accepts
// it.
auto* minmax_f = AsmType::MinMaxType(zone(), f, f);
auto* minmax_i = AsmType::MinMaxType(zone(), s, i);
auto* minmax_s = AsmType::MinMaxType(zone(), s, s);
stdlib_minmax_ = AsmType::OverloadedFunction(zone());
stdlib_minmax_->AsOverloadedFunctionType()->AddOverload(minmax_i);
stdlib_minmax_->AsOverloadedFunctionType()->AddOverload(minmax_s);
stdlib_minmax_->AsOverloadedFunctionType()->AddOverload(minmax_f);
stdlib_minmax_->AsOverloadedFunctionType()->AddOverload(minmax_d);
// The signatures in "9 Standard Library" of the spec draft are outdated and
// have been superseded by the following, per an erratum:
// - Math.abs : (signed) -> unsigned
// (double?) -> double
// (float?) -> floatish
stdlib_abs_ = AsmType::OverloadedFunction(zone());
stdlib_abs_->AsOverloadedFunctionType()->AddOverload(s2s);
stdlib_abs_->AsOverloadedFunctionType()->AddOverload(s2u);
stdlib_abs_->AsOverloadedFunctionType()->AddOverload(stdlib_dq2d_);
stdlib_abs_->AsOverloadedFunctionType()->AddOverload(stdlib_fq2f_);
stdlib_abs_->AsOverloadedFunctionType()->AddOverload(fq2fh);
// The signatures in "9 Standard Library" of the spec draft are outdated and
// have been superseded by the following, per an erratum:
// - Math.ceil/floor/sqrt : (double?) -> double
// (float?) -> floatish
stdlib_ceil_like_ = AsmType::OverloadedFunction(zone());
stdlib_ceil_like_->AsOverloadedFunctionType()->AddOverload(stdlib_dq2d_);
stdlib_ceil_like_->AsOverloadedFunctionType()->AddOverload(stdlib_fq2f_);
stdlib_ceil_like_->AsOverloadedFunctionType()->AddOverload(fq2fh);
stdlib_fround_ = AsmType::FroundType(zone());
}
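// A concrete reading of the abs overloads above, as a hypothetical checker
// call (CanBeInvokedWith comes from AsmCallableType):
ZoneVector<AsmType*> args(zone());
args.push_back(AsmType::Signed());
bool ok = stdlib_abs_->AsOverloadedFunctionType()->CanBeInvokedWith(
    AsmType::Unsigned(), args);  // Math.abs(signed) -> unsigned, per erratum
DCHECK(ok);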
@ -550,7 +565,7 @@ void AsmJsParser::ValidateModuleVarNewStdlib(VarInfo* info) {
#define V(name, _junk1, _junk2, _junk3) \
case TOK(name): \
DeclareStdlibFunc(info, VarKind::kSpecial, AsmType::name()); \
stdlib_uses_.insert(StandardMember::k##name); \
stdlib_uses_.Add(StandardMember::k##name); \
break;
STDLIB_ARRAY_TYPE_LIST(V)
#undef V
@ -573,14 +588,14 @@ void AsmJsParser::ValidateModuleVarStdlib(VarInfo* info) {
case TOK(name): \
DeclareGlobal(info, false, AsmType::Double(), kWasmF64, \
WasmInitExpr(const_value)); \
stdlib_uses_.insert(StandardMember::kMath##name); \
stdlib_uses_.Add(StandardMember::kMath##name); \
break;
STDLIB_MATH_VALUE_LIST(V)
#undef V
#define V(name, Name, op, sig) \
case TOK(name): \
DeclareStdlibFunc(info, VarKind::kMath##Name, stdlib_##sig##_); \
stdlib_uses_.insert(StandardMember::kMath##Name); \
stdlib_uses_.Add(StandardMember::kMath##Name); \
break;
STDLIB_MATH_FUNCTION_LIST(V)
#undef V
@ -590,11 +605,11 @@ void AsmJsParser::ValidateModuleVarStdlib(VarInfo* info) {
} else if (Check(TOK(Infinity))) {
DeclareGlobal(info, false, AsmType::Double(), kWasmF64,
WasmInitExpr(std::numeric_limits<double>::infinity()));
stdlib_uses_.insert(StandardMember::kInfinity);
stdlib_uses_.Add(StandardMember::kInfinity);
} else if (Check(TOK(NaN))) {
DeclareGlobal(info, false, AsmType::Double(), kWasmF64,
WasmInitExpr(std::numeric_limits<double>::quiet_NaN()));
stdlib_uses_.insert(StandardMember::kNaN);
stdlib_uses_.Add(StandardMember::kNaN);
} else {
FAIL("Invalid member of stdlib");
}
@ -1781,13 +1796,44 @@ AsmType* AsmJsParser::AdditiveExpression() {
AsmType* AsmJsParser::ShiftExpression() {
AsmType* a = nullptr;
RECURSEn(a = AdditiveExpression());
heap_access_shift_position_ = kNoHeapAccessShift;
// TODO(bradnelson): Implement backtracking to avoid emitting code
// for the x >>> 0 case (similar to what's there for |0).
for (;;) {
switch (scanner_.Token()) {
// TODO(bradnelson): Implement backtracking to avoid emitting code
// for the x >>> 0 case (similar to what's there for |0).
case TOK(SAR): {
EXPECT_TOKENn(TOK(SAR));
heap_access_shift_position_ = kNoHeapAccessShift;
// Remember position allowing this shift-expression to be used as part
// of a heap access operation expecting `a >> n:NumericLiteral`.
bool imm = false;
size_t old_pos;
size_t old_code;
uint32_t shift_imm;
if (a->IsA(AsmType::Intish()) && CheckForUnsigned(&shift_imm)) {
old_pos = scanner_.Position();
old_code = current_function_builder_->GetPosition();
scanner_.Rewind();
imm = true;
}
AsmType* b = nullptr;
RECURSEn(b = AdditiveExpression());
// Check for `a >> n:NumericLiteral` pattern.
if (imm && old_pos == scanner_.Position()) {
heap_access_shift_position_ = old_code;
heap_access_shift_value_ = shift_imm;
}
if (!(a->IsA(AsmType::Intish()) && b->IsA(AsmType::Intish()))) {
FAILn("Expected intish for operator >>.");
}
current_function_builder_->Emit(kExprI32ShrS);
a = AsmType::Signed();
continue;
}
#define HANDLE_CASE(op, opcode, name, result) \
case TOK(op): { \
EXPECT_TOKENn(TOK(op)); \
heap_access_shift_position_ = kNoHeapAccessShift; \
AsmType* b = nullptr; \
RECURSEn(b = AdditiveExpression()); \
if (!(a->IsA(AsmType::Intish()) && b->IsA(AsmType::Intish()))) { \
@ -1797,9 +1843,8 @@ AsmType* AsmJsParser::ShiftExpression() {
a = AsmType::result(); \
continue; \
}
HANDLE_CASE(SHL, I32Shl, "<<", Signed);
HANDLE_CASE(SAR, I32ShrS, ">>", Signed);
HANDLE_CASE(SHR, I32ShrU, ">>>", Unsigned);
HANDLE_CASE(SHL, I32Shl, "<<", Signed);
HANDLE_CASE(SHR, I32ShrU, ">>>", Unsigned);
#undef HANDLE_CASE
default:
return a;
@ -2169,12 +2214,18 @@ AsmType* AsmJsParser::ValidateCall() {
} else if (callable->CanBeInvokedWith(AsmType::Float(),
param_specific_types)) {
return_type = AsmType::Float();
} else if (callable->CanBeInvokedWith(AsmType::Floatish(),
param_specific_types)) {
return_type = AsmType::Floatish();
} else if (callable->CanBeInvokedWith(AsmType::Double(),
param_specific_types)) {
return_type = AsmType::Double();
} else if (callable->CanBeInvokedWith(AsmType::Signed(),
param_specific_types)) {
return_type = AsmType::Signed();
} else if (callable->CanBeInvokedWith(AsmType::Unsigned(),
param_specific_types)) {
return_type = AsmType::Unsigned();
} else {
FAILn("Function use doesn't match definition");
}
@ -2217,7 +2268,7 @@ AsmType* AsmJsParser::ValidateCall() {
current_function_builder_->Emit(kExprF32Max);
}
}
} else if (param_specific_types[0]->IsA(AsmType::Int())) {
} else if (param_specific_types[0]->IsA(AsmType::Signed())) {
TemporaryVariableScope tmp_x(this);
TemporaryVariableScope tmp_y(this);
for (size_t i = 1; i < param_specific_types.size(); ++i) {
@ -2244,14 +2295,13 @@ AsmType* AsmJsParser::ValidateCall() {
if (param_specific_types[0]->IsA(AsmType::Signed())) {
TemporaryVariableScope tmp(this);
current_function_builder_->EmitTeeLocal(tmp.get());
current_function_builder_->Emit(kExprI32Clz);
current_function_builder_->EmitWithU8(kExprIf, kLocalI32);
current_function_builder_->EmitGetLocal(tmp.get());
current_function_builder_->Emit(kExprElse);
current_function_builder_->EmitI32Const(0);
current_function_builder_->EmitI32Const(31);
current_function_builder_->Emit(kExprI32ShrS);
current_function_builder_->EmitTeeLocal(tmp.get());
current_function_builder_->Emit(kExprI32Xor);
current_function_builder_->EmitGetLocal(tmp.get());
current_function_builder_->Emit(kExprI32Sub);
current_function_builder_->Emit(kExprEnd);
} else if (param_specific_types[0]->IsA(AsmType::DoubleQ())) {
current_function_builder_->Emit(kExprF64Abs);
} else if (param_specific_types[0]->IsA(AsmType::FloatQ())) {
@ -2262,12 +2312,9 @@ AsmType* AsmJsParser::ValidateCall() {
break;
case VarKind::kMathFround:
if (param_specific_types[0]->IsA(AsmType::DoubleQ())) {
current_function_builder_->Emit(kExprF32ConvertF64);
} else {
DCHECK(param_specific_types[0]->IsA(AsmType::FloatQ()));
}
break;
// NOTE: Handled in {AsmJsParser::CallExpression} specially and treated
// as a coercion to "float" type. Cannot be reached as a call here.
UNREACHABLE();
default:
UNREACHABLE();
@ -2353,18 +2400,18 @@ void AsmJsParser::ValidateHeapAccess() {
info->type->IsA(AsmType::Uint8Array())) {
RECURSE(index_type = Expression(nullptr));
} else {
RECURSE(index_type = AdditiveExpression());
EXPECT_TOKEN(TOK(SAR));
uint32_t shift;
if (!CheckForUnsigned(&shift)) {
RECURSE(index_type = ShiftExpression());
if (heap_access_shift_position_ == kNoHeapAccessShift) {
FAIL("Expected shift of word size");
}
if (shift > 3) {
if (heap_access_shift_value_ > 3) {
FAIL("Expected valid heap access shift");
}
if ((1 << shift) != size) {
if ((1 << heap_access_shift_value_) != size) {
FAIL("Expected heap access shift to match heap view");
}
// Delete the code of the actual shift operation.
current_function_builder_->DeleteCodeAfter(heap_access_shift_position_);
// Mask bottom bits to match asm.js behavior.
current_function_builder_->EmitI32Const(~(size - 1));
current_function_builder_->Emit(kExprI32And);
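// Worked instance of the checks above for an Int32Array view (element size
// 4), sketched as compile-time arithmetic:
constexpr int size = 4;   // Int32Array element size in bytes
constexpr int shift = 2;  // the literal in `HEAP32[p >> 2]`
static_assert((1 << shift) == size, "shift must match the heap view");
static_assert(~(size - 1) == -4, "the emitted mask is ~3, i.e. index & ~3");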
@ -2460,3 +2507,5 @@ void AsmJsParser::GatherCases(ZoneVector<int32_t>* cases) {
} // namespace wasm
} // namespace internal
} // namespace v8
#undef RECURSE

13
deps/v8/src/asmjs/asm-parser.h

@ -47,10 +47,10 @@ class AsmJsParser {
};
// clang-format on
typedef std::unordered_set<StandardMember, std::hash<int>> StdlibSet;
typedef EnumSet<StandardMember, uint64_t> StdlibSet;
explicit AsmJsParser(Zone* zone, uintptr_t stack_limit,
std::unique_ptr<Utf16CharacterStream> stream);
Utf16CharacterStream* stream);
bool Run();
const char* failure_message() const { return failure_message_; }
int failure_location() const { return failure_location_; }
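// With stream ownership moved out of parser and scanner, a caller now wires
// things up roughly like this (a sketch following the asm-js.cc changes; the
// stream stays owned by the caller, e.g. by ParseInfo):
Utf16CharacterStream* stream = parse_info->character_stream();
stream->Seek(literal->start_position());
wasm::AsmJsParser parser(zone, stack_limit, stream);  // no ownership transfer
if (!parser.Run()) {
  // Report parser.failure_message() at parser.failure_location().
}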
@ -183,7 +183,6 @@ class AsmJsParser {
// Types used for stdlib function and their set up.
AsmType* stdlib_dq2d_;
AsmType* stdlib_dqdq2d_;
AsmType* stdlib_fq2f_;
AsmType* stdlib_i2s_;
AsmType* stdlib_ii2s_;
AsmType* stdlib_minmax_;
@ -208,6 +207,14 @@ class AsmJsParser {
// aforementioned {call_coercion_deferred} is allowed.
size_t call_coercion_deferred_position_;
// The code position of the last heap access shift by an immediate value.
// For `heap[expr >> value:NumericLiteral]` this indicates from where to
// delete code when the expression is used as part of a valid heap access.
// Will be set to {kNoHeapAccessShift} if heap access shift wasn't matched.
size_t heap_access_shift_position_;
uint32_t heap_access_shift_value_;
static const size_t kNoHeapAccessShift = -1;
// Used to track the last label we've seen so it can be matched to later
// statements it's attached to.
AsmJsScanner::token_t pending_label_;

14
deps/v8/src/asmjs/asm-scanner.cc

@ -18,8 +18,9 @@ namespace {
static const int kMaxIdentifierCount = 0xf000000;
};
AsmJsScanner::AsmJsScanner()
: token_(kUninitialized),
AsmJsScanner::AsmJsScanner(Utf16CharacterStream* stream)
: stream_(stream),
token_(kUninitialized),
preceding_token_(kUninitialized),
next_token_(kUninitialized),
position_(0),
@ -44,14 +45,6 @@ AsmJsScanner::AsmJsScanner()
#define V(name) global_names_[#name] = kToken_##name;
KEYWORD_NAME_LIST(V)
#undef V
}
// The destructor of unique_ptr<T> requires a complete declaration of T; we
// only want to include the necessary declaration here instead of the header
// file.
AsmJsScanner::~AsmJsScanner() {}
void AsmJsScanner::SetStream(std::unique_ptr<Utf16CharacterStream> stream) {
stream_ = std::move(stream);
Next();
}
@ -210,6 +203,7 @@ std::string AsmJsScanner::Name(token_t token) const {
SPECIAL_TOKEN_LIST(V)
default:
break;
#undef V
}
UNREACHABLE();
}

8
deps/v8/src/asmjs/asm-scanner.h

@ -31,11 +31,7 @@ class V8_EXPORT_PRIVATE AsmJsScanner {
public:
typedef int32_t token_t;
AsmJsScanner();
~AsmJsScanner();
// Pick the stream to parse (must be called before anything else).
void SetStream(std::unique_ptr<Utf16CharacterStream> stream);
explicit AsmJsScanner(Utf16CharacterStream* stream);
// Get current token.
token_t Token() const { return token_; }
@ -140,7 +136,7 @@ class V8_EXPORT_PRIVATE AsmJsScanner {
// clang-format on
private:
std::unique_ptr<Utf16CharacterStream> stream_;
Utf16CharacterStream* stream_;
token_t token_;
token_t preceding_token_;
token_t next_token_; // Only set when in {rewind} state.

45
deps/v8/src/asmjs/asm-types.cc

@ -234,21 +234,6 @@ AsmType* AsmType::MinMaxType(Zone* zone, AsmType* dest, AsmType* src) {
return reinterpret_cast<AsmType*>(MinMax);
}
bool AsmFFIType::CanBeInvokedWith(AsmType* return_type,
const ZoneVector<AsmType*>& args) {
if (return_type->IsExactly(AsmType::Float())) {
return false;
}
for (size_t ii = 0; ii < args.size(); ++ii) {
if (!args[ii]->IsA(AsmType::Extern())) {
return false;
}
}
return true;
}
bool AsmFunctionType::IsA(AsmType* other) {
auto* that = other->AsFunctionType();
if (that == nullptr) {
@ -319,36 +304,6 @@ void AsmOverloadedFunctionType::AddOverload(AsmType* overload) {
overloads_.push_back(overload);
}
AsmFunctionTableType::AsmFunctionTableType(size_t length, AsmType* signature)
: length_(length), signature_(signature) {
DCHECK(signature_ != nullptr);
DCHECK(signature_->AsFunctionType() != nullptr);
}
namespace {
// ToString is used for reporting function tables' names. It converts its
// argument to uint32_t because asm.js integers are 32 bits wide, effectively
// limiting the maximum function table length.
std::string ToString(size_t s) {
auto u32 = static_cast<uint32_t>(s);
// 16 bytes is more than enough to represent a 32-bit integer as a base 10
// string.
char digits[16];
int length = base::OS::SNPrintF(digits, arraysize(digits), "%" PRIu32, u32);
DCHECK_NE(length, -1);
return std::string(digits, length);
}
} // namespace
std::string AsmFunctionTableType::Name() {
return "(" + signature_->Name() + ")[" + ToString(length_) + "]";
}
bool AsmFunctionTableType::CanBeInvokedWith(AsmType* return_type,
const ZoneVector<AsmType*>& args) {
return signature_->AsCallableType()->CanBeInvokedWith(return_type, args);
}
} // namespace wasm
} // namespace internal
} // namespace v8

108
deps/v8/src/asmjs/asm-types.h

@ -18,10 +18,8 @@ namespace internal {
namespace wasm {
class AsmType;
class AsmFFIType;
class AsmFunctionType;
class AsmOverloadedFunctionType;
class AsmFunctionTableType;
// List of V(CamelName, string_name, number, parent_types)
#define FOR_EACH_ASM_VALUE_TYPE_LIST(V) \
@ -58,9 +56,7 @@ class AsmFunctionTableType;
// List of V(CamelName)
#define FOR_EACH_ASM_CALLABLE_TYPE_LIST(V) \
V(FunctionType) \
V(FFIType) \
V(OverloadedFunctionType) \
V(FunctionTableType)
V(OverloadedFunctionType)
class AsmValueType {
public:
@ -176,45 +172,6 @@ class V8_EXPORT_PRIVATE AsmOverloadedFunctionType final
DISALLOW_IMPLICIT_CONSTRUCTORS(AsmOverloadedFunctionType);
};
class V8_EXPORT_PRIVATE AsmFFIType final : public AsmCallableType {
public:
AsmFFIType* AsFFIType() override { return this; }
std::string Name() override { return "Function"; }
bool CanBeInvokedWith(AsmType* return_type,
const ZoneVector<AsmType*>& args) override;
private:
friend AsmType;
AsmFFIType() = default;
DISALLOW_COPY_AND_ASSIGN(AsmFFIType);
};
class V8_EXPORT_PRIVATE AsmFunctionTableType : public AsmCallableType {
public:
AsmFunctionTableType* AsFunctionTableType() override { return this; }
std::string Name() override;
bool CanBeInvokedWith(AsmType* return_type,
const ZoneVector<AsmType*>& args) override;
size_t length() const { return length_; }
AsmType* signature() { return signature_; }
private:
friend class AsmType;
AsmFunctionTableType(size_t length, AsmType* signature);
size_t length_;
AsmType* signature_;
DISALLOW_IMPLICIT_CONSTRUCTORS(AsmFunctionTableType);
};
class V8_EXPORT_PRIVATE AsmType {
public:
#define DEFINE_CONSTRUCTOR(CamelName, string_name, number, parent_types) \
@ -256,19 +213,6 @@ class V8_EXPORT_PRIVATE AsmType {
// The (variadic) type for min and max.
static AsmType* MinMaxType(Zone* zone, AsmType* dest, AsmType* src);
// The type for foreign functions.
static AsmType* FFIType(Zone* zone) {
auto* f = new (zone) AsmFFIType();
return reinterpret_cast<AsmType*>(f);
}
// The type for function tables.
static AsmType* FunctionTableType(Zone* zone, size_t length,
AsmType* signature) {
auto* f = new (zone) AsmFunctionTableType(length, signature);
return reinterpret_cast<AsmType*>(f);
}
std::string Name();
// IsExactly returns true if this is the exact same type as that. For
// non-value types (e.g., callables), this returns this == that.
@ -278,56 +222,6 @@ class V8_EXPORT_PRIVATE AsmType {
// returns this == that.
bool IsA(AsmType* that);
// Types allowed in return statements. void is the type for returns without
// an expression.
bool IsReturnType() {
return this == AsmType::Void() || this == AsmType::Double() ||
this == AsmType::Signed() || this == AsmType::Float();
}
// Converts this to the corresponding valid argument type.
AsmType* ToReturnType() {
if (this->IsA(AsmType::Signed())) {
return AsmType::Signed();
}
if (this->IsA(AsmType::Double())) {
return AsmType::Double();
}
if (this->IsA(AsmType::Float())) {
return AsmType::Float();
}
if (this->IsA(AsmType::Void())) {
return AsmType::Void();
}
return AsmType::None();
}
// Types allowed to be parameters in asm functions.
bool IsParameterType() {
return this == AsmType::Double() || this == AsmType::Int() ||
this == AsmType::Float();
}
// Converts this to the corresponding valid argument type.
AsmType* ToParameterType() {
if (this->IsA(AsmType::Int())) {
return AsmType::Int();
}
if (this->IsA(AsmType::Double())) {
return AsmType::Double();
}
if (this->IsA(AsmType::Float())) {
return AsmType::Float();
}
return AsmType::None();
}
// Types allowed to be compared using the comparison operators.
bool IsComparableType() {
return this == AsmType::Double() || this == AsmType::Signed() ||
this == AsmType::Unsigned() || this == AsmType::Float();
}
// The following methods are meant to be used for inspecting the traits of
// element types for the heap view types.
enum : int32_t { kNotHeapType = -1 };

262
deps/v8/src/assembler.cc

@ -47,6 +47,7 @@
#include "src/base/platform/platform.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen.h"
#include "src/compiler/code-assembler.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
@ -54,6 +55,7 @@
#include "src/execution.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
#include "src/ostreams.h"
@ -158,7 +160,8 @@ AssemblerBase::AssemblerBase(IsolateData isolate_data, void* buffer,
enabled_cpu_features_(0),
emit_debug_code_(FLAG_debug_code),
predictable_code_size_(false),
constant_pool_available_(false) {
constant_pool_available_(false),
jump_optimization_info_(nullptr) {
own_buffer_ = buffer == NULL;
if (buffer_size == 0) buffer_size = kMinimalBufferSize;
DCHECK(buffer_size > 0);
@ -188,14 +191,12 @@ void AssemblerBase::Print(Isolate* isolate) {
v8::internal::Disassembler::Decode(isolate, &os, buffer_, pc_, nullptr);
}
// -----------------------------------------------------------------------------
// Implementation of PredictableCodeSizeScope
PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler)
: PredictableCodeSizeScope(assembler, -1) {}
PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
int expected_size)
: assembler_(assembler),
@ -205,7 +206,6 @@ PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
assembler_->set_predictable_code_size(true);
}
PredictableCodeSizeScope::~PredictableCodeSizeScope() {
// TODO(svenpanne) Remove the 'if' when everything works.
if (expected_size_ >= 0) {
@ -214,7 +214,6 @@ PredictableCodeSizeScope::~PredictableCodeSizeScope() {
assembler_->set_predictable_code_size(old_value_);
}
// -----------------------------------------------------------------------------
// Implementation of CpuFeatureScope
@ -232,7 +231,6 @@ CpuFeatureScope::~CpuFeatureScope() {
}
#endif
bool CpuFeatures::initialized_ = false;
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::icache_line_size_ = 0;
@ -309,8 +307,18 @@ void RelocInfo::update_wasm_memory_reference(
Address updated_reference = new_base + (wasm_memory_reference() - old_base);
// The reference is not checked here but at runtime. Validity of references
// may change over time.
unchecked_update_wasm_memory_reference(isolate, updated_reference,
icache_flush_mode);
set_embedded_address(isolate, updated_reference, icache_flush_mode);
}
void RelocInfo::set_global_handle(Isolate* isolate, Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, WASM_GLOBAL_HANDLE);
set_embedded_address(isolate, address, icache_flush_mode);
}
Address RelocInfo::global_handle() const {
DCHECK_EQ(rmode_, WASM_GLOBAL_HANDLE);
return embedded_address();
}
void RelocInfo::update_wasm_memory_size(Isolate* isolate, uint32_t old_size,
@ -320,8 +328,7 @@ void RelocInfo::update_wasm_memory_size(Isolate* isolate, uint32_t old_size,
uint32_t current_size_reference = wasm_memory_size_reference();
uint32_t updated_size_reference =
new_size + (current_size_reference - old_size);
unchecked_update_wasm_size(isolate, updated_size_reference,
icache_flush_mode);
set_embedded_size(isolate, updated_size_reference, icache_flush_mode);
}
void RelocInfo::update_wasm_global_reference(
@ -332,19 +339,34 @@ void RelocInfo::update_wasm_global_reference(
DCHECK_LE(old_base, wasm_global_reference());
updated_reference = new_base + (wasm_global_reference() - old_base);
DCHECK_LE(new_base, updated_reference);
unchecked_update_wasm_memory_reference(isolate, updated_reference,
icache_flush_mode);
set_embedded_address(isolate, updated_reference, icache_flush_mode);
}
Address RelocInfo::wasm_global_reference() const {
DCHECK(IsWasmGlobalReference(rmode_));
return embedded_address();
}
uint32_t RelocInfo::wasm_function_table_size_reference() const {
DCHECK(IsWasmFunctionTableSizeReference(rmode_));
return embedded_size();
}
uint32_t RelocInfo::wasm_memory_size_reference() const {
DCHECK(IsWasmMemorySizeReference(rmode_));
return embedded_size();
}
Address RelocInfo::wasm_memory_reference() const {
DCHECK(IsWasmMemoryReference(rmode_));
return embedded_address();
}
void RelocInfo::update_wasm_function_table_size_reference(
Isolate* isolate, uint32_t old_size, uint32_t new_size,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmFunctionTableSizeReference(rmode_));
uint32_t current_size_reference = wasm_function_table_size_reference();
uint32_t updated_size_reference =
new_size + (current_size_reference - old_size);
unchecked_update_wasm_size(isolate, updated_size_reference,
icache_flush_mode);
set_embedded_size(isolate, new_size, icache_flush_mode);
}
void RelocInfo::set_target_address(Isolate* isolate, Address target,
@ -380,7 +402,6 @@ uint32_t RelocInfoWriter::WriteLongPCJump(uint32_t pc_delta) {
return pc_delta & kSmallPCDeltaMask;
}
void RelocInfoWriter::WriteShortTaggedPC(uint32_t pc_delta, int tag) {
// Write a byte of tagged pc-delta, possibly preceded by an explicit pc-jump.
pc_delta = WriteLongPCJump(pc_delta);
@ -391,13 +412,11 @@ void RelocInfoWriter::WriteShortData(intptr_t data_delta) {
*--pos_ = static_cast<byte>(data_delta);
}
void RelocInfoWriter::WriteMode(RelocInfo::Mode rmode) {
STATIC_ASSERT(RelocInfo::NUMBER_OF_MODES <= (1 << kLongTagBits));
*--pos_ = static_cast<int>((rmode << kTagBits) | kDefaultTag);
}
void RelocInfoWriter::WriteModeAndPC(uint32_t pc_delta, RelocInfo::Mode rmode) {
// Write a two-byte tagged pc-delta, possibly preceded by a variable-length pc-jump.
pc_delta = WriteLongPCJump(pc_delta);
@ -405,7 +424,6 @@ void RelocInfoWriter::WriteModeAndPC(uint32_t pc_delta, RelocInfo::Mode rmode) {
*--pos_ = pc_delta;
}
void RelocInfoWriter::WriteIntData(int number) {
for (int i = 0; i < kIntSize; i++) {
*--pos_ = static_cast<byte>(number);
@ -414,7 +432,6 @@ void RelocInfoWriter::WriteIntData(int number) {
}
}
void RelocInfoWriter::WriteData(intptr_t data_delta) {
for (int i = 0; i < kIntptrSize; i++) {
*--pos_ = static_cast<byte>(data_delta);
@ -423,7 +440,6 @@ void RelocInfoWriter::WriteData(intptr_t data_delta) {
}
}
void RelocInfoWriter::Write(const RelocInfo* rinfo) {
RelocInfo::Mode rmode = rinfo->rmode();
#ifdef DEBUG
@ -462,28 +478,23 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
#endif
}
inline int RelocIterator::AdvanceGetTag() {
return *--pos_ & kTagMask;
}
inline RelocInfo::Mode RelocIterator::GetMode() {
return static_cast<RelocInfo::Mode>((*pos_ >> kTagBits) &
((1 << kLongTagBits) - 1));
}
inline void RelocIterator::ReadShortTaggedPC() {
rinfo_.pc_ += *pos_ >> kTagBits;
}
inline void RelocIterator::AdvanceReadPC() {
rinfo_.pc_ += *--pos_;
}
void RelocIterator::AdvanceReadInt() {
int x = 0;
for (int i = 0; i < kIntSize; i++) {
@ -492,7 +503,6 @@ void RelocIterator::AdvanceReadInt() {
rinfo_.data_ = x;
}
void RelocIterator::AdvanceReadData() {
intptr_t x = 0;
for (int i = 0; i < kIntptrSize; i++) {
@ -501,7 +511,6 @@ void RelocIterator::AdvanceReadData() {
rinfo_.data_ = x;
}
void RelocIterator::AdvanceReadLongPCJump() {
// Read the 32-kSmallPCDeltaBits most significant bits of the
// pc jump in kChunkBits bit chunks and shift them into place.
@ -522,7 +531,6 @@ inline void RelocIterator::ReadShortData() {
rinfo_.data_ = unsigned_b;
}
void RelocIterator::next() {
DCHECK(!done());
// Basically, do the opposite of RelocInfoWriter::Write.
@ -574,15 +582,6 @@ void RelocIterator::next() {
}
}
}
if (code_age_sequence_ != NULL) {
byte* old_code_age_sequence = code_age_sequence_;
code_age_sequence_ = NULL;
if (SetMode(RelocInfo::CODE_AGE_SEQUENCE)) {
rinfo_.data_ = 0;
rinfo_.pc_ = old_code_age_sequence;
return;
}
}
done_ = true;
}
@ -595,15 +594,6 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
end_ = code->relocation_start();
done_ = false;
mode_mask_ = mode_mask;
byte* sequence = code->FindCodeAgeSequence();
// We get the isolate from the map, because at serialization time
// the code pointer has been cloned and isn't really in heap space.
Isolate* isolate = code->map()->GetIsolate();
if (sequence != NULL && !Code::IsYoungSequence(isolate, sequence)) {
code_age_sequence_ = sequence;
} else {
code_age_sequence_ = NULL;
}
if (mode_mask_ == 0) pos_ = end_;
next();
}
@ -616,19 +606,13 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
end_ = pos_ - desc.reloc_size;
done_ = false;
mode_mask_ = mode_mask;
code_age_sequence_ = NULL;
if (mode_mask_ == 0) pos_ = end_;
next();
}
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
return DebugCodegen::DebugBreakSlotIsPatched(pc_);
}
#ifdef DEBUG
bool RelocInfo::RequiresRelocation(Isolate* isolate, const CodeDesc& desc) {
// Ensure there are no code targets or embedded objects present in the
@ -636,14 +620,12 @@ bool RelocInfo::RequiresRelocation(Isolate* isolate, const CodeDesc& desc) {
// generation.
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::CELL) |
RelocInfo::kApplyMask;
RelocIterator it(desc, mode_mask);
return !it.done();
}
#endif
#ifdef ENABLE_DISASSEMBLER
const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
switch (rmode) {
@ -655,8 +637,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "embedded object";
case CODE_TARGET:
return "code target";
case CELL:
return "property cell";
case RUNTIME_ENTRY:
return "runtime entry";
case COMMENT:
@ -679,16 +659,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "constant pool";
case VENEER_POOL:
return "veneer pool";
case DEBUG_BREAK_SLOT_AT_POSITION:
return "debug break slot at position";
case DEBUG_BREAK_SLOT_AT_RETURN:
return "debug break slot at return";
case DEBUG_BREAK_SLOT_AT_CALL:
return "debug break slot at call";
case DEBUG_BREAK_SLOT_AT_TAIL_CALL:
return "debug break slot at tail call";
case CODE_AGE_SEQUENCE:
return "code age sequence";
case WASM_MEMORY_REFERENCE:
return "wasm memory reference";
case WASM_MEMORY_SIZE_REFERENCE:
@ -699,6 +669,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "wasm function table size reference";
case WASM_PROTECTED_INSTRUCTION_LANDING:
return "wasm protected instruction landing";
case WASM_GLOBAL_HANDLE:
return "global handle";
case NUMBER_OF_MODES:
case PC_JUMP:
UNREACHABLE();
@ -706,7 +678,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "unknown relocation type";
}
void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
os << static_cast<const void*>(pc_) << " " << RelocModeName(rmode_);
if (IsComment(rmode_)) {
@ -744,16 +715,12 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
}
#endif // ENABLE_DISASSEMBLER
#ifdef VERIFY_HEAP
void RelocInfo::Verify(Isolate* isolate) {
switch (rmode_) {
case EMBEDDED_OBJECT:
Object::VerifyPointer(target_object());
break;
case CELL:
Object::VerifyPointer(target_cell());
break;
case CODE_TARGET: {
// convert inline target address to code object
Address addr = target_address();
@ -783,14 +750,11 @@ void RelocInfo::Verify(Isolate* isolate) {
case DEOPT_ID:
case CONST_POOL:
case VENEER_POOL:
case DEBUG_BREAK_SLOT_AT_POSITION:
case DEBUG_BREAK_SLOT_AT_RETURN:
case DEBUG_BREAK_SLOT_AT_CALL:
case DEBUG_BREAK_SLOT_AT_TAIL_CALL:
case WASM_MEMORY_REFERENCE:
case WASM_MEMORY_SIZE_REFERENCE:
case WASM_GLOBAL_REFERENCE:
case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
case WASM_GLOBAL_HANDLE:
case WASM_PROTECTED_INSTRUCTION_LANDING:
// TODO(eholk): make sure the protected instruction is in range.
case NONE32:
@ -800,14 +764,10 @@ void RelocInfo::Verify(Isolate* isolate) {
case PC_JUMP:
UNREACHABLE();
break;
case CODE_AGE_SEQUENCE:
DCHECK(Code::IsYoungSequence(isolate, pc_) || code_age_stub()->IsCode());
break;
}
}
#endif // VERIFY_HEAP
// Implementation of ExternalReference
static ExternalReference::Type BuiltinCallTypeForResultSize(int result_size) {
@ -822,7 +782,6 @@ static ExternalReference::Type BuiltinCallTypeForResultSize(int result_size) {
UNREACHABLE();
}
void ExternalReference::SetUp() {
double_constants.min_int = kMinInt;
double_constants.one_half = 0.5;
@ -842,25 +801,22 @@ ExternalReference::ExternalReference(
Isolate* isolate = NULL)
: address_(Redirect(isolate, fun->address(), type)) {}
ExternalReference::ExternalReference(Builtins::Name name, Isolate* isolate)
: address_(isolate->builtins()->builtin_address(name)) {}
ExternalReference::ExternalReference(Runtime::FunctionId id, Isolate* isolate)
: ExternalReference(Runtime::FunctionForId(id), isolate) {}
ExternalReference::ExternalReference(const Runtime::Function* f,
Isolate* isolate)
: address_(Redirect(isolate, f->entry,
BuiltinCallTypeForResultSize(f->result_size))) {}
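// Note (general V8 behavior, assumed rather than shown in this hunk):
// Redirect() is what makes these C++ entry points callable from generated
// code under the simulator, where it wraps the raw address in a trampoline
// the simulator can intercept; on real hardware it passes the address
// through unchanged.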
ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
return ExternalReference(isolate);
}
ExternalReference ExternalReference::builtins_address(Isolate* isolate) {
return ExternalReference(isolate->builtins()->builtins_table_address());
}
ExternalReference ExternalReference::interpreter_dispatch_table_address(
Isolate* isolate) {
return ExternalReference(isolate->interpreter()->dispatch_table_address());
@ -872,6 +828,12 @@ ExternalReference ExternalReference::interpreter_dispatch_counters(
isolate->interpreter()->bytecode_dispatch_counters_table());
}
ExternalReference ExternalReference::bytecode_size_table_address(
Isolate* isolate) {
return ExternalReference(
interpreter::Bytecodes::bytecode_size_table_address());
}
ExternalReference::ExternalReference(StatsCounter* counter)
: address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
@ -881,7 +843,6 @@ ExternalReference::ExternalReference(IsolateAddressId id, Isolate* isolate)
ExternalReference::ExternalReference(const SCTableReference& table_ref)
: address_(table_ref.address()) {}
ExternalReference ExternalReference::
incremental_marking_record_write_function(Isolate* isolate) {
return ExternalReference(Redirect(
@ -889,14 +850,6 @@ ExternalReference ExternalReference::
FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
}
ExternalReference
ExternalReference::incremental_marking_record_write_code_entry_function(
Isolate* isolate) {
return ExternalReference(Redirect(
isolate,
FUNCTION_ADDR(IncrementalMarking::RecordWriteOfCodeEntryFromCode)));
}
ExternalReference ExternalReference::store_buffer_overflow_function(
Isolate* isolate) {
return ExternalReference(Redirect(
@ -904,7 +857,6 @@ ExternalReference ExternalReference::store_buffer_overflow_function(
FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
}
ExternalReference ExternalReference::delete_handle_scope_extensions(
Isolate* isolate) {
return ExternalReference(Redirect(
@ -912,27 +864,11 @@ ExternalReference ExternalReference::delete_handle_scope_extensions(
FUNCTION_ADDR(HandleScope::DeleteExtensions)));
}
ExternalReference ExternalReference::get_date_field_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(JSDate::GetField)));
}
ExternalReference ExternalReference::get_make_code_young_function(
Isolate* isolate) {
return ExternalReference(Redirect(
isolate, FUNCTION_ADDR(Code::MakeCodeAgeSequenceYoung)));
}
ExternalReference ExternalReference::get_mark_code_as_executed_function(
Isolate* isolate) {
return ExternalReference(Redirect(
isolate, FUNCTION_ADDR(Code::MarkCodeAsExecuted)));
}
ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
return ExternalReference(isolate->date_cache()->stamp_address());
}
@ -949,14 +885,12 @@ ExternalReference ExternalReference::stress_deopt_count(Isolate* isolate) {
return ExternalReference(isolate->stress_deopt_count_address());
}
ExternalReference ExternalReference::new_deoptimizer_function(
Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(Deoptimizer::New)));
}
ExternalReference ExternalReference::compute_output_frames_function(
Isolate* isolate) {
return ExternalReference(
@ -1125,7 +1059,6 @@ ExternalReference ExternalReference::log_enter_external_function(
Redirect(isolate, FUNCTION_ADDR(Logger::EnterExternal)));
}
ExternalReference ExternalReference::log_leave_external_function(
Isolate* isolate) {
return ExternalReference(
@ -1136,24 +1069,20 @@ ExternalReference ExternalReference::roots_array_start(Isolate* isolate) {
return ExternalReference(isolate->heap()->roots_array_start());
}
ExternalReference ExternalReference::allocation_sites_list_address(
Isolate* isolate) {
return ExternalReference(isolate->heap()->allocation_sites_list_address());
}
ExternalReference ExternalReference::address_of_stack_limit(Isolate* isolate) {
return ExternalReference(isolate->stack_guard()->address_of_jslimit());
}
ExternalReference ExternalReference::address_of_real_stack_limit(
Isolate* isolate) {
return ExternalReference(isolate->stack_guard()->address_of_real_jslimit());
}
ExternalReference ExternalReference::address_of_regexp_stack_limit(
Isolate* isolate) {
return ExternalReference(isolate->regexp_stack()->limit_address());
@ -1168,120 +1097,104 @@ ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
return ExternalReference(isolate->heap()->store_buffer_top_address());
}
ExternalReference ExternalReference::heap_is_marking_flag_address(
Isolate* isolate) {
return ExternalReference(isolate->heap()->IsMarkingFlagAddress());
}
ExternalReference ExternalReference::new_space_allocation_top_address(
Isolate* isolate) {
return ExternalReference(isolate->heap()->NewSpaceAllocationTopAddress());
}
ExternalReference ExternalReference::new_space_allocation_limit_address(
Isolate* isolate) {
return ExternalReference(isolate->heap()->NewSpaceAllocationLimitAddress());
}
ExternalReference ExternalReference::old_space_allocation_top_address(
Isolate* isolate) {
return ExternalReference(isolate->heap()->OldSpaceAllocationTopAddress());
}
ExternalReference ExternalReference::old_space_allocation_limit_address(
Isolate* isolate) {
return ExternalReference(isolate->heap()->OldSpaceAllocationLimitAddress());
}
ExternalReference ExternalReference::handle_scope_level_address(
Isolate* isolate) {
return ExternalReference(HandleScope::current_level_address(isolate));
}
ExternalReference ExternalReference::handle_scope_next_address(
Isolate* isolate) {
return ExternalReference(HandleScope::current_next_address(isolate));
}
ExternalReference ExternalReference::handle_scope_limit_address(
Isolate* isolate) {
return ExternalReference(HandleScope::current_limit_address(isolate));
}
ExternalReference ExternalReference::scheduled_exception_address(
Isolate* isolate) {
return ExternalReference(isolate->scheduled_exception_address());
}
ExternalReference ExternalReference::address_of_pending_message_obj(
Isolate* isolate) {
return ExternalReference(isolate->pending_message_obj_address());
}
ExternalReference ExternalReference::address_of_min_int() {
return ExternalReference(reinterpret_cast<void*>(&double_constants.min_int));
}
ExternalReference ExternalReference::address_of_one_half() {
return ExternalReference(reinterpret_cast<void*>(&double_constants.one_half));
}
ExternalReference ExternalReference::address_of_minus_one_half() {
return ExternalReference(
reinterpret_cast<void*>(&double_constants.minus_one_half));
}
ExternalReference ExternalReference::address_of_negative_infinity() {
return ExternalReference(
reinterpret_cast<void*>(&double_constants.negative_infinity));
}
ExternalReference ExternalReference::address_of_the_hole_nan() {
return ExternalReference(
reinterpret_cast<void*>(&double_constants.the_hole_nan));
}
ExternalReference ExternalReference::address_of_uint32_bias() {
return ExternalReference(
reinterpret_cast<void*>(&double_constants.uint32_bias));
}
ExternalReference ExternalReference::address_of_float_abs_constant() {
return ExternalReference(reinterpret_cast<void*>(&float_absolute_constant));
}
ExternalReference ExternalReference::address_of_float_neg_constant() {
return ExternalReference(reinterpret_cast<void*>(&float_negate_constant));
}
ExternalReference ExternalReference::address_of_double_abs_constant() {
return ExternalReference(reinterpret_cast<void*>(&double_absolute_constant));
}
ExternalReference ExternalReference::address_of_double_neg_constant() {
return ExternalReference(reinterpret_cast<void*>(&double_negate_constant));
}
ExternalReference ExternalReference::is_profiling_address(Isolate* isolate) {
return ExternalReference(isolate->is_profiling_address());
}
ExternalReference ExternalReference::invoke_function_callback(
Isolate* isolate) {
Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
@ -1290,7 +1203,6 @@ ExternalReference ExternalReference::invoke_function_callback(
return ExternalReference(&thunk_fun, thunk_type, isolate);
}
ExternalReference ExternalReference::invoke_accessor_getter_callback(
Isolate* isolate) {
Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
@ -1300,7 +1212,6 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback(
return ExternalReference(&thunk_fun, thunk_type, isolate);
}
#ifndef V8_INTERPRETED_REGEXP
ExternalReference ExternalReference::re_check_stack_guard_state(
@ -1328,7 +1239,6 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
return ExternalReference(Redirect(isolate, function));
}
ExternalReference ExternalReference::re_grow_stack(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
@ -1341,7 +1251,6 @@ ExternalReference ExternalReference::re_case_insensitive_compare_uc16(
FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
}
ExternalReference ExternalReference::re_word_character_map() {
return ExternalReference(
NativeRegExpMacroAssembler::word_character_map_address());
@ -1511,10 +1420,9 @@ ExternalReference ExternalReference::orderedhashmap_gethash_raw(
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
}
template <typename CollectionType, int entrysize>
ExternalReference ExternalReference::orderedhashtable_has_raw(
Isolate* isolate) {
auto f = OrderedHashTable<CollectionType, entrysize>::HasKey;
ExternalReference ExternalReference::get_or_create_hash_raw(Isolate* isolate) {
typedef Smi* (*GetOrCreateHash)(Isolate* isolate, Object* key);
GetOrCreateHash f = Object::GetOrCreateHash;
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
}
@ -1524,6 +1432,10 @@ ExternalReference ExternalReference::try_internalize_string_function(
isolate, FUNCTION_ADDR(StringTable::LookupStringIfExists_NoAllocate)));
}
ExternalReference ExternalReference::check_object_type(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CheckObjectType)));
}
#ifdef V8_INTL_SUPPORT
ExternalReference ExternalReference::intl_convert_one_byte_to_lower(
Isolate* isolate) {
@ -1548,22 +1460,15 @@ ExternalReference::search_string_raw<const uc16, const uint8_t>(Isolate*);
template ExternalReference
ExternalReference::search_string_raw<const uc16, const uc16>(Isolate*);
template ExternalReference
ExternalReference::orderedhashtable_has_raw<OrderedHashMap, 2>(Isolate*);
template ExternalReference
ExternalReference::orderedhashtable_has_raw<OrderedHashSet, 1>(Isolate*);
ExternalReference ExternalReference::page_flags(Page* page) {
return ExternalReference(reinterpret_cast<Address>(page) +
MemoryChunk::kFlagsOffset);
}
ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
return ExternalReference(entry);
}
ExternalReference ExternalReference::cpu_features() {
DCHECK(CpuFeatures::initialized_);
return ExternalReference(&CpuFeatures::supported_);
@ -1590,7 +1495,6 @@ ExternalReference ExternalReference::runtime_function_table_address(
const_cast<Runtime::Function*>(Runtime::RuntimeFunctionTable(isolate)));
}
double power_helper(Isolate* isolate, double x, double y) {
int y_int = static_cast<int>(y);
if (y == y_int) {
@ -1609,7 +1513,6 @@ double power_helper(Isolate* isolate, double x, double y) {
return power_double_double(x, y);
}
// Helper function to compute x^y, where y is known to be an
// integer. Uses binary decomposition to limit the number of
// multiplications; see the discussion in "Hacker's Delight" by Henry
@ -1628,7 +1531,6 @@ double power_double_int(double x, int y) {
return p;
}
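// Worked example of the binary decomposition (assuming the usual
// exponentiation-by-squaring loop): for y = 13 (binary 1101), scanning bits
// from least to most significant with a running square s of x:
//   bit 0 = 1: p = s     = x     then s = x^2
//   bit 1 = 0: (skip)            then s = x^4
//   bit 2 = 1: p = p * s = x^5   then s = x^8
//   bit 3 = 1: p = p * s = x^13
// about six multiplications in total instead of twelve naive ones.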
double power_double_double(double x, double y) {
// The checks for special cases can be dropped in ia32 because it has already
// been done in generated code before bailing out here.
@ -1638,7 +1540,6 @@ double power_double_double(double x, double y) {
return Pow(x, y);
}
ExternalReference ExternalReference::power_double_double_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate,
@ -1646,7 +1547,6 @@ ExternalReference ExternalReference::power_double_double_function(
BUILTIN_FP_FP_CALL));
}
ExternalReference ExternalReference::mod_two_doubles_operation(
Isolate* isolate) {
return ExternalReference(Redirect(isolate,
@ -1674,22 +1574,18 @@ ExternalReference ExternalReference::fixed_typed_array_base_data_offset() {
FixedTypedArrayBase::kDataOffset - kHeapObjectTag));
}
bool operator==(ExternalReference lhs, ExternalReference rhs) {
return lhs.address() == rhs.address();
}
bool operator!=(ExternalReference lhs, ExternalReference rhs) {
return !(lhs == rhs);
}
size_t hash_value(ExternalReference reference) {
return base::hash<Address>()(reference.address());
}
std::ostream& operator<<(std::ostream& os, ExternalReference reference) {
os << static_cast<const void*>(reference.address());
const Runtime::Function* fn = Runtime::FunctionForEntry(reference.address());
@ -1697,7 +1593,6 @@ std::ostream& operator<<(std::ostream& os, ExternalReference reference) {
return os;
}
ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits,
int double_reach_bits) {
info_[ConstantPoolEntry::INTPTR].entries.reserve(64);
@ -1705,7 +1600,6 @@ ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits,
info_[ConstantPoolEntry::DOUBLE].regular_reach_bits = double_reach_bits;
}
ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess(
ConstantPoolEntry::Type type) const {
const PerTypeEntryInfo& info = info_[type];
@ -1735,7 +1629,6 @@ ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess(
return ConstantPoolEntry::REGULAR;
}
ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
ConstantPoolEntry& entry, ConstantPoolEntry::Type type) {
DCHECK(!emitted_label_.is_bound());
@ -1784,7 +1677,6 @@ ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
return access;
}
void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm,
ConstantPoolEntry::Type type) {
PerTypeEntryInfo& info = info_[type];
@ -1810,7 +1702,6 @@ void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm,
}
}
void ConstantPoolBuilder::EmitGroup(Assembler* assm,
ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type) {
@ -1869,7 +1760,6 @@ void ConstantPoolBuilder::EmitGroup(Assembler* assm,
}
}
// Emit and return position of pool. Zero implies no constant pool.
int ConstantPoolBuilder::Emit(Assembler* assm) {
bool emitted = emitted_label_.is_bound();
@ -1922,7 +1812,6 @@ void Assembler::RecordDeoptReason(DeoptimizeReason reason,
RecordRelocInfo(RelocInfo::DEOPT_ID, id);
}
void Assembler::RecordComment(const char* msg) {
if (FLAG_code_comments) {
EnsureSpace ensure_space(this);
@ -1930,14 +1819,6 @@ void Assembler::RecordComment(const char* msg) {
}
}
void Assembler::RecordDebugBreakSlot(RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
DCHECK(RelocInfo::IsDebugBreakSlot(mode));
RecordRelocInfo(mode);
}
void Assembler::DataAlign(int m) {
DCHECK(m >= 2 && base::bits::IsPowerOfTwo(m));
while ((pc_offset() & (m - 1)) != 0) {
@ -1950,5 +1831,22 @@ void Assembler::RequestHeapObject(HeapObjectRequest request) {
heap_object_requests_.push_front(request);
}
namespace {
int caller_saved_codes[kNumJSCallerSaved];
}
void SetUpJSCallerSavedCodeData() {
int i = 0;
for (int r = 0; r < kNumRegs; r++)
if ((kJSCallerSaved & (1 << r)) != 0) caller_saved_codes[i++] = r;
DCHECK(i == kNumJSCallerSaved);
}
int JSCallerSavedCode(int n) {
DCHECK(0 <= n && n < kNumJSCallerSaved);
return caller_saved_codes[n];
}
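// Worked example (mask value hypothetical): if kJSCallerSaved were 0b1011,
// SetUpJSCallerSavedCodeData() would fill caller_saved_codes with {0, 1, 3},
// and JSCallerSavedCode(2) would return 3, the code of the third
// caller-saved register available to JavaScript.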
} // namespace internal
} // namespace v8

228
deps/v8/src/assembler.h

@ -45,6 +45,7 @@
#include "src/label.h"
#include "src/log.h"
#include "src/register-configuration.h"
#include "src/reglist.h"
#include "src/runtime/runtime.h"
namespace v8 {
@ -59,6 +60,31 @@ class Isolate;
class SourcePosition;
class StatsCounter;
void SetUpJSCallerSavedCodeData();
// Return the code of the n-th caller-saved register available to JavaScript.
int JSCallerSavedCode(int n);
// -----------------------------------------------------------------------------
// Optimization for far-jmp-like instructions that can be replaced by shorter ones.
class JumpOptimizationInfo {
public:
bool is_collecting() const { return stage_ == kCollection; }
bool is_optimizing() const { return stage_ == kOptimization; }
void set_optimizing() { stage_ = kOptimization; }
bool is_optimizable() const { return optimizable_; }
void set_optimizable() { optimizable_ = true; }
std::vector<uint32_t>& farjmp_bitmap() { return farjmp_bitmap_; }
private:
enum { kCollection, kOptimization } stage_ = kCollection;
bool optimizable_ = false;
std::vector<uint32_t> farjmp_bitmap_;
};
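// Sketch of the intended two-pass use (the call sites are hypothetical;
// only the stage API above comes from this header):
//
//   JumpOptimizationInfo jump_opt;            // starts in kCollection
//   assembler->set_jump_optimization_info(&jump_opt);
//   GenerateCode(assembler);                  // first pass records far jmps
//   if (jump_opt.is_optimizable()) {
//     jump_opt.set_optimizing();
//     GenerateCode(assembler);                // second pass emits short forms
//   }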
// -----------------------------------------------------------------------------
// Platform independent assembler base class.
@ -113,6 +139,13 @@ class AssemblerBase: public Malloced {
}
}
JumpOptimizationInfo* jump_optimization_info() {
return jump_optimization_info_;
}
void set_jump_optimization_info(JumpOptimizationInfo* jump_opt) {
jump_optimization_info_ = jump_opt;
}
// Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for
// cross-snapshotting.
static void QuietNaN(HeapObject* nan) { }
@ -159,6 +192,8 @@ class AssemblerBase: public Malloced {
// if the pp register points to the current code object's constant pool.
bool constant_pool_available_;
JumpOptimizationInfo* jump_optimization_info_;
// Constant pool.
friend class FrameAndConstantPoolScope;
friend class ConstantPoolUnavailableScope;
@ -333,17 +368,11 @@ class RelocInfo {
WASM_MEMORY_SIZE_REFERENCE,
WASM_FUNCTION_TABLE_SIZE_REFERENCE,
WASM_PROTECTED_INSTRUCTION_LANDING,
CELL,
WASM_GLOBAL_HANDLE,
RUNTIME_ENTRY,
COMMENT,
// Additional code inserted for debug break slot.
DEBUG_BREAK_SLOT_AT_POSITION,
DEBUG_BREAK_SLOT_AT_RETURN,
DEBUG_BREAK_SLOT_AT_CALL,
DEBUG_BREAK_SLOT_AT_TAIL_CALL,
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
@ -368,14 +397,12 @@ class RelocInfo {
NUMBER_OF_MODES,
NONE32, // never recorded 32-bit value
NONE64, // never recorded 64-bit value
CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explicitly by
// code aging.
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
LAST_CODE_ENUM = CODE_TARGET,
LAST_GCED_ENUM = EMBEDDED_OBJECT,
FIRST_SHAREABLE_RELOC_MODE = CELL,
FIRST_SHAREABLE_RELOC_MODE = RUNTIME_ENTRY,
};
STATIC_ASSERT(NUMBER_OF_MODES <= kBitsPerInt);
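// Note (ModeMask itself lies outside this hunk; 1 << mode is the assumed
// encoding): the STATIC_ASSERT above holds because mode masks keep one bit
// per mode (see kCodeTargetMask and RelocIterator's mode_mask_), so every
// Mode value must fit within the bits of an int.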
@ -394,7 +421,6 @@ class RelocInfo {
static inline bool IsEmbeddedObject(Mode mode) {
return mode == EMBEDDED_OBJECT;
}
static inline bool IsCell(Mode mode) { return mode == CELL; }
static inline bool IsRuntimeEntry(Mode mode) {
return mode == RUNTIME_ENTRY;
}
@ -429,25 +455,9 @@ class RelocInfo {
static inline bool IsInternalReferenceEncoded(Mode mode) {
return mode == INTERNAL_REFERENCE_ENCODED;
}
static inline bool IsDebugBreakSlot(Mode mode) {
return IsDebugBreakSlotAtPosition(mode) || IsDebugBreakSlotAtReturn(mode) ||
IsDebugBreakSlotAtCall(mode);
}
static inline bool IsDebugBreakSlotAtPosition(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_POSITION;
}
static inline bool IsDebugBreakSlotAtReturn(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_RETURN;
}
static inline bool IsDebugBreakSlotAtCall(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_CALL;
}
static inline bool IsNone(Mode mode) {
return mode == NONE32 || mode == NONE64;
}
static inline bool IsCodeAgeSequence(Mode mode) {
return mode == CODE_AGE_SEQUENCE;
}
static inline bool IsWasmMemoryReference(Mode mode) {
return mode == WASM_MEMORY_REFERENCE;
}
@ -461,16 +471,15 @@ class RelocInfo {
return mode == WASM_FUNCTION_TABLE_SIZE_REFERENCE;
}
static inline bool IsWasmReference(Mode mode) {
return mode == WASM_MEMORY_REFERENCE || mode == WASM_GLOBAL_REFERENCE ||
mode == WASM_MEMORY_SIZE_REFERENCE ||
mode == WASM_FUNCTION_TABLE_SIZE_REFERENCE;
return IsWasmPtrReference(mode) || IsWasmSizeReference(mode);
}
static inline bool IsWasmSizeReference(Mode mode) {
return mode == WASM_MEMORY_SIZE_REFERENCE ||
mode == WASM_FUNCTION_TABLE_SIZE_REFERENCE;
}
static inline bool IsWasmPtrReference(Mode mode) {
return mode == WASM_MEMORY_REFERENCE || mode == WASM_GLOBAL_REFERENCE;
return mode == WASM_MEMORY_REFERENCE || mode == WASM_GLOBAL_REFERENCE ||
mode == WASM_GLOBAL_HANDLE;
}
static inline bool IsWasmProtectedLanding(Mode mode) {
return mode == WASM_PROTECTED_INSTRUCTION_LANDING;
@ -501,10 +510,12 @@ class RelocInfo {
// constant pool, otherwise the pointer is embedded in the instruction stream.
bool IsInConstantPool();
Address wasm_memory_reference();
Address wasm_global_reference();
uint32_t wasm_function_table_size_reference();
uint32_t wasm_memory_size_reference();
Address wasm_memory_reference() const;
Address wasm_global_reference() const;
uint32_t wasm_function_table_size_reference() const;
uint32_t wasm_memory_size_reference() const;
Address global_handle() const;
void update_wasm_memory_reference(
Isolate* isolate, Address old_base, Address new_base,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
@ -522,6 +533,10 @@ class RelocInfo {
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_global_handle(
Isolate* isolate, Address address,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Read/modify the code target in the branch/call instruction
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
INLINE(Address target_address());
@ -541,10 +556,6 @@ class RelocInfo {
INLINE(void set_target_cell(
Cell* cell, WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(Handle<Code> code_age_stub_handle(Assembler* origin));
INLINE(Code* code_age_stub());
INLINE(void set_code_age_stub(
Code* stub, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Returns the address of the constant pool entry where the target address
// is held. This should only be called if IsInConstantPool returns true.
@ -581,25 +592,13 @@ class RelocInfo {
// can only be called if rmode_ is INTERNAL_REFERENCE.
INLINE(Address target_internal_reference_address());
// Read/modify the address of a call instruction. This is used to relocate
// the break points where straight-line code is patched with a call
// instruction.
INLINE(Address debug_call_address());
INLINE(void set_debug_call_address(Isolate*, Address target));
// Wipe out a relocation to a fixed value, used for making snapshots
// reproducible.
INLINE(void WipeOut(Isolate* isolate));
template<typename StaticVisitor> inline void Visit(Heap* heap);
template <typename ObjectVisitor>
inline void Visit(Isolate* isolate, ObjectVisitor* v);
// Check whether this debug break slot has been patched with a call to the
// debugger.
bool IsPatchedDebugBreakSlotSequence();
#ifdef DEBUG
// Check whether the given code contains relocation information that
// either is position-relative or movable by the garbage collector.
@ -616,16 +615,16 @@ class RelocInfo {
#endif
static const int kCodeTargetMask = (1 << (LAST_CODE_ENUM + 1)) - 1;
static const int kDebugBreakSlotMask = 1 << DEBUG_BREAK_SLOT_AT_POSITION |
1 << DEBUG_BREAK_SLOT_AT_RETURN |
1 << DEBUG_BREAK_SLOT_AT_CALL;
static const int kApplyMask; // Modes affected by apply. Depends on arch.
private:
void unchecked_update_wasm_memory_reference(Isolate* isolate, Address address,
ICacheFlushMode flush_mode);
void unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode);
void set_embedded_address(Isolate* isolate, Address address,
ICacheFlushMode flush_mode);
void set_embedded_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode);
uint32_t embedded_size() const;
Address embedded_address() const;
// On ARM, note that pc_ is the address of the constant pool entry
// to be relocated and not the address of the instruction
@ -736,7 +735,6 @@ class RelocIterator: public Malloced {
byte* pos_;
byte* end_;
byte* code_age_sequence_;
RelocInfo rinfo_;
bool done_;
int mode_mask_;
@ -819,8 +817,6 @@ class ExternalReference BASE_EMBEDDED {
ExternalReference(ApiFunction* ptr, Type type, Isolate* isolate);
ExternalReference(Builtins::Name name, Isolate* isolate);
ExternalReference(Runtime::FunctionId id, Isolate* isolate);
ExternalReference(const Runtime::Function* f, Isolate* isolate);
@ -834,17 +830,19 @@ class ExternalReference BASE_EMBEDDED {
// Isolate as an external reference.
static ExternalReference isolate_address(Isolate* isolate);
// The builtins table as an external reference, used by lazy deserialization.
static ExternalReference builtins_address(Isolate* isolate);
// One-of-a-kind references. These references are not part of a general
// pattern. This means that they have to be added to the
// ExternalReferenceTable in serialize.cc manually.
static ExternalReference interpreter_dispatch_table_address(Isolate* isolate);
static ExternalReference interpreter_dispatch_counters(Isolate* isolate);
static ExternalReference bytecode_size_table_address(Isolate* isolate);
static ExternalReference incremental_marking_record_write_function(
Isolate* isolate);
static ExternalReference incremental_marking_record_write_code_entry_function(
Isolate* isolate);
static ExternalReference store_buffer_overflow_function(
Isolate* isolate);
static ExternalReference delete_handle_scope_extensions(Isolate* isolate);
@ -852,9 +850,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference get_date_field_function(Isolate* isolate);
static ExternalReference date_cache_stamp(Isolate* isolate);
static ExternalReference get_make_code_young_function(Isolate* isolate);
static ExternalReference get_mark_code_as_executed_function(Isolate* isolate);
// Deoptimization support.
static ExternalReference new_deoptimizer_function(Isolate* isolate);
static ExternalReference compute_output_frames_function(Isolate* isolate);
@ -925,6 +920,7 @@ class ExternalReference BASE_EMBEDDED {
// Write barrier.
static ExternalReference store_buffer_top(Isolate* isolate);
static ExternalReference heap_is_marking_flag_address(Isolate* isolate);
// Used for fast allocation in generated code.
static ExternalReference new_space_allocation_top_address(Isolate* isolate);
@ -985,6 +981,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference try_internalize_string_function(Isolate* isolate);
static ExternalReference check_object_type(Isolate* isolate);
#ifdef V8_INTL_SUPPORT
static ExternalReference intl_convert_one_byte_to_lower(Isolate* isolate);
static ExternalReference intl_to_latin1_lower_table(Isolate* isolate);
@ -995,8 +993,7 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference orderedhashmap_gethash_raw(Isolate* isolate);
template <typename CollectionType, int entrysize>
static ExternalReference orderedhashtable_has_raw(Isolate* isolate);
static ExternalReference get_or_create_hash_raw(Isolate* isolate);
static ExternalReference page_flags(Page* page);
@ -1007,7 +1004,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference debug_is_active_address(Isolate* isolate);
static ExternalReference debug_hook_on_function_call_address(
Isolate* isolate);
static ExternalReference debug_after_break_target_address(Isolate* isolate);
static ExternalReference is_profiling_address(Isolate* isolate);
static ExternalReference invoke_function_callback(Isolate* isolate);
@ -1085,54 +1081,11 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ExternalReference);
// -----------------------------------------------------------------------------
// Utility functions
inline int NumberOfBitsSet(uint32_t x) {
unsigned int num_bits_set;
for (num_bits_set = 0; x; x >>= 1) {
num_bits_set += x & 1;
}
return num_bits_set;
}
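// For example, NumberOfBitsSet(0xb) returns 3: the loop walks the bits of
// 1011 one at a time, a portable (if slow) popcount.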
// Computes pow(x, y) with the special cases in the spec for Math.pow.
double power_helper(Isolate* isolate, double x, double y);
double power_double_int(double x, int y);
double power_double_double(double x, double y);
// Helper class for generating code or data associated with the code
// right after a call instruction. As an example, this can be used to
// generate safepoint data after calls for Crankshaft.
class CallWrapper {
public:
CallWrapper() { }
virtual ~CallWrapper() { }
// Called just before emitting a call. Argument is the size of the generated
// call code.
virtual void BeforeCall(int call_size) const = 0;
// Called just after emitting a call, i.e., at the return site for the call.
virtual void AfterCall() const = 0;
// Return whether call needs to check for debug stepping.
virtual bool NeedsDebugHookCheck() const { return false; }
};
class NullCallWrapper : public CallWrapper {
public:
NullCallWrapper() { }
virtual ~NullCallWrapper() { }
virtual void BeforeCall(int call_size) const { }
virtual void AfterCall() const { }
};
class CheckDebugStepCallWrapper : public CallWrapper {
public:
CheckDebugStepCallWrapper() {}
virtual ~CheckDebugStepCallWrapper() {}
virtual void BeforeCall(int call_size) const {}
virtual void AfterCall() const {}
virtual bool NeedsDebugHookCheck() const { return true; }
};
// -----------------------------------------------------------------------------
// Constant pool support
@ -1301,6 +1254,57 @@ class HeapObjectRequest {
int offset_;
};
// Base type for CPU Registers.
//
// 1) We would prefer to use an enum for registers, but enum values are
// assignment-compatible with int, which has caused code-generation bugs.
//
// 2) By not using an enum, we are possibly preventing the compiler from
// doing certain constant folds, which may significantly reduce the
// code generated for some assembly instructions (because they boil down
// to a few constants). If this is a problem, we could change the code
// such that we use an enum in optimized mode, and the class in debug
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
template <typename SubType, int kAfterLastRegister>
class RegisterBase {
public:
static constexpr int kCode_no_reg = -1;
static constexpr int kNumRegisters = kAfterLastRegister;
static constexpr SubType no_reg() { return SubType{kCode_no_reg}; }
template <int code>
static constexpr SubType from_code() {
static_assert(code >= 0 && code < kNumRegisters, "must be valid reg code");
return SubType{code};
}
static SubType from_code(int code) {
DCHECK_LE(0, code);
DCHECK_GT(kNumRegisters, code);
return SubType{code};
}
bool is_valid() const { return reg_code_ != kCode_no_reg; }
int code() const {
DCHECK(is_valid());
return reg_code_;
}
int bit() const { return 1 << code(); }
inline bool operator==(SubType other) const {
return reg_code_ == other.reg_code_;
}
inline bool operator!=(SubType other) const { return !(*this == other); }
protected:
explicit constexpr RegisterBase(int code) : reg_code_(code) {}
int reg_code_;
};
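// Sketch of how an architecture port builds on this base (DemoRegister and
// the register count are hypothetical; real ports derive both from their
// register lists):
//
//   class DemoRegister : public RegisterBase<DemoRegister, 16> {
//     friend class RegisterBase<DemoRegister, 16>;
//     explicit constexpr DemoRegister(int code) : RegisterBase(code) {}
//   };
//   constexpr DemoRegister d0 = DemoRegister::from_code<0>();
//   // d0.code() == 0, d0.bit() == 1, and DemoRegister::no_reg().is_valid()
//   // is false.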
} // namespace internal
} // namespace v8
#endif // V8_ASSEMBLER_H_

3
deps/v8/src/assert-scope.cc

@ -15,7 +15,8 @@ namespace internal {
namespace {
struct PerThreadAssertKeyConstructTrait final {
static void Construct(base::Thread::LocalStorageKey* key) {
static void Construct(void* key_arg) {
auto key = reinterpret_cast<base::Thread::LocalStorageKey*>(key_arg);
*key = base::Thread::CreateThreadLocalKey();
}
};

24
deps/v8/src/ast/ast-expression-rewriter.cc

@ -116,9 +116,11 @@ void AstExpressionRewriter::VisitWithStatement(WithStatement* node) {
void AstExpressionRewriter::VisitSwitchStatement(SwitchStatement* node) {
AST_REWRITE_PROPERTY(Expression, node, tag);
ZoneList<CaseClause*>* clauses = node->cases();
for (int i = 0; i < clauses->length(); i++) {
AST_REWRITE_LIST_ELEMENT(CaseClause, clauses, i);
for (CaseClause* clause : *node->cases()) {
if (!clause->is_default()) {
AST_REWRITE_PROPERTY(Expression, clause, label);
}
VisitStatements(clause->statements());
}
}
@ -265,6 +267,10 @@ void AstExpressionRewriter::VisitAssignment(Assignment* node) {
AST_REWRITE_PROPERTY(Expression, node, value);
}
void AstExpressionRewriter::VisitCompoundAssignment(CompoundAssignment* node) {
VisitAssignment(node);
}
void AstExpressionRewriter::VisitYield(Yield* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, expression);
@ -368,14 +374,6 @@ void AstExpressionRewriter::VisitSuperCallReference(SuperCallReference* node) {
}
void AstExpressionRewriter::VisitCaseClause(CaseClause* node) {
if (!node->is_default()) {
AST_REWRITE_PROPERTY(Expression, node, label);
}
VisitStatements(node->statements());
}
void AstExpressionRewriter::VisitEmptyParentheses(EmptyParentheses* node) {
NOTHING();
}
@ -384,6 +382,10 @@ void AstExpressionRewriter::VisitGetIterator(GetIterator* node) {
AST_REWRITE_PROPERTY(Expression, node, iterable);
}
void AstExpressionRewriter::VisitGetTemplateObject(GetTemplateObject* node) {
NOTHING();
}
void AstExpressionRewriter::VisitImportCallExpression(
ImportCallExpression* node) {
REWRITE_THIS(node);

183
deps/v8/src/ast/ast-numbering.cc

@ -19,14 +19,11 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
bool collect_type_profile = false)
: zone_(zone),
eager_literals_(eager_literals),
next_id_(BailoutId::FirstUsable().ToInt()),
suspend_count_(0),
properties_(zone),
language_mode_(SLOPPY),
slot_cache_(zone),
disable_fullcodegen_reason_(kNoReason),
dont_optimize_reason_(kNoReason),
dont_self_optimize_(false),
collect_type_profile_(collect_type_profile) {
InitializeAstVisitor(stack_limit);
}
@ -51,26 +48,14 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
void VisitArguments(ZoneList<Expression*>* arguments);
void VisitLiteralProperty(LiteralProperty* property);
int ReserveId() {
int tmp = next_id_;
next_id_ += 1;
return tmp;
}
void IncrementNodeCount() { properties_.add_node_count(1); }
void DisableSelfOptimization() { dont_self_optimize_ = true; }
void DisableOptimization(BailoutReason reason) {
dont_optimize_reason_ = reason;
DisableSelfOptimization();
}
void DisableFullCodegen(BailoutReason reason) {
disable_fullcodegen_reason_ = reason;
}
template <typename Node>
void ReserveFeedbackSlots(Node* node) {
node->AssignFeedbackSlots(properties_.get_spec(), language_mode_,
&slot_cache_);
function_kind_, &slot_cache_);
}
class LanguageModeScope {
@ -92,15 +77,13 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
Zone* zone_;
Compiler::EagerInnerFunctionLiterals* eager_literals_;
int next_id_;
int suspend_count_;
AstProperties properties_;
LanguageMode language_mode_;
FunctionKind function_kind_;
// The slot cache allows us to reuse certain feedback slots.
FeedbackSlotCache slot_cache_;
BailoutReason disable_fullcodegen_reason_;
BailoutReason dont_optimize_reason_;
bool dont_self_optimize_;
bool collect_type_profile_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@ -109,77 +92,55 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
void AstNumberingVisitor::VisitVariableDeclaration(VariableDeclaration* node) {
IncrementNodeCount();
VisitVariableProxy(node->proxy());
}
void AstNumberingVisitor::VisitEmptyStatement(EmptyStatement* node) {
IncrementNodeCount();
}
void AstNumberingVisitor::VisitSloppyBlockFunctionStatement(
SloppyBlockFunctionStatement* node) {
IncrementNodeCount();
Visit(node->statement());
}
void AstNumberingVisitor::VisitContinueStatement(ContinueStatement* node) {
IncrementNodeCount();
}
void AstNumberingVisitor::VisitBreakStatement(BreakStatement* node) {
IncrementNodeCount();
}
void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
IncrementNodeCount();
DisableFullCodegen(kDebuggerStatement);
}
void AstNumberingVisitor::VisitNativeFunctionLiteral(
NativeFunctionLiteral* node) {
IncrementNodeCount();
DisableOptimization(kNativeFunctionLiteral);
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitDoExpression(DoExpression* node) {
IncrementNodeCount();
Visit(node->block());
Visit(node->result());
}
void AstNumberingVisitor::VisitLiteral(Literal* node) {
IncrementNodeCount();
}
void AstNumberingVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
IncrementNodeCount();
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitVariableProxyReference(VariableProxy* node) {
IncrementNodeCount();
switch (node->var()->location()) {
case VariableLocation::LOOKUP:
DisableFullCodegen(kReferenceToAVariableWhichRequiresDynamicLookup);
break;
case VariableLocation::MODULE:
DisableFullCodegen(kReferenceToModuleVariable);
break;
default:
break;
}
}
void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node,
@ -194,22 +155,17 @@ void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node) {
void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
IncrementNodeCount();
}
void AstNumberingVisitor::VisitSuperPropertyReference(
SuperPropertyReference* node) {
IncrementNodeCount();
DisableFullCodegen(kSuperReference);
Visit(node->this_var());
Visit(node->home_object());
}
void AstNumberingVisitor::VisitSuperCallReference(SuperCallReference* node) {
IncrementNodeCount();
DisableFullCodegen(kSuperReference);
Visit(node->this_var());
Visit(node->new_target_var());
Visit(node->this_function_var());
@ -217,60 +173,57 @@ void AstNumberingVisitor::VisitSuperCallReference(SuperCallReference* node) {
void AstNumberingVisitor::VisitExpressionStatement(ExpressionStatement* node) {
IncrementNodeCount();
Visit(node->expression());
}
void AstNumberingVisitor::VisitReturnStatement(ReturnStatement* node) {
IncrementNodeCount();
Visit(node->expression());
DCHECK(!node->is_async_return() || disable_fullcodegen_reason_ != kNoReason);
}
void AstNumberingVisitor::VisitSuspend(Suspend* node) {
node->set_suspend_id(suspend_count_);
suspend_count_++;
IncrementNodeCount();
Visit(node->expression());
}
void AstNumberingVisitor::VisitYield(Yield* node) { VisitSuspend(node); }
void AstNumberingVisitor::VisitYieldStar(YieldStar* node) {
VisitSuspend(node);
node->set_suspend_id(suspend_count_++);
if (IsAsyncGeneratorFunction(function_kind_)) {
node->set_await_iterator_close_suspend_id(suspend_count_++);
node->set_await_delegated_iterator_output_suspend_id(suspend_count_++);
}
Visit(node->expression());
ReserveFeedbackSlots(node);
}
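// Illustration of the suspend-id accounting: a plain yield still advances
// suspend_count_ by one (via VisitSuspend above), while a yield* inside an
// async generator now reserves three consecutive ids: the suspend itself,
// the await of the iterator close, and the await of the delegated iterator
// output. Outside async generators a yield* keeps reserving a single id.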
void AstNumberingVisitor::VisitAwait(Await* node) { VisitSuspend(node); }
void AstNumberingVisitor::VisitThrow(Throw* node) {
IncrementNodeCount();
Visit(node->exception());
}
void AstNumberingVisitor::VisitUnaryOperation(UnaryOperation* node) {
IncrementNodeCount();
if ((node->op() == Token::TYPEOF) && node->expression()->IsVariableProxy()) {
VariableProxy* proxy = node->expression()->AsVariableProxy();
VisitVariableProxy(proxy, INSIDE_TYPEOF);
} else {
Visit(node->expression());
}
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitCountOperation(CountOperation* node) {
IncrementNodeCount();
Visit(node->expression());
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitBlock(Block* node) {
IncrementNodeCount();
Scope* scope = node->scope();
if (scope != nullptr) {
LanguageModeScope language_mode_scope(this, scope->language_mode());
@ -288,30 +241,23 @@ void AstNumberingVisitor::VisitStatementsAndDeclarations(Block* node) {
}
void AstNumberingVisitor::VisitFunctionDeclaration(FunctionDeclaration* node) {
IncrementNodeCount();
VisitVariableProxy(node->proxy());
VisitFunctionLiteral(node->fun());
}
void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
IncrementNodeCount();
VisitArguments(node->arguments());
}
void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
IncrementNodeCount();
DisableFullCodegen(kWithStatement);
Visit(node->expression());
Visit(node->statement());
}
void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
node->set_osr_id(ReserveId());
node->set_first_suspend_id(suspend_count_);
Visit(node->body());
Visit(node->cond());
@ -320,9 +266,6 @@ void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
node->set_osr_id(ReserveId());
node->set_first_suspend_id(suspend_count_);
Visit(node->cond());
Visit(node->body());
@ -332,23 +275,18 @@ void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
DCHECK(node->scope() == nullptr || !node->scope()->HasBeenRemoved());
IncrementNodeCount();
DisableFullCodegen(kTryCatchStatement);
Visit(node->try_block());
Visit(node->catch_block());
}
void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
IncrementNodeCount();
DisableFullCodegen(kTryFinallyStatement);
Visit(node->try_block());
Visit(node->finally_block());
}
void AstNumberingVisitor::VisitPropertyReference(Property* node) {
IncrementNodeCount();
Visit(node->key());
Visit(node->obj());
}
@ -371,17 +309,17 @@ void AstNumberingVisitor::VisitProperty(Property* node) {
void AstNumberingVisitor::VisitAssignment(Assignment* node) {
IncrementNodeCount();
if (node->is_compound()) VisitBinaryOperation(node->binary_operation());
VisitReference(node->target());
Visit(node->value());
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitCompoundAssignment(CompoundAssignment* node) {
VisitBinaryOperation(node->binary_operation());
VisitAssignment(node);
}
void AstNumberingVisitor::VisitBinaryOperation(BinaryOperation* node) {
IncrementNodeCount();
Visit(node->left());
Visit(node->right());
ReserveFeedbackSlots(node);
@ -389,16 +327,12 @@ void AstNumberingVisitor::VisitBinaryOperation(BinaryOperation* node) {
void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
IncrementNodeCount();
Visit(node->left());
Visit(node->right());
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitSpread(Spread* node) {
IncrementNodeCount();
// We can only get here from spread calls currently.
DisableFullCodegen(kSpreadCall);
Visit(node->expression());
}
@ -407,23 +341,18 @@ void AstNumberingVisitor::VisitEmptyParentheses(EmptyParentheses* node) {
}
void AstNumberingVisitor::VisitGetIterator(GetIterator* node) {
IncrementNodeCount();
DisableFullCodegen(kGetIterator);
Visit(node->iterable());
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitGetTemplateObject(GetTemplateObject* node) {}
void AstNumberingVisitor::VisitImportCallExpression(
ImportCallExpression* node) {
IncrementNodeCount();
DisableFullCodegen(kDynamicImport);
Visit(node->argument());
}
void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
node->set_osr_id(ReserveId());
Visit(node->enumerable()); // Not part of loop.
node->set_first_suspend_id(suspend_count_);
Visit(node->each());
@ -434,9 +363,6 @@ void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
IncrementNodeCount();
DisableFullCodegen(kForOfStatement);
node->set_osr_id(ReserveId());
Visit(node->assign_iterator()); // Not part of loop.
node->set_first_suspend_id(suspend_count_);
Visit(node->next_result());
@ -448,7 +374,6 @@ void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
void AstNumberingVisitor::VisitConditional(Conditional* node) {
IncrementNodeCount();
Visit(node->condition());
Visit(node->then_expression());
Visit(node->else_expression());
@ -456,7 +381,6 @@ void AstNumberingVisitor::VisitConditional(Conditional* node) {
void AstNumberingVisitor::VisitIfStatement(IfStatement* node) {
IncrementNodeCount();
Visit(node->condition());
Visit(node->then_statement());
if (node->HasElseStatement()) {
@ -466,27 +390,16 @@ void AstNumberingVisitor::VisitIfStatement(IfStatement* node) {
void AstNumberingVisitor::VisitSwitchStatement(SwitchStatement* node) {
IncrementNodeCount();
Visit(node->tag());
ZoneList<CaseClause*>* cases = node->cases();
for (int i = 0; i < cases->length(); i++) {
VisitCaseClause(cases->at(i));
for (CaseClause* clause : *node->cases()) {
if (!clause->is_default()) Visit(clause->label());
VisitStatements(clause->statements());
ReserveFeedbackSlots(clause);
}
}
void AstNumberingVisitor::VisitCaseClause(CaseClause* node) {
IncrementNodeCount();
if (!node->is_default()) Visit(node->label());
VisitStatements(node->statements());
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
node->set_osr_id(ReserveId());
if (node->init() != NULL) Visit(node->init()); // Not part of loop.
node->set_first_suspend_id(suspend_count_);
if (node->cond() != NULL) Visit(node->cond());
@ -497,14 +410,9 @@ void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
IncrementNodeCount();
DisableFullCodegen(kClassLiteral);
LanguageModeScope language_mode_scope(this, STRICT);
if (node->extends()) Visit(node->extends());
if (node->constructor()) Visit(node->constructor());
if (node->class_variable_proxy()) {
VisitVariableProxy(node->class_variable_proxy());
}
for (int i = 0; i < node->properties()->length(); i++) {
VisitLiteralProperty(node->properties()->at(i));
}
@ -513,7 +421,6 @@ void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
IncrementNodeCount();
for (int i = 0; i < node->properties()->length(); i++) {
VisitLiteralProperty(node->properties()->at(i));
}
@ -526,13 +433,11 @@ void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
}
void AstNumberingVisitor::VisitLiteralProperty(LiteralProperty* node) {
if (node->is_computed_name()) DisableFullCodegen(kComputedPropertyName);
Visit(node->key());
Visit(node->value());
}
void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
IncrementNodeCount();
for (int i = 0; i < node->values()->length(); i++) {
Visit(node->values()->at(i));
}
@ -542,10 +447,6 @@ void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
void AstNumberingVisitor::VisitCall(Call* node) {
if (node->is_possibly_eval()) {
DisableFullCodegen(kFunctionCallsEval);
}
IncrementNodeCount();
ReserveFeedbackSlots(node);
Visit(node->expression());
VisitArguments(node->arguments());
@ -553,7 +454,6 @@ void AstNumberingVisitor::VisitCall(Call* node) {
void AstNumberingVisitor::VisitCallNew(CallNew* node) {
IncrementNodeCount();
ReserveFeedbackSlots(node);
Visit(node->expression());
VisitArguments(node->arguments());
@ -581,7 +481,6 @@ void AstNumberingVisitor::VisitArguments(ZoneList<Expression*>* arguments) {
void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
IncrementNodeCount();
if (node->ShouldEagerCompile()) {
if (eager_literals_) {
eager_literals_->Add(new (zone())
@ -601,7 +500,6 @@ void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
void AstNumberingVisitor::VisitRewritableExpression(
RewritableExpression* node) {
IncrementNodeCount();
Visit(node->expression());
}
@ -609,29 +507,7 @@ void AstNumberingVisitor::VisitRewritableExpression(
bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
DeclarationScope* scope = node->scope();
DCHECK(!scope->HasBeenRemoved());
if (scope->new_target_var() != nullptr ||
scope->this_function_var() != nullptr) {
DisableFullCodegen(kSuperReference);
}
if (scope->arguments() != nullptr &&
!scope->arguments()->IsStackAllocated()) {
DisableFullCodegen(kContextAllocatedArguments);
}
if (scope->rest_parameter() != nullptr) {
DisableFullCodegen(kRestParameter);
}
if (IsResumableFunction(node->kind())) {
DisableFullCodegen(kGenerator);
}
if (IsClassConstructor(node->kind())) {
DisableFullCodegen(kClassConstructorFunction);
}
function_kind_ = node->kind();
LanguageModeScope language_mode_scope(this, node->language_mode());
if (collect_type_profile_) {
@ -645,25 +521,6 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
node->set_dont_optimize_reason(dont_optimize_reason());
node->set_suspend_count(suspend_count_);
if (dont_self_optimize_) {
node->set_dont_self_optimize();
}
if (disable_fullcodegen_reason_ != kNoReason) {
node->set_must_use_ignition();
if (FLAG_trace_opt && FLAG_stress_fullcodegen) {
// TODO(leszeks): This is a quick'n'dirty fix to allow the debug name of
// the function to be accessed in the below print. This DCHECK will fail
// if we move ast numbering off the main thread, but that won't be before
// we remove FCG, in which case this entire check isn't necessary anyway.
AllowHandleDereference allow_deref;
DCHECK(!node->debug_name().is_null());
PrintF("[enforcing Ignition for %s because: %s\n",
node->debug_name()->ToCString().get(),
GetBailoutReason(disable_fullcodegen_reason_));
}
}
return !HasStackOverflow();
}
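
The control flow above follows one simple pattern: every construct that full-codegen cannot handle records a BailoutReason, and Renumber() forces Ignition afterwards if any reason was recorded. A minimal stand-alone sketch of that accumulator; the names are illustrative stand-ins, not V8's declarations, and "last reason wins" is an assumption made for the sketch:

enum BailoutReasonSketch { kNoReasonSketch, kGeneratorSketch, kRestParameterSketch };

class NumberingSketch {
 public:
  // Record why full-codegen must be disabled. In this sketch the most
  // recent reason simply overwrites any earlier one (an assumption, not
  // verified V8 behaviour).
  void DisableFullCodegen(BailoutReasonSketch reason) { reason_ = reason; }
  bool MustUseIgnition() const { return reason_ != kNoReasonSketch; }

 private:
  BailoutReasonSketch reason_ = kNoReasonSketch;
};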

37
deps/v8/src/ast/ast-source-ranges.h

@ -27,7 +27,8 @@ struct SourceRange {
int32_t start, end;
};
// The list of ast node kinds that have associated source ranges.
// The list of ast node kinds that have associated source ranges. Note that this
// macro is not undefined at the end of this file.
#define AST_SOURCE_RANGE_LIST(V) \
V(Block) \
V(CaseClause) \
@ -35,6 +36,7 @@ struct SourceRange {
V(IfStatement) \
V(IterationStatement) \
V(JumpStatement) \
V(Suspend) \
V(SwitchStatement) \
V(Throw) \
V(TryCatchStatement) \
@ -164,6 +166,12 @@ class JumpStatementSourceRanges final : public ContinuationSourceRanges {
: ContinuationSourceRanges(continuation_position) {}
};
class SuspendSourceRanges final : public ContinuationSourceRanges {
public:
explicit SuspendSourceRanges(int32_t continuation_position)
: ContinuationSourceRanges(continuation_position) {}
};
class SwitchStatementSourceRanges final : public ContinuationSourceRanges {
public:
explicit SwitchStatementSourceRanges(int32_t continuation_position)
@ -182,8 +190,14 @@ class TryCatchStatementSourceRanges final : public AstNodeSourceRanges {
: catch_range_(catch_range) {}
SourceRange GetRange(SourceRangeKind kind) {
DCHECK(kind == SourceRangeKind::kCatch);
return catch_range_;
switch (kind) {
case SourceRangeKind::kCatch:
return catch_range_;
case SourceRangeKind::kContinuation:
return SourceRange::ContinuationOf(catch_range_);
default:
UNREACHABLE();
}
}
private:
@ -196,8 +210,14 @@ class TryFinallyStatementSourceRanges final : public AstNodeSourceRanges {
: finally_range_(finally_range) {}
SourceRange GetRange(SourceRangeKind kind) {
DCHECK(kind == SourceRangeKind::kFinally);
return finally_range_;
switch (kind) {
case SourceRangeKind::kFinally:
return finally_range_;
case SourceRangeKind::kContinuation:
return SourceRange::ContinuationOf(finally_range_);
default:
UNREACHABLE();
}
}
private:
@ -210,7 +230,7 @@ class SourceRangeMap final : public ZoneObject {
public:
explicit SourceRangeMap(Zone* zone) : map_(zone) {}
AstNodeSourceRanges* Find(AstNode* node) {
AstNodeSourceRanges* Find(ZoneObject* node) {
auto it = map_.find(node);
if (it == map_.end()) return nullptr;
return it->second;
@ -219,17 +239,16 @@ class SourceRangeMap final : public ZoneObject {
// Type-checked insertion.
#define DEFINE_MAP_INSERT(type) \
void Insert(type* node, type##SourceRanges* ranges) { \
DCHECK_NOT_NULL(node); \
map_.emplace(node, ranges); \
}
AST_SOURCE_RANGE_LIST(DEFINE_MAP_INSERT)
#undef DEFINE_MAP_INSERT
private:
ZoneMap<AstNode*, AstNodeSourceRanges*> map_;
ZoneMap<ZoneObject*, AstNodeSourceRanges*> map_;
};
#undef AST_SOURCE_RANGE_LIST
} // namespace internal
} // namespace v8
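
The DEFINE_MAP_INSERT expansion above is the interesting part: the X-macro stamps out one Insert overload per node kind, so the compiler rejects pairing a node with the wrong SourceRanges type even though the underlying map is untyped. A reduced sketch of the same trick, with illustrative stand-in types:

#include <map>

struct Block {};
struct BlockSourceRangesSketch {};
struct Throw {};
struct ThrowSourceRangesSketch {};

class SourceRangeMapSketch {
 public:
#define SKETCH_LIST(V) V(Block) V(Throw)
#define DEFINE_INSERT(type)                                   \
  void Insert(type* node, type##SourceRangesSketch* ranges) { \
    map_.emplace(node, ranges);                               \
  }
  SKETCH_LIST(DEFINE_INSERT)
#undef DEFINE_INSERT
#undef SKETCH_LIST

 private:
  // The map stores untyped pointers; type safety lives in the overloads.
  std::map<void*, void*> map_;
};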

17
deps/v8/src/ast/ast-traversal-visitor.h

@ -210,11 +210,6 @@ void AstTraversalVisitor<Subclass>::VisitSwitchStatement(
}
}
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitCaseClause(CaseClause* clause) {
UNREACHABLE();
}
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitDoWhileStatement(
DoWhileStatement* stmt) {
@ -359,6 +354,12 @@ void AstTraversalVisitor<Subclass>::VisitAssignment(Assignment* expr) {
RECURSE_EXPRESSION(Visit(expr->value()));
}
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitCompoundAssignment(
CompoundAssignment* expr) {
VisitAssignment(expr);
}
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitYield(Yield* expr) {
PROCESS_EXPRESSION(expr);
@ -490,6 +491,12 @@ void AstTraversalVisitor<Subclass>::VisitGetIterator(GetIterator* expr) {
RECURSE_EXPRESSION(Visit(expr->iterable()));
}
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitGetTemplateObject(
GetTemplateObject* expr) {
PROCESS_EXPRESSION(expr);
}
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitImportCallExpression(
ImportCallExpression* expr) {

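The two additions here (VisitCompoundAssignment, VisitGetTemplateObject) lean on the visitor's CRTP design: delegation goes through the derived class, so a subclass that overrides VisitAssignment automatically covers compound assignments too. A stand-alone sketch of that shape, with made-up node types rather than V8's AST classes:

struct AssignmentNode { /* target, value, ... */ };

template <class Subclass>
class TraversalSketch {
 public:
  void VisitAssignment(AssignmentNode* expr) { /* recurse into children */ }
  void VisitCompoundAssignment(AssignmentNode* expr) {
    impl()->VisitAssignment(expr);  // reuse the plain-assignment path
  }

 protected:
  Subclass* impl() { return static_cast<Subclass*>(this); }
};

class MyVisitor : public TraversalSketch<MyVisitor> {};
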
49
deps/v8/src/ast/ast-value-factory.cc

@ -168,6 +168,30 @@ void AstConsString::Internalize(Isolate* isolate) {
set_string(tmp);
}
AstValue::AstValue(double n) : next_(nullptr) {
int int_value;
if (DoubleToSmiInteger(n, &int_value)) {
type_ = SMI;
smi_ = int_value;
} else {
type_ = NUMBER;
number_ = n;
}
}
bool AstValue::ToUint32(uint32_t* value) const {
if (IsSmi()) {
int num = smi_;
if (num < 0) return false;
*value = static_cast<uint32_t>(num);
return true;
}
if (IsHeapNumber()) {
return DoubleToUint32IfEqualToSelf(number_, value);
}
return false;
}
bool AstValue::IsPropertyName() const {
if (type_ == STRING) {
uint32_t index;
@ -242,6 +266,31 @@ void AstValue::Internalize(Isolate* isolate) {
}
}
AstStringConstants::AstStringConstants(Isolate* isolate, uint32_t hash_seed)
: zone_(isolate->allocator(), ZONE_NAME),
string_table_(AstRawString::Compare),
hash_seed_(hash_seed) {
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
#define F(name, str) \
{ \
const char* data = str; \
Vector<const uint8_t> literal(reinterpret_cast<const uint8_t*>(data), \
static_cast<int>(strlen(data))); \
uint32_t hash_field = StringHasher::HashSequentialString<uint8_t>( \
literal.start(), literal.length(), hash_seed_); \
name##_string_ = new (&zone_) AstRawString(true, literal, hash_field); \
/* The Handle returned by the factory is located on the roots */ \
/* array, not on the temporary HandleScope, so this is safe. */ \
name##_string_->set_string(isolate->factory()->name##_string()); \
base::HashMap::Entry* entry = \
string_table_.InsertNew(name##_string_, name##_string_->Hash()); \
DCHECK_NULL(entry->value); \
entry->value = reinterpret_cast<void*>(1); \
}
AST_STRING_CONSTANTS(F)
#undef F
}
AstRawString* AstValueFactory::GetOneByteStringInternal(
Vector<const uint8_t> literal) {
if (literal.length() == 1 && IsInRange(literal[0], 'a', 'z')) {

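Both helpers moved out of the header follow the same idea: a double only gets the cheaper representation when the conversion round-trips exactly. A sketch of the ToUint32 logic in plain C++, assuming (without checking V8's helpers) that DoubleToUint32IfEqualToSelf means "casting back compares equal":

#include <cstdint>

bool SketchToUint32(double n, uint32_t* value) {
  // Reject NaN and out-of-range values first; casting them to uint32_t
  // would be undefined behaviour in plain C++.
  if (!(n >= 0.0 && n <= 4294967295.0)) return false;
  uint32_t candidate = static_cast<uint32_t>(n);
  if (static_cast<double>(candidate) != n) return false;  // not integral
  *value = candidate;
  return true;
}
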
58
deps/v8/src/ast/ast-value-factory.h

@ -209,18 +209,7 @@ class AstValue : public ZoneObject {
return Smi::FromInt(smi_);
}
bool ToUint32(uint32_t* value) const {
if (IsSmi()) {
int num = smi_;
if (num < 0) return false;
*value = static_cast<uint32_t>(num);
return true;
}
if (IsHeapNumber()) {
return DoubleToUint32IfEqualToSelf(number_, value);
}
return false;
}
bool ToUint32(uint32_t* value) const;
bool EqualsString(const AstRawString* string) const {
return type_ == STRING && string_ == string;
@ -274,16 +263,7 @@ class AstValue : public ZoneObject {
symbol_ = symbol;
}
explicit AstValue(double n) : next_(nullptr) {
int int_value;
if (DoubleToSmiInteger(n, &int_value)) {
type_ = SMI;
smi_ = int_value;
} else {
type_ = NUMBER;
number_ = n;
}
}
explicit AstValue(double n);
AstValue(Type t, int i) : type_(t), next_(nullptr) {
DCHECK(type_ == SMI);
@ -316,7 +296,7 @@ class AstValue : public ZoneObject {
};
// For generating constants.
#define STRING_CONSTANTS(F) \
#define AST_STRING_CONSTANTS(F) \
F(anonymous_function, "(anonymous function)") \
F(arguments, "arguments") \
F(async, "async") \
@ -361,34 +341,11 @@ class AstValue : public ZoneObject {
class AstStringConstants final {
public:
AstStringConstants(Isolate* isolate, uint32_t hash_seed)
: zone_(isolate->allocator(), ZONE_NAME),
string_table_(AstRawString::Compare),
hash_seed_(hash_seed) {
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
#define F(name, str) \
{ \
const char* data = str; \
Vector<const uint8_t> literal(reinterpret_cast<const uint8_t*>(data), \
static_cast<int>(strlen(data))); \
uint32_t hash_field = StringHasher::HashSequentialString<uint8_t>( \
literal.start(), literal.length(), hash_seed_); \
name##_string_ = new (&zone_) AstRawString(true, literal, hash_field); \
/* The Handle returned by the factory is located on the roots */ \
/* array, not on the temporary HandleScope, so this is safe. */ \
name##_string_->set_string(isolate->factory()->name##_string()); \
base::HashMap::Entry* entry = \
string_table_.InsertNew(name##_string_, name##_string_->Hash()); \
DCHECK_NULL(entry->value); \
entry->value = reinterpret_cast<void*>(1); \
}
STRING_CONSTANTS(F)
#undef F
}
AstStringConstants(Isolate* isolate, uint32_t hash_seed);
#define F(name, str) \
const AstRawString* name##_string() const { return name##_string_; }
STRING_CONSTANTS(F)
AST_STRING_CONSTANTS(F)
#undef F
uint32_t hash_seed() const { return hash_seed_; }
@ -402,7 +359,7 @@ class AstStringConstants final {
uint32_t hash_seed_;
#define F(name, str) AstRawString* name##_string_;
STRING_CONSTANTS(F)
AST_STRING_CONSTANTS(F)
#undef F
DISALLOW_COPY_AND_ASSIGN(AstStringConstants);
@ -464,7 +421,7 @@ class AstValueFactory {
const AstRawString* name##_string() const { \
return string_constants_->name##_string(); \
}
STRING_CONSTANTS(F)
AST_STRING_CONSTANTS(F)
#undef F
const AstConsString* empty_cons_string() const { return empty_cons_string_; }
@ -544,7 +501,6 @@ class AstValueFactory {
} // namespace internal
} // namespace v8
#undef STRING_CONSTANTS
#undef OTHER_CONSTANTS
#endif // V8_AST_AST_VALUE_FACTORY_H_
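
The rename from STRING_CONSTANTS to AST_STRING_CONSTANTS works precisely because the list is an X-macro: the same list expands into the member declarations, the accessors, and the constructor body, so one definition drives every site. A reduced sketch of the pattern, with illustrative names:

#define SKETCH_CONSTANTS(F) \
  F(async, "async")         \
  F(await, "await")

class ConstantsSketch {
 public:
#define F(name, str) \
  const char* name##_string() const { return name##_; }
  SKETCH_CONSTANTS(F)
#undef F

 private:
#define F(name, str) const char* name##_ = str;
  SKETCH_CONSTANTS(F)
#undef F
};
#undef SKETCH_CONSTANTS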

202
deps/v8/src/ast/ast.cc

@ -139,11 +139,6 @@ bool Expression::IsValidReferenceExpression() const {
(IsVariableProxy() && AsVariableProxy()->IsValidReferenceExpression());
}
bool Expression::IsValidReferenceExpressionOrThis() const {
return IsValidReferenceExpression() ||
(IsVariableProxy() && AsVariableProxy()->is_this());
}
bool Expression::IsAnonymousFunctionDefinition() const {
return (IsFunctionLiteral() &&
AsFunctionLiteral()->IsAnonymousFunctionDefinition()) ||
@ -246,30 +241,28 @@ static void AssignVectorSlots(Expression* expr, FeedbackVectorSpec* spec,
void ForInStatement::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FunctionKind kind,
FeedbackSlotCache* cache) {
AssignVectorSlots(each(), spec, language_mode, &each_slot_);
for_in_feedback_slot_ = spec->AddGeneralSlot();
for_in_feedback_slot_ = spec->AddForInSlot();
}
Assignment::Assignment(Token::Value op, Expression* target, Expression* value,
int pos)
: Expression(pos, kAssignment),
target_(target),
value_(value),
binary_operation_(NULL) {
bit_field_ |= IsUninitializedField::encode(false) |
KeyTypeField::encode(ELEMENT) |
StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op);
Assignment::Assignment(NodeType node_type, Token::Value op, Expression* target,
Expression* value, int pos)
: Expression(pos, node_type), target_(target), value_(value) {
bit_field_ |= TokenField::encode(op);
}
void Assignment::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FunctionKind kind,
FeedbackSlotCache* cache) {
AssignVectorSlots(target(), spec, language_mode, &slot_);
}
void CountOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FunctionKind kind,
FeedbackSlotCache* cache) {
AssignVectorSlots(expression(), spec, language_mode, &slot_);
// Assign a slot to collect feedback about binary operations. Used only in
@ -278,24 +271,6 @@ void CountOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
}
Token::Value Assignment::binary_op() const {
switch (op()) {
case Token::ASSIGN_BIT_OR: return Token::BIT_OR;
case Token::ASSIGN_BIT_XOR: return Token::BIT_XOR;
case Token::ASSIGN_BIT_AND: return Token::BIT_AND;
case Token::ASSIGN_SHL: return Token::SHL;
case Token::ASSIGN_SAR: return Token::SAR;
case Token::ASSIGN_SHR: return Token::SHR;
case Token::ASSIGN_ADD: return Token::ADD;
case Token::ASSIGN_SUB: return Token::SUB;
case Token::ASSIGN_MUL: return Token::MUL;
case Token::ASSIGN_DIV: return Token::DIV;
case Token::ASSIGN_MOD: return Token::MOD;
default: UNREACHABLE();
}
return Token::ILLEGAL;
}
bool FunctionLiteral::ShouldEagerCompile() const {
return scope()->ShouldEagerCompile();
}
@ -308,6 +283,9 @@ bool FunctionLiteral::AllowsLazyCompilation() {
return scope()->AllowsLazyCompilation();
}
Handle<String> FunctionLiteral::name(Isolate* isolate) const {
return raw_name_ ? raw_name_->string() : isolate->factory()->empty_string();
}
int FunctionLiteral::start_position() const {
return scope()->start_position();
@ -331,23 +309,6 @@ bool FunctionLiteral::NeedsHomeObject(Expression* expr) {
return expr->AsFunctionLiteral()->scope()->NeedsHomeObject();
}
void FunctionLiteral::ReplaceBodyAndScope(FunctionLiteral* other) {
DCHECK_NULL(body_);
DCHECK_NOT_NULL(scope_);
DCHECK_NOT_NULL(other->scope());
Scope* outer_scope = scope_->outer_scope();
body_ = other->body();
scope_ = other->scope();
scope_->ReplaceOuterScope(outer_scope);
#ifdef DEBUG
scope_->set_replaced_from_parse_task(true);
#endif
function_length_ = other->function_length_;
}
ObjectLiteralProperty::ObjectLiteralProperty(Expression* key, Expression* value,
Kind kind, bool is_computed_name)
: LiteralProperty(key, value, is_computed_name),
@ -396,6 +357,7 @@ ClassLiteralProperty::ClassLiteralProperty(Expression* key, Expression* value,
void ClassLiteral::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FunctionKind kind,
FeedbackSlotCache* cache) {
// This logic that computes the number of slots needed for vector store
// ICs must mirror BytecodeGenerator::VisitClassLiteral.
@ -403,10 +365,6 @@ void ClassLiteral::AssignFeedbackSlots(FeedbackVectorSpec* spec,
home_object_slot_ = spec->AddStoreICSlot(language_mode);
}
if (NeedsProxySlot()) {
proxy_slot_ = spec->AddStoreICSlot(language_mode);
}
for (int i = 0; i < properties()->length(); i++) {
ClassLiteral::Property* property = properties()->at(i);
Expression* value = property->value();
@ -433,8 +391,9 @@ bool ObjectLiteral::Property::emit_store() const { return emit_store_; }
void ObjectLiteral::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FunctionKind kind,
FeedbackSlotCache* cache) {
MaterializedLiteral::AssignFeedbackSlots(spec, language_mode, cache);
MaterializedLiteral::AssignFeedbackSlots(spec, language_mode, kind, cache);
// This logic that computes the number of slots needed for vector store
// ICs must mirror FullCodeGenerator::VisitObjectLiteral.
@ -675,7 +634,7 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
}
bool ObjectLiteral::IsFastCloningSupported() const {
// The FastCloneShallowObject builtin doesn't copy elements, and object
// The CreateShallowObjectLiteral builtin doesn't copy elements, and object
// literals don't support copy-on-write (COW) elements for now.
// TODO(mvstanton): make object literals support COW elements.
return fast_elements() && is_shallow() &&
@ -683,6 +642,12 @@ bool ObjectLiteral::IsFastCloningSupported() const {
ConstructorBuiltins::kMaximumClonedShallowObjectProperties;
}
bool ArrayLiteral::is_empty() const {
DCHECK(is_initialized());
return values()->is_empty() &&
(constant_elements().is_null() || constant_elements()->is_empty());
}
int ArrayLiteral::InitDepthAndFlags() {
DCHECK_LT(first_spread_index_, 0);
if (is_initialized()) return depth();
@ -791,8 +756,9 @@ void ArrayLiteral::RewindSpreads() {
void ArrayLiteral::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FunctionKind kind,
FeedbackSlotCache* cache) {
MaterializedLiteral::AssignFeedbackSlots(spec, language_mode, cache);
MaterializedLiteral::AssignFeedbackSlots(spec, language_mode, kind, cache);
// This logic that computes the number of slots needed for vector store
// ICs must mirror FullCodeGenerator::VisitArrayLiteral.
@ -854,8 +820,51 @@ void MaterializedLiteral::BuildConstants(Isolate* isolate) {
DCHECK(IsRegExpLiteral());
}
Handle<TemplateObjectDescription> GetTemplateObject::GetOrBuildDescription(
Isolate* isolate) {
Handle<FixedArray> raw_strings =
isolate->factory()->NewFixedArray(this->raw_strings()->length(), TENURED);
bool raw_and_cooked_match = true;
for (int i = 0; i < raw_strings->length(); ++i) {
if (*this->raw_strings()->at(i)->value() !=
*this->cooked_strings()->at(i)->value()) {
raw_and_cooked_match = false;
}
raw_strings->set(i, *this->raw_strings()->at(i)->value());
}
Handle<FixedArray> cooked_strings = raw_strings;
if (!raw_and_cooked_match) {
cooked_strings = isolate->factory()->NewFixedArray(
this->cooked_strings()->length(), TENURED);
for (int i = 0; i < cooked_strings->length(); ++i) {
cooked_strings->set(i, *this->cooked_strings()->at(i)->value());
}
}
return isolate->factory()->NewTemplateObjectDescription(
this->hash(), raw_strings, cooked_strings);
}
void UnaryOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FunctionKind kind,
FeedbackSlotCache* cache) {
switch (op()) {
// Only unary plus, minus, and bitwise-not currently collect feedback.
case Token::ADD:
case Token::SUB:
case Token::BIT_NOT:
// Note that the slot kind remains "BinaryOp", as the operation
// is transformed into a binary operation in the BytecodeGenerator.
feedback_slot_ = spec->AddInterpreterBinaryOpICSlot();
return;
default:
return;
}
}
void BinaryOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FunctionKind kind,
FeedbackSlotCache* cache) {
// Feedback vector slot is only used by interpreter for binary operations.
// Full-codegen uses AstId to record type feedback.
@ -902,6 +911,7 @@ static bool IsTypeof(Expression* expr) {
void CompareOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FunctionKind kind,
FeedbackSlotCache* cache_) {
// Feedback vector slot is only used by interpreter for binary operations.
// Full-codegen uses AstId to record type feedback.
@ -987,63 +997,8 @@ bool CompareOperation::IsLiteralCompareNull(Expression** expr) {
// ----------------------------------------------------------------------------
// Recording of type feedback
Handle<Map> SmallMapList::at(int i) const { return Handle<Map>(list_.at(i)); }
SmallMapList* Expression::GetReceiverTypes() {
switch (node_type()) {
#define NODE_LIST(V) \
PROPERTY_NODE_LIST(V) \
V(Call)
#define GENERATE_CASE(Node) \
case k##Node: \
return static_cast<Node*>(this)->GetReceiverTypes();
NODE_LIST(GENERATE_CASE)
#undef NODE_LIST
#undef GENERATE_CASE
default:
UNREACHABLE();
}
}
KeyedAccessStoreMode Expression::GetStoreMode() const {
switch (node_type()) {
#define GENERATE_CASE(Node) \
case k##Node: \
return static_cast<const Node*>(this)->GetStoreMode();
PROPERTY_NODE_LIST(GENERATE_CASE)
#undef GENERATE_CASE
default:
UNREACHABLE();
}
}
IcCheckType Expression::GetKeyType() const {
switch (node_type()) {
#define GENERATE_CASE(Node) \
case k##Node: \
return static_cast<const Node*>(this)->GetKeyType();
PROPERTY_NODE_LIST(GENERATE_CASE)
#undef GENERATE_CASE
default:
UNREACHABLE();
}
}
bool Expression::IsMonomorphic() const {
switch (node_type()) {
#define GENERATE_CASE(Node) \
case k##Node: \
return static_cast<const Node*>(this)->IsMonomorphic();
PROPERTY_NODE_LIST(GENERATE_CASE)
CALL_NODE_LIST(GENERATE_CASE)
#undef GENERATE_CASE
default:
UNREACHABLE();
}
}
void Call::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
LanguageMode language_mode, FunctionKind kind,
FeedbackSlotCache* cache) {
ic_slot_ = spec->AddCallICSlot();
}
@ -1075,12 +1030,12 @@ Call::CallType Call::GetCallType() const {
return OTHER_CALL;
}
CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements,
int pos)
: Expression(pos, kCaseClause), label_(label), statements_(statements) {}
CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements)
: label_(label), statements_(statements) {}
void CaseClause::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FunctionKind kind,
FeedbackSlotCache* cache) {
feedback_slot_ = spec->AddInterpreterCompareICSlot();
}
@ -1109,5 +1064,20 @@ const char* CallRuntime::debug_name() {
#endif // DEBUG
}
#define RETURN_LABELS(NodeType) \
case k##NodeType: \
return static_cast<const NodeType*>(this)->labels();
ZoneList<const AstRawString*>* BreakableStatement::labels() const {
switch (node_type()) {
BREAKABLE_NODE_LIST(RETURN_LABELS)
ITERATION_NODE_LIST(RETURN_LABELS)
default:
UNREACHABLE();
}
}
#undef RETURN_LABELS
} // namespace internal
} // namespace v8
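
Among the additions, GetOrBuildDescription has the one non-obvious optimization: the cooked string array aliases the raw one unless some entry differs, so template literals without escape sequences allocate a single array. A sketch of that sharing decision using standard-library stand-ins for V8's factory and FixedArray:

#include <memory>
#include <string>
#include <utility>
#include <vector>

using StringArray = std::vector<std::string>;

std::pair<std::shared_ptr<StringArray>, std::shared_ptr<StringArray>>
BuildDescriptionSketch(const StringArray& raw, const StringArray& cooked) {
  auto raw_copy = std::make_shared<StringArray>(raw);
  // V8 detects the mismatch while copying; a direct comparison is the
  // same decision expressed more bluntly.
  if (raw == cooked) return {raw_copy, raw_copy};  // alias: one array total
  return {raw_copy, std::make_shared<StringArray>(cooked)};
}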

733
deps/v8/src/ast/ast.h

File diff suppressed because it is too large

2
deps/v8/src/ast/modules.cc

@ -6,7 +6,7 @@
#include "src/ast/ast-value-factory.h"
#include "src/ast/scopes.h"
#include "src/objects-inl.h"
#include "src/objects/module-info.h"
#include "src/objects/module.h"
#include "src/pending-compilation-error-handler.h"
namespace v8 {

106
deps/v8/src/ast/prettyprinter.cc

@ -22,13 +22,22 @@ CallPrinter::CallPrinter(Isolate* isolate, bool is_user_js)
num_prints_ = 0;
found_ = false;
done_ = false;
iterator_hint_ = IteratorHint::kNone;
is_call_error_ = false;
is_iterator_error_ = false;
is_async_iterator_error_ = false;
is_user_js_ = is_user_js;
InitializeAstVisitor(isolate);
}
CallPrinter::IteratorHint CallPrinter::GetIteratorHint() const {
return iterator_hint_;
CallPrinter::ErrorHint CallPrinter::GetErrorHint() const {
if (is_call_error_) {
if (is_iterator_error_) return ErrorHint::kCallAndNormalIterator;
if (is_async_iterator_error_) return ErrorHint::kCallAndAsyncIterator;
} else {
if (is_iterator_error_) return ErrorHint::kNormalIterator;
if (is_async_iterator_error_) return ErrorHint::kAsyncIterator;
}
return ErrorHint::kNone;
}
Handle<String> CallPrinter::Print(FunctionLiteral* program, int position) {
@ -40,7 +49,6 @@ Handle<String> CallPrinter::Print(FunctionLiteral* program, int position) {
void CallPrinter::Find(AstNode* node, bool print) {
if (done_) return;
if (found_) {
if (print) {
int prev_num_prints = num_prints_;
@ -118,16 +126,10 @@ void CallPrinter::VisitWithStatement(WithStatement* node) {
void CallPrinter::VisitSwitchStatement(SwitchStatement* node) {
Find(node->tag());
ZoneList<CaseClause*>* cases = node->cases();
for (int i = 0; i < cases->length(); i++) Find(cases->at(i));
}
void CallPrinter::VisitCaseClause(CaseClause* clause) {
if (!clause->is_default()) {
Find(clause->label());
for (CaseClause* clause : *node->cases()) {
if (!clause->is_default()) Find(clause->label());
FindStatements(clause->statements());
}
FindStatements(clause->statements());
}
@ -261,6 +263,10 @@ void CallPrinter::VisitAssignment(Assignment* node) {
Find(node->value());
}
void CallPrinter::VisitCompoundAssignment(CompoundAssignment* node) {
VisitAssignment(node);
}
void CallPrinter::VisitYield(Yield* node) { Find(node->expression()); }
void CallPrinter::VisitYieldStar(YieldStar* node) { Find(node->expression()); }
@ -287,7 +293,11 @@ void CallPrinter::VisitProperty(Property* node) {
void CallPrinter::VisitCall(Call* node) {
bool was_found = !found_ && node->position() == position_;
bool was_found = false;
if (node->position() == position_) {
is_call_error_ = true;
was_found = !found_;
}
if (was_found) {
// Bail out if the error is caused by a direct call to a variable in
// non-user JS code. The variable name is meaningless due to minification.
@ -300,12 +310,19 @@ void CallPrinter::VisitCall(Call* node) {
Find(node->expression(), true);
if (!was_found) Print("(...)");
FindArguments(node->arguments());
if (was_found) done_ = true;
if (was_found) {
done_ = true;
found_ = false;
}
}
void CallPrinter::VisitCallNew(CallNew* node) {
bool was_found = !found_ && node->position() == position_;
bool was_found = false;
if (node->position() == position_) {
is_call_error_ = true;
was_found = !found_;
}
if (was_found) {
// Bail out if the error is caused by a direct call to a variable in
// non-user JS code. The variable name is meaningless due to minification.
@ -317,7 +334,10 @@ void CallPrinter::VisitCallNew(CallNew* node) {
}
Find(node->expression(), was_found);
FindArguments(node->arguments());
if (was_found) done_ = true;
if (was_found) {
done_ = true;
found_ = false;
}
}
@ -381,17 +401,24 @@ void CallPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
}
void CallPrinter::VisitGetIterator(GetIterator* node) {
bool was_found = !found_ && node->position() == position_;
if (was_found) {
found_ = true;
iterator_hint_ = node->hint() == IteratorType::kNormal
? IteratorHint::kNormal
: IteratorHint::kAsync;
bool was_found = false;
if (node->position() == position_) {
is_async_iterator_error_ = node->hint() == IteratorType::kAsync;
is_iterator_error_ = !is_async_iterator_error_;
was_found = !found_;
if (was_found) {
found_ = true;
}
}
Find(node->iterable_for_call_printer(), true);
if (was_found) done_ = true;
if (was_found) {
done_ = true;
found_ = false;
}
}
void CallPrinter::VisitGetTemplateObject(GetTemplateObject* node) {}
void CallPrinter::VisitImportCallExpression(ImportCallExpression* node) {
Print("ImportCall(");
Find(node->argument(), true);
@ -806,20 +833,15 @@ void AstPrinter::VisitSwitchStatement(SwitchStatement* node) {
IndentedScope indent(this, "SWITCH", node->position());
PrintLabelsIndented(node->labels());
PrintIndentedVisit("TAG", node->tag());
for (int i = 0; i < node->cases()->length(); i++) {
Visit(node->cases()->at(i));
}
}
void AstPrinter::VisitCaseClause(CaseClause* clause) {
if (clause->is_default()) {
IndentedScope indent(this, "DEFAULT", clause->position());
PrintStatements(clause->statements());
} else {
IndentedScope indent(this, "CASE", clause->position());
Visit(clause->label());
PrintStatements(clause->statements());
for (CaseClause* clause : *node->cases()) {
if (clause->is_default()) {
IndentedScope indent(this, "DEFAULT");
PrintStatements(clause->statements());
} else {
IndentedScope indent(this, "CASE");
Visit(clause->label());
PrintStatements(clause->statements());
}
}
}
@ -1114,6 +1136,10 @@ void AstPrinter::VisitAssignment(Assignment* node) {
Visit(node->value());
}
void AstPrinter::VisitCompoundAssignment(CompoundAssignment* node) {
VisitAssignment(node);
}
void AstPrinter::VisitYield(Yield* node) {
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "YIELD id %d", node->suspend_id());
@ -1226,6 +1252,10 @@ void AstPrinter::VisitGetIterator(GetIterator* node) {
Visit(node->iterable());
}
void AstPrinter::VisitGetTemplateObject(GetTemplateObject* node) {
IndentedScope indent(this, "GET-TEMPLATE-OBJECT", node->position());
}
void AstPrinter::VisitImportCallExpression(ImportCallExpression* node) {
IndentedScope indent(this, "IMPORT-CALL", node->position());
Visit(node->argument());

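The rewritten call/iterator bookkeeping is easiest to read as a truth table: three booleans collapse into the five-valued hint, and a bare call error maps to kNone because that message needs no iterator suffix. A sketch mirroring GetErrorHint, with illustrative names:

enum class HintSketch {
  kNone, kNormalIterator, kAsyncIterator,
  kCallAndNormalIterator, kCallAndAsyncIterator
};

HintSketch CombineSketch(bool call_error, bool iter_error, bool async_error) {
  if (call_error) {
    if (iter_error) return HintSketch::kCallAndNormalIterator;
    if (async_error) return HintSketch::kCallAndAsyncIterator;
  } else {
    if (iter_error) return HintSketch::kNormalIterator;
    if (async_error) return HintSketch::kAsyncIterator;
  }
  return HintSketch::kNone;  // includes a bare call error: no suffix needed
}
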
15
deps/v8/src/ast/prettyprinter.h

@ -20,8 +20,14 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
// The following routine prints the node with position |position| into a
// string.
Handle<String> Print(FunctionLiteral* program, int position);
enum IteratorHint { kNone, kNormal, kAsync };
IteratorHint GetIteratorHint() const;
enum ErrorHint {
kNone,
kNormalIterator,
kAsyncIterator,
kCallAndNormalIterator,
kCallAndAsyncIterator
};
ErrorHint GetErrorHint() const;
// Individual nodes
#define DECLARE_VISIT(type) void Visit##type(type* node);
@ -41,8 +47,9 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
bool found_;
bool done_;
bool is_user_js_;
IteratorHint iterator_hint_;
bool is_iterator_error_;
bool is_async_iterator_error_;
bool is_call_error_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
protected:

236
deps/v8/src/ast/scopes.cc

@ -8,11 +8,12 @@
#include "src/accessors.h"
#include "src/ast/ast.h"
#include "src/base/optional.h"
#include "src/bootstrapper.h"
#include "src/counters.h"
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/objects/module-info.h"
#include "src/objects/module.h"
#include "src/objects/scope-info.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/preparsed-scope-data.h"
@ -188,7 +189,6 @@ DeclarationScope::DeclarationScope(Zone* zone, Scope* outer_scope,
params_(4, zone) {
DCHECK_NE(scope_type, SCRIPT_SCOPE);
SetDefaults();
asm_function_ = outer_scope_->IsAsmModule();
}
ModuleScope::ModuleScope(DeclarationScope* script_scope,
@ -259,7 +259,7 @@ Scope::Scope(Zone* zone, ScopeType scope_type, Handle<ScopeInfo> scope_info)
#ifdef DEBUG
already_resolved_ = true;
#endif
if (scope_info->CallsEval()) RecordEvalCall();
if (scope_info->CallsSloppyEval()) scope_calls_eval_ = true;
set_language_mode(scope_info->language_mode());
num_heap_slots_ = scope_info->ContextLength();
DCHECK_LE(Context::MIN_CONTEXT_SLOTS, num_heap_slots_);
@ -300,7 +300,6 @@ void DeclarationScope::SetDefaults() {
is_declaration_scope_ = true;
has_simple_parameters_ = true;
asm_module_ = false;
asm_function_ = false;
force_eager_compilation_ = false;
has_arguments_parameter_ = false;
scope_uses_super_property_ = false;
@ -371,20 +370,25 @@ void DeclarationScope::set_should_eager_compile() {
void DeclarationScope::set_asm_module() {
asm_module_ = true;
// Mark any existing inner function scopes as asm function scopes.
for (Scope* inner = inner_scope_; inner != nullptr; inner = inner->sibling_) {
if (inner->is_function_scope()) {
inner->AsDeclarationScope()->set_asm_function();
}
}
}
bool Scope::IsAsmModule() const {
return is_function_scope() && AsDeclarationScope()->asm_module();
}
bool Scope::IsAsmFunction() const {
return is_function_scope() && AsDeclarationScope()->asm_function();
bool Scope::ContainsAsmModule() const {
if (IsAsmModule()) return true;
// Check inner scopes recursively.
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
// Don't check inner functions which won't be eagerly compiled.
if (!scope->is_function_scope() ||
scope->AsDeclarationScope()->ShouldEagerCompile()) {
if (scope->ContainsAsmModule()) return true;
}
}
return false;
}
Scope* Scope::DeserializeScopeChain(Zone* zone, ScopeInfo* scope_info,
@ -417,8 +421,6 @@ Scope* Scope::DeserializeScopeChain(Zone* zone, ScopeInfo* scope_info,
} else if (scope_info->scope_type() == FUNCTION_SCOPE) {
outer_scope =
new (zone) DeclarationScope(zone, FUNCTION_SCOPE, handle(scope_info));
if (scope_info->IsAsmFunction())
outer_scope->AsDeclarationScope()->set_asm_function();
if (scope_info->IsAsmModule())
outer_scope->AsDeclarationScope()->set_asm_module();
} else if (scope_info->scope_type() == EVAL_SCOPE) {
@ -601,7 +603,7 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
DCHECK(!is_being_lazily_parsed_);
VariableProxy* proxy = factory->NewVariableProxy(name, NORMAL_VARIABLE);
auto declaration =
factory->NewVariableDeclaration(proxy, this, kNoSourcePosition);
factory->NewVariableDeclaration(proxy, kNoSourcePosition);
// Based on the preceding checks, it doesn't matter what we pass as
// allow_harmony_restrictive_generators and
// sloppy_mode_block_scope_function_redefinition.
@ -615,36 +617,47 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
Variable* var = DeclareVariableName(name, VAR);
if (var != kDummyPreParserVariable &&
var != kDummyPreParserLexicalVariable) {
DCHECK(FLAG_experimental_preparser_scope_analysis);
DCHECK(FLAG_preparser_scope_analysis);
var->set_maybe_assigned();
}
}
}
}
void DeclarationScope::Analyze(ParseInfo* info, Isolate* isolate,
AnalyzeMode mode) {
RuntimeCallTimerScope runtimeTimer(isolate,
&RuntimeCallStats::CompileScopeAnalysis);
DCHECK(info->literal() != NULL);
DeclarationScope* scope = info->literal()->scope();
DCHECK(scope->scope_info_.is_null());
void DeclarationScope::AttachOuterScopeInfo(ParseInfo* info, Isolate* isolate) {
DCHECK(scope_info_.is_null());
Handle<ScopeInfo> outer_scope_info;
if (info->maybe_outer_scope_info().ToHandle(&outer_scope_info)) {
if (scope->outer_scope()) {
// If we have a scope info we will potentially need to lookup variable names
// on the scope info as internalized strings, so make sure ast_value_factory
// is internalized.
info->ast_value_factory()->Internalize(isolate);
if (outer_scope()) {
DeclarationScope* script_scope = new (info->zone())
DeclarationScope(info->zone(), info->ast_value_factory());
info->set_script_scope(script_scope);
scope->ReplaceOuterScope(Scope::DeserializeScopeChain(
ReplaceOuterScope(Scope::DeserializeScopeChain(
info->zone(), *outer_scope_info, script_scope,
info->ast_value_factory(),
Scope::DeserializationMode::kIncludingVariables));
} else {
DCHECK_EQ(outer_scope_info->scope_type(), SCRIPT_SCOPE);
scope->SetScriptScopeInfo(outer_scope_info);
SetScriptScopeInfo(outer_scope_info);
}
}
}
void DeclarationScope::Analyze(ParseInfo* info) {
RuntimeCallTimerScope runtimeTimer(info->runtime_call_stats(),
&RuntimeCallStats::CompileScopeAnalysis);
DCHECK(info->literal() != NULL);
DeclarationScope* scope = info->literal()->scope();
base::Optional<AllowHandleDereference> allow_deref;
if (!info->maybe_outer_scope_info().is_null()) {
// Allow dereferences to the scope info if there is one.
allow_deref.emplace();
}
if (scope->is_eval_scope() && is_sloppy(scope->language_mode())) {
AstNodeFactory factory(info->ast_value_factory(), info->zone());
@ -655,32 +668,24 @@ void DeclarationScope::Analyze(ParseInfo* info, Isolate* isolate,
// 1) top-level code,
// 2) a function/eval/module on the top-level
// 3) a function/eval in a scope that was already resolved.
// 4) an asm.js function
DCHECK(scope->scope_type() == SCRIPT_SCOPE ||
scope->outer_scope()->scope_type() == SCRIPT_SCOPE ||
scope->outer_scope()->already_resolved_ ||
(info->asm_function_scope() && scope->is_function_scope()));
scope->outer_scope()->already_resolved_);
// The outer scope is never lazy.
scope->set_should_eager_compile();
if (scope->must_use_preparsed_scope_data_) {
DCHECK(FLAG_experimental_preparser_scope_analysis);
DCHECK(FLAG_preparser_scope_analysis);
DCHECK_EQ(scope->scope_type_, ScopeType::FUNCTION_SCOPE);
allow_deref.emplace();
info->consumed_preparsed_scope_data()->RestoreScopeAllocationData(scope);
}
scope->AllocateVariables(info, isolate, mode);
// Ensuring that the outer script scope has a scope info avoids having
// special case for native contexts vs other contexts.
if (info->script_scope()->scope_info_.is_null()) {
info->script_scope()->scope_info_ = handle(ScopeInfo::Empty(isolate));
}
scope->AllocateVariables(info);
#ifdef DEBUG
if (info->script_is_native() ? FLAG_print_builtin_scopes
: FLAG_print_scopes) {
if (info->is_native() ? FLAG_print_builtin_scopes : FLAG_print_scopes) {
PrintF("Global scope:\n");
scope->Print();
}
@ -792,7 +797,7 @@ Scope* Scope::FinalizeBlockScope() {
DCHECK(!HasBeenRemoved());
if (variables_.occupancy() > 0 ||
(is_declaration_scope() && calls_sloppy_eval())) {
(is_declaration_scope() && AsDeclarationScope()->calls_sloppy_eval())) {
return this;
}
@ -825,9 +830,13 @@ Scope* Scope::FinalizeBlockScope() {
unresolved_ = nullptr;
}
if (scope_calls_eval_) outer_scope()->scope_calls_eval_ = true;
if (inner_scope_calls_eval_) outer_scope()->inner_scope_calls_eval_ = true;
// No need to propagate scope_calls_eval_, since if it was relevant to
// this scope we would have had to bail out at the top.
DCHECK(!scope_calls_eval_ || !is_declaration_scope() ||
!is_sloppy(language_mode()));
// This block does not need a context.
num_heap_slots_ = 0;
@ -1038,7 +1047,7 @@ Variable* DeclarationScope::DeclareParameterName(
if (name == ast_value_factory->arguments_string()) {
has_arguments_parameter_ = true;
}
if (FLAG_experimental_preparser_scope_analysis) {
if (FLAG_preparser_scope_analysis) {
Variable* var;
if (declare_as_local) {
var = Declare(zone(), name, VAR);
@ -1207,14 +1216,19 @@ Variable* Scope::DeclareVariableName(const AstRawString* name,
DCHECK(scope_info_.is_null());
// Declare the variable in the declaration scope.
if (FLAG_experimental_preparser_scope_analysis) {
if (FLAG_preparser_scope_analysis) {
Variable* var = LookupLocal(name);
DCHECK_NE(var, kDummyPreParserLexicalVariable);
DCHECK_NE(var, kDummyPreParserVariable);
if (var == nullptr) {
var = DeclareLocal(name, mode);
} else if (IsLexicalVariableMode(mode) ||
IsLexicalVariableMode(var->mode())) {
// Duplicate functions are allowed in sloppy mode, but if this is not a
// function declaration, it's an error that the PreParser hasn't previously
// detected. TODO(marja): Investigate whether we can now start returning
// this error.
} else if (mode == VAR) {
DCHECK_EQ(var->mode(), VAR);
var->set_maybe_assigned();
}
var->set_is_used();
@ -1275,28 +1289,36 @@ Variable* Scope::NewTemporary(const AstRawString* name,
Declaration* Scope::CheckConflictingVarDeclarations() {
for (Declaration* decl : decls_) {
VariableMode mode = decl->proxy()->var()->mode();
if (IsLexicalVariableMode(mode) && !is_block_scope()) continue;
// Iterate through all scopes until and including the declaration scope.
Scope* previous = NULL;
Scope* current = decl->scope();
// Lexical vs lexical conflicts within the same scope have already been
// captured in Parser::Declare. The only conflicts we still need to check
// are lexical vs VAR, or any declarations within a declaration block scope
// vs lexical declarations in its surrounding (function) scope.
if (IsLexicalVariableMode(mode)) current = current->outer_scope_;
do {
// are lexical vs nested var, or any declarations within a declaration
// block scope vs lexical declarations in its surrounding (function) scope.
Scope* current = this;
if (decl->IsVariableDeclaration() &&
decl->AsVariableDeclaration()->AsNested() != nullptr) {
DCHECK_EQ(mode, VAR);
current = decl->AsVariableDeclaration()->AsNested()->scope();
} else if (IsLexicalVariableMode(mode)) {
if (!is_block_scope()) continue;
DCHECK(is_declaration_scope());
DCHECK_EQ(outer_scope()->scope_type(), FUNCTION_SCOPE);
current = outer_scope();
}
// Iterate through all scopes until and including the declaration scope.
while (true) {
// There is a conflict if there exists a non-VAR binding.
Variable* other_var =
current->variables_.Lookup(decl->proxy()->raw_name());
if (other_var != NULL && IsLexicalVariableMode(other_var->mode())) {
if (other_var != nullptr && IsLexicalVariableMode(other_var->mode())) {
return decl;
}
previous = current;
current = current->outer_scope_;
} while (!previous->is_declaration_scope());
if (current->is_declaration_scope()) break;
current = current->outer_scope();
}
}
return NULL;
return nullptr;
}
Declaration* Scope::CheckLexDeclarationsConflictingWith(
@ -1317,29 +1339,13 @@ Declaration* Scope::CheckLexDeclarationsConflictingWith(
return nullptr;
}
void DeclarationScope::AllocateVariables(ParseInfo* info, Isolate* isolate,
AnalyzeMode mode) {
void DeclarationScope::AllocateVariables(ParseInfo* info) {
// Module variables must be allocated before variable resolution
// to ensure that UpdateNeedsHoleCheck() can detect import variables.
if (is_module_scope()) AsModuleScope()->AllocateModuleVariables();
ResolveVariablesRecursively(info);
AllocateVariablesRecursively();
MaybeHandle<ScopeInfo> outer_scope;
if (outer_scope_ != nullptr) outer_scope = outer_scope_->scope_info_;
AllocateScopeInfosRecursively(isolate, outer_scope);
if (mode == AnalyzeMode::kDebugger) {
AllocateDebuggerScopeInfos(isolate, outer_scope);
}
// The debugger expects all shared function infos to contain a scope info.
// Since the top-most scope will end up in a shared function info, make sure
// it has one, even if it doesn't need a scope info.
// TODO(jochen|yangguo): Remove this requirement.
if (scope_info_.is_null()) {
scope_info_ = ScopeInfo::Create(isolate, zone(), this, outer_scope);
}
}
bool Scope::AllowsLazyParsingWithoutUnresolvedVariables(
@ -1390,7 +1396,10 @@ int Scope::ContextChainLengthUntilOutermostSloppyEval() const {
for (const Scope* s = this; s != nullptr; s = s->outer_scope()) {
if (!s->NeedsContext()) continue;
length++;
if (s->calls_sloppy_eval()) result = length;
if (s->is_declaration_scope() &&
s->AsDeclarationScope()->calls_sloppy_eval()) {
result = length;
}
}
return result;
@ -1506,7 +1515,7 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
}
void Scope::SavePreParsedScopeData() {
DCHECK(FLAG_experimental_preparser_scope_analysis);
DCHECK(FLAG_preparser_scope_analysis);
if (ProducedPreParsedScopeData::ScopeIsSkippableFunctionScope(this)) {
AsDeclarationScope()->SavePreParsedScopeDataForDeclarationScope();
}
@ -1518,7 +1527,7 @@ void Scope::SavePreParsedScopeData() {
void DeclarationScope::SavePreParsedScopeDataForDeclarationScope() {
if (produced_preparsed_scope_data_ != nullptr) {
DCHECK(FLAG_experimental_preparser_scope_analysis);
DCHECK(FLAG_preparser_scope_analysis);
produced_preparsed_scope_data_->SaveScopeAllocationData(this);
}
}
@ -1527,8 +1536,7 @@ void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
DCHECK(!force_eager_compilation_);
VariableProxy* unresolved = nullptr;
if (!outer_scope_->is_script_scope() ||
FLAG_experimental_preparser_scope_analysis) {
if (!outer_scope_->is_script_scope() || FLAG_preparser_scope_analysis) {
// Try to resolve unresolved variables for this Scope and migrate those
// which cannot be resolved inside. It doesn't make sense to try to resolve
// them in the outer Scopes here, because they are incomplete.
@ -1551,7 +1559,7 @@ void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
function_ = ast_node_factory->CopyVariable(function_);
}
if (FLAG_experimental_preparser_scope_analysis) {
if (FLAG_preparser_scope_analysis) {
SavePreParsedScopeData();
}
}
@ -1722,8 +1730,9 @@ void Scope::Print(int n) {
Indent(n1, "// strict mode scope\n");
}
if (IsAsmModule()) Indent(n1, "// scope is an asm module\n");
if (IsAsmFunction()) Indent(n1, "// scope is an asm function\n");
if (scope_calls_eval_) Indent(n1, "// scope calls 'eval'\n");
if (is_declaration_scope() && AsDeclarationScope()->calls_sloppy_eval()) {
Indent(n1, "// scope calls sloppy 'eval'\n");
}
if (is_declaration_scope() && AsDeclarationScope()->uses_super_property()) {
Indent(n1, "// scope uses 'super' property\n");
}
@ -1784,8 +1793,8 @@ void Scope::Print(int n) {
void Scope::CheckScopePositions() {
// Visible leaf scopes must have real positions.
if (!is_hidden() && inner_scope_ == nullptr) {
CHECK_NE(kNoSourcePosition, start_position());
CHECK_NE(kNoSourcePosition, end_position());
DCHECK_NE(kNoSourcePosition, start_position());
DCHECK_NE(kNoSourcePosition, end_position());
}
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
scope->CheckScopePositions();
@ -1801,9 +1810,6 @@ void Scope::CheckZones() {
DCHECK_NULL(scope->inner_scope_);
continue;
}
if (!scope->replaced_from_parse_task()) {
CHECK_EQ(scope->zone(), zone());
}
scope->CheckZones();
}
}
@ -1884,7 +1890,7 @@ Variable* Scope::LookupRecursive(VariableProxy* proxy, Scope* outer_scope_end) {
return NonLocal(proxy->raw_name(), DYNAMIC);
}
if (calls_sloppy_eval() && is_declaration_scope()) {
if (is_declaration_scope() && AsDeclarationScope()->calls_sloppy_eval()) {
// A variable binding may have been found in an outer scope, but the current
// scope makes a sloppy 'eval' call, so the found variable may not be the
// correct one (the 'eval' may introduce a binding with the same name). In
@ -1981,10 +1987,10 @@ void UpdateNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) {
void Scope::ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var) {
#ifdef DEBUG
if (info->script_is_native()) {
if (info->is_native()) {
// To avoid polluting the global object in native scripts
// - Variables must not be allocated to the global scope.
CHECK_NOT_NULL(outer_scope());
DCHECK_NOT_NULL(outer_scope());
// - Variables must be bound locally or unallocated.
if (var->IsGlobalObjectProperty()) {
// The following variable name may be minified. If so, disable
@ -1994,10 +2000,10 @@ void Scope::ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var) {
name->ToCString().get());
}
VariableLocation location = var->location();
CHECK(location == VariableLocation::LOCAL ||
location == VariableLocation::CONTEXT ||
location == VariableLocation::PARAMETER ||
location == VariableLocation::UNALLOCATED);
DCHECK(location == VariableLocation::LOCAL ||
location == VariableLocation::CONTEXT ||
location == VariableLocation::PARAMETER ||
location == VariableLocation::UNALLOCATED);
}
#endif
@ -2268,8 +2274,7 @@ void ModuleScope::AllocateModuleVariables() {
void Scope::AllocateVariablesRecursively() {
DCHECK(!already_resolved_);
DCHECK_IMPLIES(!FLAG_experimental_preparser_scope_analysis,
num_stack_slots_ == 0);
DCHECK_IMPLIES(!FLAG_preparser_scope_analysis, num_stack_slots_ == 0);
// Don't allocate variables of preparsed scopes.
if (is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()) {
@ -2300,8 +2305,9 @@ void Scope::AllocateVariablesRecursively() {
// Likewise for modules and function scopes representing asm.js modules.
bool must_have_context =
is_with_scope() || is_module_scope() || IsAsmModule() ||
(is_function_scope() && calls_sloppy_eval()) ||
(is_block_scope() && is_declaration_scope() && calls_sloppy_eval());
(is_function_scope() && AsDeclarationScope()->calls_sloppy_eval()) ||
(is_block_scope() && is_declaration_scope() &&
AsDeclarationScope()->calls_sloppy_eval());
// If we didn't allocate any locals in the local context, then we only
// need the minimal number of slots if we must have a context.
@ -2346,6 +2352,38 @@ void Scope::AllocateDebuggerScopeInfos(Isolate* isolate,
}
}
// static
void DeclarationScope::AllocateScopeInfos(ParseInfo* info, Isolate* isolate,
AnalyzeMode mode) {
DeclarationScope* scope = info->literal()->scope();
if (!scope->scope_info_.is_null()) return; // Allocated by outer function.
MaybeHandle<ScopeInfo> outer_scope;
if (scope->outer_scope_ != nullptr) {
outer_scope = scope->outer_scope_->scope_info_;
}
scope->AllocateScopeInfosRecursively(isolate, outer_scope);
if (mode == AnalyzeMode::kDebugger) {
scope->AllocateDebuggerScopeInfos(isolate, outer_scope);
}
// The debugger expects all shared function infos to contain a scope info.
// Since the top-most scope will end up in a shared function info, make sure
// it has one, even if it doesn't need a scope info.
// TODO(jochen|yangguo): Remove this requirement.
if (scope->scope_info_.is_null()) {
scope->scope_info_ =
ScopeInfo::Create(isolate, scope->zone(), scope, outer_scope);
}
// Ensuring that the outer script scope has a scope info avoids having
// special case for native contexts vs other contexts.
if (info->script_scope() && info->script_scope()->scope_info_.is_null()) {
info->script_scope()->scope_info_ = handle(ScopeInfo::Empty(isolate));
}
}
int Scope::StackLocalCount() const {
Variable* function =
is_function_scope() ? AsDeclarationScope()->function_var() : nullptr;

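ContainsAsmModule replaces the per-scope asm_function_ flag with a recursive query, and its skip condition is the subtle part: lazily compiled inner functions are not descended into, since they re-run scope analysis when they actually compile. A stand-alone sketch of the walk, with a stand-in scope type holding just the fields the traversal needs:

struct ScopeNode {
  bool is_asm_module = false;
  bool is_function_scope = false;
  bool should_eager_compile = true;
  ScopeNode* inner = nullptr;    // first inner scope
  ScopeNode* sibling = nullptr;  // next sibling in the parent
};

bool ContainsAsmModuleSketch(const ScopeNode* s) {
  if (s->is_asm_module) return true;
  for (const ScopeNode* c = s->inner; c != nullptr; c = c->sibling) {
    // Skip inner functions that will not be eagerly compiled.
    if (!c->is_function_scope || c->should_eager_compile) {
      if (ContainsAsmModuleSketch(c)) return true;
    }
  }
  return false;
}
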
40
deps/v8/src/ast/scopes.h

@ -264,7 +264,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// eval call.
void RecordEvalCall() {
scope_calls_eval_ = true;
RecordInnerScopeEvalCall();
}
void RecordInnerScopeEvalCall() {
@ -363,14 +362,11 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
bool is_with_scope() const { return scope_type_ == WITH_SCOPE; }
bool is_declaration_scope() const { return is_declaration_scope_; }
// Information about which scopes calls eval.
bool calls_eval() const { return scope_calls_eval_; }
bool calls_sloppy_eval() const {
return scope_calls_eval_ && is_sloppy(language_mode());
}
bool inner_scope_calls_eval() const { return inner_scope_calls_eval_; }
bool IsAsmModule() const;
bool IsAsmFunction() const;
// Returns true if this scope or any inner scopes that might be eagerly
// compiled are asm modules.
bool ContainsAsmModule() const;
// Does this scope have the potential to execute declarations non-linearly?
bool is_nonlinear() const { return scope_nonlinear_; }
@ -467,11 +463,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// Check that all Scopes in the scope tree use the same Zone.
void CheckZones();
bool replaced_from_parse_task() const { return replaced_from_parse_task_; }
void set_replaced_from_parse_task(bool replaced_from_parse_task) {
replaced_from_parse_task_ = replaced_from_parse_task;
}
#endif
// Retrieve `IsSimpleParameterList` of current or outer function.
@ -556,10 +547,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// True if this scope may contain objects from a temp zone that needs to be
// fixed up.
bool needs_migration_;
// True if scope comes from other zone - as a result of being created in a
// parse tasks.
bool replaced_from_parse_task_ = false;
#endif
// Source positions.
@ -693,6 +680,10 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
IsClassConstructor(function_kind())));
}
bool calls_sloppy_eval() const {
return scope_calls_eval_ && is_sloppy(language_mode());
}
bool was_lazily_parsed() const { return was_lazily_parsed_; }
#ifdef DEBUG
@ -713,8 +704,6 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
bool asm_module() const { return asm_module_; }
void set_asm_module();
bool asm_function() const { return asm_function_; }
void set_asm_function() { asm_function_ = true; }
void DeclareThis(AstValueFactory* ast_value_factory);
void DeclareArguments(AstValueFactory* ast_value_factory);
@ -858,10 +847,14 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
return sloppy_block_function_map_;
}
// Replaces the outer scope with the outer_scope_info in |info| if there is
// one.
void AttachOuterScopeInfo(ParseInfo* info, Isolate* isolate);
// Compute top scope and allocate variables. For lazy compilation the top
// scope only contains the single lazily compiled function, so this
// doesn't re-allocate variables repeatedly.
static void Analyze(ParseInfo* info, Isolate* isolate, AnalyzeMode mode);
static void Analyze(ParseInfo* info);
// To be called during parsing. Do just enough scope analysis that we can
// discard the Scope contents for lazily compiled functions. In particular,
@ -870,6 +863,11 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// and recreates them with the correct Zone with ast_node_factory.
void AnalyzePartially(AstNodeFactory* ast_node_factory);
// Allocate ScopeInfos for top scope and any inner scopes that need them.
// Does nothing if ScopeInfo is already allocated.
static void AllocateScopeInfos(ParseInfo* info, Isolate* isolate,
AnalyzeMode mode);
Handle<StringSet> CollectNonLocals(ParseInfo* info,
Handle<StringSet> non_locals);
@ -927,7 +925,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// In the case of code compiled and run using 'eval', the context
// parameter is the context in which eval was called. In all other
// cases the context parameter is an empty handle.
void AllocateVariables(ParseInfo* info, Isolate* isolate, AnalyzeMode mode);
void AllocateVariables(ParseInfo* info);
void SetDefaults();
@ -937,8 +935,6 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
bool has_simple_parameters_ : 1;
// This scope contains an "use asm" annotation.
bool asm_module_ : 1;
// This scope's outer context is an asm module.
bool asm_function_ : 1;
bool force_eager_compilation_ : 1;
// This function scope has a rest parameter.
bool has_rest_ : 1;

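Moving calls_sloppy_eval() down to DeclarationScope forces every generic caller into the check-then-downcast shape seen throughout the scopes.cc hunks above. A sketch of that shape with stand-in types (the unchecked static_cast stands in for V8's AsDeclarationScope, which the caller guards with is_declaration_scope()):

struct DeclScopeSketch;

struct ScopeBase {
  bool is_declaration_scope = false;
  DeclScopeSketch* AsDeclarationScope();  // only valid after the check
};

struct DeclScopeSketch : ScopeBase {
  bool scope_calls_eval = false;
  bool sloppy = true;
  bool calls_sloppy_eval() const { return scope_calls_eval && sloppy; }
};

DeclScopeSketch* ScopeBase::AsDeclarationScope() {
  return static_cast<DeclScopeSketch*>(this);
}

bool CallsSloppyEvalSketch(ScopeBase* s) {
  return s->is_declaration_scope &&
         s->AsDeclarationScope()->calls_sloppy_eval();
}
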
25
deps/v8/src/background-parsing-task.cc

@ -6,6 +6,8 @@
#include "src/objects-inl.h"
#include "src/parsing/parser.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
@ -24,24 +26,33 @@ BackgroundParsingTask::BackgroundParsingTask(
// on the foreground thread.
DCHECK(options == ScriptCompiler::kProduceParserCache ||
options == ScriptCompiler::kProduceCodeCache ||
options == ScriptCompiler::kProduceFullCodeCache ||
options == ScriptCompiler::kNoCompileOptions);
VMState<PARSER> state(isolate);
// Prepare the data for the internalization phase and compilation phase, which
// will happen in the main thread after parsing.
ParseInfo* info = new ParseInfo(isolate->allocator());
info->InitFromIsolate(isolate);
if (V8_UNLIKELY(FLAG_runtime_stats)) {
info->set_runtime_call_stats(new (info->zone()) RuntimeCallStats());
}
info->set_toplevel();
source->info.reset(info);
info->set_source_stream(source->source_stream.get());
info->set_source_stream_encoding(source->encoding);
std::unique_ptr<Utf16CharacterStream> stream(
ScannerStream::For(source->source_stream.get(), source->encoding,
info->runtime_call_stats()));
info->set_character_stream(std::move(stream));
info->set_unicode_cache(&source_->unicode_cache);
info->set_compile_options(options);
info->set_allow_lazy_parsing();
if (V8_UNLIKELY(FLAG_runtime_stats)) {
info->set_runtime_call_stats(new (info->zone()) RuntimeCallStats());
if (V8_UNLIKELY(info->block_coverage_enabled())) {
info->AllocateSourceRangeMap();
}
info->set_cached_data(&script_data_);
source->info.reset(info);
source_->info->set_cached_data(&script_data_);
// Parser needs to stay alive for finalizing the parsing on the main
// thread.
source_->parser.reset(new Parser(source_->info.get()));
@ -49,7 +60,6 @@ BackgroundParsingTask::BackgroundParsingTask(
MaybeHandle<ScopeInfo>());
}
void BackgroundParsingTask::Run() {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
@ -71,5 +81,6 @@ void BackgroundParsingTask::Run() {
script_data_ = nullptr;
}
}
} // namespace internal
} // namespace v8
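
Run() still executes under DisallowHeapAllocation and DisallowHandleAllocation guards. The idiom behind both is a scope object that flips a thread-local counter for its lifetime, with allocation paths asserting on it; the sketch below illustrates the idiom under that assumption and is not V8's actual implementation:

#include <cassert>

class DisallowAllocSketch {
 public:
  DisallowAllocSketch() { ++depth_; }
  ~DisallowAllocSketch() { --depth_; }
  static bool allowed() { return depth_ == 0; }

 private:
  static thread_local int depth_;
};
thread_local int DisallowAllocSketch::depth_ = 0;

// An allocation path asserts that no guard is live on this thread.
void AllocateSomethingSketch() { assert(DisallowAllocSketch::allowed()); }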

105
deps/v8/src/bailout-reason.h

@ -17,131 +17,63 @@ namespace internal {
V(kAllocatingNonEmptyPackedArray, "Allocating non-empty packed array") \
V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned") \
V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
V(kArgumentsObjectValueInATestContext, \
"Arguments object value in a test context") \
V(kArrayIndexConstantValueTooBig, "Array index constant value too big") \
V(kAssignmentToLetVariableBeforeInitialization, \
"Assignment to let variable before initialization") \
V(kAssignmentToLOOKUPVariable, "Assignment to LOOKUP variable") \
V(kAssignmentToParameterFunctionUsesArgumentsObject, \
"Assignment to parameter, function uses arguments object") \
V(kAssignmentToParameterInArgumentsObject, \
"Assignment to parameter in arguments object") \
V(kBadValueContextForArgumentsObjectValue, \
"Bad value context for arguments object value") \
V(kBadValueContextForArgumentsValue, \
"Bad value context for arguments value") \
V(kBailedOutDueToDependencyChange, "Bailed out due to dependency change") \
V(kBailoutWasNotPrepared, "Bailout was not prepared") \
V(kBothRegistersWereSmisInSelectNonSmi, \
"Both registers were smis in SelectNonSmi") \
V(kClassConstructorFunction, "Class constructor function") \
V(kClassLiteral, "Class literal") \
V(kCodeGenerationFailed, "Code generation failed") \
V(kCodeObjectNotProperlyPatched, "Code object not properly patched") \
V(kCompoundAssignmentToLookupSlot, "Compound assignment to lookup slot") \
V(kComputedPropertyName, "Computed property name") \
V(kContextAllocatedArguments, "Context-allocated arguments") \
V(kCopyBuffersOverlap, "Copy buffers overlap") \
V(kCouldNotGenerateZero, "Could not generate +0.0") \
V(kCouldNotGenerateNegativeZero, "Could not generate -0.0") \
V(kDebuggerStatement, "DebuggerStatement") \
V(kDeclarationInCatchContext, "Declaration in catch context") \
V(kDeclarationInWithContext, "Declaration in with context") \
V(kDefaultNaNModeNotSet, "Default NaN mode not set") \
V(kDeleteWithGlobalVariable, "Delete with global variable") \
V(kDeleteWithNonGlobalVariable, "Delete with non-global variable") \
V(kDestinationOfCopyNotAligned, "Destination of copy not aligned") \
V(kDontDeleteCellsCannotContainTheHole, \
"DontDelete cells can't contain the hole") \
V(kDoExpressionUnmodelable, \
"Encountered a do-expression with unmodelable control statements") \
V(kDoPushArgumentNotImplementedForDoubleType, \
"DoPushArgument not implemented for double type") \
V(kDynamicImport, "Dynamic module import") \
V(kEliminatedBoundsCheckFailed, "Eliminated bounds check failed") \
V(kEmitLoadRegisterUnsupportedDoubleImmediate, \
"EmitLoadRegister: Unsupported double immediate") \
V(kCyclicObjectStateDetectedInEscapeAnalysis, \
"Cyclic object state detected by escape analysis") \
V(kEval, "eval") \
V(kExpectedAllocationSite, "Expected allocation site") \
V(kExpectedBooleanValue, "Expected boolean value") \
V(kExpectedFeedbackVector, "Expected feedback vector") \
V(kExpectedFixedDoubleArrayMap, \
"Expected a fixed double array map in fast shallow clone array literal") \
V(kExpectedFunctionObject, "Expected function object in register") \
V(kExpectedHeapNumber, "Expected HeapNumber") \
V(kExpectedJSReceiver, "Expected object to have receiver type") \
V(kExpectedNativeContext, "Expected native context") \
V(kExpectedNonIdenticalObjects, "Expected non-identical objects") \
V(kExpectedNonNullContext, "Expected non-null context") \
V(kExpectedOptimizationSentinel, \
"Expected optimized code cell or optimization sentinel") \
V(kExpectedPositiveZero, "Expected +0.0") \
V(kExpectedNewSpaceObject, "Expected new space object") \
V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \
V(kExternalStringExpectedButNotFound, \
"External string expected, but not found") \
V(kForInStatementWithNonLocalEachVariable, \
"ForInStatement with non-local each variable") \
V(kForOfStatement, "ForOfStatement") \
V(kFunctionBeingDebugged, "Function is being debugged") \
V(kFunctionCallsEval, "Function calls eval") \
V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \
"The function_data field should be a BytecodeArray on interpreter entry") \
V(kGeneratedCodeIsTooLarge, "Generated code is too large") \
V(kGenerator, "Generator") \
V(kGetIterator, "GetIterator") \
V(kGlobalFunctionsMustHaveInitialMap, \
"Global functions must have initial map") \
V(kGraphBuildingFailed, "Optimized graph construction failed") \
V(kHeapNumberMapRegisterClobbered, "HeapNumberMap register clobbered") \
V(kHydrogenFilter, "Optimization disabled by filter") \
V(kIndexIsNegative, "Index is negative") \
V(kIndexIsTooLarge, "Index is too large") \
V(kInliningBailedOut, "Inlining bailed out") \
V(kInputGPRIsExpectedToHaveUpper32Cleared, \
"Input GPR is expected to have upper32 cleared") \
V(kInputStringTooLong, "Input string too long") \
V(kInteger32ToSmiFieldWritingToNonSmiLocation, \
"Integer32ToSmiField writing to non-smi location") \
V(kInvalidBytecode, "Invalid bytecode") \
V(kInvalidElementsKindForInternalArrayOrInternalPackedArray, \
"Invalid ElementsKind for InternalArray or InternalPackedArray") \
V(kInvalidFrameForFastNewRestArgumentsStub, \
"Invalid frame for FastNewRestArgumentsStub") \
V(kInvalidFrameForFastNewSloppyArgumentsStub, \
"Invalid frame for FastNewSloppyArgumentsStub") \
V(kInvalidFrameForFastNewStrictArgumentsStub, \
"Invalid frame for FastNewStrictArgumentsStub") \
V(kInvalidFullCodegenState, "invalid full-codegen state") \
V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
V(kInvalidJumpTableIndex, "Invalid jump table index") \
V(kInvalidLeftHandSideInAssignment, "Invalid left-hand side in assignment") \
V(kInvalidLhsInCompoundAssignment, "Invalid lhs in compound assignment") \
V(kInvalidLhsInCountOperation, "Invalid lhs in count operation") \
V(kInvalidMinLength, "Invalid min_length") \
V(kInvalidRegisterFileInGenerator, "invalid register file in generator") \
V(kLiveEdit, "LiveEdit") \
V(kLookupVariableInCountOperation, "Lookup variable in count operation") \
V(kMapBecameDeprecated, "Map became deprecated") \
V(kMapBecameUnstable, "Map became unstable") \
V(kMissingBytecodeArray, "Missing bytecode array from function") \
V(kNativeFunctionLiteral, "Native function literal") \
V(kNeedSmiLiteral, "Need a Smi literal here") \
V(kNoCasesLeft, "No cases left") \
V(kNonInitializerAssignmentToConst, "Non-initializer assignment to const") \
V(kNonSmiIndex, "Non-smi index") \
V(kNonSmiKeyInArrayLiteral, "Non-smi key in array literal") \
V(kNonSmiValue, "Non-smi value") \
V(kNonObject, "Non-object value") \
V(kNotEnoughVirtualRegistersForValues, \
"Not enough virtual registers for values") \
V(kNotEnoughSpillSlotsForOsr, "Not enough spill slots for OSR") \
V(kNotEnoughVirtualRegistersRegalloc, \
"Not enough virtual registers (regalloc)") \
V(kObjectLiteralWithComplexProperty, "Object literal with complex property") \
V(kOffsetOutOfRange, "Offset out of range") \
V(kOperandIsASmiAndNotABoundFunction, \
"Operand is a smi and not a bound function") \
@@ -149,40 +81,26 @@ namespace internal {
V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
V(kOperandIsASmiAndNotAGeneratorObject, \
"Operand is a smi and not a generator object") \
V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name") \
V(kOperandIsASmiAndNotAReceiver, "Operand is a smi and not a receiver") \
V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string") \
V(kOperandIsASmi, "Operand is a smi") \
V(kOperandIsNotABoundFunction, "Operand is not a bound function") \
V(kOperandIsNotAFixedArray, "Operand is not a fixed array") \
V(kOperandIsNotAFunction, "Operand is not a function") \
V(kOperandIsNotAGeneratorObject, "Operand is not a generator object") \
V(kOperandIsNotAReceiver, "Operand is not a receiver") \
V(kOperandIsNotASmi, "Operand is not a smi") \
V(kOperandIsNotAString, "Operand is not a string") \
V(kOperandIsNotSmi, "Operand is not smi") \
V(kObjectTagged, "The object is tagged") \
V(kObjectNotTagged, "The object is not tagged") \
V(kOptimizationDisabled, "Optimization disabled") \
V(kOptimizationDisabledForTest, "Optimization disabled for test") \
V(kDeoptimizedTooManyTimes, "Deoptimized too many times") \
V(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister, \
"Out of virtual registers while trying to allocate temp register") \
V(kParseScopeError, "Parse/scope error") \
V(kPossibleDirectCallToEval, "Possible direct call to eval") \
V(kReceivedInvalidReturnAddress, "Received invalid return address") \
V(kReferenceToAVariableWhichRequiresDynamicLookup, \
"Reference to a variable which requires dynamic lookup") \
V(kReferenceToGlobalLexicalVariable, "Reference to global lexical variable") \
V(kReferenceToModuleVariable, "Reference to module-allocated variable") \
V(kReferenceToUninitializedVariable, "Reference to uninitialized variable") \
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
V(kRegisterWasClobbered, "Register was clobbered") \
V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
V(kRestParameter, "Rest parameters") \
V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
V(kSloppyFunctionExpectsJSReceiverReceiver, \
"Sloppy function expects JSReceiver as receiver.") \
V(kSmiAdditionOverflow, "Smi addition overflow") \
V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
V(kSpreadCall, "Call with spread argument") \
@@ -194,14 +112,7 @@ namespace internal {
"The current stack pointer is below csp") \
V(kTheStackWasCorruptedByMacroAssemblerCall, \
"The stack was corrupted by MacroAssembler::Call()") \
V(kTooManyParametersLocals, "Too many parameters/locals") \
V(kTooManyParameters, "Too many parameters") \
V(kTooManySpillSlotsNeededForOSR, "Too many spill slots needed for OSR") \
V(kToOperand32UnsupportedImmediate, "ToOperand32 unsupported immediate.") \
V(kToOperandIsDoubleRegisterUnimplemented, \
"ToOperand IsDoubleRegister unimplemented") \
V(kToOperandUnsupportedDoubleImmediate, \
"ToOperand Unsupported double immediate") \
V(kTryCatchStatement, "TryCatchStatement") \
V(kTryFinallyStatement, "TryFinallyStatement") \
V(kUnalignedAllocationInNewSpace, "Unaligned allocation in new space") \
@@ -216,8 +127,6 @@ namespace internal {
"Unexpected fall-through from string comparison") \
V(kUnexpectedFallthroughToCharCodeAtSlowCase, \
"Unexpected fallthrough to CharCodeAt slow case") \
V(kUnexpectedFPUStackDepthAfterInstruction, \
"Unexpected FPU stack depth after instruction") \
V(kUnexpectedInitialMapForArrayFunction1, \
"Unexpected initial map for Array function (1)") \
V(kUnexpectedInitialMapForArrayFunction2, \
@@ -232,28 +141,15 @@ namespace internal {
V(kUnexpectedFunctionIDForInvokeIntrinsic, \
"Unexpected runtime function id for the InvokeIntrinsic bytecode") \
V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
V(kUnexpectedSmi, "Unexpected smi value") \
V(kUnexpectedStackDepth, "Unexpected operand stack depth in full-codegen") \
V(kUnexpectedStackPointer, "The stack pointer is not the expected value") \
V(kUnexpectedStringType, "Unexpected string type") \
V(kUnexpectedTestTypeofLiteralFlag, \
"Unexpected literal flag for TestTypeof bytecode") \
V(kUnexpectedValue, "Unexpected value") \
V(kUnsupportedDoubleImmediate, "Unsupported double immediate") \
V(kUnsupportedLetCompoundAssignment, "Unsupported let compound assignment") \
V(kUnsupportedLookupSlotInDeclaration, \
"Unsupported lookup slot in declaration") \
V(kUnsupportedModuleOperation, "Unsupported module operation") \
V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare") \
V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments") \
V(kUnsupportedPhiUseOfConstVariable, \
"Unsupported phi use of const or let variable") \
V(kUnexpectedReturnFromFrameDropper, \
"Unexpectedly returned from dropping frames") \
V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw") \
V(kUnsupportedSwitchStatement, "Unsupported switch statement") \
V(kUnsupportedTaggedImmediate, "Unsupported tagged immediate") \
V(kUnstableConstantTypeHeapObject, "Unstable constant-type heap object") \
V(kVariableResolvedToWithContext, "Variable resolved to with context") \
V(kWithStatement, "WithStatement") \
V(kWrongFunctionContext, "Wrong context passed to function") \
@@ -272,7 +168,6 @@ enum BailoutReason {
};
#undef ERROR_MESSAGES_CONSTANTS
const char* GetBailoutReason(BailoutReason reason);
} // namespace internal
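The V(...) entries above follow the classic X-macro pattern: the same list is expanded once to declare the enum constants (hence the #undef ERROR_MESSAGES_CONSTANTS above) and once to build the message table behind GetBailoutReason(). A minimal self-contained sketch of that expansion, using a two-entry list; the adapter macro names mirror the ones visible here, but the sketch is illustrative rather than the exact V8 definition:

// Sketch of the X-macro expansion; the two-entry list is illustrative.
#define ERROR_MESSAGES_LIST(V)        \
  V(kNoReason, "no reason")           \
  V(kCodeGenerationFailed, "Code generation failed")

#define ERROR_MESSAGES_CONSTANTS(C, T) C,
enum BailoutReason {
  ERROR_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS) kLastErrorMessage
};
#undef ERROR_MESSAGES_CONSTANTS

#define ERROR_MESSAGES_TEXTS(C, T) T,
const char* GetBailoutReason(BailoutReason reason) {
  static const char* error_messages_[] = {
      ERROR_MESSAGES_LIST(ERROR_MESSAGES_TEXTS)};
  return error_messages_[reason];
}
#undef ERROR_MESSAGES_TEXTS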

8
deps/v8/src/base.isolate

@@ -24,14 +24,6 @@
],
},
}],
['OS=="linux" and (asan==1 or cfi_vptr==1 or msan==1 or tsan==1)', {
'variables': {
'files': [
# For llvm-symbolizer.
'../third_party/llvm-build/Release+Asserts/lib/libstdc++.so.6',
],
},
}],
['asan==1 or cfi_vptr==1 or msan==1 or tsan==1', {
'variables': {
'files': [

1
deps/v8/src/base/DEPS

@@ -1,7 +1,6 @@
include_rules = [
"-include",
"+include/v8config.h",
"+include/v8stdint.h",
"-src",
"+src/base",
]

265
deps/v8/src/base/atomic-utils.h

@@ -54,61 +54,6 @@ class AtomicNumber {
base::AtomicWord value_;
};
// This type uses no-barrier accessors to change the atomic word. Be careful
// with data races.
template <typename T>
class NoBarrierAtomicValue {
public:
NoBarrierAtomicValue() : value_(0) {}
explicit NoBarrierAtomicValue(T initial)
: value_(cast_helper<T>::to_storage_type(initial)) {}
static NoBarrierAtomicValue* FromAddress(void* address) {
return reinterpret_cast<base::NoBarrierAtomicValue<T>*>(address);
}
V8_INLINE bool TrySetValue(T old_value, T new_value) {
return base::Relaxed_CompareAndSwap(
&value_, cast_helper<T>::to_storage_type(old_value),
cast_helper<T>::to_storage_type(new_value)) ==
cast_helper<T>::to_storage_type(old_value);
}
V8_INLINE T Value() const {
return cast_helper<T>::to_return_type(base::Relaxed_Load(&value_));
}
V8_INLINE void SetValue(T new_value) {
base::Relaxed_Store(&value_, cast_helper<T>::to_storage_type(new_value));
}
private:
STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
template <typename S>
struct cast_helper {
static base::AtomicWord to_storage_type(S value) {
return static_cast<base::AtomicWord>(value);
}
static S to_return_type(base::AtomicWord value) {
return static_cast<S>(value);
}
};
template <typename S>
struct cast_helper<S*> {
static base::AtomicWord to_storage_type(S* value) {
return reinterpret_cast<base::AtomicWord>(value);
}
static S* to_return_type(base::AtomicWord value) {
return reinterpret_cast<S*>(value);
}
};
base::AtomicWord value_;
};
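For context on what this removal drops: NoBarrierAtomicValue wrapped a single word with relaxed (unordered) atomic accessors. A hedged usage sketch of the three operations it offered, with illustrative variable names:

// Sketch: a relaxed atomic slot; atomicity only, no ordering guarantees.
NoBarrierAtomicValue<int> slot(0);
int observed = slot.Value();                    // Relaxed_Load
slot.SetValue(observed + 1);                    // Relaxed_Store; the
                                                // load+store pair is not
                                                // an atomic increment
bool won = slot.TrySetValue(observed + 1, 42);  // relaxed compare-and-swap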
// Flag using T atomically. Also accepts void* as T.
template <typename T>
class AtomicValue {
@@ -175,107 +120,104 @@ class AtomicValue {
base::AtomicWord value_;
};
// See utils.h for EnumSet. Storage is always base::AtomicWord.
// Requirements on E:
// - No explicit values.
// - E::kLastValue defined to be the last actually used value.
//
// Example:
// enum E { kA, kB, kC, kLastValue = kC };
template <class E>
class AtomicEnumSet {
class AsAtomic32 {
public:
explicit AtomicEnumSet(base::AtomicWord bits = 0) : bits_(bits) {}
bool IsEmpty() const { return ToIntegral() == 0; }
bool Contains(E element) const { return (ToIntegral() & Mask(element)) != 0; }
bool ContainsAnyOf(const AtomicEnumSet& set) const {
return (ToIntegral() & set.ToIntegral()) != 0;
template <typename T>
static T Acquire_Load(T* addr) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
return to_return_type<T>(base::Acquire_Load(to_storage_addr(addr)));
}
void RemoveAll() { base::Release_Store(&bits_, 0); }
bool operator==(const AtomicEnumSet& set) const {
return ToIntegral() == set.ToIntegral();
template <typename T>
static T Relaxed_Load(T* addr) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
return to_return_type<T>(base::Relaxed_Load(to_storage_addr(addr)));
}
bool operator!=(const AtomicEnumSet& set) const {
return ToIntegral() != set.ToIntegral();
template <typename T>
static void Release_Store(T* addr,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
base::Release_Store(to_storage_addr(addr), to_storage_type(new_value));
}
AtomicEnumSet<E> operator|(const AtomicEnumSet& set) const {
return AtomicEnumSet<E>(ToIntegral() | set.ToIntegral());
template <typename T>
static void Relaxed_Store(T* addr,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
base::Relaxed_Store(to_storage_addr(addr), to_storage_type(new_value));
}
// The following operations modify the underlying storage.
#define ATOMIC_SET_WRITE(OP, NEW_VAL) \
do { \
base::AtomicWord old; \
do { \
old = base::Acquire_Load(&bits_); \
} while (base::Release_CompareAndSwap(&bits_, old, old OP NEW_VAL) != \
old); \
} while (false)
void Add(E element) { ATOMIC_SET_WRITE(|, Mask(element)); }
void Add(const AtomicEnumSet& set) { ATOMIC_SET_WRITE(|, set.ToIntegral()); }
void Remove(E element) { ATOMIC_SET_WRITE(&, ~Mask(element)); }
void Remove(const AtomicEnumSet& set) {
ATOMIC_SET_WRITE(&, ~set.ToIntegral());
template <typename T>
static T Release_CompareAndSwap(
T* addr, typename std::remove_reference<T>::type old_value,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
return to_return_type<T>(base::Release_CompareAndSwap(
to_storage_addr(addr), to_storage_type(old_value),
to_storage_type(new_value)));
}
void Intersect(const AtomicEnumSet& set) {
ATOMIC_SET_WRITE(&, set.ToIntegral());
// Atomically sets bits selected by the mask to the given value.
// Returns false if the bits are already set as needed.
template <typename T>
static bool SetBits(T* addr, T bits, T mask) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
DCHECK_EQ(bits & ~mask, static_cast<T>(0));
T old_value;
T new_value;
do {
old_value = Relaxed_Load(addr);
if ((old_value & mask) == bits) return false;
new_value = (old_value & ~mask) | bits;
} while (Release_CompareAndSwap(addr, old_value, new_value) != old_value);
return true;
}
#undef ATOMIC_SET_WRITE
private:
// Check whether there's enough storage to hold E.
STATIC_ASSERT(E::kLastValue < (sizeof(base::AtomicWord) * CHAR_BIT));
V8_INLINE base::AtomicWord ToIntegral() const {
return base::Acquire_Load(&bits_);
template <typename T>
static base::Atomic32 to_storage_type(T value) {
return static_cast<base::Atomic32>(value);
}
V8_INLINE base::AtomicWord Mask(E element) const {
return static_cast<base::AtomicWord>(1) << element;
template <typename T>
static T to_return_type(base::Atomic32 value) {
return static_cast<T>(value);
}
template <typename T>
static base::Atomic32* to_storage_addr(T* value) {
return reinterpret_cast<base::Atomic32*>(value);
}
template <typename T>
static const base::Atomic32* to_storage_addr(const T* value) {
return reinterpret_cast<const base::Atomic32*>(value);
}
base::AtomicWord bits_;
};
class AsAtomic32 {
class AsAtomicWord {
public:
template <typename T>
static T Acquire_Load(T* addr) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
return to_return_type<T>(base::Acquire_Load(to_storage_addr(addr)));
}
template <typename T>
static T Relaxed_Load(T* addr) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
return to_return_type<T>(base::Relaxed_Load(to_storage_addr(addr)));
}
template <typename T>
static void Release_Store(T* addr,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
base::Release_Store(to_storage_addr(addr), to_storage_type(new_value));
}
template <typename T>
static void Relaxed_Store(T* addr,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
base::Relaxed_Store(to_storage_addr(addr), to_storage_type(new_value));
}
@@ -283,7 +225,7 @@ class AsAtomic32 {
static T Release_CompareAndSwap(
T* addr, typename std::remove_reference<T>::type old_value,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
return to_return_type<T>(base::Release_CompareAndSwap(
to_storage_addr(addr), to_storage_type(old_value),
to_storage_type(new_value)));
@@ -293,7 +235,7 @@ class AsAtomic32 {
// Returns false if the bits are already set as needed.
template <typename T>
static bool SetBits(T* addr, T bits, T mask) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic32));
STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
DCHECK_EQ(bits & ~mask, static_cast<T>(0));
T old_value;
T new_value;
@@ -307,24 +249,81 @@ class AsAtomic32 {
private:
template <typename T>
static base::Atomic32 to_storage_type(T value) {
return static_cast<base::Atomic32>(value);
static base::AtomicWord to_storage_type(T value) {
return static_cast<base::AtomicWord>(value);
}
template <typename T>
static T to_return_type(base::Atomic32 value) {
static T to_return_type(base::AtomicWord value) {
return static_cast<T>(value);
}
template <typename T>
static base::Atomic32* to_storage_addr(T* value) {
return reinterpret_cast<base::Atomic32*>(value);
static base::AtomicWord* to_storage_addr(T* value) {
return reinterpret_cast<base::AtomicWord*>(value);
}
template <typename T>
static const base::Atomic32* to_storage_addr(const T* value) {
return reinterpret_cast<const base::Atomic32*>(value);
static const base::AtomicWord* to_storage_addr(const T* value) {
return reinterpret_cast<const base::AtomicWord*>(value);
}
};
class AsAtomicWord {
class AsAtomic8 {
public:
template <typename T>
static T Acquire_Load(T* addr) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic8));
return to_return_type<T>(base::Acquire_Load(to_storage_addr(addr)));
}
template <typename T>
static T Relaxed_Load(T* addr) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic8));
return to_return_type<T>(base::Relaxed_Load(to_storage_addr(addr)));
}
template <typename T>
static void Release_Store(T* addr,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic8));
base::Release_Store(to_storage_addr(addr), to_storage_type(new_value));
}
template <typename T>
static void Relaxed_Store(T* addr,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic8));
base::Relaxed_Store(to_storage_addr(addr), to_storage_type(new_value));
}
template <typename T>
static T Release_CompareAndSwap(
T* addr, typename std::remove_reference<T>::type old_value,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::Atomic8));
return to_return_type<T>(base::Release_CompareAndSwap(
to_storage_addr(addr), to_storage_type(old_value),
to_storage_type(new_value)));
}
private:
template <typename T>
static base::Atomic8 to_storage_type(T value) {
return static_cast<base::Atomic8>(value);
}
template <typename T>
static T to_return_type(base::Atomic8 value) {
return static_cast<T>(value);
}
template <typename T>
static base::Atomic8* to_storage_addr(T* value) {
return reinterpret_cast<base::Atomic8*>(value);
}
template <typename T>
static const base::Atomic8* to_storage_addr(const T* value) {
return reinterpret_cast<const base::Atomic8*>(value);
}
};
class AsAtomicPointer {
public:
template <typename T>
static T Acquire_Load(T* addr) {
@@ -394,16 +393,16 @@ template <typename T>
class AtomicElement {
public:
AtomicElement(const AtomicElement<T>& other) {
AsAtomicWord::Relaxed_Store(&value_,
AsAtomicWord::Relaxed_Load(&other.value_));
AsAtomicPointer::Relaxed_Store(
&value_, AsAtomicPointer::Relaxed_Load(&other.value_));
}
void operator=(const AtomicElement<T>& other) {
AsAtomicWord::Relaxed_Store(&value_,
AsAtomicWord::Relaxed_Load(&other.value_));
AsAtomicPointer::Relaxed_Store(
&value_, AsAtomicPointer::Relaxed_Load(&other.value_));
}
T value() const { return AsAtomicWord::Relaxed_Load(&value_); }
T value() const { return AsAtomicPointer::Relaxed_Load(&value_); }
bool operator<(const AtomicElement<T>& other) const {
return value() < other.value();
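Taken together, this file's change replaces the per-width wrapper classes with a family of static helpers (AsAtomic8, AsAtomic32, AsAtomicWord, AsAtomicPointer) that operate on raw fields in place. A hedged usage sketch of the new SetBits helper, based on the CAS loop shown above; the field and mask names are illustrative:

// Sketch: atomically set the low two bits of a shared 32-bit field to 0b01,
// using the retry loop from AsAtomic32::SetBits.
uint32_t flags = 0;
const uint32_t kMask = 0x3;  // the bits this caller owns
const uint32_t kBits = 0x1;  // target value inside the mask
if (AsAtomic32::SetBits(&flags, kBits, kMask)) {
  // The bits changed; Release_CompareAndSwap orders prior writes before it.
} else {
  // The field already held kBits under kMask; nothing was written.
}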

21
deps/v8/src/base/atomicops.h

@@ -81,12 +81,13 @@ Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
// These following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
// a store with appropriate memory-ordering instructions. "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Fence" operations have both "Acquire" and "Release"
// semantics. A MemoryFence() has "Fence" semantics, but does no memory access.
// mutexes, and condition-variables. They combine CompareAndSwap(), a load,
// or a store with appropriate memory-ordering instructions. "Acquire"
// operations ensure that no later memory access can be reordered ahead of the
// operation. "Release" operations ensure that no previous memory access can
// be reordered after the operation. "Fence" operations have both "Acquire"
// and "Release" semantics. A SeqCst_MemoryFence() has "Fence" semantics, but
// does no memory access.
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
@@ -94,7 +95,7 @@ Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value);
void MemoryFence();
void SeqCst_MemoryFence();
void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value);
void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);
@@ -127,10 +128,10 @@ Atomic64 Acquire_Load(volatile const Atomic64* ptr);
} // namespace v8
#if defined(V8_OS_WIN)
// TODO(hpayer): The MSVC header includes windows.h, which other files end up
// relying on. Fix this as part of crbug.com/559247.
#include "src/base/atomicops_internals_x86_msvc.h"
#include "src/base/atomicops_internals_std.h"
#else
// TODO(ulan): Switch to std version after performance regression with Wheezy
// sysroot is no longer relevant. Debian Wheezy LTS ends on 31st of May 2018.
#include "src/base/atomicops_internals_portable.h"
#endif
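The acquire/release contract spelled out in the comment above is easiest to see as message passing between two threads. A minimal sketch against the declarations in this header, assuming illustrative names for the payload and flag:

// Producer publishes data, then raises the flag with Release ordering, so
// the payload store cannot be reordered after the flag store.
int payload = 0;
v8::base::Atomic32 ready = 0;

void Producer() {
  payload = 42;
  v8::base::Release_Store(&ready, 1);
}

// Consumer spins with Acquire ordering; once it observes ready == 1, the
// payload write is guaranteed to be visible as well.
void Consumer() {
  while (v8::base::Acquire_Load(&ready) == 0) {
    // busy-wait
  }
  // payload is 42 here.
}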

