
deps: upgrade V8 to 4.6.85.23

PR-URL: https://github.com/nodejs/node/pull/3351
Reviewed-By: indutny - Fedor Indutny <fedor.indutny@gmail.com>
Reviewed-By: bnoordhuis - Ben Noordhuis <info@bnoordhuis.nl>
Branch: v5.x
Author: Michaël Zasso
Committed by: Ali Ijaz Sheikh
Commit: d8011d1683
Changed files (lines changed, then path; listing truncated at 100 entries):

  1.   1  deps/v8/.gitignore
  2.   1  deps/v8/AUTHORS
  3. 191  deps/v8/BUILD.gn
  4. 571  deps/v8/ChangeLog
  5.  14  deps/v8/DEPS
  6.  14  deps/v8/Makefile
  7.   8  deps/v8/OWNERS
  8.  55  deps/v8/PRESUBMIT.py
  9.   2  deps/v8/WATCHLISTS
 10.  51  deps/v8/build/download_gold_plugin.py
 11.   6  deps/v8/build/features.gypi
 12.   1  deps/v8/build/get_landmines.py
 13. 242  deps/v8/build/standalone.gypi
 14.   4  deps/v8/build/toolchain.gypi
 15.  34  deps/v8/include/v8-platform.h
 16.   7  deps/v8/include/v8-util.h
 17.   6  deps/v8/include/v8-version.h
 18. 167  deps/v8/include/v8.h
 19.   1  deps/v8/infra/project-config/README.md
 20.  23  deps/v8/infra/project-config/cr-buildbucket.cfg
 21.  12  deps/v8/samples/process.cc
 22.  24  deps/v8/samples/shell.cc
 23.  12  deps/v8/src/DEPS
 24.  45  deps/v8/src/accessors.cc
 25.   6  deps/v8/src/accessors.h
 26.   2  deps/v8/src/allocation-tracker.cc
 27.  13  deps/v8/src/allocation-tracker.h
 28.   5  deps/v8/src/api-natives.h
 29. 384  deps/v8/src/api.cc
 30.  82  deps/v8/src/api.h
 31.   4  deps/v8/src/arguments.cc
 32.   6  deps/v8/src/arguments.h
 33.  55  deps/v8/src/arm/assembler-arm-inl.h
 34.  69  deps/v8/src/arm/assembler-arm.cc
 35.  25  deps/v8/src/arm/assembler-arm.h
 36. 437  deps/v8/src/arm/builtins-arm.cc
 37. 344  deps/v8/src/arm/code-stubs-arm.cc
 38.   2  deps/v8/src/arm/code-stubs-arm.h
 39.   9  deps/v8/src/arm/codegen-arm.cc
 40.   3  deps/v8/src/arm/codegen-arm.h
 41.   2  deps/v8/src/arm/constants-arm.cc
 42.   6  deps/v8/src/arm/constants-arm.h
 43.   2  deps/v8/src/arm/cpu-arm.cc
 44. 248  deps/v8/src/arm/debug-arm.cc
 45.   4  deps/v8/src/arm/deoptimizer-arm.cc
 46.   2  deps/v8/src/arm/disasm-arm.cc
 47.   2  deps/v8/src/arm/frames-arm.cc
 48.   6  deps/v8/src/arm/frames-arm.h
 49.  41  deps/v8/src/arm/interface-descriptors-arm.cc
 50.  52  deps/v8/src/arm/lithium-arm.cc
 51.  52  deps/v8/src/arm/lithium-arm.h
 52. 138  deps/v8/src/arm/lithium-codegen-arm.cc
 53.   2  deps/v8/src/arm/lithium-gap-resolver-arm.cc
 54.   2  deps/v8/src/arm/lithium-gap-resolver-arm.h
 55.  47  deps/v8/src/arm/macro-assembler-arm.cc
 56.  78  deps/v8/src/arm/macro-assembler-arm.h
 57.  17  deps/v8/src/arm/simulator-arm.cc
 58.  13  deps/v8/src/arm/simulator-arm.h
 59.  63  deps/v8/src/arm64/assembler-arm64-inl.h
 60.  57  deps/v8/src/arm64/assembler-arm64.cc
 61.  39  deps/v8/src/arm64/assembler-arm64.h
 62. 442  deps/v8/src/arm64/builtins-arm64.cc
 63. 287  deps/v8/src/arm64/code-stubs-arm64.cc
 64.   2  deps/v8/src/arm64/codegen-arm64.cc
 65.  16  deps/v8/src/arm64/constants-arm64.h
 66.   3  deps/v8/src/arm64/cpu-arm64.cc
 67. 305  deps/v8/src/arm64/debug-arm64.cc
 68.   3  deps/v8/src/arm64/decoder-arm64-inl.h
 69.   2  deps/v8/src/arm64/decoder-arm64.cc
 70.   1  deps/v8/src/arm64/decoder-arm64.h
 71.   2  deps/v8/src/arm64/delayed-masm-arm64.cc
 72.   5  deps/v8/src/arm64/deoptimizer-arm64.cc
 73.  21  deps/v8/src/arm64/disasm-arm64.cc
 74.   2  deps/v8/src/arm64/disasm-arm64.h
 75.   2  deps/v8/src/arm64/frames-arm64.cc
 76.   6  deps/v8/src/arm64/frames-arm64.h
 77.   2  deps/v8/src/arm64/instructions-arm64.cc
 78.   6  deps/v8/src/arm64/instrument-arm64.cc
 79.  40  deps/v8/src/arm64/interface-descriptors-arm64.cc
 80.  54  deps/v8/src/arm64/lithium-arm64.cc
 81.  42  deps/v8/src/arm64/lithium-arm64.h
 82. 140  deps/v8/src/arm64/lithium-codegen-arm64.cc
 83.   2  deps/v8/src/arm64/lithium-gap-resolver-arm64.cc
 84.   2  deps/v8/src/arm64/lithium-gap-resolver-arm64.h
 85.  17  deps/v8/src/arm64/macro-assembler-arm64-inl.h
 86.  62  deps/v8/src/arm64/macro-assembler-arm64.cc
 87.  33  deps/v8/src/arm64/macro-assembler-arm64.h
 88.  21  deps/v8/src/arm64/simulator-arm64.cc
 89.  13  deps/v8/src/arm64/simulator-arm64.h
 90.   1  deps/v8/src/arm64/utils-arm64.h
 91.  12  deps/v8/src/array-iterator.js
 92. 107  deps/v8/src/array.js
 93.   3  deps/v8/src/arraybuffer.js
 94. 413  deps/v8/src/assembler.cc
 95. 150  deps/v8/src/assembler.h
 96.   2  deps/v8/src/assert-scope.cc
 97.   4  deps/v8/src/ast-literal-reindexer.cc
 98.   2  deps/v8/src/ast-literal-reindexer.h
 99.  53  deps/v8/src/ast-numbering.cc
100.  10  deps/v8/src/ast-numbering.h

1  deps/v8/.gitignore

@@ -60,6 +60,7 @@ shell_g
/test/promises-aplus/promises-tests
/test/promises-aplus/promises-tests.tar.gz
/test/promises-aplus/sinon
/test/simdjs/CHECKED_OUT_*
/test/simdjs/ecmascript_simd*
/test/simdjs/data*
/test/test262/data

1  deps/v8/AUTHORS

@@ -44,6 +44,7 @@ Bert Belder <bertbelder@gmail.com>
Burcu Dogan <burcujdogan@gmail.com>
Caitlin Potter <caitpotter88@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
Chris Nardi <hichris123@gmail.com>
Christopher A. Taylor <chris@gameclosure.com>
Daniel Andersson <kodandersson@gmail.com>
Daniel James <dnljms@gmail.com>

191  deps/v8/BUILD.gn

@@ -203,8 +203,8 @@ action("js2c") {
sources = [
"src/macros.py",
"src/messages.h",
"src/runtime.js",
"src/prologue.js",
"src/runtime.js",
"src/v8natives.js",
"src/symbol.js",
"src/array.js",
@@ -227,12 +227,12 @@ action("js2c") {
"src/json.js",
"src/array-iterator.js",
"src/string-iterator.js",
"src/debug-debugger.js",
"src/mirror-debugger.js",
"src/liveedit-debugger.js",
"src/templates.js",
"src/harmony-array.js",
"src/harmony-typedarray.js",
"src/debug/mirrors.js",
"src/debug/debug.js",
"src/debug/liveedit.js",
]
outputs = [
@@ -257,6 +257,40 @@ action("js2c") {
}
}
action("js2c_code_stubs") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
script = "tools/js2c.py"
# The script depends on this other script, this rule causes a rebuild if it
# changes.
inputs = [ "tools/jsmin.py" ]
sources = [
"src/macros.py",
"src/messages.h",
"src/code-stubs.js"
]
outputs = [
"$target_gen_dir/code-stub-libraries.cc",
]
args = [
rebase_path("$target_gen_dir/code-stub-libraries.cc",
root_build_dir),
"CODE_STUB",
] + rebase_path(sources, root_build_dir)
if (v8_use_external_startup_data) {
outputs += [ "$target_gen_dir/libraries_code_stub.bin" ]
args += [
"--startup_blob",
rebase_path("$target_gen_dir/libraries_code_stub.bin", root_build_dir),
]
}
}
action("js2c_experimental") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
@@ -279,7 +313,9 @@ action("js2c_experimental") {
"src/harmony-reflect.js",
"src/harmony-spread.js",
"src/harmony-object.js",
"src/harmony-sharedarraybuffer.js"
"src/harmony-object-observe.js",
"src/harmony-sharedarraybuffer.js",
"src/harmony-simd.js"
]
outputs = [
@@ -355,12 +391,14 @@ if (v8_use_external_startup_data) {
deps = [
":js2c",
":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
]
sources = [
"$target_gen_dir/libraries.bin",
"$target_gen_dir/libraries_code_stub.bin",
"$target_gen_dir/libraries_experimental.bin",
"$target_gen_dir/libraries_extras.bin",
]
@@ -446,6 +484,7 @@ source_set("v8_nosnapshot") {
deps = [
":js2c",
":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
":v8_base",
@@ -453,6 +492,7 @@ source_set("v8_nosnapshot") {
sources = [
"$target_gen_dir/libraries.cc",
"$target_gen_dir/code-stub-libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
"src/snapshot/snapshot-empty.cc",
@@ -477,6 +517,7 @@ source_set("v8_snapshot") {
deps = [
":js2c",
":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
":v8_base",
@@ -489,6 +530,7 @@ source_set("v8_snapshot") {
sources = [
"$target_gen_dir/libraries.cc",
"$target_gen_dir/code-stub-libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
"$target_gen_dir/snapshot.cc",
@@ -509,6 +551,7 @@ if (v8_use_external_startup_data) {
deps = [
":js2c",
":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
":v8_base",
@@ -587,13 +630,13 @@ source_set("v8_base") {
"src/bootstrapper.h",
"src/builtins.cc",
"src/builtins.h",
"src/bytecodes-irregexp.h",
"src/cancelable-task.cc",
"src/cancelable-task.h",
"src/cached-powers.cc",
"src/cached-powers.h",
"src/char-predicates.cc",
"src/char-predicates-inl.h",
"src/char-predicates.h",
"src/checks.cc",
"src/checks.h",
"src/circular-queue-inl.h",
"src/circular-queue.h",
@@ -623,6 +666,7 @@ source_set("v8_base") {
"src/compiler/basic-block-instrumentor.h",
"src/compiler/change-lowering.cc",
"src/compiler/change-lowering.h",
"src/compiler/c-linkage.cc",
"src/compiler/coalesced-live-ranges.cc",
"src/compiler/coalesced-live-ranges.h",
"src/compiler/code-generator-impl.h",
@@ -643,6 +687,7 @@ source_set("v8_base") {
"src/compiler/dead-code-elimination.cc",
"src/compiler/dead-code-elimination.h",
"src/compiler/diamond.h",
"src/compiler/frame.cc",
"src/compiler/frame.h",
"src/compiler/frame-elider.cc",
"src/compiler/frame-elider.h",
@@ -650,7 +695,6 @@ source_set("v8_base") {
"src/compiler/frame-states.h",
"src/compiler/gap-resolver.cc",
"src/compiler/gap-resolver.h",
"src/compiler/graph-builder.h",
"src/compiler/graph-reducer.cc",
"src/compiler/graph-reducer.h",
"src/compiler/graph-replay.cc",
@@ -669,8 +713,12 @@ source_set("v8_base") {
"src/compiler/instruction-selector.h",
"src/compiler/instruction.cc",
"src/compiler/instruction.h",
"src/compiler/interpreter-assembler.cc",
"src/compiler/interpreter-assembler.h",
"src/compiler/js-builtin-reducer.cc",
"src/compiler/js-builtin-reducer.h",
"src/compiler/js-context-relaxation.cc",
"src/compiler/js-context-relaxation.h",
"src/compiler/js-context-specialization.cc",
"src/compiler/js-context-specialization.h",
"src/compiler/js-frame-specialization.cc",
@@ -687,11 +735,12 @@ source_set("v8_base") {
"src/compiler/js-operator.h",
"src/compiler/js-type-feedback.cc",
"src/compiler/js-type-feedback.h",
"src/compiler/js-type-feedback-lowering.cc",
"src/compiler/js-type-feedback-lowering.h",
"src/compiler/js-typed-lowering.cc",
"src/compiler/js-typed-lowering.h",
"src/compiler/jump-threading.cc",
"src/compiler/jump-threading.h",
"src/compiler/linkage-impl.h",
"src/compiler/linkage.cc",
"src/compiler/linkage.h",
"src/compiler/liveness-analyzer.cc",
@@ -732,6 +781,8 @@ source_set("v8_base") {
"src/compiler/pipeline.h",
"src/compiler/pipeline-statistics.cc",
"src/compiler/pipeline-statistics.h",
"src/compiler/preprocess-live-ranges.cc",
"src/compiler/preprocess-live-ranges.h",
"src/compiler/raw-machine-assembler.cc",
"src/compiler/raw-machine-assembler.h",
"src/compiler/register-allocator.cc",
@@ -769,6 +820,8 @@ source_set("v8_base") {
"src/compiler/zone-pool.h",
"src/compiler.cc",
"src/compiler.h",
"src/context-measure.cc",
"src/context-measure.h",
"src/contexts.cc",
"src/contexts.h",
"src/conversions-inl.h",
@@ -784,8 +837,16 @@ source_set("v8_base") {
"src/dateparser-inl.h",
"src/dateparser.cc",
"src/dateparser.h",
"src/debug.cc",
"src/debug.h",
"src/debug/debug-evaluate.cc",
"src/debug/debug-evaluate.h",
"src/debug/debug-frames.cc",
"src/debug/debug-frames.h",
"src/debug/debug-scopes.cc",
"src/debug/debug-scopes.h",
"src/debug/debug.cc",
"src/debug/debug.h",
"src/debug/liveedit.cc",
"src/debug/liveedit.h",
"src/deoptimizer.cc",
"src/deoptimizer.h",
"src/disasm.h",
@@ -828,10 +889,12 @@ source_set("v8_base") {
"src/frames-inl.h",
"src/frames.cc",
"src/frames.h",
"src/full-codegen.cc",
"src/full-codegen.h",
"src/full-codegen/full-codegen.cc",
"src/full-codegen/full-codegen.h",
"src/func-name-inferrer.cc",
"src/func-name-inferrer.h",
"src/futex-emulation.cc",
"src/futex-emulation.h",
"src/gdb-jit.cc",
"src/gdb-jit.h",
"src/global-handles.cc",
@@ -944,15 +1007,18 @@ source_set("v8_base") {
"src/ic/stub-cache.h",
"src/interface-descriptors.cc",
"src/interface-descriptors.h",
"src/interpreter-irregexp.cc",
"src/interpreter-irregexp.h",
"src/interpreter/bytecodes.cc",
"src/interpreter/bytecodes.h",
"src/interpreter/bytecode-array-builder.cc",
"src/interpreter/bytecode-array-builder.h",
"src/interpreter/bytecode-generator.cc",
"src/interpreter/bytecode-generator.h",
"src/interpreter/interpreter.cc",
"src/interpreter/interpreter.h",
"src/isolate.cc",
"src/isolate.h",
"src/json-parser.h",
"src/json-stringifier.h",
"src/jsregexp-inl.h",
"src/jsregexp.cc",
"src/jsregexp.h",
"src/layout-descriptor-inl.h",
"src/layout-descriptor.cc",
"src/layout-descriptor.h",
@@ -965,8 +1031,6 @@ source_set("v8_base") {
"src/lithium-codegen.h",
"src/lithium.cc",
"src/lithium.h",
"src/liveedit.cc",
"src/liveedit.h",
"src/log-inl.h",
"src/log-utils.cc",
"src/log-utils.h",
@@ -1009,17 +1073,23 @@ source_set("v8_base") {
"src/property.cc",
"src/property.h",
"src/prototype.h",
"src/regexp-macro-assembler-irregexp-inl.h",
"src/regexp-macro-assembler-irregexp.cc",
"src/regexp-macro-assembler-irregexp.h",
"src/regexp-macro-assembler-tracer.cc",
"src/regexp-macro-assembler-tracer.h",
"src/regexp-macro-assembler.cc",
"src/regexp-macro-assembler.h",
"src/regexp-stack.cc",
"src/regexp-stack.h",
"src/rewriter.cc",
"src/rewriter.h",
"src/regexp/bytecodes-irregexp.h",
"src/regexp/interpreter-irregexp.cc",
"src/regexp/interpreter-irregexp.h",
"src/regexp/jsregexp-inl.h",
"src/regexp/jsregexp.cc",
"src/regexp/jsregexp.h",
"src/regexp/regexp-macro-assembler-irregexp-inl.h",
"src/regexp/regexp-macro-assembler-irregexp.cc",
"src/regexp/regexp-macro-assembler-irregexp.h",
"src/regexp/regexp-macro-assembler-tracer.cc",
"src/regexp/regexp-macro-assembler-tracer.h",
"src/regexp/regexp-macro-assembler.cc",
"src/regexp/regexp-macro-assembler.h",
"src/regexp/regexp-stack.cc",
"src/regexp/regexp-stack.h",
"src/runtime-profiler.cc",
"src/runtime-profiler.h",
"src/runtime/runtime-array.cc",
@@ -1031,6 +1101,7 @@ source_set("v8_base") {
"src/runtime/runtime-debug.cc",
"src/runtime/runtime-forin.cc",
"src/runtime/runtime-function.cc",
"src/runtime/runtime-futex.cc",
"src/runtime/runtime-generator.cc",
"src/runtime/runtime-i18n.cc",
"src/runtime/runtime-internal.cc",
@@ -1044,6 +1115,7 @@ source_set("v8_base") {
"src/runtime/runtime-proxy.cc",
"src/runtime/runtime-regexp.cc",
"src/runtime/runtime-scopes.cc",
"src/runtime/runtime-simd.cc",
"src/runtime/runtime-strings.cc",
"src/runtime/runtime-symbol.cc",
"src/runtime/runtime-test.cc",
@@ -1067,8 +1139,8 @@ source_set("v8_base") {
"src/signature.h",
"src/simulator.h",
"src/small-pointer-list.h",
"src/smart-pointers.h",
"src/snapshot/natives.h",
"src/snapshot/natives-common.cc",
"src/snapshot/serialize.cc",
"src/snapshot/serialize.h",
"src/snapshot/snapshot-common.cc",
@@ -1081,7 +1153,6 @@ source_set("v8_base") {
"src/startup-data-util.cc",
"src/string-builder.cc",
"src/string-builder.h",
"src/string-search.cc",
"src/string-search.h",
"src/string-stream.cc",
"src/string-stream.h",
@@ -1144,12 +1215,10 @@ source_set("v8_base") {
"src/ia32/codegen-ia32.cc",
"src/ia32/codegen-ia32.h",
"src/ia32/cpu-ia32.cc",
"src/ia32/debug-ia32.cc",
"src/ia32/deoptimizer-ia32.cc",
"src/ia32/disasm-ia32.cc",
"src/ia32/frames-ia32.cc",
"src/ia32/frames-ia32.h",
"src/ia32/full-codegen-ia32.cc",
"src/ia32/interface-descriptors-ia32.cc",
"src/ia32/lithium-codegen-ia32.cc",
"src/ia32/lithium-codegen-ia32.h",
@@ -1159,17 +1228,18 @@ source_set("v8_base") {
"src/ia32/lithium-ia32.h",
"src/ia32/macro-assembler-ia32.cc",
"src/ia32/macro-assembler-ia32.h",
"src/ia32/regexp-macro-assembler-ia32.cc",
"src/ia32/regexp-macro-assembler-ia32.h",
"src/compiler/ia32/code-generator-ia32.cc",
"src/compiler/ia32/instruction-codes-ia32.h",
"src/compiler/ia32/instruction-selector-ia32.cc",
"src/compiler/ia32/linkage-ia32.cc",
"src/debug/ia32/debug-ia32.cc",
"src/full-codegen/ia32/full-codegen-ia32.cc",
"src/ic/ia32/access-compiler-ia32.cc",
"src/ic/ia32/handler-compiler-ia32.cc",
"src/ic/ia32/ic-ia32.cc",
"src/ic/ia32/ic-compiler-ia32.cc",
"src/ic/ia32/stub-cache-ia32.cc",
"src/regexp/ia32/regexp-macro-assembler-ia32.cc",
"src/regexp/ia32/regexp-macro-assembler-ia32.h",
]
} else if (v8_target_arch == "x64") {
sources += [
@@ -1182,12 +1252,10 @@ source_set("v8_base") {
"src/x64/codegen-x64.cc",
"src/x64/codegen-x64.h",
"src/x64/cpu-x64.cc",
"src/x64/debug-x64.cc",
"src/x64/deoptimizer-x64.cc",
"src/x64/disasm-x64.cc",
"src/x64/frames-x64.cc",
"src/x64/frames-x64.h",
"src/x64/full-codegen-x64.cc",
"src/x64/interface-descriptors-x64.cc",
"src/x64/lithium-codegen-x64.cc",
"src/x64/lithium-codegen-x64.h",
@@ -1197,17 +1265,18 @@ source_set("v8_base") {
"src/x64/lithium-x64.h",
"src/x64/macro-assembler-x64.cc",
"src/x64/macro-assembler-x64.h",
"src/x64/regexp-macro-assembler-x64.cc",
"src/x64/regexp-macro-assembler-x64.h",
"src/compiler/x64/code-generator-x64.cc",
"src/compiler/x64/instruction-codes-x64.h",
"src/compiler/x64/instruction-selector-x64.cc",
"src/compiler/x64/linkage-x64.cc",
"src/debug/x64/debug-x64.cc",
"src/full-codegen/x64/full-codegen-x64.cc",
"src/ic/x64/access-compiler-x64.cc",
"src/ic/x64/handler-compiler-x64.cc",
"src/ic/x64/ic-x64.cc",
"src/ic/x64/ic-compiler-x64.cc",
"src/ic/x64/stub-cache-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.h",
]
} else if (v8_target_arch == "arm") {
sources += [
@@ -1222,12 +1291,10 @@ source_set("v8_base") {
"src/arm/constants-arm.h",
"src/arm/constants-arm.cc",
"src/arm/cpu-arm.cc",
"src/arm/debug-arm.cc",
"src/arm/deoptimizer-arm.cc",
"src/arm/disasm-arm.cc",
"src/arm/frames-arm.cc",
"src/arm/frames-arm.h",
"src/arm/full-codegen-arm.cc",
"src/arm/interface-descriptors-arm.cc",
"src/arm/interface-descriptors-arm.h",
"src/arm/lithium-arm.cc",
@@ -1238,19 +1305,20 @@ source_set("v8_base") {
"src/arm/lithium-gap-resolver-arm.h",
"src/arm/macro-assembler-arm.cc",
"src/arm/macro-assembler-arm.h",
"src/arm/regexp-macro-assembler-arm.cc",
"src/arm/regexp-macro-assembler-arm.h",
"src/arm/simulator-arm.cc",
"src/arm/simulator-arm.h",
"src/compiler/arm/code-generator-arm.cc",
"src/compiler/arm/instruction-codes-arm.h",
"src/compiler/arm/instruction-selector-arm.cc",
"src/compiler/arm/linkage-arm.cc",
"src/debug/arm/debug-arm.cc",
"src/full-codegen/arm/full-codegen-arm.cc",
"src/ic/arm/access-compiler-arm.cc",
"src/ic/arm/handler-compiler-arm.cc",
"src/ic/arm/ic-arm.cc",
"src/ic/arm/ic-compiler-arm.cc",
"src/ic/arm/stub-cache-arm.cc",
"src/regexp/arm/regexp-macro-assembler-arm.cc",
"src/regexp/arm/regexp-macro-assembler-arm.h",
]
} else if (v8_target_arch == "arm64") {
sources += [
@@ -1264,7 +1332,6 @@ source_set("v8_base") {
"src/arm64/code-stubs-arm64.h",
"src/arm64/constants-arm64.h",
"src/arm64/cpu-arm64.cc",
"src/arm64/debug-arm64.cc",
"src/arm64/decoder-arm64.cc",
"src/arm64/decoder-arm64.h",
"src/arm64/decoder-arm64-inl.h",
@@ -1273,7 +1340,6 @@ source_set("v8_base") {
"src/arm64/disasm-arm64.h",
"src/arm64/frames-arm64.cc",
"src/arm64/frames-arm64.h",
"src/arm64/full-codegen-arm64.cc",
"src/arm64/instructions-arm64.cc",
"src/arm64/instructions-arm64.h",
"src/arm64/instrument-arm64.cc",
@@ -1289,8 +1355,6 @@ source_set("v8_base") {
"src/arm64/macro-assembler-arm64.cc",
"src/arm64/macro-assembler-arm64.h",
"src/arm64/macro-assembler-arm64-inl.h",
"src/arm64/regexp-macro-assembler-arm64.cc",
"src/arm64/regexp-macro-assembler-arm64.h",
"src/arm64/simulator-arm64.cc",
"src/arm64/simulator-arm64.h",
"src/arm64/utils-arm64.cc",
@@ -1298,12 +1362,15 @@ source_set("v8_base") {
"src/compiler/arm64/code-generator-arm64.cc",
"src/compiler/arm64/instruction-codes-arm64.h",
"src/compiler/arm64/instruction-selector-arm64.cc",
"src/compiler/arm64/linkage-arm64.cc",
"src/debug/arm64/debug-arm64.cc",
"src/full-codegen/arm64/full-codegen-arm64.cc",
"src/ic/arm64/access-compiler-arm64.cc",
"src/ic/arm64/handler-compiler-arm64.cc",
"src/ic/arm64/ic-arm64.cc",
"src/ic/arm64/ic-compiler-arm64.cc",
"src/ic/arm64/stub-cache-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.h",
]
} else if (v8_target_arch == "mipsel") {
sources += [
@@ -1318,12 +1385,10 @@ source_set("v8_base") {
"src/mips/constants-mips.cc",
"src/mips/constants-mips.h",
"src/mips/cpu-mips.cc",
"src/mips/debug-mips.cc",
"src/mips/deoptimizer-mips.cc",
"src/mips/disasm-mips.cc",
"src/mips/frames-mips.cc",
"src/mips/frames-mips.h",
"src/mips/full-codegen-mips.cc",
"src/mips/interface-descriptors-mips.cc",
"src/mips/lithium-codegen-mips.cc",
"src/mips/lithium-codegen-mips.h",
@@ -1333,19 +1398,20 @@ source_set("v8_base") {
"src/mips/lithium-mips.h",
"src/mips/macro-assembler-mips.cc",
"src/mips/macro-assembler-mips.h",
"src/mips/regexp-macro-assembler-mips.cc",
"src/mips/regexp-macro-assembler-mips.h",
"src/mips/simulator-mips.cc",
"src/mips/simulator-mips.h",
"src/compiler/mips/code-generator-mips.cc",
"src/compiler/mips/instruction-codes-mips.h",
"src/compiler/mips/instruction-selector-mips.cc",
"src/compiler/mips/linkage-mips.cc",
"src/debug/mips/debug-mips.cc",
"src/full-codegen/mips/full-codegen-mips.cc",
"src/ic/mips/access-compiler-mips.cc",
"src/ic/mips/handler-compiler-mips.cc",
"src/ic/mips/ic-mips.cc",
"src/ic/mips/ic-compiler-mips.cc",
"src/ic/mips/stub-cache-mips.cc",
"src/regexp/mips/regexp-macro-assembler-mips.cc",
"src/regexp/mips/regexp-macro-assembler-mips.h",
]
} else if (v8_target_arch == "mips64el") {
sources += [
@@ -1360,12 +1426,10 @@ source_set("v8_base") {
"src/mips64/constants-mips64.cc",
"src/mips64/constants-mips64.h",
"src/mips64/cpu-mips64.cc",
"src/mips64/debug-mips64.cc",
"src/mips64/deoptimizer-mips64.cc",
"src/mips64/disasm-mips64.cc",
"src/mips64/frames-mips64.cc",
"src/mips64/frames-mips64.h",
"src/mips64/full-codegen-mips64.cc",
"src/mips64/interface-descriptors-mips64.cc",
"src/mips64/lithium-codegen-mips64.cc",
"src/mips64/lithium-codegen-mips64.h",
@@ -1375,15 +1439,17 @@ source_set("v8_base") {
"src/mips64/lithium-mips64.h",
"src/mips64/macro-assembler-mips64.cc",
"src/mips64/macro-assembler-mips64.h",
"src/mips64/regexp-macro-assembler-mips64.cc",
"src/mips64/regexp-macro-assembler-mips64.h",
"src/mips64/simulator-mips64.cc",
"src/mips64/simulator-mips64.h",
"src/debug/mips64/debug-mips64.cc",
"src/full-codegen/mips64/full-codegen-mips64.cc",
"src/ic/mips64/access-compiler-mips64.cc",
"src/ic/mips64/handler-compiler-mips64.cc",
"src/ic/mips64/ic-mips64.cc",
"src/ic/mips64/ic-compiler-mips64.cc",
"src/ic/mips64/stub-cache-mips64.cc",
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
"src/regexp/mips64/regexp-macro-assembler-mips64.h",
]
}
@@ -1479,6 +1545,7 @@ source_set("v8_libbase") {
"src/base/safe_conversions_impl.h",
"src/base/safe_math.h",
"src/base/safe_math_impl.h",
"src/base/smart-pointers.h",
"src/base/sys-info.cc",
"src/base/sys-info.h",
"src/base/utils/random-number-generator.cc",
@@ -1700,7 +1767,7 @@ if ((current_toolchain == host_toolchain && v8_toolset_for_d8 == "host") ||
"//build/config/sanitizers:deps",
]
# TODO(jochen): Add support for readline and vtunejit.
# TODO(jochen): Add support for vtunejit.
if (is_posix) {
sources += [ "src/d8-posix.cc" ]
@@ -1710,8 +1777,6 @@ if ((current_toolchain == host_toolchain && v8_toolset_for_d8 == "host") ||
if (!is_component_build) {
sources += [
"src/d8-debug.cc",
"src/d8-debug.h",
"$target_gen_dir/d8-js.cc",
]
}

571  deps/v8/ChangeLog

@@ -1,3 +1,574 @@
2015-08-19: Version 4.6.85
Performance and stability improvements on all platforms.
2015-08-19: Version 4.6.84
Performance and stability improvements on all platforms.
2015-08-19: Version 4.6.83
Performance and stability improvements on all platforms.
2015-08-18: Version 4.6.82
Performance and stability improvements on all platforms.
2015-08-18: Version 4.6.81
Performance and stability improvements on all platforms.
2015-08-18: Version 4.6.80
Filter out slot buffer slots, that point to SMIs in dead objects
(Chromium issues 454297, 519577).
Performance and stability improvements on all platforms.
2015-08-17: Version 4.6.79
Performance and stability improvements on all platforms.
2015-08-17: Version 4.6.78
Put V8 extras into the snapshot.
Performance and stability improvements on all platforms.
2015-08-15: Version 4.6.77
Performance and stability improvements on all platforms.
2015-08-14: Version 4.6.76
Performance and stability improvements on all platforms.
2015-08-14: Version 4.6.75
Performance and stability improvements on all platforms.
2015-08-14: Version 4.6.74
Performance and stability improvements on all platforms.
2015-08-13: Version 4.6.73
Performance and stability improvements on all platforms.
2015-08-13: Version 4.6.72
Stage sloppy classes (issue 3305).
Performance and stability improvements on all platforms.
2015-08-13: Version 4.6.71
Performance and stability improvements on all platforms.
2015-08-12: Version 4.6.70
Performance and stability improvements on all platforms.
2015-08-12: Version 4.6.69
Stage --harmony-array-includes (issue 3575).
Performance and stability improvements on all platforms.
2015-08-12: Version 4.6.68
Use a new lexical context for sloppy-mode eval (issue 4288).
Add includes method to typed arrays (issue 3575).
Performance and stability improvements on all platforms.
2015-08-11: Version 4.6.67
Performance and stability improvements on all platforms.
2015-08-11: Version 4.6.66
Performance and stability improvements on all platforms.
2015-08-11: Version 4.6.65
Performance and stability improvements on all platforms.
2015-08-10: Version 4.6.64
Disable --global-var-shortcuts (Chromium issue 517778).
Performance and stability improvements on all platforms.
2015-08-10: Version 4.6.63
Performance and stability improvements on all platforms.
2015-08-09: Version 4.6.62
Performance and stability improvements on all platforms.
2015-08-08: Version 4.6.61
Performance and stability improvements on all platforms.
2015-08-08: Version 4.6.60
[IC] Make SeededNumberDictionary::UpdateMaxNumberKey prototype aware
(issue 4335).
Performance and stability improvements on all platforms.
2015-08-08: Version 4.6.59
Performance and stability improvements on all platforms.
2015-08-07: Version 4.6.58
Performance and stability improvements on all platforms.
2015-08-07: Version 4.6.57
Rename "extras exports" to "extras binding" (Chromium issue 507133).
Performance and stability improvements on all platforms.
2015-08-07: Version 4.6.56
Performance and stability improvements on all platforms.
2015-08-06: Version 4.6.55
Fix off-by-one in Array.concat's max index check (Chromium issue
516592).
Performance and stability improvements on all platforms.
2015-08-06: Version 4.6.54
Performance and stability improvements on all platforms.
2015-08-06: Version 4.6.53
Performance and stability improvements on all platforms.
2015-08-05: Version 4.6.52
Ship --harmony-new-target (issue 3887).
Performance and stability improvements on all platforms.
2015-08-04: Version 4.6.51
Performance and stability improvements on all platforms.
2015-08-04: Version 4.6.50
Performance and stability improvements on all platforms.
2015-08-03: Version 4.6.49
SIMD.js Add the other SIMD Phase 1 types (issue 4124).
Performance and stability improvements on all platforms.
2015-08-03: Version 4.6.48
Performance and stability improvements on all platforms.
2015-08-03: Version 4.6.47
Performance and stability improvements on all platforms.
2015-08-01: Version 4.6.46
Performance and stability improvements on all platforms.
2015-08-01: Version 4.6.45
Performance and stability improvements on all platforms.
2015-08-01: Version 4.6.44
Performance and stability improvements on all platforms.
2015-07-31: Version 4.6.43
Performance and stability improvements on all platforms.
2015-07-31: Version 4.6.42
Performance and stability improvements on all platforms.
2015-07-31: Version 4.6.41
Performance and stability improvements on all platforms.
2015-07-30: Version 4.6.40
Pass the kGCCallbackFlagForced flag when invoking
Heap::CollectAllGarbage from AdjustAmountOfExternalAllocatedMemory
(Chromium issue 511294).
Performance and stability improvements on all platforms.
2015-07-30: Version 4.6.39
Performance and stability improvements on all platforms.
2015-07-30: Version 4.6.38
Performance and stability improvements on all platforms.
2015-07-29: Version 4.6.37
Performance and stability improvements on all platforms.
2015-07-28: Version 4.6.36
Fix prototype registration upon SlowToFast migration (Chromium issue
513602).
Bugfix: Incorrect type feedback vector structure on recompile (Chromium
issue 514526).
Reland of "Remove ExternalArray, derived types, and element kinds"
(issue 3996).
Performance and stability improvements on all platforms.
2015-07-28: Version 4.6.35
Performance and stability improvements on all platforms.
2015-07-28: Version 4.6.34
Remove ExternalArray, derived types, and element kinds (issue 3996).
Make V8 compile with MSVS 2015 (issue 4326).
Performance and stability improvements on all platforms.
2015-07-27: Version 4.6.33
Performance and stability improvements on all platforms.
2015-07-26: Version 4.6.32
Performance and stability improvements on all platforms.
2015-07-25: Version 4.6.31
Performance and stability improvements on all platforms.
2015-07-25: Version 4.6.30
Make dates default to the local timezone if none specified (issue 4242,
Chromium issue 391730).
Performance and stability improvements on all platforms.
2015-07-24: Version 4.6.29
Performance and stability improvements on all platforms.
2015-07-24: Version 4.6.28
Performance and stability improvements on all platforms.
2015-07-23: Version 4.6.27
Fix check for a date with a 24th hour (Chromium issue 174609).
Performance and stability improvements on all platforms.
2015-07-23: Version 4.6.26
Performance and stability improvements on all platforms.
2015-07-22: Version 4.6.25
Performance and stability improvements on all platforms.
2015-07-22: Version 4.6.24
Performance and stability improvements on all platforms.
2015-07-22: Version 4.6.23
Performance and stability improvements on all platforms.
2015-07-21: Version 4.6.22
Performance and stability improvements on all platforms.
2015-07-21: Version 4.6.21
Performance and stability improvements on all platforms.
2015-07-21: Version 4.6.20
Don't run the second pass of the pending phantom callbacks if the heap
has been torn down (Chromium issue 511204).
Debugger: prepare code for debugging on a per-function basis (issue
4132).
Performance and stability improvements on all platforms.
2015-07-20: Version 4.6.19
Performance and stability improvements on all platforms.
2015-07-20: Version 4.6.18
Performance and stability improvements on all platforms.
2015-07-19: Version 4.6.17
Performance and stability improvements on all platforms.
2015-07-18: Version 4.6.16
Performance and stability improvements on all platforms.
2015-07-18: Version 4.6.15
Make NumberFormat use the ICU currency data, fix bug in NumberFormat
(Chromium issues 304722, 435465, 473104).
Properly fix enumerate / Object.keys wrt access checked objects
(Chromium issue 509936).
Fix object enumeration wrt access checked objects (Chromium issue
509936).
Fix DefineOwnProperty for data properties wrt failed access checks
(Chromium issue 509936).
Fix GetOwnPropertyNames on access-checked objects (Chromium issue
509936).
Fix getPrototypeOf for access checked objects (Chromium issue 509936).
Delete APIs deprecated since last release.
Performance and stability improvements on all platforms.
2015-07-17: Version 4.6.14
Array.prototype.reverse should call [[HasProperty]] on elements before
[[Get]] (issue 4223).
In RegExp, lastIndex is read with ToLength, not ToInteger (issue 4244).
Stage --harmony-new-target (issue 3887).
Re-ship harmony spread calls and spread arrays (issue 3018).
Expose SIMD.Float32x4 type to Javascript. This CL exposes the
constructor function, defines type related information, and implements
value type semantics. It also refactors test/mjsunit/samevalue.js to
test SameValue and SameValueZero (issue 4124).
Performance and stability improvements on all platforms.
2015-07-17: Version 4.6.13
Performance and stability improvements on all platforms.
2015-07-16: Version 4.6.12
Performance and stability improvements on all platforms.
2015-07-16: Version 4.6.11
Performance and stability improvements on all platforms.
2015-07-16: Version 4.6.10
Expose SIMD.Float32x4 type to Javascript. This CL exposes the
constructor function, defines type related information, and implements
value type semantics. It also refactors test/mjsunit/samevalue.js to
test SameValue and SameValueZero (issue 4124).
Fix runtime-atomics for Win 10 SDK and remove volatile (Chromium issues
440500, 491424).
Performance and stability improvements on all platforms.
2015-07-15: Version 4.6.9
Let the second pass phantom callbacks run in a separate task on the
foreground thread.
Performance and stability improvements on all platforms.
2015-07-15: Version 4.6.8
Optimize String.prototype.includes (issue 3807).
Unship spread calls and spread arrays (issue 4298).
Performance and stability improvements on all platforms.
2015-07-15: Version 4.6.7
Performance and stability improvements on all platforms.
2015-07-14: Version 4.6.6
Performance and stability improvements on all platforms.
2015-07-14: Version 4.6.5
Performance and stability improvements on all platforms.
2015-07-14: Version 4.6.4
MIPS64: Fix BlockTrampolinePoolFor() to emit trampoline before blocking,
if needed (issue 4294).
Add convenience method for converting v8::PersistentBase to v8::Local.
Performance and stability improvements on all platforms.
2015-07-13: Version 4.6.3
MIPS: Fix BlockTrampolinePoolFor() to emit trampoline before blocking,
if needed (issue 4294).
Performance and stability improvements on all platforms.
2015-07-13: Version 4.6.2
[arm] CheckConstPool between TurboFan instructions (issue 4292).
Fix keyed access of primitive objects in the runtime. For now it uses a
pretty slow path for accessing strings by wrapping it into a new
temporary wrapper (issues 3088, 4042).
Performance and stability improvements on all platforms.
2015-07-12: Version 4.6.1
Performance and stability improvements on all platforms.
2015-07-09: Version 4.5.107
[arm] Don't call branch_offset within CheckConstPool (issue 4292).
[arm] Fix missing CheckBuffer for branches (issue 4292).
Performance and stability improvements on all platforms.
2015-07-09: Version 4.5.106
Performance and stability improvements on all platforms.
2015-07-09: Version 4.5.105
Guard @@isConcatSpreadable behind a flag (Chromium issue 507553).
Performance and stability improvements on all platforms.
2015-07-08: Version 4.5.104
[x64] Fix handling of Smi constants in LSubI and LBitI (Chromium issue
478612).
Performance and stability improvements on all platforms.
2015-07-08: Version 4.5.103
Performance and stability improvements on all platforms.

14  deps/v8/DEPS

@@ -8,23 +8,23 @@ vars = {
deps = {
"v8/build/gyp":
Var("git_url") + "/external/gyp.git" + "@" + "5122240c5e5c4d8da12c543d82b03d6089eb77c5",
Var("git_url") + "/external/gyp.git" + "@" + "6ee91ad8659871916f9aa840d42e1513befdf638",
"v8/third_party/icu":
Var("git_url") + "/chromium/deps/icu.git" + "@" + "c81a1a3989c3b66fa323e9a6ee7418d7c08297af",
Var("git_url") + "/chromium/deps/icu.git" + "@" + "89dcdec16381883782b9cc9cff38e00f047a0f46",
"v8/buildtools":
Var("git_url") + "/chromium/buildtools.git" + "@" + "ecc8e253abac3b6186a97573871a084f4c0ca3ae",
Var("git_url") + "/chromium/buildtools.git" + "@" + "565d04e8741429fb1b4f26d102f2c6c3b849edeb",
"v8/testing/gtest":
Var("git_url") + "/external/googletest.git" + "@" + "23574bf2333f834ff665f894c97bef8a5b33a0a9",
Var("git_url") + "/external/googletest.git" + "@" + "9855a87157778d39b95eccfb201a9dc90f6d61c6",
"v8/testing/gmock":
Var("git_url") + "/external/googlemock.git" + "@" + "29763965ab52f24565299976b936d1265cb6a271", # from svn revision 501
Var("git_url") + "/external/googlemock.git" + "@" + "0421b6f358139f02e102c9c332ce19a33faf75be",
"v8/tools/clang":
Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "73ec8804ed395b0886d6edf82a9f33583f4a7902",
Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "5b12e334ec0e571a8e1f68d028dc5427b58c17ec",
}
deps_os = {
"android": {
"v8/third_party/android_tools":
Var("git_url") + "/android_tools.git" + "@" + "21f4bcbd6cd927e4b4227cfde7d5f13486be1236",
Var("git_url") + "/android_tools.git" + "@" + "9e9b6169a098bc19986e44fbbf65e4c29031e4bd",
},
"win": {
"v8/third_party/cygwin":

14  deps/v8/Makefile

@@ -44,10 +44,6 @@ endif
ifdef component
GYPFLAGS += -Dcomponent=$(component)
endif
# console=readline
ifdef console
GYPFLAGS += -Dconsole=$(console)
endif
# disassembler=on
ifeq ($(disassembler), on)
GYPFLAGS += -Dv8_enable_disassembler=1
@@ -162,7 +158,9 @@ endif
ifdef embedscript
GYPFLAGS += -Dembed_script=$(embedscript)
endif
ifeq ($(goma), on)
GYPFLAGS += -Duse_goma=1
endif
# arm specific flags.
# arm_version=<number | "default">
ifneq ($(strip $(arm_version)),)
@@ -218,6 +216,12 @@ ifeq ($(arm_test_noprobe), on)
GYPFLAGS += -Darm_test_noprobe=on
endif
# Optionally enable wasm prototype.
# Assume you've placed a link to v8-native-prototype in third_party/wasm.
ifeq ($(wasm), on)
GYPFLAGS += -Dv8_wasm=1
endif
# ----------------- available targets: --------------------
# - "grokdump": rebuilds heap constants lists used by grokdump
# - any arch listed in ARCHES (see below)

8  deps/v8/OWNERS

@@ -1,21 +1,21 @@
adamk@chromium.org
arv@chromium.org
bmeurer@chromium.org
danno@chromium.org
dcarney@chromium.org
dslomov@chromium.org
epertoso@chromium.org
hablich@chromium.org
hpayer@chromium.org
ishell@chromium.org
jarin@chromium.org
jkummerow@chromium.org
jochen@chromium.org
littledan@chromium.org
machenbach@chromium.org
mlippautz@chromium.org
marja@chromium.org
mstarzinger@chromium.org
mvstanton@chromium.org
rmcilroy@chromium.org
rossberg@chromium.org
svenpanne@chromium.org
titzer@chromium.org
ulan@chromium.org
verwaest@chromium.org

55  deps/v8/PRESUBMIT.py

@@ -141,6 +141,39 @@ def _CheckUnwantedDependencies(input_api, output_api):
return results
def _CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api):
"""Attempts to prevent inclusion of inline headers into normal header
files. This tries to establish a layering where inline headers can be
included by other inline headers or compilation units only."""
file_inclusion_pattern = r'(?!.+-inl\.h).+\.h'
include_directive_pattern = input_api.re.compile(r'#include ".+-inl.h"')
include_warning = (
'You might be including an inline header (e.g. foo-inl.h) within a\n'
'normal header (e.g. bar.h) file. Can you avoid introducing the\n'
'#include? The commit queue will not block on this warning.')
def FilterFile(affected_file):
black_list = (_EXCLUDED_PATHS +
input_api.DEFAULT_BLACK_LIST)
return input_api.FilterSourceFile(
affected_file,
white_list=(file_inclusion_pattern, ),
black_list=black_list)
problems = []
for f in input_api.AffectedSourceFiles(FilterFile):
local_path = f.LocalPath()
for line_number, line in f.ChangedContents():
if (include_directive_pattern.search(line)):
problems.append(
'%s:%d\n %s' % (local_path, line_number, line.strip()))
if problems:
return [output_api.PresubmitPromptOrNotify(include_warning, problems)]
else:
return []
def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
"""Attempts to prevent use of functions intended only for testing in
non-testing code. For now this is just a best-effort implementation
@@ -195,6 +228,8 @@ def _CommonChecks(input_api, output_api):
results.extend(_CheckUnwantedDependencies(input_api, output_api))
results.extend(
_CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api))
results.extend(
_CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api))
return results
@@ -209,28 +244,32 @@ def _SkipTreeCheck(input_api, output_api):
return input_api.environ.get('PRESUBMIT_TREE_CHECK') == 'skip'
def _CheckChangeLogFlag(input_api, output_api):
def _CheckChangeLogFlag(input_api, output_api, warn):
"""Checks usage of LOG= flag in the commit message."""
results = []
if input_api.change.BUG and not 'LOG' in input_api.change.tags:
results.append(output_api.PresubmitError(
'An issue reference (BUG=) requires a change log flag (LOG=). '
'Use LOG=Y for including this commit message in the change log. '
'Use LOG=N or leave blank otherwise.'))
if (input_api.change.BUG and input_api.change.BUG != 'none' and
not 'LOG' in input_api.change.tags):
text = ('An issue reference (BUG=) requires a change log flag (LOG=). '
'Use LOG=Y for including this commit message in the change log. '
'Use LOG=N or leave blank otherwise.')
if warn:
results.append(output_api.PresubmitPromptWarning(text))
else:
results.append(output_api.PresubmitError(text))
return results
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(_CheckChangeLogFlag(input_api, output_api))
results.extend(_CheckChangeLogFlag(input_api, output_api, True))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(_CheckChangeLogFlag(input_api, output_api))
results.extend(_CheckChangeLogFlag(input_api, output_api, False))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
if not _SkipTreeCheck(input_api, output_api):

2  deps/v8/WATCHLISTS

@@ -40,7 +40,7 @@
'filepath': 'src/snapshot/',
},
'debugger': {
'filepath': 'src/debug\.(cc|h)|src/.*-debugger\.js|src/runtime/runtime-debug\.cc',
'filepath': 'src/debug/',
},
},

51  deps/v8/build/download_gold_plugin.py

@@ -0,0 +1,51 @@
#!/usr/bin/env python
# Copyright 2015 the V8 project authors. All rights reserved.
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to download LLVM gold plugin from google storage."""
import json
import os
import shutil
import subprocess
import sys
import zipfile
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CHROME_SRC = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir))
sys.path.insert(0, os.path.join(CHROME_SRC, 'tools'))
import find_depot_tools
DEPOT_PATH = find_depot_tools.add_depot_tools_to_path()
GSUTIL_PATH = os.path.join(DEPOT_PATH, 'gsutil.py')
LLVM_BUILD_PATH = os.path.join(CHROME_SRC, 'third_party', 'llvm-build',
'Release+Asserts')
CLANG_UPDATE_PY = os.path.join(CHROME_SRC, 'tools', 'clang', 'scripts',
'update.py')
CLANG_REVISION = os.popen(CLANG_UPDATE_PY + ' --print-revision').read().rstrip()
CLANG_BUCKET = 'gs://chromium-browser-clang/Linux_x64'
def main():
targz_name = 'llvmgold-%s.tgz' % CLANG_REVISION
remote_path = '%s/%s' % (CLANG_BUCKET, targz_name)
os.chdir(LLVM_BUILD_PATH)
# TODO(pcc): Fix gsutil.py cp url file < /dev/null 2>&0
# (currently aborts with exit code 1,
# https://github.com/GoogleCloudPlatform/gsutil/issues/289) or change the
# stdin->stderr redirect in update.py to do something else (crbug.com/494442).
subprocess.check_call(['python', GSUTIL_PATH,
'cp', remote_path, targz_name],
stderr=open('/dev/null', 'w'))
subprocess.check_call(['tar', 'xzf', targz_name])
os.remove(targz_name)
return 0
if __name__ == '__main__':
sys.exit(main())

6  deps/v8/build/features.gypi

@@ -64,6 +64,9 @@
# Set to 1 to enable DCHECKs in release builds.
'dcheck_always_on%': 0,
# Set to 1 to enable building with wasm prototype.
'v8_wasm%': 0,
},
'target_defaults': {
'conditions': [
@@ -103,6 +106,9 @@
['dcheck_always_on!=0', {
'defines': ['DEBUG',],
}],
['v8_wasm!=0', {
'defines': ['V8_WASM',],
}],
], # conditions
'configurations': {
'DebugBaseCommon': {

1  deps/v8/build/get_landmines.py

@@ -23,6 +23,7 @@ def main():
print 'Clobber after ICU roll.'
print 'Moar clobbering...'
print 'Remove build/android.gypi'
print 'Cleanup after windows ninja switch attempt.'
return 0

242  deps/v8/build/standalone.gypi

@@ -88,6 +88,13 @@
'clang_dir%': '<(base_dir)/third_party/llvm-build/Release+Asserts',
'use_lto%': 0,
# Control Flow Integrity for virtual calls and casts.
# See http://clang.llvm.org/docs/ControlFlowIntegrity.html
'cfi_vptr%': 0,
'cfi_diag%': 0,
# goma settings.
# 1 to use goma.
# If no gomadir is set, it uses the default gomadir.
@@ -105,6 +112,16 @@
}, {
'host_clang%': '0',
}],
# linux_use_bundled_gold: whether to use the gold linker binary checked
# into third_party/binutils. Force this off via GYP_DEFINES when you
# are using a custom toolchain and need to control -B in ldflags.
# Do not use 32-bit gold on 32-bit hosts as it runs out address space
# for component=static_library builds.
['(OS=="linux" or OS=="android") and (target_arch=="x64" or target_arch=="arm" or (target_arch=="ia32" and host_arch=="x64"))', {
'linux_use_bundled_gold%': 1,
}, {
'linux_use_bundled_gold%': 0,
}],
],
},
'base_dir%': '<(base_dir)',
@@ -122,6 +139,10 @@
'tsan%': '<(tsan)',
'sanitizer_coverage%': '<(sanitizer_coverage)',
'use_custom_libcxx%': '<(use_custom_libcxx)',
'linux_use_bundled_gold%': '<(linux_use_bundled_gold)',
'use_lto%': '<(use_lto)',
'cfi_vptr%': '<(cfi_vptr)',
'cfi_diag%': '<(cfi_diag)',
# Add a simple extra solely for the purpose of the cctests
'v8_extra_library_files': ['../test/cctest/test-extra.js'],
@@ -148,7 +169,7 @@
# the JS builtins sources and the start snapshot.
# Embedders that don't use standalone.gypi will need to add
# their own default value.
'v8_use_external_startup_data%': 0,
'v8_use_external_startup_data%': 1,
# Relative path to icu.gyp from this file.
'icu_gyp_path': '../third_party/icu/icu.gyp',
@@ -179,8 +200,8 @@
}],
],
}],
['(v8_target_arch=="ia32" or v8_target_arch=="x64" or v8_target_arch=="x87") and \
(OS=="linux" or OS=="mac")', {
['((v8_target_arch=="ia32" or v8_target_arch=="x64" or v8_target_arch=="x87") and \
(OS=="linux" or OS=="mac")) or (v8_target_arch=="ppc64" and OS=="linux")', {
'v8_enable_gdbjit%': 1,
}, {
'v8_enable_gdbjit%': 0,
@@ -207,10 +228,8 @@
# the C++ standard library is used.
'use_custom_libcxx%': 1,
}],
['OS=="linux"', {
# Gradually roll out v8_use_external_startup_data.
# Should eventually be default enabled on all platforms.
'v8_use_external_startup_data%': 1,
['cfi_vptr==1', {
'use_lto%': 1,
}],
['OS=="android"', {
# Location of Android NDK.
@@ -358,6 +377,19 @@
'Release': {
'cflags+': ['<@(release_extra_cflags)'],
},
'conditions': [
['OS=="win"', {
'Optdebug_x64': {
'inherit_from': ['Optdebug'],
},
'Debug_x64': {
'inherit_from': ['Debug'],
},
'Release_x64': {
'inherit_from': ['Release'],
},
}],
],
},
'conditions':[
['(clang==1 or host_clang==1) and OS!="win"', {
@@ -522,6 +554,21 @@
}],
],
}],
['linux_use_bundled_gold==1 and not (clang==0 and use_lto==1)', {
# Put our binutils, which contains gold in the search path. We pass
# the path to gold to the compiler. gyp leaves unspecified what the
# cwd is when running the compiler, so the normal gyp path-munging
# fails us. This hack gets the right path.
#
# Disabled when using GCC LTO because GCC also uses the -B search
# path at link time to find "as", and our bundled "as" can only
# target x86.
'ldflags': [
# Note, Chromium allows ia32 host arch as well, we limit this to
# x64 in v8.
'-B<(base_dir)/third_party/binutils/Linux_x64/Release/bin',
],
}],
],
},
}],
@@ -658,7 +705,85 @@
}],
],
'msvs_cygwin_dirs': ['<(DEPTH)/third_party/cygwin'],
'msvs_disabled_warnings': [4355, 4800],
'msvs_disabled_warnings': [
# C4091: 'typedef ': ignored on left of 'X' when no variable is
# declared.
# This happens in a number of Windows headers. Dumb.
4091,
# C4127: conditional expression is constant
# This warning can in theory catch dead code and other problems, but
# triggers in far too many desirable cases where the conditional
# expression is either set by macros or corresponds some legitimate
# compile-time constant expression (due to constant template args,
# conditionals comparing the sizes of different types, etc.). Some of
# these can be worked around, but it's not worth it.
4127,
# C4351: new behavior: elements of array 'array' will be default
# initialized
# This is a silly "warning" that basically just alerts you that the
# compiler is going to actually follow the language spec like it's
# supposed to, instead of not following it like old buggy versions
# did. There's absolutely no reason to turn this on.
4351,
# C4355: 'this': used in base member initializer list
# It's commonly useful to pass |this| to objects in a class'
# initializer list. While this warning can catch real bugs, most of
# the time the constructors in question don't attempt to call methods
# on the passed-in pointer (until later), and annotating every legit
# usage of this is simply more hassle than the warning is worth.
4355,
# C4503: 'identifier': decorated name length exceeded, name was
# truncated
# This only means that some long error messages might have truncated
# identifiers in the presence of lots of templates. It has no effect
# on program correctness and there's no real reason to waste time
# trying to prevent it.
4503,
# Warning C4589 says: "Constructor of abstract class ignores
# initializer for virtual base class." Disable this warning because it
# is flaky in VS 2015 RTM. It triggers on compiler generated
# copy-constructors in some cases.
4589,
# C4611: interaction between 'function' and C++ object destruction is
# non-portable
# This warning is unavoidable when using e.g. setjmp/longjmp. MSDN
# suggests using exceptions instead of setjmp/longjmp for C++, but
# Chromium code compiles without exception support. We therefore have
# to use setjmp/longjmp for e.g. JPEG decode error handling, which
# means we have to turn off this warning (and be careful about how
# object destruction happens in such cases).
4611,
# TODO(jochen): These warnings are level 4. They will be slowly
# removed as code is fixed.
4100, # Unreferenced formal parameter
4121, # Alignment of a member was sensitive to packing
4244, # Conversion from 'type1' to 'type2', possible loss of data
4302, # Truncation from 'type 1' to 'type 2'
4309, # Truncation of constant value
4311, # Pointer truncation from 'type' to 'type'
4312, # Conversion from 'type1' to 'type2' of greater size
4481, # Nonstandard extension used: override specifier 'keyword'
4505, # Unreferenced local function has been removed
4510, # Default constructor could not be generated
4512, # Assignment operator could not be generated
4610, # Object can never be instantiated
4800, # Forcing value to bool.
4838, # Narrowing conversion. Doesn't seem to be very useful.
4995, # 'X': name was marked as #pragma deprecated
4996, # 'X': was declared deprecated (for GetVersionEx).
# These are variable shadowing warnings that are new in VS2015. We
# should work through these at some point -- they may be removed from
# the RTM release in the /W4 set.
4456, 4457, 4458, 4459,
],
'msvs_settings': {
'VCCLCompilerTool': {
'MinimalRebuild': 'false',
@@ -774,6 +899,12 @@
'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
'CLANG_CXX_LANGUAGE_STANDARD': 'gnu++0x', # -std=gnu++0x
},
'conditions': [
['v8_target_arch=="x64" or v8_target_arch=="arm64" \
or v8_target_arch=="mips64el"', {
'xcode_settings': {'WARNING_CFLAGS': ['-Wshorten-64-to-32']},
}],
],
}],
],
'target_conditions': [
@@ -1047,5 +1178,100 @@
['CXX.host_wrapper', '<(gomadir)/gomacc'],
],
}],
['use_lto==1', {
'target_defaults': {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-flto',
],
}],
],
},
}],
['use_lto==1 and clang==0', {
'target_defaults': {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-ffat-lto-objects',
],
}],
],
},
}],
['use_lto==1 and clang==1', {
'target_defaults': {
'target_conditions': [
['_toolset=="target"', {
'arflags': [
'--plugin', '<(clang_dir)/lib/LLVMgold.so',
],
# Apply a lower optimization level with lto. Chromium does this
# for non-official builds only - a differentiation that doesn't
# exist in v8.
'ldflags': [
'-Wl,--plugin-opt,O1',
],
}],
],
},
}],
['use_lto==1 and clang==0', {
'target_defaults': {
'target_conditions': [
['_toolset=="target"', {
'ldflags': [
'-flto=32',
],
}],
],
},
}],
['use_lto==1 and clang==1', {
'target_defaults': {
'target_conditions': [
['_toolset=="target"', {
'ldflags': [
'-flto',
],
}],
],
},
}],
['cfi_diag==1', {
'target_defaults': {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-fno-sanitize-trap=cfi',
'-fsanitize-recover=cfi',
],
'ldflags': [
'-fno-sanitize-trap=cfi',
'-fsanitize-recover=cfi',
],
}],
],
},
}],
['cfi_vptr==1', {
'target_defaults': {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-fsanitize=cfi-vcall',
'-fsanitize=cfi-derived-cast',
'-fsanitize=cfi-unrelated-cast',
],
'ldflags': [
'-fsanitize=cfi-vcall',
'-fsanitize=cfi-derived-cast',
'-fsanitize=cfi-unrelated-cast',
],
}],
],
},
}],
],
}

4  deps/v8/build/toolchain.gypi

@@ -1149,7 +1149,9 @@
}],
],
}],
['linux_use_gold_flags==1', {
# TODO(pcc): Re-enable in LTO builds once we've fixed the intermittent
# link failures (crbug.com/513074).
['linux_use_gold_flags==1 and use_lto==0', {
'target_conditions': [
['_toolset=="target"', {
'ldflags': [

34  deps/v8/include/v8-platform.h

@@ -19,6 +19,20 @@ class Task {
virtual void Run() = 0;
};
/**
* An IdleTask represents a unit of work to be performed in idle time.
* The Run method is invoked with an argument that specifies the deadline in
* seconds returned by MonotonicallyIncreasingTime().
* The idle task is expected to complete by this deadline.
*/
class IdleTask {
public:
virtual ~IdleTask() {}
virtual void Run(double deadline_in_seconds) = 0;
};
/**
* V8 Platform abstraction layer.
*
@@ -63,8 +77,26 @@ class Platform {
* scheduling. The definition of "foreground" is opaque to V8.
*/
virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
double delay_in_seconds) {
double delay_in_seconds) = 0;
/**
* Schedules a task to be invoked on a foreground thread wrt a specific
* |isolate| when the embedder is idle.
* Requires that SupportsIdleTasks(isolate) is true.
* Idle tasks may be reordered relative to other task types and may be
* starved for an arbitrarily long time if no idle time is available.
* The definition of "foreground" is opaque to V8.
*/
virtual void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) {
// TODO(ulan): Make this function abstract after V8 roll in Chromium.
}
/**
* Returns true if idle tasks are enabled for the given |isolate|.
*/
virtual bool IdleTasksEnabled(Isolate* isolate) {
// TODO(ulan): Make this function abstract after V8 roll in Chromium.
return false;
}
/**
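
The two hooks above are opt-in: the default IdleTasksEnabled() returns false, so an embedder that wants idle-time work must override both methods. Below is a minimal sketch (not part of this commit) of an embedder platform that queues idle tasks and pumps them against an explicit time budget; the class name, the synchronous task handling, and the PumpIdleTasks() helper are illustrative assumptions.

    // Sketch of an embedder v8::Platform that opts in to idle tasks.
    // Everything runs synchronously on the calling thread; only the
    // idle-task plumbing is the point here.
    #include <chrono>
    #include <queue>
    #include "include/v8-platform.h"

    class InlinePlatform : public v8::Platform {
     public:
      void CallOnBackgroundThread(v8::Task* task, ExpectedRuntime) override {
        task->Run();  // Sketch only: a real embedder uses a thread pool.
        delete task;
      }
      void CallOnForegroundThread(v8::Isolate*, v8::Task* task) override {
        task->Run();
        delete task;
      }
      void CallDelayedOnForegroundThread(v8::Isolate*, v8::Task* task,
                                         double /*delay_in_seconds*/) override {
        task->Run();  // Sketch only: the delay is ignored.
        delete task;
      }
      bool IdleTasksEnabled(v8::Isolate*) override { return true; }
      void CallIdleOnForegroundThread(v8::Isolate*,
                                      v8::IdleTask* task) override {
        idle_tasks_.push(task);  // Deferred until the embedder is idle.
      }
      double MonotonicallyIncreasingTime() override {
        return std::chrono::duration<double>(
                   std::chrono::steady_clock::now().time_since_epoch())
            .count();
      }
      // Embedder-driven pump: run queued idle tasks with the deadline that
      // the IdleTask contract above requires.
      void PumpIdleTasks(double budget_in_seconds) {
        double deadline = MonotonicallyIncreasingTime() + budget_in_seconds;
        while (!idle_tasks_.empty() &&
               MonotonicallyIncreasingTime() < deadline) {
          v8::IdleTask* task = idle_tasks_.front();
          idle_tasks_.pop();
          task->Run(deadline);
          delete task;
        }
      }

     private:
      std::queue<v8::IdleTask*> idle_tasks_;
    };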

7  deps/v8/include/v8-util.h

@@ -133,6 +133,8 @@ class DefaultGlobalMapTraits : public StdMapTraits<K, V> {
return K();
}
static void DisposeCallbackData(WeakCallbackDataType* data) {}
static void OnWeakCallback(
const WeakCallbackInfo<WeakCallbackDataType>& data) {}
static void Dispose(Isolate* isolate, Global<V> value, K key) {}
// This is a second pass callback, so SetSecondPassCallback cannot be called.
static void DisposeWeak(const WeakCallbackInfo<WeakCallbackDataType>& data) {}
@@ -452,7 +454,7 @@ class GlobalValueMap : public PersistentValueMapBase<K, V, Traits> {
: WeakCallbackType::kParameter;
Local<V> value(Local<V>::New(this->isolate(), *persistent));
persistent->template SetWeak<typename Traits::WeakCallbackDataType>(
Traits::WeakCallbackParameter(this, key, value), FirstWeakCallback,
Traits::WeakCallbackParameter(this, key, value), OnWeakCallback,
callback_type);
}
PersistentContainerValue old_value =
@@ -471,12 +473,13 @@ class GlobalValueMap : public PersistentValueMapBase<K, V, Traits> {
}
private:
static void FirstWeakCallback(
static void OnWeakCallback(
const WeakCallbackInfo<typename Traits::WeakCallbackDataType>& data) {
if (Traits::kCallbackType != kNotWeak) {
auto map = Traits::MapFromWeakCallbackInfo(data);
K key = Traits::KeyFromWeakCallbackInfo(data);
map->RemoveWeak(key);
Traits::OnWeakCallback(data);
data.SetSecondPassCallback(SecondWeakCallback);
}
}

6  deps/v8/include/v8-version.h

@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 4
#define V8_MINOR_VERSION 5
#define V8_BUILD_NUMBER 103
#define V8_PATCH_LEVEL 35
#define V8_MINOR_VERSION 6
#define V8_BUILD_NUMBER 85
#define V8_PATCH_LEVEL 23
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
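
These macros are the supported way for tool scripts and embedders to gate on the V8 version at compile time. A typical guard for the 4.6 APIs might look like the following sketch (the HAVE_ macro name is an illustrative assumption, not from this commit):

    // Compile-time gate on V8 >= 4.6 (e.g. for PersistentBase::Get below).
    #include "include/v8-version.h"

    #if V8_MAJOR_VERSION > 4 || (V8_MAJOR_VERSION == 4 && V8_MINOR_VERSION >= 6)
    #define HAVE_V8_46_APIS 1
    #else
    #define HAVE_V8_46_APIS 0
    #endif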

167  deps/v8/include/v8.h

@@ -509,6 +509,10 @@ template <class T> class PersistentBase {
V8_INLINE bool IsEmpty() const { return val_ == NULL; }
V8_INLINE void Empty() { val_ = 0; }
V8_INLINE Local<T> Get(Isolate* isolate) const {
return Local<T>::New(isolate, *this);
}
template <class S>
V8_INLINE bool operator==(const PersistentBase<S>& that) const {
internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
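
The four added lines are the "convenience method for converting v8::PersistentBase to v8::Local" noted in the 4.6.4 ChangeLog entry above. A usage sketch, assuming an isolate and a previously stored persistent handle:

    v8::Global<v8::Object> cached;  // ...Reset() with some object earlier.
    v8::HandleScope handle_scope(isolate);
    v8::Local<v8::Object> obj = cached.Get(isolate);  // new shorthand
    // Equivalent long form, which still works:
    v8::Local<v8::Object> obj2 = v8::Local<v8::Object>::New(isolate, cached);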
@ -634,8 +638,8 @@ template <class T> class PersistentBase {
friend class Object;
explicit V8_INLINE PersistentBase(T* val) : val_(val) {}
PersistentBase(PersistentBase& other) = delete; // NOLINT
void operator=(PersistentBase&) = delete;
PersistentBase(const PersistentBase& other) = delete; // NOLINT
void operator=(const PersistentBase&) = delete;
V8_INLINE static T* New(Isolate* isolate, T* that);
T* val_;
@ -841,8 +845,8 @@ class Global : public PersistentBase<T> {
private:
template <class F>
friend class ReturnValue;
Global(Global&) = delete;
void operator=(Global&) = delete;
Global(const Global&) = delete;
void operator=(const Global&) = delete;
V8_INLINE T* operator*() const { return this->val_; }
};
@ -1110,11 +1114,6 @@ class V8_EXPORT Script {
* Returns the corresponding context-unbound script.
*/
Local<UnboundScript> GetUnboundScript();
V8_DEPRECATED("Use GetUnboundScript()->GetId()",
int GetId()) {
return GetUnboundScript()->GetId();
}
};
@ -1386,15 +1385,13 @@ class V8_EXPORT ScriptCompiler {
/**
* Compile an ES6 module.
*
* This is an experimental feature.
* This is an unfinished experimental feature, and is only exposed
* here for internal testing purposes.
* Only parsing works at the moment. Do not use.
*
* TODO(adamk): Script is likely the wrong return value for this;
* should return some new Module type.
*/
static V8_DEPRECATE_SOON(
"Use maybe version",
Local<Script> CompileModule(Isolate* isolate, Source* source,
CompileOptions options = kNoCompileOptions));
static V8_WARN_UNUSED_RESULT MaybeLocal<Script> CompileModule(
Local<Context> context, Source* source,
CompileOptions options = kNoCompileOptions);
@ -3004,8 +3001,9 @@ class V8_EXPORT Map : public Object {
* in the same manner as the array returned from AsArray().
* Guaranteed to be side-effect free if the array contains no holes.
*/
static V8_WARN_UNUSED_RESULT MaybeLocal<Map> FromArray(Local<Context> context,
Local<Array> array);
static V8_WARN_UNUSED_RESULT V8_DEPRECATED(
"Use mutation methods instead",
MaybeLocal<Map> FromArray(Local<Context> context, Local<Array> array));
V8_INLINE static Map* Cast(Value* obj);
@ -3043,8 +3041,9 @@ class V8_EXPORT Set : public Object {
* Creates a new Set containing the items in array.
* Guaranteed to be side-effect free if the array contains no holes.
*/
static V8_WARN_UNUSED_RESULT MaybeLocal<Set> FromArray(Local<Context> context,
Local<Array> array);
static V8_WARN_UNUSED_RESULT V8_DEPRECATED(
"Use mutation methods instead",
MaybeLocal<Set> FromArray(Local<Context> context, Local<Array> array));
V8_INLINE static Set* Cast(Value* obj);
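Editorial note: with FromArray deprecated on both containers, the supported path is the mutation methods. A sketch, assuming a live context plus key and value handles.

v8::Local<v8::Map> map = v8::Map::New(isolate);
map = map->Set(context, key, value).ToLocalChecked();  // Yields the map.

v8::Local<v8::Set> set = v8::Set::New(isolate);
set = set->Add(context, key).ToLocalChecked();  // Yields the set.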
@ -4804,12 +4803,6 @@ class V8_EXPORT ResourceConstraints {
void ConfigureDefaults(uint64_t physical_memory,
uint64_t virtual_memory_limit);
// Deprecated, will be removed soon.
V8_DEPRECATED("Use two-args version instead",
void ConfigureDefaults(uint64_t physical_memory,
uint64_t virtual_memory_limit,
uint32_t number_of_processors));
int max_semi_space_size() const { return max_semi_space_size_; }
void set_max_semi_space_size(int value) { max_semi_space_size_ = value; }
int max_old_space_size() const { return max_old_space_size_; }
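Editorial note: callers of the removed three-argument overload simply drop the processor count. A sketch of the surviving form; the 2 GiB figure is illustrative.

v8::Isolate::CreateParams params;
params.constraints.ConfigureDefaults(
    static_cast<uint64_t>(2) * 1024 * 1024 * 1024,  // physical_memory
    0);                                             // virtual_memory_limit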
@ -4819,14 +4812,6 @@ class V8_EXPORT ResourceConstraints {
uint32_t* stack_limit() const { return stack_limit_; }
// Sets an address beyond which the VM's stack may not grow.
void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
V8_DEPRECATED("Unused, will be removed", int max_available_threads() const) {
return max_available_threads_;
}
// Set the number of threads available to V8, assuming at least 1.
V8_DEPRECATED("Unused, will be removed",
void set_max_available_threads(int value)) {
max_available_threads_ = value;
}
size_t code_range_size() const { return code_range_size_; }
void set_code_range_size(size_t value) {
code_range_size_ = value;
@ -4837,7 +4822,6 @@ class V8_EXPORT ResourceConstraints {
int max_old_space_size_;
int max_executable_size_;
uint32_t* stack_limit_;
int max_available_threads_;
size_t code_range_size_;
};
@ -4967,27 +4951,35 @@ typedef bool (*AllowCodeGenerationFromStringsCallback)(Local<Context> context);
// --- Garbage Collection Callbacks ---
/**
* Applications can register callback functions which will be called
* before and after a garbage collection. Allocations are not
* allowed in the callback functions, you therefore cannot manipulate
* objects (set or delete properties for example) since it is possible
* such operations will result in the allocation of objects.
* Applications can register callback functions which will be called before and
* after certain garbage collection operations. Allocations are not allowed in
* the callback functions; you therefore cannot manipulate objects (set or
* delete properties, for example), since such operations may result in the
* allocation of objects.
*/
enum GCType {
kGCTypeScavenge = 1 << 0,
kGCTypeMarkSweepCompact = 1 << 1,
kGCTypeAll = kGCTypeScavenge | kGCTypeMarkSweepCompact
kGCTypeIncrementalMarking = 1 << 2,
kGCTypeProcessWeakCallbacks = 1 << 3,
kGCTypeAll = kGCTypeScavenge | kGCTypeMarkSweepCompact |
kGCTypeIncrementalMarking | kGCTypeProcessWeakCallbacks
};
enum GCCallbackFlags {
kNoGCCallbackFlags = 0,
kGCCallbackFlagCompacted = 1 << 0,
kGCCallbackFlagConstructRetainedObjectInfos = 1 << 1,
kGCCallbackFlagForced = 1 << 2
kGCCallbackFlagForced = 1 << 2,
kGCCallbackFlagSynchronousPhantomCallbackProcessing = 1 << 3
};
typedef void (*GCPrologueCallback)(GCType type, GCCallbackFlags flags);
typedef void (*GCEpilogueCallback)(GCType type, GCCallbackFlags flags);
V8_DEPRECATE_SOON("Use GCCallBack instead",
typedef void (*GCPrologueCallback)(GCType type,
GCCallbackFlags flags));
V8_DEPRECATE_SOON("Use GCCallBack instead",
typedef void (*GCEpilogueCallback)(GCType type,
GCCallbackFlags flags));
typedef void (*GCCallback)(GCType type, GCCallbackFlags flags);
typedef void (*InterruptCallback)(Isolate* isolate, void* data);
@ -5370,8 +5362,6 @@ class V8_EXPORT Isolate {
*/
static Isolate* New(const CreateParams& params);
static V8_DEPRECATED("Always pass CreateParams", Isolate* New());
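Editorial note: with the parameterless New() deprecated here and V8::SetArrayBufferAllocator removed further down, every embedder must now pass an allocator through CreateParams. A sketch, with MallocAllocator as a hypothetical minimal implementation.

#include <cstdlib>

class MallocAllocator : public v8::ArrayBuffer::Allocator {
 public:
  void* Allocate(size_t length) override { return calloc(length, 1); }
  void* AllocateUninitialized(size_t length) override { return malloc(length); }
  void Free(void* data, size_t) override { free(data); }
};

v8::Isolate* NewIsolate() {
  static MallocAllocator allocator;  // Must outlive the isolate.
  v8::Isolate::CreateParams params;
  params.array_buffer_allocator = &allocator;
  return v8::Isolate::New(params);
}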
/**
* Returns the entered isolate for the current thread or NULL in
* case there is no current isolate.
@ -5380,19 +5370,6 @@ class V8_EXPORT Isolate {
*/
static Isolate* GetCurrent();
/**
* Custom callback used by embedders to help V8 determine if it should abort
* when it throws and no internal handler is predicted to catch the
* exception. If --abort-on-uncaught-exception is used on the command line,
* then V8 will abort if either:
* - no custom callback is set.
* - the custom callback set returns true.
* Otherwise, the custom callback will not be called and V8 will not abort.
*/
typedef bool (*AbortOnUncaughtExceptionCallback)(Isolate*);
void SetAbortOnUncaughtExceptionCallback(
AbortOnUncaughtExceptionCallback callback);
/**
* Methods below this point require holding a lock (using Locker) in
* a multi-threaded environment.
@ -5578,12 +5555,16 @@ class V8_EXPORT Isolate {
template<typename T, typename S>
void SetReference(const Persistent<T>& parent, const Persistent<S>& child);
typedef void (*GCPrologueCallback)(Isolate* isolate,
GCType type,
GCCallbackFlags flags);
typedef void (*GCEpilogueCallback)(Isolate* isolate,
GCType type,
GCCallbackFlags flags);
V8_DEPRECATE_SOON("Use GCCallBack instead",
typedef void (*GCPrologueCallback)(Isolate* isolate,
GCType type,
GCCallbackFlags flags));
V8_DEPRECATE_SOON("Use GCCallBack instead",
typedef void (*GCEpilogueCallback)(Isolate* isolate,
GCType type,
GCCallbackFlags flags));
typedef void (*GCCallback)(Isolate* isolate, GCType type,
GCCallbackFlags flags);
/**
* Enables the host application to receive a notification before a
@ -5594,14 +5575,14 @@ class V8_EXPORT Isolate {
* not possible to register the same callback function two times with
* different GCType filters.
*/
void AddGCPrologueCallback(
GCPrologueCallback callback, GCType gc_type_filter = kGCTypeAll);
void AddGCPrologueCallback(GCCallback callback,
GCType gc_type_filter = kGCTypeAll);
/**
* This function removes a callback which was installed by the
* AddGCPrologueCallback function.
*/
void RemoveGCPrologueCallback(GCPrologueCallback callback);
void RemoveGCPrologueCallback(GCCallback callback);
/**
* Enables the host application to receive a notification after a
@ -5612,15 +5593,14 @@ class V8_EXPORT Isolate {
* not possible to register the same callback function two times with
* different GCType filters.
*/
void AddGCEpilogueCallback(
GCEpilogueCallback callback, GCType gc_type_filter = kGCTypeAll);
void AddGCEpilogueCallback(GCCallback callback,
GCType gc_type_filter = kGCTypeAll);
/**
* This function removes a callback which was installed by the
* AddGCEpilogueCallback function.
*/
void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
void RemoveGCEpilogueCallback(GCCallback callback);
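Editorial note: a sketch of registering against the new GCCallback shape, filtering for the newly observable incremental-marking phase; the function names are illustrative.

static void OnGCPrologue(v8::Isolate* isolate, v8::GCType type,
                         v8::GCCallbackFlags flags) {
  // No allocation is allowed here; observe and record only.
}

void InstallGCHooks(v8::Isolate* isolate) {
  isolate->AddGCPrologueCallback(
      OnGCPrologue,
      static_cast<v8::GCType>(v8::kGCTypeMarkSweepCompact |
                              v8::kGCTypeIncrementalMarking));
}

void RemoveGCHooks(v8::Isolate* isolate) {
  isolate->RemoveGCPrologueCallback(OnGCPrologue);
}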
/**
* Forcefully terminate the current thread of JavaScript execution
@ -5984,16 +5964,6 @@ class V8_EXPORT V8 {
"Use isolate version", void SetAllowCodeGenerationFromStringsCallback(
AllowCodeGenerationFromStringsCallback that));
/**
* Set allocator to use for ArrayBuffer memory.
* The allocator should be set only once. The allocator should be set
* before any code that uses ArrayBuffers is executed.
* This allocator is used in all isolates.
*/
static V8_DEPRECATE_SOON(
"Use isolate version",
void SetArrayBufferAllocator(ArrayBuffer::Allocator* allocator));
/**
* Check if V8 is dead and therefore unusable. This is the case after
* fatal errors such as out-of-memory situations.
@ -6087,7 +6057,7 @@ class V8_EXPORT V8 {
*/
static V8_DEPRECATE_SOON(
"Use isolate version",
void AddGCPrologueCallback(GCPrologueCallback callback,
void AddGCPrologueCallback(GCCallback callback,
GCType gc_type_filter = kGCTypeAll));
/**
@ -6096,7 +6066,7 @@ class V8_EXPORT V8 {
*/
V8_INLINE static V8_DEPRECATE_SOON(
"Use isolate version",
void RemoveGCPrologueCallback(GCPrologueCallback callback));
void RemoveGCPrologueCallback(GCCallback callback));
/**
* Enables the host application to receive a notification after a
@ -6110,7 +6080,7 @@ class V8_EXPORT V8 {
*/
static V8_DEPRECATE_SOON(
"Use isolate version",
void AddGCEpilogueCallback(GCEpilogueCallback callback,
void AddGCEpilogueCallback(GCCallback callback,
GCType gc_type_filter = kGCTypeAll));
/**
@ -6119,7 +6089,7 @@ class V8_EXPORT V8 {
*/
V8_INLINE static V8_DEPRECATE_SOON(
"Use isolate version",
void RemoveGCEpilogueCallback(GCEpilogueCallback callback));
void RemoveGCEpilogueCallback(GCCallback callback));
/**
* Enables the host application to provide a mechanism to be notified
@ -6664,10 +6634,12 @@ class V8_EXPORT Context {
V8_INLINE Local<Value> GetEmbedderData(int index);
/**
* Gets the exports object used by V8 extras. Extra natives get a reference
* to this object and can use it to export functionality.
* Gets the binding object used by V8 extras. Extra natives get a reference
* to this object and can use it to "export" functionality by adding
* properties. Extra natives can also "import" functionality by accessing
* properties added by the embedder using the V8 API.
*/
Local<Object> GetExtrasExportsObject();
Local<Object> GetExtrasBindingObject();
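Editorial note: the rename reflects that traffic now flows both ways. A sketch of the embedder-to-extras direction, assuming a live context and isolate and a hypothetical FunctionCallback named LogCallback.

v8::Local<v8::Object> binding = context->GetExtrasBindingObject();
binding->Set(context,
             v8::String::NewFromUtf8(isolate, "log",
                                     v8::NewStringType::kNormal)
                 .ToLocalChecked(),
             v8::Function::New(context, LogCallback).ToLocalChecked())
    .FromJust();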
/**
* Sets the embedder data with the given index, growing the data as
@ -6719,6 +6691,11 @@ class V8_EXPORT Context {
*/
void SetErrorMessageForCodeGenerationFromStrings(Local<String> message);
/**
* Estimate the memory in bytes retained by this context.
*/
size_t EstimatedSize();
/**
* Stack-allocated class which sets the execution context for all
* operations executed within a local scope.
@ -6966,12 +6943,12 @@ class Internals {
1 * kApiPointerSize + kApiIntSize;
static const int kStringResourceOffset = 3 * kApiPointerSize;
static const int kOddballKindOffset = 3 * kApiPointerSize;
static const int kOddballKindOffset = 4 * kApiPointerSize;
static const int kForeignAddressOffset = kApiPointerSize;
static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
static const int kContextHeaderSize = 2 * kApiPointerSize;
static const int kContextEmbedderDataIndex = 81;
static const int kContextEmbedderDataIndex = 27;
static const int kFullStringRepresentationMask = 0x07;
static const int kStringEncodingMask = 0x4;
static const int kExternalTwoByteRepresentationTag = 0x02;
@ -7004,7 +6981,7 @@ class Internals {
static const int kNodeIsIndependentShift = 3;
static const int kNodeIsPartiallyDependentShift = 4;
static const int kJSObjectType = 0xbe;
static const int kJSObjectType = 0xb6;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
@ -8291,17 +8268,17 @@ void V8::SetFatalErrorHandler(FatalErrorCallback callback) {
}
void V8::RemoveGCPrologueCallback(GCPrologueCallback callback) {
void V8::RemoveGCPrologueCallback(GCCallback callback) {
Isolate* isolate = Isolate::GetCurrent();
isolate->RemoveGCPrologueCallback(
reinterpret_cast<v8::Isolate::GCPrologueCallback>(callback));
reinterpret_cast<v8::Isolate::GCCallback>(callback));
}
void V8::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
void V8::RemoveGCEpilogueCallback(GCCallback callback) {
Isolate* isolate = Isolate::GetCurrent();
isolate->RemoveGCEpilogueCallback(
reinterpret_cast<v8::Isolate::GCEpilogueCallback>(callback));
reinterpret_cast<v8::Isolate::GCCallback>(callback));
}

1
deps/v8/infra/project-config/README.md

@ -1 +0,0 @@
This directory contains v8 project-wide configurations for infra services.

23
deps/v8/infra/project-config/cr-buildbucket.cfg

@ -1,23 +0,0 @@
# Defines buckets on cr-buildbucket.appspot.com, used to schedule builds
# on buildbot. In particular, CQ uses some of these buckets to schedule tryjobs.
#
# See http://luci-config.appspot.com/schemas/projects:buildbucket.cfg for
# schema of this file and documentation.
#
# Please keep this list sorted by bucket name.
buckets {
name: "master.tryserver.v8"
acls {
role: READER
group: "all"
}
acls {
role: SCHEDULER
group: "service-account-cq"
}
acls {
role: WRITER
group: "service-account-v8-master"
}
}

12
deps/v8/samples/process.cc

@ -666,11 +666,13 @@ StringHttpRequest kSampleRequests[kSampleSize] = {
};
bool ProcessEntries(HttpRequestProcessor* processor, int count,
StringHttpRequest* reqs) {
bool ProcessEntries(v8::Platform* platform, HttpRequestProcessor* processor,
int count, StringHttpRequest* reqs) {
for (int i = 0; i < count; i++) {
if (!processor->Process(&reqs[i]))
return false;
bool result = processor->Process(&reqs[i]);
while (v8::platform::PumpMessageLoop(platform, Isolate::GetCurrent()))
continue;
if (!result) return false;
}
return true;
}
@ -714,7 +716,7 @@ int main(int argc, char* argv[]) {
fprintf(stderr, "Error initializing processor.\n");
return 1;
}
if (!ProcessEntries(&processor, kSampleSize, kSampleRequests))
if (!ProcessEntries(platform, &processor, kSampleSize, kSampleRequests))
return 1;
PrintMap(&output);
}

24
deps/v8/samples/shell.cc

@ -45,8 +45,9 @@
v8::Local<v8::Context> CreateShellContext(v8::Isolate* isolate);
void RunShell(v8::Local<v8::Context> context);
int RunMain(v8::Isolate* isolate, int argc, char* argv[]);
void RunShell(v8::Local<v8::Context> context, v8::Platform* platform);
int RunMain(v8::Isolate* isolate, v8::Platform* platform, int argc,
char* argv[]);
bool ExecuteString(v8::Isolate* isolate, v8::Local<v8::String> source,
v8::Local<v8::Value> name, bool print_result,
bool report_exceptions);
@ -95,8 +96,8 @@ int main(int argc, char* argv[]) {
return 1;
}
v8::Context::Scope context_scope(context);
result = RunMain(isolate, argc, argv);
if (run_shell) RunShell(context);
result = RunMain(isolate, platform, argc, argv);
if (run_shell) RunShell(context, platform);
}
isolate->Dispose();
v8::V8::Dispose();
@ -270,7 +271,8 @@ v8::MaybeLocal<v8::String> ReadFile(v8::Isolate* isolate, const char* name) {
// Process remaining command line arguments and execute files
int RunMain(v8::Isolate* isolate, int argc, char* argv[]) {
int RunMain(v8::Isolate* isolate, v8::Platform* platform, int argc,
char* argv[]) {
for (int i = 1; i < argc; i++) {
const char* str = argv[i];
if (strcmp(str, "--shell") == 0) {
@ -293,7 +295,9 @@ int RunMain(v8::Isolate* isolate, int argc, char* argv[]) {
.ToLocal(&source)) {
return 1;
}
if (!ExecuteString(isolate, source, file_name, false, true)) return 1;
bool success = ExecuteString(isolate, source, file_name, false, true);
while (v8::platform::PumpMessageLoop(platform, isolate)) continue;
if (!success) return 1;
} else {
// Use all other arguments as names of files to load and run.
v8::Local<v8::String> file_name =
@ -304,7 +308,9 @@ int RunMain(v8::Isolate* isolate, int argc, char* argv[]) {
fprintf(stderr, "Error reading '%s'\n", str);
continue;
}
if (!ExecuteString(isolate, source, file_name, false, true)) return 1;
bool success = ExecuteString(isolate, source, file_name, false, true);
while (v8::platform::PumpMessageLoop(platform, isolate)) continue;
if (!success) return 1;
}
}
return 0;
@ -312,7 +318,7 @@ int RunMain(v8::Isolate* isolate, int argc, char* argv[]) {
// The read-eval-execute loop of the shell.
void RunShell(v8::Local<v8::Context> context) {
void RunShell(v8::Local<v8::Context> context, v8::Platform* platform) {
fprintf(stderr, "V8 version %s [sample shell]\n", v8::V8::GetVersion());
static const int kBufferSize = 256;
// Enter the execution environment before evaluating any code.
@ -331,6 +337,8 @@ void RunShell(v8::Local<v8::Context> context) {
v8::String::NewFromUtf8(context->GetIsolate(), str,
v8::NewStringType::kNormal).ToLocalChecked(),
name, true, true);
while (v8::platform::PumpMessageLoop(platform, context->GetIsolate()))
continue;
}
fprintf(stderr, "\n");
}

12
deps/v8/src/DEPS

@ -2,11 +2,23 @@ include_rules = [
"+src",
"-src/compiler",
"+src/compiler/pipeline.h",
"-src/heap",
"+src/heap/heap.h",
"+src/heap/heap-inl.h",
"-src/interpreter",
"+src/interpreter/bytecodes.h",
"+src/interpreter/interpreter.h",
"-src/libplatform",
"-include/libplatform"
]
specific_include_rules = {
".*\.h": [
# Note that src/v8.h is the top header for some .cc files, but it shouldn't
# be included in any .h files. In the long run we should make src/v8.h
# act like any normal header file, instead of a grab-bag include.
"-src/v8.h",
],
"d8\.cc": [
"+include/libplatform/libplatform.h",
],

45
deps/v8/src/accessors.cc

@ -2,9 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/accessors.h"
#include "src/api.h"
#include "src/contexts.h"
#include "src/deoptimizer.h"
@ -100,37 +99,22 @@ bool Accessors::IsJSArrayBufferViewFieldAccessor(Handle<Map> map,
Isolate* isolate = name->GetIsolate();
switch (map->instance_type()) {
case JS_TYPED_ARRAY_TYPE: {
if (!CheckForName(name, isolate->factory()->length_string(),
JSTypedArray::kLengthOffset, object_offset) &&
!CheckForName(name, isolate->factory()->byte_length_string(),
JSTypedArray::kByteLengthOffset, object_offset) &&
!CheckForName(name, isolate->factory()->byte_offset_string(),
JSTypedArray::kByteOffsetOffset, object_offset)) {
case JS_TYPED_ARRAY_TYPE:
// %TypedArray%.prototype is non-configurable, and so are the following
// named properties on %TypedArray%.prototype, so we can directly inline
// the field-load for typed array maps that still use their
// %TypedArray%.prototype.
if (JSFunction::cast(map->GetConstructor())->prototype() !=
map->prototype()) {
return false;
}
return CheckForName(name, isolate->factory()->length_string(),
JSTypedArray::kLengthOffset, object_offset) ||
CheckForName(name, isolate->factory()->byte_length_string(),
JSTypedArray::kByteLengthOffset, object_offset) ||
CheckForName(name, isolate->factory()->byte_offset_string(),
JSTypedArray::kByteOffsetOffset, object_offset);
if (map->is_dictionary_map()) return false;
// Check if the property is overridden on the instance.
DescriptorArray* descriptors = map->instance_descriptors();
int descriptor = descriptors->SearchWithCache(*name, *map);
if (descriptor != DescriptorArray::kNotFound) return false;
Handle<Object> proto = Handle<Object>(map->prototype(), isolate);
if (!proto->IsJSReceiver()) return false;
// Check if the property is defined in the prototype chain.
LookupIterator it(proto, name);
if (!it.IsFound()) return false;
Object* original_proto =
JSFunction::cast(map->GetConstructor())->prototype();
// Property is not configurable. It is enough to verify that
// the holder is the same.
return *it.GetHolder<Object>() == original_proto;
}
case JS_DATA_VIEW_TYPE:
return CheckForName(name, isolate->factory()->byte_length_string(),
JSDataView::kByteLengthOffset, object_offset) ||
@ -1012,7 +996,6 @@ MUST_USE_RESULT static MaybeHandle<Object> ReplaceAccessorWithDataProperty(
CHECK_EQ(LookupIterator::ACCESSOR, it.state());
DCHECK(it.HolderIsReceiverOrHiddenPrototype());
it.ReconfigureDataProperty(value, it.property_details().attributes());
it.WriteDataValue(value);
if (is_observed && !old_value->SameValue(*value)) {
return JSObject::EnqueueChangeRecord(object, "update", name, old_value);

6
deps/v8/src/accessors.h

@ -5,12 +5,18 @@
#ifndef V8_ACCESSORS_H_
#define V8_ACCESSORS_H_
#include "include/v8.h"
#include "src/allocation.h"
#include "src/globals.h"
#include "src/handles.h"
#include "src/property-details.h"
namespace v8 {
namespace internal {
// Forward declarations.
class ExecutableAccessorInfo;
// The list of accessor descriptors. This is a second-order macro
// taking a macro to be applied to all accessor descriptor names.
#define ACCESSOR_INFO_LIST(V) \

2
deps/v8/src/allocation-tracker.cc

@ -6,7 +6,7 @@
#include "src/allocation-tracker.h"
#include "src/frames-inl.h"
#include "src/heap-snapshot-generator.h"
#include "src/heap-snapshot-generator-inl.h"
namespace v8 {
namespace internal {

13
deps/v8/src/allocation-tracker.h

@ -7,12 +7,21 @@
#include <map>
#include "include/v8-profiler.h"
#include "src/handles.h"
#include "src/hashmap.h"
#include "src/list.h"
#include "src/vector.h"
namespace v8 {
namespace internal {
class HeapObjectsMap;
// Forward declarations.
class AllocationTraceTree;
class AllocationTracker;
class HeapObjectsMap;
class SharedFunctionInfo;
class StringsStorage;
class AllocationTraceNode {
public:

5
deps/v8/src/api-natives.h

@ -6,10 +6,15 @@
#define V8_API_NATIVES_H_
#include "src/handles.h"
#include "src/property-details.h"
namespace v8 {
namespace internal {
// Forward declarations.
class ObjectTemplateInfo;
class TemplateInfo;
class ApiNatives {
public:
static const int kInitialFunctionCacheSize = 256;

384
deps/v8/src/api.cc

@ -20,17 +20,18 @@
#include "src/base/platform/time.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/char-predicates-inl.h"
#include "src/code-stubs.h"
#include "src/compiler.h"
#include "src/context-measure.h"
#include "src/contexts.h"
#include "src/conversions-inl.h"
#include "src/counters.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/global-handles.h"
#include "src/heap/spaces.h"
#include "src/heap-profiler.h"
#include "src/heap-snapshot-generator-inl.h"
#include "src/icu_util.h"
@ -51,6 +52,7 @@
#include "src/snapshot/snapshot.h"
#include "src/startup-data-util.h"
#include "src/unicode-inl.h"
#include "src/v8.h"
#include "src/v8threads.h"
#include "src/version.h"
#include "src/vm-state-inl.h"
@ -368,14 +370,12 @@ StartupData V8::CreateSnapshotDataBlob(const char* custom_source) {
base::ElapsedTimer timer;
timer.Start();
Isolate::Scope isolate_scope(isolate);
internal_isolate->set_creating_default_snapshot(true);
internal_isolate->Init(NULL);
Persistent<Context> context;
i::Snapshot::Metadata metadata;
{
HandleScope handle_scope(isolate);
Local<Context> new_context = Context::New(isolate);
internal_isolate->set_creating_default_snapshot(false);
context.Reset(isolate, new_context);
if (custom_source != NULL) {
metadata.set_embeds_script(true);
@ -384,16 +384,31 @@ StartupData V8::CreateSnapshotDataBlob(const char* custom_source) {
}
}
if (!context.IsEmpty()) {
// Make sure all builtin scripts are cached.
{
HandleScope scope(isolate);
for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
internal_isolate->bootstrapper()->SourceLookup<i::Natives>(i);
}
}
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of the context.
internal_isolate->heap()->CollectAllAvailableGarbage("mksnapshot");
// GC may have cleared weak cells, so compact any WeakFixedArrays
// found on the heap.
i::HeapIterator iterator(internal_isolate->heap(),
i::HeapIterator::kFilterUnreachable);
for (i::HeapObject* o = iterator.next(); o != NULL; o = iterator.next()) {
if (o->IsPrototypeInfo()) {
i::Object* prototype_users =
i::PrototypeInfo::cast(o)->prototype_users();
if (prototype_users->IsWeakFixedArray()) {
i::WeakFixedArray* array = i::WeakFixedArray::cast(prototype_users);
array->Compact<i::JSObject::PrototypeRegistryCompactionCallback>();
}
} else if (o->IsScript()) {
i::Object* shared_list = i::Script::cast(o)->shared_function_infos();
if (shared_list->IsWeakFixedArray()) {
i::WeakFixedArray* array = i::WeakFixedArray::cast(shared_list);
array->Compact<i::WeakFixedArray::NullCallback>();
}
}
}
i::Object* raw_context = *v8::Utils::OpenPersistent(context);
context.Reset();
@ -481,15 +496,8 @@ ResourceConstraints::ResourceConstraints()
max_old_space_size_(0),
max_executable_size_(0),
stack_limit_(NULL),
max_available_threads_(0),
code_range_size_(0) { }
void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
uint64_t virtual_memory_limit,
uint32_t number_of_processors) {
ConfigureDefaults(physical_memory, virtual_memory_limit);
}
void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
uint64_t virtual_memory_limit) {
#if V8_OS_ANDROID
@ -783,6 +791,7 @@ static i::Handle<i::FixedArray> EmbedderDataFor(Context* context,
bool can_grow,
const char* location) {
i::Handle<i::Context> env = Utils::OpenHandle(context);
i::Isolate* isolate = env->GetIsolate();
bool ok =
Utils::ApiCheck(env->IsNativeContext(),
location,
@ -795,7 +804,8 @@ static i::Handle<i::FixedArray> EmbedderDataFor(Context* context,
return i::Handle<i::FixedArray>();
}
int new_size = i::Max(index, data->length() << 1) + 1;
data = i::FixedArray::CopySize(data, new_size);
int grow_by = new_size - data->length();
data = isolate->factory()->CopyFixedArrayAndGrow(data, grow_by);
env->set_embedder_data(*data);
return data;
}
@ -1813,13 +1823,6 @@ MaybeLocal<Script> ScriptCompiler::CompileModule(Local<Context> context,
}
Local<Script> ScriptCompiler::CompileModule(Isolate* v8_isolate, Source* source,
CompileOptions options) {
auto context = v8_isolate->GetCurrentContext();
RETURN_TO_LOCAL_UNCHECKED(CompileModule(context, source, options), Script);
}
class IsIdentifierHelper {
public:
IsIdentifierHelper() : is_identifier_(false), first_char_(true) {}
@ -1916,11 +1919,27 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
context = factory->NewWithContext(closure, context, extension);
}
i::Handle<i::Object> name_obj;
int line_offset = 0;
int column_offset = 0;
if (!source->resource_name.IsEmpty()) {
name_obj = Utils::OpenHandle(*(source->resource_name));
}
if (!source->resource_line_offset.IsEmpty()) {
line_offset = static_cast<int>(source->resource_line_offset->Value());
}
if (!source->resource_column_offset.IsEmpty()) {
column_offset = static_cast<int>(source->resource_column_offset->Value());
}
i::Handle<i::JSFunction> fun;
has_pending_exception =
!i::Compiler::GetFunctionFromEval(
source_string, outer_info, context, i::SLOPPY,
i::ONLY_SINGLE_FUNCTION_LITERAL, scope_position).ToHandle(&fun);
has_pending_exception = !i::Compiler::GetFunctionFromEval(
source_string, outer_info, context, i::SLOPPY,
i::ONLY_SINGLE_FUNCTION_LITERAL, line_offset,
column_offset - scope_position, name_obj,
source->resource_options).ToHandle(&fun);
if (has_pending_exception) {
isolate->ReportPendingMessages();
}
RETURN_ON_FAILED_EXECUTION(Function);
i::Handle<i::Object> result;
@ -1983,11 +2002,11 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
// Do the parsing tasks which need to be done on the main thread. This will
// also handle parse errors.
source->parser->Internalize(isolate, script,
source->info->function() == nullptr);
source->info->literal() == nullptr);
source->parser->HandleSourceURLComments(isolate, script);
i::Handle<i::SharedFunctionInfo> result;
if (source->info->function() != nullptr) {
if (source->info->literal() != nullptr) {
// Parsing has succeeded.
result = i::Compiler::CompileStreamedScript(script, source->info.get(),
str->length());
@ -2256,31 +2275,15 @@ v8::Local<v8::StackTrace> Message::GetStackTrace() const {
}
MUST_USE_RESULT static i::MaybeHandle<i::Object> CallV8HeapFunction(
i::Isolate* isolate, const char* name, i::Handle<i::Object> recv, int argc,
i::Handle<i::Object> argv[]) {
i::Handle<i::Object> object_fun =
i::Object::GetProperty(
isolate, isolate->js_builtins_object(), name).ToHandleChecked();
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(object_fun);
return i::Execution::Call(isolate, fun, recv, argc, argv);
}
MUST_USE_RESULT static i::MaybeHandle<i::Object> CallV8HeapFunction(
i::Isolate* isolate, const char* name, i::Handle<i::Object> data) {
i::Handle<i::Object> argv[] = { data };
return CallV8HeapFunction(isolate, name, isolate->js_builtins_object(),
arraysize(argv), argv);
}
Maybe<int> Message::GetLineNumber(Local<Context> context) const {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Message::GetLineNumber()", int);
i::Handle<i::JSFunction> fun = isolate->message_get_line_number();
i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
i::Handle<i::Object> args[] = {Utils::OpenHandle(this)};
i::Handle<i::Object> result;
has_pending_exception =
!CallV8HeapFunction(isolate, "$messageGetLineNumber",
Utils::OpenHandle(this)).ToHandle(&result);
!i::Execution::Call(isolate, fun, undefined, arraysize(args), args)
.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int);
return Just(static_cast<int>(result->Number()));
}
@ -2307,13 +2310,15 @@ int Message::GetEndPosition() const {
Maybe<int> Message::GetStartColumn(Local<Context> context) const {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Message::GetStartColumn()",
int);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> start_col_obj;
i::Handle<i::JSFunction> fun = isolate->message_get_column_number();
i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
i::Handle<i::Object> args[] = {Utils::OpenHandle(this)};
i::Handle<i::Object> result;
has_pending_exception =
!CallV8HeapFunction(isolate, "$messageGetPositionInLine", self)
.ToHandle(&start_col_obj);
!i::Execution::Call(isolate, fun, undefined, arraysize(args), args)
.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int);
return Just(static_cast<int>(start_col_obj->Number()));
return Just(static_cast<int>(result->Number()));
}
@ -2325,16 +2330,19 @@ int Message::GetStartColumn() const {
Maybe<int> Message::GetEndColumn(Local<Context> context) const {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Message::GetEndColumn()", int);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> start_col_obj;
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Message::GetEndColumn()", int);
i::Handle<i::JSFunction> fun = isolate->message_get_column_number();
i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
i::Handle<i::Object> args[] = {self};
i::Handle<i::Object> result;
has_pending_exception =
!CallV8HeapFunction(isolate, "$messageGetPositionInLine", self)
.ToHandle(&start_col_obj);
!i::Execution::Call(isolate, fun, undefined, arraysize(args), args)
.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int);
int start = self->start_position();
int end = self->end_position();
return Just(static_cast<int>(start_col_obj->Number()) + (end - start));
return Just(static_cast<int>(result->Number()) + (end - start));
}
@ -2368,10 +2376,13 @@ bool Message::IsOpaque() const {
MaybeLocal<String> Message::GetSourceLine(Local<Context> context) const {
PREPARE_FOR_EXECUTION(context, "v8::Message::GetSourceLine()", String);
i::Handle<i::JSFunction> fun = isolate->message_get_source_line();
i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
i::Handle<i::Object> args[] = {Utils::OpenHandle(this)};
i::Handle<i::Object> result;
has_pending_exception =
!CallV8HeapFunction(isolate, "$messageGetSourceLine",
Utils::OpenHandle(this)).ToHandle(&result);
!i::Execution::Call(isolate, fun, undefined, arraysize(args), args)
.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(String);
Local<String> str;
if (result->IsString()) {
@ -2787,34 +2798,23 @@ bool Value::IsUint32() const {
}
static bool CheckConstructor(i::Isolate* isolate,
i::Handle<i::JSObject> obj,
const char* class_name) {
i::Handle<i::Object> constr(obj->map()->GetConstructor(), isolate);
if (!constr->IsJSFunction()) return false;
i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(constr);
return func->shared()->native() && constr.is_identical_to(
i::Object::GetProperty(isolate,
isolate->js_builtins_object(),
class_name).ToHandleChecked());
}
bool Value::IsNativeError() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsJSObject()) {
i::Handle<i::JSObject> js_obj(i::JSObject::cast(*obj));
i::Isolate* isolate = js_obj->GetIsolate();
return CheckConstructor(isolate, js_obj, "$Error") ||
CheckConstructor(isolate, js_obj, "$EvalError") ||
CheckConstructor(isolate, js_obj, "$RangeError") ||
CheckConstructor(isolate, js_obj, "$ReferenceError") ||
CheckConstructor(isolate, js_obj, "$SyntaxError") ||
CheckConstructor(isolate, js_obj, "$TypeError") ||
CheckConstructor(isolate, js_obj, "$URIError");
} else {
return false;
}
if (!obj->IsJSObject()) return false;
i::Handle<i::JSObject> js_obj = i::Handle<i::JSObject>::cast(obj);
i::Isolate* isolate = js_obj->GetIsolate();
i::Handle<i::Object> constructor(js_obj->map()->GetConstructor(), isolate);
if (!constructor->IsJSFunction()) return false;
i::Handle<i::JSFunction> function =
i::Handle<i::JSFunction>::cast(constructor);
if (!function->shared()->native()) return false;
return function.is_identical_to(isolate->error_function()) ||
function.is_identical_to(isolate->eval_error_function()) ||
function.is_identical_to(isolate->range_error_function()) ||
function.is_identical_to(isolate->reference_error_function()) ||
function.is_identical_to(isolate->syntax_error_function()) ||
function.is_identical_to(isolate->type_error_function()) ||
function.is_identical_to(isolate->uri_error_function());
}
@ -3372,9 +3372,11 @@ Maybe<bool> Value::Equals(Local<Context> context, Local<Value> that) const {
}
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Value::Equals()", bool);
i::Handle<i::Object> args[] = { other };
i::Handle<i::JSFunction> fun(i::JSFunction::cast(
isolate->js_builtins_object()->javascript_builtin(i::Builtins::EQUALS)));
i::Handle<i::Object> result;
has_pending_exception =
!CallV8HeapFunction(isolate, "EQUALS", self, arraysize(args), args)
!i::Execution::Call(isolate, fun, self, arraysize(args), args)
.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(*result == i::Smi::FromInt(i::EQUAL));
@ -3397,33 +3399,9 @@ bool Value::Equals(Local<Value> that) const {
bool Value::StrictEquals(Local<Value> that) const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> other = Utils::OpenHandle(*that);
if (obj->IsSmi()) {
return other->IsNumber() && obj->Number() == other->Number();
}
i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
LOG_API(isolate, "StrictEquals");
// Must check HeapNumber first, since NaN !== NaN.
if (obj->IsHeapNumber()) {
if (!other->IsNumber()) return false;
double x = obj->Number();
double y = other->Number();
// Must check explicitly for NaNs on Windows, but -0 works fine.
return x == y && !std::isnan(x) && !std::isnan(y);
} else if (*obj == *other) { // Also covers Booleans.
return true;
} else if (obj->IsSmi()) {
return other->IsNumber() && obj->Number() == other->Number();
} else if (obj->IsString()) {
return other->IsString() &&
i::String::Equals(i::Handle<i::String>::cast(obj),
i::Handle<i::String>::cast(other));
} else if (obj->IsUndefined() || obj->IsUndetectableObject()) {
return other->IsUndefined() || other->IsUndetectableObject();
} else {
return false;
}
auto self = Utils::OpenHandle(this);
auto other = Utils::OpenHandle(*that);
return self->StrictEquals(*other);
}
@ -3459,8 +3437,8 @@ Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context, uint32_t index,
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Set()", bool);
auto self = Utils::OpenHandle(this);
auto value_obj = Utils::OpenHandle(*value);
has_pending_exception =
i::JSReceiver::SetElement(self, index, value_obj, i::SLOPPY).is_null();
has_pending_exception = i::Object::SetElement(isolate, self, index, value_obj,
i::SLOPPY).is_null();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
}
@ -3528,11 +3506,12 @@ Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
i::Handle<i::JSArray> desc_array =
isolate->factory()->NewJSArrayWithElements(desc, i::FAST_ELEMENTS, 3);
i::Handle<i::Object> args[] = {self, key_obj, value_obj, desc_array};
i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
i::Handle<i::JSFunction> fun = isolate->object_define_own_property();
i::Handle<i::Object> result;
has_pending_exception =
!CallV8HeapFunction(isolate, "$objectDefineOwnProperty",
isolate->factory()->undefined_value(),
arraysize(args), args).ToHandle(&result);
!i::Execution::Call(isolate, fun, undefined, arraysize(args), args)
.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(result->BooleanValue());
}
@ -3664,11 +3643,12 @@ MaybeLocal<Value> v8::Object::GetOwnPropertyDescriptor(Local<Context> context,
auto obj = Utils::OpenHandle(this);
auto key_name = Utils::OpenHandle(*key);
i::Handle<i::Object> args[] = { obj, key_name };
i::Handle<i::JSFunction> fun = isolate->object_get_own_property_descriptor();
i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
i::Handle<i::Object> result;
has_pending_exception =
!CallV8HeapFunction(isolate, "$objectGetOwnPropertyDescriptor",
isolate->factory()->undefined_value(),
arraysize(args), args).ToHandle(&result);
!i::Execution::Call(isolate, fun, undefined, arraysize(args), args)
.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(Utils::ToLocal(result));
}
@ -3798,7 +3778,8 @@ MaybeLocal<String> v8::Object::ObjectProtoToString(Local<Context> context) {
isolate, self, toStringTag).ToHandle(&tag);
RETURN_ON_FAILED_EXECUTION(String);
if (tag->IsString()) {
class_name = i::Handle<i::String>::cast(tag).EscapeFrom(&handle_scope);
class_name = Utils::OpenHandle(*handle_scope.Escape(
Utils::ToLocal(i::Handle<i::String>::cast(tag))));
}
}
const char* prefix = "[object ";
@ -5123,7 +5104,6 @@ static inline int WriteHelper(const String* string,
ENTER_V8(isolate);
DCHECK(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(string);
isolate->string_tracker()->RecordWrite(str);
if (options & String::HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
// using StringCharacterStream or Get(i) to access the characters.
@ -5360,15 +5340,6 @@ void v8::V8::SetReturnAddressLocationResolver(
i::V8::SetReturnAddressLocationResolver(return_address_resolver);
}
void v8::V8::SetArrayBufferAllocator(
ArrayBuffer::Allocator* allocator) {
if (!Utils::ApiCheck(i::V8::ArrayBufferAllocator() == NULL,
"v8::V8::SetArrayBufferAllocator",
"ArrayBufferAllocator might only be set once"))
return;
i::V8::SetArrayBufferAllocator(allocator);
}
bool v8::V8::Dispose() {
i::V8::TearDown();
@ -5557,11 +5528,11 @@ void Context::DetachGlobal() {
}
Local<v8::Object> Context::GetExtrasExportsObject() {
Local<v8::Object> Context::GetExtrasBindingObject() {
i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Isolate* isolate = context->GetIsolate();
i::Handle<i::JSObject> exports(context->extras_exports_object(), isolate);
return Utils::ToLocal(exports);
i::Handle<i::JSObject> binding(context->extras_binding_object(), isolate);
return Utils::ToLocal(binding);
}
@ -5587,6 +5558,12 @@ void Context::SetErrorMessageForCodeGenerationFromStrings(Local<String> error) {
}
size_t Context::EstimatedSize() {
return static_cast<size_t>(
i::ContextMeasure(*Utils::OpenHandle(this)).Size());
}
MaybeLocal<v8::Object> ObjectTemplate::NewInstance(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, "v8::ObjectTemplate::NewInstance()", Object);
auto self = Utils::OpenHandle(this);
@ -5850,9 +5827,6 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
return false; // Already an external string.
}
ENTER_V8(isolate);
if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
return false;
}
if (isolate->heap()->IsInGCPostProcessing()) {
return false;
}
@ -5877,9 +5851,6 @@ bool v8::String::MakeExternal(
return false; // Already an external string.
}
ENTER_V8(isolate);
if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
return false;
}
if (isolate->heap()->IsInGCPostProcessing()) {
return false;
}
@ -5900,9 +5871,10 @@ bool v8::String::CanMakeExternal() {
i::Handle<i::String> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
if (isolate->string_tracker()->IsFreshUnusedString(obj)) return false;
// Old space strings should be externalized.
if (!isolate->heap()->new_space()->Contains(*obj)) return true;
int size = obj->Size(); // Byte size of the original string.
if (size < i::ExternalString::kShortSize) return false;
if (size <= i::ExternalString::kShortSize) return false;
i::StringShape shape(*obj);
return !shape.IsExternal();
}
@ -6587,6 +6559,8 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
size_t byte_length,
ArrayBufferCreationMode mode) {
// Embedders must guarantee that the external backing store is valid.
CHECK(byte_length == 0 || data != NULL);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, "v8::ArrayBuffer::New(void*, size_t)");
ENTER_V8(i_isolate);
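Editorial note: the new CHECK fails fast instead of letting a NULL-backed, non-empty buffer escape into the heap. A sketch of a valid externalized buffer, assuming the storage outlives the ArrayBuffer.

static char backing_store[1024];  // Embedder-owned; must stay valid.
v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(
    isolate, backing_store, sizeof(backing_store),
    v8::ArrayBufferCreationMode::kExternalized);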
@ -6784,6 +6758,8 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
Isolate* isolate, void* data, size_t byte_length,
ArrayBufferCreationMode mode) {
CHECK(i::FLAG_harmony_sharedarraybuffer);
// Embedders must guarantee that the external backing store is valid.
CHECK(byte_length == 0 || data != NULL);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, "v8::SharedArrayBuffer::New(void*, size_t)");
ENTER_V8(i_isolate);
@ -6896,8 +6872,31 @@ Local<Integer> v8::Integer::NewFromUnsigned(Isolate* isolate, uint32_t value) {
void Isolate::CollectAllGarbage(const char* gc_reason) {
reinterpret_cast<i::Isolate*>(this)->heap()->CollectAllGarbage(
i::Heap::kNoGCFlags, gc_reason);
i::Heap* heap = reinterpret_cast<i::Isolate*>(this)->heap();
DCHECK_EQ(heap->gc_state(), i::Heap::NOT_IN_GC);
if (heap->incremental_marking()->IsStopped()) {
if (heap->incremental_marking()->CanBeActivated()) {
heap->StartIncrementalMarking(
i::Heap::kNoGCFlags,
kGCCallbackFlagSynchronousPhantomCallbackProcessing, gc_reason);
} else {
heap->CollectAllGarbage(
i::Heap::kNoGCFlags, gc_reason,
kGCCallbackFlagSynchronousPhantomCallbackProcessing);
}
} else {
// Incremental marking is turned on and has already been started.
// TODO(mlippautz): Compute the time slice for incremental marking based on
// memory pressure.
double deadline = heap->MonotonicallyIncreasingTimeInMs() +
i::FLAG_external_allocation_limit_incremental_time;
heap->AdvanceIncrementalMarking(
0, deadline, i::IncrementalMarking::StepActions(
i::IncrementalMarking::GC_VIA_STACK_GUARD,
i::IncrementalMarking::FORCE_MARKING,
i::IncrementalMarking::FORCE_COMPLETION));
}
}
@ -6989,47 +6988,41 @@ void Isolate::SetReference(internal::Object** parent,
}
void Isolate::AddGCPrologueCallback(GCPrologueCallback callback,
GCType gc_type) {
void Isolate::AddGCPrologueCallback(GCCallback callback, GCType gc_type) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->heap()->AddGCPrologueCallback(callback, gc_type);
}
void Isolate::RemoveGCPrologueCallback(GCPrologueCallback callback) {
void Isolate::RemoveGCPrologueCallback(GCCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->heap()->RemoveGCPrologueCallback(callback);
}
void Isolate::AddGCEpilogueCallback(GCEpilogueCallback callback,
GCType gc_type) {
void Isolate::AddGCEpilogueCallback(GCCallback callback, GCType gc_type) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->heap()->AddGCEpilogueCallback(callback, gc_type);
}
void Isolate::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
void Isolate::RemoveGCEpilogueCallback(GCCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->heap()->RemoveGCEpilogueCallback(callback);
}
void V8::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
void V8::AddGCPrologueCallback(GCCallback callback, GCType gc_type) {
i::Isolate* isolate = i::Isolate::Current();
isolate->heap()->AddGCPrologueCallback(
reinterpret_cast<v8::Isolate::GCPrologueCallback>(callback),
gc_type,
false);
reinterpret_cast<v8::Isolate::GCCallback>(callback), gc_type, false);
}
void V8::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
void V8::AddGCEpilogueCallback(GCCallback callback, GCType gc_type) {
i::Isolate* isolate = i::Isolate::Current();
isolate->heap()->AddGCEpilogueCallback(
reinterpret_cast<v8::Isolate::GCEpilogueCallback>(callback),
gc_type,
false);
reinterpret_cast<v8::Isolate::GCCallback>(callback), gc_type, false);
}
@ -7096,20 +7089,11 @@ Isolate* Isolate::GetCurrent() {
}
Isolate* Isolate::New() {
Isolate::CreateParams create_params;
return New(create_params);
}
Isolate* Isolate::New(const Isolate::CreateParams& params) {
i::Isolate* isolate = new i::Isolate(false);
Isolate* v8_isolate = reinterpret_cast<Isolate*>(isolate);
if (params.array_buffer_allocator != NULL) {
isolate->set_array_buffer_allocator(params.array_buffer_allocator);
} else {
isolate->set_array_buffer_allocator(i::V8::ArrayBufferAllocator());
}
CHECK(params.array_buffer_allocator != NULL);
isolate->set_array_buffer_allocator(params.array_buffer_allocator);
if (params.snapshot_blob != NULL) {
isolate->set_snapshot_blob(params.snapshot_blob);
} else {
@ -7176,13 +7160,6 @@ void Isolate::Exit() {
}
void Isolate::SetAbortOnUncaughtExceptionCallback(
AbortOnUncaughtExceptionCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->SetAbortOnUncaughtExceptionCallback(callback);
}
Isolate::DisallowJavascriptExecutionScope::DisallowJavascriptExecutionScope(
Isolate* isolate,
Isolate::DisallowJavascriptExecutionScope::OnFailure on_failure)
@ -7616,26 +7593,27 @@ String::Value::~Value() {
}
#define DEFINE_ERROR(NAME) \
Local<Value> Exception::NAME(v8::Local<v8::String> raw_message) { \
i::Isolate* isolate = i::Isolate::Current(); \
LOG_API(isolate, #NAME); \
ENTER_V8(isolate); \
i::Object* error; \
{ \
i::HandleScope scope(isolate); \
i::Handle<i::String> message = Utils::OpenHandle(*raw_message); \
error = *isolate->factory()->NewError("$" #NAME, message); \
} \
i::Handle<i::Object> result(error, isolate); \
return Utils::ToLocal(result); \
}
DEFINE_ERROR(RangeError)
DEFINE_ERROR(ReferenceError)
DEFINE_ERROR(SyntaxError)
DEFINE_ERROR(TypeError)
DEFINE_ERROR(Error)
#define DEFINE_ERROR(NAME, name) \
Local<Value> Exception::NAME(v8::Local<v8::String> raw_message) { \
i::Isolate* isolate = i::Isolate::Current(); \
LOG_API(isolate, #NAME); \
ENTER_V8(isolate); \
i::Object* error; \
{ \
i::HandleScope scope(isolate); \
i::Handle<i::String> message = Utils::OpenHandle(*raw_message); \
i::Handle<i::JSFunction> constructor = isolate->name##_function(); \
error = *isolate->factory()->NewError(constructor, message); \
} \
i::Handle<i::Object> result(error, isolate); \
return Utils::ToLocal(result); \
}
DEFINE_ERROR(RangeError, range_error)
DEFINE_ERROR(ReferenceError, reference_error)
DEFINE_ERROR(SyntaxError, syntax_error)
DEFINE_ERROR(TypeError, type_error)
DEFINE_ERROR(Error, error)
#undef DEFINE_ERROR
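Editorial note: the rewritten macro swaps the "$" builtins lookup for direct native-context accessors; the public factories are unchanged. A usage sketch from the embedder side.

void ThrowRange(v8::Isolate* isolate, const char* message) {
  isolate->ThrowException(v8::Exception::RangeError(
      v8::String::NewFromUtf8(isolate, message, v8::NewStringType::kNormal)
          .ToLocalChecked()));
}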

82
deps/v8/src/api.h

@ -5,13 +5,12 @@
#ifndef V8_API_H_
#define V8_API_H_
#include "src/v8.h"
#include "include/v8-testing.h"
#include "src/contexts.h"
#include "src/factory.h"
#include "src/isolate.h"
#include "src/list-inl.h"
#include "src/list.h"
#include "src/objects-inl.h"
namespace v8 {
@ -308,17 +307,6 @@ OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
};
template <class T>
v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
v8::EscapableHandleScope* scope) {
v8::internal::Handle<T> handle;
if (!is_null()) {
handle = *this;
}
return Utils::OpenHandle(*scope->Escape(Utils::ToLocal(handle)), true);
}
template <class T>
inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) {
return reinterpret_cast<T*>(obj.location());
@ -417,72 +405,6 @@ OPEN_HANDLE_LIST(MAKE_OPEN_HANDLE)
namespace internal {
// Tracks string usage to help make better decisions when
// externalizing strings.
//
// Implementation note: internally this class only tracks fresh
// strings and keeps a single use counter for them.
class StringTracker {
public:
// Records that the given string's characters were copied to some
// external buffer. If this happens often we should honor
// externalization requests for the string.
void RecordWrite(Handle<String> string) {
Address address = reinterpret_cast<Address>(*string);
Address top = isolate_->heap()->NewSpaceTop();
if (IsFreshString(address, top)) {
IncrementUseCount(top);
}
}
// Estimates freshness and use frequency of the given string based
// on how close it is to the new space top and the recorded usage
// history.
inline bool IsFreshUnusedString(Handle<String> string) {
Address address = reinterpret_cast<Address>(*string);
Address top = isolate_->heap()->NewSpaceTop();
return IsFreshString(address, top) && IsUseCountLow(top);
}
private:
StringTracker() : use_count_(0), last_top_(NULL), isolate_(NULL) { }
static inline bool IsFreshString(Address string, Address top) {
return top - kFreshnessLimit <= string && string <= top;
}
inline bool IsUseCountLow(Address top) {
if (last_top_ != top) return true;
return use_count_ < kUseLimit;
}
inline void IncrementUseCount(Address top) {
if (last_top_ != top) {
use_count_ = 0;
last_top_ = top;
}
++use_count_;
}
// Single use counter shared by all fresh strings.
int use_count_;
// Last new space top when the use count above was valid.
Address last_top_;
Isolate* isolate_;
// How close to the new space top a fresh string has to be.
static const int kFreshnessLimit = 1024;
// The number of uses required to consider a string useful.
static const int kUseLimit = 32;
friend class Isolate;
DISALLOW_COPY_AND_ASSIGN(StringTracker);
};
class DeferredHandles {
public:

4
deps/v8/src/arguments.cc

@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/arguments.h"
#include "src/api.h"
#include "src/vm-state-inl.h"
namespace v8 {

6
deps/v8/src/arguments.h

@ -269,9 +269,6 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
#endif
#define DECLARE_RUNTIME_FUNCTION(Name) \
Object* Name(int args_length, Object** args_object, Isolate* isolate)
#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name) \
static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate)); \
Type Name(int args_length, Object** args_object, Isolate* isolate) { \
@ -286,9 +283,6 @@ static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
#define RUNTIME_FUNCTION_RETURN_PAIR(Name) \
RUNTIME_FUNCTION_RETURNS_TYPE(ObjectPair, Name)
#define RUNTIME_ARGUMENTS(isolate, args) \
args.length(), args.arguments(), isolate
} } // namespace v8::internal
#endif // V8_ARGUMENTS_H_

55
deps/v8/src/arm/assembler-arm-inl.h

@ -40,7 +40,7 @@
#include "src/arm/assembler-arm.h"
#include "src/assembler.h"
#include "src/debug.h"
#include "src/debug/debug.h"
namespace v8 {
@ -97,7 +97,7 @@ DwVfpRegister DwVfpRegister::FromAllocationIndex(int index) {
}
void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
@ -272,19 +272,18 @@ void RelocInfo::set_code_age_stub(Code* stub,
}
Address RelocInfo::call_address() {
Address RelocInfo::debug_call_address() {
// The 2 instructions offset assumes patched debug break slot or return
// sequence.
DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
return Memory::Address_at(pc_ + Assembler::kPatchDebugBreakSlotAddressOffset);
}
void RelocInfo::set_call_address(Address target) {
DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
void RelocInfo::set_debug_call_address(Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
Memory::Address_at(pc_ + Assembler::kPatchDebugBreakSlotAddressOffset) =
target;
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@ -293,23 +292,6 @@ void RelocInfo::set_call_address(Address target) {
}
Object* RelocInfo::call_object() {
return *call_object_address();
}
void RelocInfo::set_call_object(Object* target) {
*call_object_address() = target;
}
Object** RelocInfo::call_object_address() {
DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
}
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
@ -353,11 +335,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
isolate->debug()->has_break_points()) {
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
visitor->VisitDebugTarget(this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
@ -380,11 +359,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
StaticVisitor::VisitDebugTarget(heap, this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
@ -504,11 +480,6 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
Address Assembler::break_address_from_return_address(Address pc) {
return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
}
Address Assembler::return_address_from_call_start(Address pc) {
if (IsLdrPcImmediateOffset(Memory::int32_at(pc)) |
IsLdrPpImmediateOffset(Memory::int32_at(pc))) {

69
deps/v8/src/arm/assembler-arm.cc

@ -34,8 +34,6 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "src/arm/assembler-arm-inl.h"
@ -1326,7 +1324,8 @@ int Assembler::branch_offset(Label* L) {
// Block the emission of the constant pool, since the branch instruction must
// be emitted at the pc offset recorded by the label.
BlockConstPoolFor(1);
if (!is_const_pool_blocked()) BlockConstPoolFor(1);
return target_pos - (pc_offset() + kPcLoadDelta);
}
@ -2573,6 +2572,12 @@ void Assembler::vmov(const DwVfpRegister dst,
double imm,
const Register scratch) {
uint32_t enc;
// If the embedded constant pool is disabled, we can use the normal, inline
// constant pool. If the embedded constant pool is enabled (via
// FLAG_enable_embedded_constant_pool), we can only use it where the pool
// pointer (pp) is valid.
bool can_use_pool =
!FLAG_enable_embedded_constant_pool || is_constant_pool_available();
if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
// The double can be encoded in the instruction.
//
@ -2583,7 +2588,7 @@ void Assembler::vmov(const DwVfpRegister dst,
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
} else if (FLAG_enable_vldr_imm && is_constant_pool_available()) {
} else if (FLAG_enable_vldr_imm && can_use_pool) {
// TODO(jfb) Temporarily turned off until we have constant blinding or
// some equivalent mitigation: an attacker can otherwise control
// generated data which also happens to be executable, a Very Bad
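
The new can_use_pool guard feeds a three-way choice: encode the double directly in the vmov instruction, vldr it from the constant pool when the pool is usable, or fall back to synthesizing the value with integer moves. Below is a minimal standalone sketch of the direct-encoding test, following the ARM ARM's VFPExpandImm bit layout; the function name and driver are illustrative, not V8's code.

#include <cstdint>
#include <cstdio>
#include <cstring>

// An encodable double has the pattern a : NOT(b) : bbbbbbbb : cdefgh
// followed by 48 zero bits (sign, short exponent run, 4 fraction bits).
static bool FitsVmovImmediate(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  if ((bits & ((uint64_t{1} << 48) - 1)) != 0) return false;  // low 48 bits
  uint64_t b = (bits >> 61) & 1;
  for (int i = 54; i <= 61; ++i)          // bits 61..54 must all equal b
    if (((bits >> i) & 1) != b) return false;
  return ((bits >> 62) & 1) == (b ^ 1);   // bit 62 must be NOT(b)
}

int main() {
  const double samples[] = {1.0, -2.0, 31.0, 0.1, 1e10};
  for (double d : samples)
    std::printf("%-6g -> %s\n", d, FitsVmovImmediate(d) ? "vmov" : "pool/moves");
}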
@ -3588,11 +3593,10 @@ void Assembler::GrowBuffer() {
void Assembler::db(uint8_t data) {
// No relocation info should be pending while using db. db is used
// to write pure data with no pointers and the constant pool should
// be emitted before using db.
DCHECK(num_pending_32_bit_constants_ == 0);
DCHECK(num_pending_64_bit_constants_ == 0);
// db is used to write raw data. The constant pool should be emitted or
// blocked before using db.
DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
pc_ += sizeof(uint8_t);
@ -3600,11 +3604,10 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data) {
// No relocation info should be pending while using dd. dd is used
// to write pure data with no pointers and the constant pool should
// be emitted before using dd.
DCHECK(num_pending_32_bit_constants_ == 0);
DCHECK(num_pending_64_bit_constants_ == 0);
// dd is used to write raw data. The constant pool should be emitted or
// blocked before using dd.
DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
pc_ += sizeof(uint32_t);
@ -3612,11 +3615,10 @@ void Assembler::dd(uint32_t data) {
void Assembler::dq(uint64_t value) {
// No relocation info should be pending while using dq. dq is used
// to write pure data with no pointers and the constant pool should
// be emitted before using dq.
DCHECK(num_pending_32_bit_constants_ == 0);
DCHECK(num_pending_64_bit_constants_ == 0);
// dq is used to write raw data. The constant pool should be emitted or
// blocked before using dq.
DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
CheckBuffer();
*reinterpret_cast<uint64_t*>(pc_) = value;
pc_ += sizeof(uint64_t);
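
The db/dd/dq changes above all relax the same invariant: raw data may now be emitted while constants are pending, provided pool emission is blocked so the pool cannot be dumped into the middle of the data. A minimal sketch of the relaxed check follows, with a made-up MiniAssembler standing in for the real class.

#include <cassert>
#include <cstdint>
#include <vector>

struct MiniAssembler {
  std::vector<uint8_t> buffer;
  int pending_constants = 0;
  int const_pool_blocked_nesting = 0;

  bool is_const_pool_blocked() const { return const_pool_blocked_nesting > 0; }

  void db(uint8_t data) {
    // Old rule: assert(pending_constants == 0). New rule:
    assert(is_const_pool_blocked() || pending_constants == 0);
    buffer.push_back(data);
  }
};

int main() {
  MiniAssembler masm;
  masm.pending_constants = 2;          // e.g. two unemitted pool entries
  ++masm.const_pool_blocked_nesting;   // what a BlockConstPoolScope would do
  masm.db(0x42);                       // now legal: pool can't interleave
}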
@ -3755,11 +3757,13 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
int size_up_to_marker = jump_instr + kInstrSize;
int estimated_size_after_marker =
num_pending_32_bit_constants_ * kPointerSize;
bool has_int_values = (num_pending_32_bit_constants_ > 0);
bool has_fp_values = (num_pending_64_bit_constants_ > 0);
bool require_64_bit_align = false;
if (has_fp_values) {
require_64_bit_align = IsAligned(
reinterpret_cast<intptr_t>(pc_ + size_up_to_marker), kDoubleAlignment);
require_64_bit_align =
!IsAligned(reinterpret_cast<intptr_t>(pc_ + size_up_to_marker),
kDoubleAlignment);
if (require_64_bit_align) {
estimated_size_after_marker += kInstrSize;
}
@ -3776,9 +3780,11 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// * the instruction doesn't require a jump after itself to jump over the
// constant pool, and we're getting close to running out of range.
if (!force_emit) {
DCHECK((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
DCHECK(has_fp_values || has_int_values);
bool need_emit = false;
if (has_fp_values) {
// The 64-bit constants are always emitted before the 32-bit constants, so
// we can ignore the effect of the 32-bit constants on estimated_size.
int dist64 = pc_offset() + estimated_size -
num_pending_32_bit_constants_ * kPointerSize -
first_const_pool_64_use_;
@ -3787,10 +3793,12 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
need_emit = true;
}
}
int dist32 = pc_offset() + estimated_size - first_const_pool_32_use_;
if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
(!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
need_emit = true;
if (has_int_values) {
int dist32 = pc_offset() + estimated_size - first_const_pool_32_use_;
if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
(!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
need_emit = true;
}
}
if (!need_emit) return;
}
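
Restructured this way, the check only computes dist32 when 32-bit constants are actually pending, presumably to avoid a bogus distance when first_const_pool_32_use_ is still -1. Here is a standalone model of the decision, using placeholder range constants rather than V8's real values.

#include <cstdio>

const int kMaxDistToIntPool = 4 * 1024;  // illustrative, not V8's values
const int kMaxDistToFPPool = 1024;
const int kCheckPoolInterval = 32;

// first_use_* is -1 when no constant of that width is pending.
bool NeedEmit(int pc_offset, int estimated_size, bool require_jump,
              int first_use_32, int first_use_64, int pending_32_bytes) {
  bool need = false;
  if (first_use_64 >= 0) {
    // 64-bit constants are emitted first, so subtract the 32-bit block
    // from their distance, exactly as the hunk above does.
    int dist64 = pc_offset + estimated_size - pending_32_bytes - first_use_64;
    if (dist64 >= kMaxDistToFPPool - kCheckPoolInterval ||
        (!require_jump && dist64 >= kMaxDistToFPPool / 2))
      need = true;
  }
  if (first_use_32 >= 0) {  // the new guard: skip when nothing is pending
    int dist32 = pc_offset + estimated_size - first_use_32;
    if (dist32 >= kMaxDistToIntPool - kCheckPoolInterval ||
        (!require_jump && dist32 >= kMaxDistToIntPool / 2))
      need = true;
  }
  return need;
}

int main() {
  // One 64-bit constant first used at offset 64; by offset 1000 the vldr
  // range is nearly exhausted, so the pool must be emitted.
  std::printf("%d\n", NeedEmit(1000, 64, true, -1, 64, 0));
}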
@ -3839,7 +3847,10 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
bind(&size_check);
// Emit jump over constant pool if necessary.
if (require_jump) b(size - kPcLoadDelta);
Label after_pool;
if (require_jump) {
b(&after_pool);
}
// Put down constant pool marker "Undefined instruction".
// The data size helps disassembly know what to print.
@ -3923,6 +3934,10 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
RecordComment("]");
DCHECK_EQ(size, SizeOfCodeGeneratedSince(&size_check));
if (after_pool.is_linked()) {
bind(&after_pool);
}
}
// Since a constant pool was just emitted, move the check offset forward by

25
deps/v8/src/arm/assembler-arm.h

@ -737,9 +737,6 @@ class Assembler : public AssemblerBase {
// in the instruction stream that the call will return from.
INLINE(static Address return_address_from_call_start(Address pc));
// Return the code target address of the patched debug break slot
INLINE(static Address break_address_from_return_address(Address pc));
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@ -758,30 +755,18 @@ class Assembler : public AssemblerBase {
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
// Distance between start of patched return sequence and the emitted address
// to jump to.
// Patched return sequence is:
// ldr ip, [pc, #0] @ emitted address and start
// blx ip
static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize;
// Distance between start of patched debug break slot and the emitted address
// to jump to.
// Patched debug break slot code is:
// ldr ip, [pc, #0] @ emitted address and start
// blx ip
static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstrSize;
static const int kPatchDebugBreakSlotAddressOffset = 2 * kInstrSize;
// Difference between address of current opcode and value read from pc
// register.
static const int kPcLoadDelta = 8;
static const int kJSReturnSequenceInstructions = 4;
static const int kJSReturnSequenceLength =
kJSReturnSequenceInstructions * kInstrSize;
static const int kDebugBreakSlotInstructions = 3;
static const int kDebugBreakSlotInstructions = 4;
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
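
These constants pin down the new slot shape: four instructions, with the patch target address two instructions in. A small reconstruction follows; kInstrSize is 4 bytes on ARM, and the exact instruction sequence is inferred from the debug-arm.cc patching code further down, so treat it as illustrative.

//   slot + 0:   ldr ip, [pc, #0]   ; start of the patched sequence
//   slot + 4:   blx ip
//   slot + 8:   <entry address>    ; kPatchDebugBreakSlotAddressOffset = 2 * kInstrSize
//   slot + 12:  (4th slot word)    ; kDebugBreakSlotLength = 4 * kInstrSize = 16
#include <cstdio>
int main() {
  const int kInstrSize = 4;
  std::printf("slot: %d bytes, patched address at +%d\n",
              4 * kInstrSize, 2 * kInstrSize);
}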
@ -1354,11 +1339,11 @@ class Assembler : public AssemblerBase {
// Debugging
// Mark address of the ExitJSFrame code.
void RecordJSReturn();
// Mark generator continuation.
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
void RecordDebugBreakSlot();
void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.

437
deps/v8/src/arm/builtins-arm.cc

@ -2,14 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "src/codegen.h"
#include "src/debug.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
#include "src/full-codegen/full-codegen.h"
#include "src/runtime/runtime.h"
namespace v8 {
@ -311,39 +309,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
}
static void Generate_Runtime_NewObject(MacroAssembler* masm,
bool create_memento,
Register original_constructor,
Label* count_incremented,
Label* allocated) {
if (create_memento) {
// Get the cell or allocation site.
__ ldr(r2, MemOperand(sp, 2 * kPointerSize));
__ push(r2);
}
__ push(r1); // argument for Runtime_NewObject
__ push(original_constructor); // original constructor
if (create_memento) {
__ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
} else {
__ CallRuntime(Runtime::kNewObject, 2);
}
__ mov(r4, r0);
// Runtime_NewObjectWithAllocationSite increments allocation count.
// Skip the increment.
if (create_memento) {
__ jmp(count_incremented);
} else {
__ jmp(allocated);
}
}
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@ -363,32 +330,18 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
{
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
if (create_memento) {
__ AssertUndefinedOrAllocationSite(r2, r4);
__ push(r2);
}
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(r2, r4);
__ push(r2);
__ SmiTag(r0);
__ push(r0);
__ push(r1);
if (use_new_target) {
__ push(r3);
}
Label rt_call, allocated, normal_new, count_incremented;
__ cmp(r1, r3);
__ b(eq, &normal_new);
// Original constructor and function are different.
Generate_Runtime_NewObject(masm, create_memento, r3, &count_incremented,
&allocated);
__ bind(&normal_new);
__ push(r3);
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
Label rt_call, allocated;
if (FLAG_inline_new) {
Label undo_allocation;
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(isolate);
__ mov(r2, Operand(debug_step_in_fp));
@ -396,11 +349,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ tst(r2, r2);
__ b(ne, &rt_call);
// Fall back to runtime if the original constructor and function differ.
__ cmp(r1, r3);
__ b(ne, &rt_call);
// Load the initial map and verify that it is in fact a map.
// r1: constructor function
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ JumpIfSmi(r2, &rt_call);
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
__ CompareObjectType(r2, r5, r4, MAP_TYPE);
__ b(ne, &rt_call);
// Check that the constructor is not constructing a JSFunction (see
@ -408,7 +365,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// initial map's instance type would be JS_FUNCTION_TYPE.
// r1: constructor function
// r2: initial map
__ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
__ CompareInstanceType(r2, r5, JS_FUNCTION_TYPE);
__ b(eq, &rt_call);
if (!is_api_function) {
@ -439,12 +396,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Now allocate the JSObject on the heap.
// r1: constructor function
// r2: initial map
Label rt_call_reload_new_target;
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
if (create_memento) {
__ add(r3, r3, Operand(AllocationMemento::kSize / kPointerSize));
}
__ Allocate(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
__ Allocate(r3, r4, r5, r6, &rt_call_reload_new_target, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
@ -481,8 +439,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate object with a slack.
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
__ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
__ Ubfx(r0, r0, Map::kInObjectPropertiesOrConstructorFunctionIndexByte *
kBitsPerByte,
kBitsPerByte);
__ ldr(r2, FieldMemOperand(r2, Map::kInstanceAttributesOffset));
__ Ubfx(r2, r2, Map::kUnusedPropertyFieldsByte * kBitsPerByte,
kBitsPerByte);
__ sub(r0, r0, Operand(r2));
__ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
// r0: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
@ -509,7 +472,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
// Load the AllocationSite
__ ldr(r6, MemOperand(sp, 2 * kPointerSize));
__ ldr(r6, MemOperand(sp, 3 * kPointerSize));
__ AssertUndefinedOrAllocationSite(r6, r0);
DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
} else {
@ -518,104 +482,50 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
// failures need to undo the allocation, so that the heap is in a
// consistent state and verifiable.
// and jump into the continuation code at any time from now on.
__ add(r4, r4, Operand(kHeapObjectTag));
// Check if a non-empty properties array is needed. Continue with
// allocated object if not; allocate and initialize a FixedArray if yes.
// r1: constructor function
// r4: JSObject
// r5: start of next object (not tagged)
__ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
// The field instance sizes contains both pre-allocated property fields
// and in-object properties.
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
__ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
kBitsPerByte);
__ add(r3, r3, Operand(r6));
__ Ubfx(r6, r0, Map::kInObjectPropertiesByte * kBitsPerByte,
kBitsPerByte);
__ sub(r3, r3, Operand(r6), SetCC);
// Done if no extra properties are to be allocated.
__ b(eq, &allocated);
__ Assert(pl, kPropertyAllocationCountFailed);
// Scale the number of elements by pointer size and add the header for
// FixedArrays to the start of the next object calculation from above.
// r1: constructor
// r3: number of elements in properties array
// r4: JSObject
// r5: start of next object
__ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
__ Allocate(
r0,
r5,
r6,
r2,
&undo_allocation,
static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
// Initialize the FixedArray.
// r1: constructor
// r3: number of elements in properties array
// r4: JSObject
// r5: FixedArray (not tagged)
__ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
__ mov(r2, r5);
DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
__ str(r6, MemOperand(r2, kPointerSize, PostIndex));
DCHECK_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ SmiTag(r0, r3);
__ str(r0, MemOperand(r2, kPointerSize, PostIndex));
// Initialize the fields to undefined.
// r1: constructor function
// r2: First element of FixedArray (not tagged)
// r3: number of elements in properties array
// r4: JSObject
// r5: FixedArray (not tagged)
__ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ InitializeFieldsWithFiller(r2, r6, r0);
// Store the initialized FixedArray into the properties field of
// the JSObject
// r1: constructor function
// r4: JSObject
// r5: FixedArray (not tagged)
__ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag.
__ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
// Continue with JSObject being successfully allocated
// r1: constructor function
// r4: JSObject
__ jmp(&allocated);
// Undo the setting of the new top so that the heap is verifiable. For
// example, the map's unused properties potentially do not match the
// allocated object's unused properties.
// r4: JSObject (previous new top)
__ bind(&undo_allocation);
__ UndoAllocationInNewSpace(r4, r5);
// Reload the original constructor and fall-through.
__ bind(&rt_call_reload_new_target);
__ ldr(r3, MemOperand(sp, 0 * kPointerSize));
}
// Allocate the new receiver object using the runtime call.
// r1: constructor function
// r3: original constructor
__ bind(&rt_call);
Generate_Runtime_NewObject(masm, create_memento, r1, &count_incremented,
&allocated);
if (create_memento) {
// Get the cell or allocation site.
__ ldr(r2, MemOperand(sp, 3 * kPointerSize));
__ push(r2); // argument 1: allocation site
}
__ push(r1); // argument 2/1: constructor function
__ push(r3); // argument 3/2: original constructor
if (create_memento) {
__ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
} else {
__ CallRuntime(Runtime::kNewObject, 2);
}
__ mov(r4, r0);
// Runtime_NewObjectWithAllocationSite increments allocation count.
// Skip the increment.
Label count_incremented;
if (create_memento) {
__ jmp(&count_incremented);
}
// Receiver for constructor call allocated.
// r4: JSObject
__ bind(&allocated);
if (create_memento) {
int offset = (use_new_target ? 3 : 2) * kPointerSize;
__ ldr(r2, MemOperand(sp, offset));
__ ldr(r2, MemOperand(sp, 3 * kPointerSize));
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
__ cmp(r2, r5);
__ b(eq, &count_incremented);
@ -630,9 +540,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Restore the parameters.
if (use_new_target) {
__ pop(r3);
}
__ pop(r3);
__ pop(r1);
// Retrieve smi-tagged arguments count from the stack.
@ -641,9 +549,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Push new.target onto the construct frame. This is stored just below the
// receiver on the stack.
if (use_new_target) {
__ push(r3);
}
__ push(r3);
__ push(r4);
__ push(r4);
@ -657,8 +563,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of arguments (smi-tagged)
// sp[0]: receiver
// sp[1]: receiver
// sp[2]: new.target (if used)
// sp[2/3]: number of arguments (smi-tagged)
// sp[2]: new.target
// sp[3]: number of arguments (smi-tagged)
Label loop, entry;
__ SmiTag(r3, r0);
__ b(&entry);
@ -683,17 +589,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
// TODO(arv): Remove the "!use_new_target" before supporting optimization
// of functions that reference new.target
if (!is_api_function && !use_new_target) {
if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
// r0: result
// sp[0]: receiver
// sp[1]: new.target (if used)
// sp[1/2]: number of arguments (smi-tagged)
// sp[1]: new.target
// sp[2]: number of arguments (smi-tagged)
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid
@ -703,9 +607,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the result is a smi, it is *not* an object in the ECMA sense.
// r0: result
// sp[0]: receiver (newly allocated object)
// sp[1]: new.target (if used)
// sp[1/2]: number of arguments (smi-tagged)
// sp[0]: receiver
// sp[1]: new.target
// sp[2]: number of arguments (smi-tagged)
__ JumpIfSmi(r0, &use_receiver);
// If the type of the result (stored in its map) is less than
@ -723,10 +627,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&exit);
// r0: result
// sp[0]: receiver (newly allocated object)
// sp[1]: new.target (if used)
// sp[1/2]: number of arguments (smi-tagged)
int offset = (use_new_target ? 2 : 1) * kPointerSize;
__ ldr(r1, MemOperand(sp, offset));
// sp[1]: new.target (original constructor)
// sp[2]: number of arguments (smi-tagged)
__ ldr(r1, MemOperand(sp, 2 * kPointerSize));
// Leave construct frame.
}
@ -739,17 +642,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true, false, false);
}
void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
Generate_JSConstructStubHelper(masm, true, false);
}
@ -763,12 +661,12 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
// TODO(dslomov): support pretenuring
CHECK(!FLAG_pretenuring_call_new);
{
FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
__ AssertUndefinedOrAllocationSite(r2, r4);
__ push(r2);
__ mov(r4, r0);
__ SmiTag(r4);
__ push(r4); // Smi-tagged arguments count.
@ -970,6 +868,147 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
// count expected by the function.
//
// The live registers are:
// o r1: the JS function object being called.
// o cp: our context
// o pp: the caller's constant pool pointer (if enabled)
// o fp: the caller's frame pointer
// o sp: stack pointer
// o lr: return address
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-arm.h for its layout.
// TODO(rmcilroy): We will need to include the current bytecode pointer in the
// frame.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushFixedFrame(r1);
__ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ SmiTst(kInterpreterBytecodeArrayRegister);
__ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r0, no_reg,
BYTECODE_ARRAY_TYPE);
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Allocate the local and temporary register file on the stack.
{
// Load frame size from the BytecodeArray object.
__ ldr(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kFrameSizeOffset));
// Do a stack check to ensure we don't go over the limit.
Label ok;
__ sub(r9, sp, Operand(r4));
__ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
__ cmp(r9, Operand(r2));
__ b(hs, &ok);
__ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
Label loop_header;
Label loop_check;
__ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
__ b(&loop_check, al);
__ bind(&loop_header);
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
__ push(r9);
// Continue loop if not done.
__ bind(&loop_check);
__ sub(r4, r4, Operand(kPointerSize), SetCC);
__ b(&loop_header, ge);
}
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Allow simulator stop operations if FLAG_stop_at is set.
// - Deal with sloppy mode functions which need to replace the
// receiver with the global proxy when called as functions (without an
// explicit receiver object).
// - Code aging of the BytecodeArray object.
// - Supporting FLAG_trace.
//
// The following items are also not done here, and will probably be done using
// explicit bytecodes instead:
// - Allocating a new local context if applicable.
// - Setting up a local binding to the this function, which is used in
// derived constructors with super calls.
// - Setting new.target if required.
// - Dealing with REST parameters (only if
// https://codereview.chromium.org/1235153006 doesn't land by then).
// - Dealing with argument objects.
// Perform stack guard check.
{
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
__ CallRuntime(Runtime::kStackGuard, 0);
__ bind(&ok);
}
// Load accumulator, register file, bytecode offset, dispatch table into
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
__ sub(kInterpreterRegisterFileRegister, fp,
Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterDispatchTableRegister,
Heap::kInterpreterTableRootIndex);
__ add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// Dispatch to the first bytecode handler for the function.
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
kPointerSizeLog2));
// TODO(rmcilroy): Make dispatch table point to code entries to avoid untagging
// and header removal.
__ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip);
}
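
The dispatch tail above is the interpreter loop in miniature: load the byte at the current offset, index the handler table with it, and call through. A toy C++ rendering with two invented opcodes follows; real handlers re-dispatch themselves rather than returning to a loop.

#include <cstddef>
#include <cstdint>
#include <cstdio>

using Handler = void (*)(size_t offset);

void HandleLdaZero(size_t offset) { std::printf("%zu: lda_zero\n", offset); }
void HandleReturn(size_t offset)  { std::printf("%zu: return\n", offset); }

int main() {
  const Handler dispatch_table[] = {HandleLdaZero, HandleReturn};
  const uint8_t bytecode_array[] = {0, 0, 1};
  for (size_t offset = 0; offset < sizeof(bytecode_array); ++offset) {
    uint8_t bc = bytecode_array[offset];   // ldrb r1, [array, offset]
    dispatch_table[bc](offset);            // ldr ip, [table, r1, LSL #2]; Call(ip)
  }
}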
void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's EmitReturnSequence.
// - Supporting FLAG_trace for Runtime::TraceExit.
// - Support profiler (specifically decrementing profiling_counter
// appropriately and calling out to HandleInterrupts if necessary).
// The return value is in accumulator, which is already in r0.
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
// Drop receiver + arguments.
__ Drop(1); // TODO(rmcilroy): Get number of arguments from BytecodeArray.
__ Jump(lr);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
@ -1282,8 +1321,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ SmiTag(r0);
__ push(r0);
__ push(r2);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ mov(r0, r2);
ToObjectStub stub(masm->isolate());
__ CallStub(&stub);
__ mov(r2, r0);
__ pop(r0);
@ -1396,6 +1436,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
static void Generate_PushAppliedArguments(MacroAssembler* masm,
const int vectorOffset,
const int argumentsOffset,
const int indexOffset,
const int limitOffset) {
@ -1413,12 +1454,9 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ ldr(receiver, MemOperand(fp, argumentsOffset));
// Use inline caching to speed up access to arguments.
FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
Handle<TypeFeedbackVector> feedback_vector =
masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
__ mov(slot, Operand(Smi::FromInt(index)));
__ Move(vector, feedback_vector);
int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
__ mov(slot, Operand(Smi::FromInt(slot_index)));
__ ldr(vector, MemOperand(fp, vectorOffset));
Handle<Code> ic =
KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ Call(ic, RelocInfo::CODE_TARGET);
@ -1453,6 +1491,13 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
const int kReceiverOffset = kArgumentsOffset + kPointerSize;
const int kFunctionOffset = kReceiverOffset + kPointerSize;
const int kVectorOffset =
InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
// Push the vector.
__ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kFeedbackVectorOffset));
__ Push(r1);
__ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
__ push(r0);
@ -1467,10 +1512,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
Generate_CheckStackOverflow(masm, kFunctionOffset, r0, kArgcIsSmiTagged);
// Push current limit and index.
const int kIndexOffset =
StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
const int kLimitOffset =
StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
__ push(r0); // limit
__ mov(r1, Operand::Zero()); // initial index
__ push(r1);
@ -1519,8 +1562,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
// Convert the receiver to a regular object.
// r0: receiver
__ bind(&call_to_object);
__ push(r0);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
ToObjectStub stub(masm->isolate());
__ CallStub(&stub);
__ b(&push_receiver);
__ bind(&use_global_proxy);
@ -1533,8 +1576,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ push(r0);
// Copy all arguments from the array to the stack.
Generate_PushAppliedArguments(
masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
kIndexOffset, kLimitOffset);
// Call the function.
Label call_proxy;
@ -1573,6 +1616,13 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
const int kFunctionOffset = kArgumentsOffset + kPointerSize;
static const int kVectorOffset =
InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
// Push the vector.
__ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kFeedbackVectorOffset));
__ Push(r1);
// If newTarget is not supplied, set it to constructor
Label validate_arguments;
@ -1595,33 +1645,28 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
Generate_CheckStackOverflow(masm, kFunctionOffset, r0, kArgcIsSmiTagged);
// Push current limit and index.
const int kIndexOffset =
StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
const int kLimitOffset =
StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
__ push(r0); // limit
__ mov(r1, Operand::Zero()); // initial index
__ push(r1);
// Push newTarget and callee functions
__ ldr(r0, MemOperand(fp, kNewTargetOffset));
__ push(r0);
// Push the constructor function as callee.
__ ldr(r0, MemOperand(fp, kFunctionOffset));
__ push(r0);
// Copy all arguments from the array to the stack.
Generate_PushAppliedArguments(
masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
kIndexOffset, kLimitOffset);
// Use undefined feedback vector
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ ldr(r1, MemOperand(fp, kFunctionOffset));
__ ldr(r4, MemOperand(fp, kNewTargetOffset));
// Call the function.
CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
__ Drop(1);
// Leave internal frame.
}
__ add(sp, sp, Operand(kStackSize * kPointerSize));

344
deps/v8/src/arm/code-stubs-arm.cc

@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "src/base/bits.h"
@ -14,8 +12,8 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
namespace v8 {
@ -33,7 +31,7 @@ static void InitializeArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
JS_FUNCTION_STUB_MODE);
}
}
@ -49,7 +47,7 @@ static void InitializeInternalArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
JS_FUNCTION_STUB_MODE);
}
}
@ -255,6 +253,9 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
__ b(eq, slow);
// Call runtime on identical SIMD values since we must throw a TypeError.
__ cmp(r4, Operand(SIMD128_VALUE_TYPE));
__ b(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics, since
// we need to throw a TypeError. Smis have already been ruled out.
@ -273,6 +274,9 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
__ b(eq, slow);
// Call runtime on identical SIMD values since we must throw a TypeError.
__ cmp(r4, Operand(SIMD128_VALUE_TYPE));
__ b(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics,
// since we need to throw a TypeError. Smis and heap numbers have
@ -675,26 +679,30 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript native;
if (cc == eq) {
native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
if (cc == eq && strict()) {
__ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
} else {
native =
is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
int ncr; // NaN compare result
if (cc == lt || cc == le) {
ncr = GREATER;
Builtins::JavaScript native;
if (cc == eq) {
native = Builtins::EQUALS;
} else {
DCHECK(cc == gt || cc == ge); // remaining cases
ncr = LESS;
native =
is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
int ncr; // NaN compare result
if (cc == lt || cc == le) {
ncr = GREATER;
} else {
DCHECK(cc == gt || cc == ge); // remaining cases
ncr = LESS;
}
__ mov(r0, Operand(Smi::FromInt(ncr)));
__ push(r0);
}
__ mov(r0, Operand(Smi::FromInt(ncr)));
__ push(r0);
}
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(native, JUMP_FUNCTION);
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(native, JUMP_FUNCTION);
}
__ bind(&miss);
GenerateMiss(masm);
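
The restructured block keeps the ncr ("NaN compare result") trick: when either operand is NaN, the runtime hands back a canned result chosen so that the pending condition fails either way. A C++ sketch of that contract, with LESS/GREATER as -1/+1 per V8's convention:

#include <cassert>
#include <cmath>

// Returns -1/0/1 like the COMPARE builtin; for NaN it returns the ncr value
// pushed by the stub so that lt/le (and gt/ge) comparisons come out false.
int CompareWithNcr(double lhs, double rhs, bool cc_is_lt_or_le) {
  const int kLess = -1, kEqual = 0, kGreater = 1;
  if (std::isnan(lhs) || std::isnan(rhs))
    return cc_is_lt_or_le ? kGreater : kLess;
  return lhs < rhs ? kLess : lhs > rhs ? kGreater : kEqual;
}

int main() {
  double nan = std::nan("");
  assert(CompareWithNcr(nan, 1.0, true) > 0);   // so "nan < 1" is false
  assert(CompareWithNcr(nan, 1.0, false) < 0);  // so "nan > 1" is false
}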
@ -1583,7 +1591,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ push(r1);
__ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
__ TailCallRuntime(Runtime::kArguments, 1, 1);
}
@ -1831,10 +1839,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ Push(receiver, key); // Receiver, key.
// Perform tail call to the entry.
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
masm->isolate()),
2, 1);
__ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@ -2378,32 +2383,41 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
bool is_super) {
// r0 : number of arguments to the construct function
// r2 : Feedback vector
// r3 : slot in feedback vector (Smi)
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi)
// r4 : original constructor (for IsSuperConstructorCall)
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(r0);
__ Push(r3, r2, r1, r0);
if (is_super) {
__ Push(r4);
}
__ CallStub(stub);
if (is_super) {
__ Pop(r4);
}
__ Pop(r3, r2, r1, r0);
__ SmiUntag(r0);
}
static void GenerateRecordCallTarget(MacroAssembler* masm) {
static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// r0 : number of arguments to the construct function
// r1 : the function to call
// r2 : Feedback vector
// r2 : feedback vector
// r3 : slot in feedback vector (Smi)
// r4 : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@ -2411,23 +2425,23 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state into r4.
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
__ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
// Load the cache state into r5.
__ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
__ ldr(r5, FieldMemOperand(r5, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
// We don't know if r4 is a WeakCell or a Symbol, but it's harmless to read at
// We don't know if r5 is a WeakCell or a Symbol, but it's harmless to read at
// this position in a symbol (see static asserts in type-feedback-vector.h).
Label check_allocation_site;
Register feedback_map = r5;
Register weak_value = r6;
__ ldr(weak_value, FieldMemOperand(r4, WeakCell::kValueOffset));
Register feedback_map = r6;
Register weak_value = r9;
__ ldr(weak_value, FieldMemOperand(r5, WeakCell::kValueOffset));
__ cmp(r1, weak_value);
__ b(eq, &done);
__ CompareRoot(r4, Heap::kmegamorphic_symbolRootIndex);
__ CompareRoot(r5, Heap::kmegamorphic_symbolRootIndex);
__ b(eq, &done);
__ ldr(feedback_map, FieldMemOperand(r4, HeapObject::kMapOffset));
__ ldr(feedback_map, FieldMemOperand(r5, HeapObject::kMapOffset));
__ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
__ b(ne, FLAG_pretenuring_call_new ? &miss : &check_allocation_site);
@ -2445,8 +2459,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ b(ne, &miss);
// Make sure the function is the Array() function
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
__ cmp(r1, r4);
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
__ cmp(r1, r5);
__ b(ne, &megamorphic);
__ jmp(&done);
}
@ -2455,14 +2469,14 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
__ CompareRoot(r4, Heap::kuninitialized_symbolRootIndex);
__ CompareRoot(r5, Heap::kuninitialized_symbolRootIndex);
__ b(eq, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
__ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
__ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
__ str(ip, FieldMemOperand(r5, FixedArray::kHeaderSize));
__ jmp(&done);
// An uninitialized cache is patched with the function
@ -2470,22 +2484,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
if (!FLAG_pretenuring_call_new) {
// Make sure the function is the Array() function
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
__ cmp(r1, r4);
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
__ cmp(r1, r5);
__ b(ne, &not_array_function);
// The target function is the Array constructor,
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
CallStubInRecordCallTarget(masm, &create_stub, is_super);
__ b(&done);
__ bind(&not_array_function);
}
CreateWeakCellStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
CallStubInRecordCallTarget(masm, &create_stub, is_super);
__ bind(&done);
}
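
The register renaming above (cache state in r5, scratches in r6/r9) frees r4 to carry the original constructor, but the cache protocol itself is unchanged. A toy model of that protocol, omitting the Array-function/AllocationSite special case:

#include <cassert>

// One slot per call site, moving UNINITIALIZED -> MONOMORPHIC(fn) ->
// MEGAMORPHIC, never backwards.
enum class State { kUninitialized, kMonomorphic, kMegamorphic };

struct FeedbackSlot {
  State state = State::kUninitialized;
  const void* target = nullptr;  // stands in for the WeakCell's JSFunction

  void Record(const void* fn) {
    switch (state) {
      case State::kUninitialized:  // first call: cache the target
        state = State::kMonomorphic;
        target = fn;
        break;
      case State::kMonomorphic:    // same target: nothing to do
        if (target != fn) {        // a second target: generalize for good
          state = State::kMegamorphic;
          target = nullptr;
        }
        break;
      case State::kMegamorphic:    // terminal
        break;
    }
  }
};

int main() {
  int f = 0, g = 0;
  FeedbackSlot slot;
  slot.Record(&f);
  assert(slot.state == State::kMonomorphic);
  slot.Record(&f);  // monomorphic hit
  slot.Record(&g);  // miss: goes megamorphic
  assert(slot.state == State::kMegamorphic);
}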
@ -2535,8 +2549,10 @@ static void EmitSlowCase(MacroAssembler* masm,
static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
// Wrap the receiver and patch it back onto the stack.
{ FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
__ Push(r1, r3);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ push(r1);
__ mov(r0, r3);
ToObjectStub stub(masm->isolate());
__ CallStub(&stub);
__ pop(r1);
}
__ str(r0, MemOperand(sp, argc * kPointerSize));
@ -2607,18 +2623,18 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r0 : number of arguments
// r1 : the function to call
// r2 : feedback vector
// r3 : (only if r2 is not the megamorphic symbol) slot in feedback
// vector (Smi)
// r3 : slot in feedback vector (Smi, for RecordCallTarget)
// r4 : original constructor (for IsSuperConstructorCall)
Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(r1, &non_function_call);
// Check that the function is a JSFunction.
__ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
__ CompareObjectType(r1, r5, r5, JS_FUNCTION_TYPE);
__ b(ne, &slow);
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
GenerateRecordCallTarget(masm, IsSuperConstructorCall());
__ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
if (FLAG_pretenuring_call_new) {
@ -2642,9 +2658,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// Pass function as original constructor.
if (IsSuperConstructorCall()) {
__ mov(r4, Operand(1 * kPointerSize));
__ add(r4, r4, Operand(r0, LSL, kPointerSizeLog2));
__ ldr(r3, MemOperand(sp, r4));
__ mov(r3, r4);
} else {
__ mov(r3, r1);
}
@ -2658,10 +2672,10 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r0: number of arguments
// r1: called object
// r4: object type
// r5: object type
Label do_call;
__ bind(&slow);
__ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
__ cmp(r5, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(ne, &non_function_call);
__ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
@ -2898,11 +2912,10 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(r1, r2, r3);
// Call the entry.
IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
: IC::kCallIC_Customization_Miss;
ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
__ CallExternalReference(miss, 3);
Runtime::FunctionId id = GetICState() == DEFAULT
? Runtime::kCallIC_Miss
: Runtime::kCallIC_Customization_Miss;
__ CallRuntime(id, 3);
// Move result to edi and exit the internal frame.
__ mov(r1, r0);
@ -3014,10 +3027,9 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
__ tst(code_,
Operand(kSmiTagMask |
((~String::kMaxOneByteCharCode) << kSmiTagSize)));
DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
__ tst(code_, Operand(kSmiTagMask |
((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
__ b(ne, &slow_case_);
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
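
Switching to the unsigned kMaxOneByteCharCodeU matters because the constant gets complemented and shifted; left-shifting the complemented signed value would be undefined behavior, while the unsigned form produces exactly the intended mask, which is presumably the motivation. The combined test in plain C++ (a sketch mirroring the constants above):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kSmiTagSize = 1, kSmiTagMask = 1;
  const uint32_t kMaxOneByteCharCodeU = 0xFF;
  // One mask checks both "low tag bit clear" (smi) and "code <= 0xFF".
  const uint32_t mask = kSmiTagMask | (~kMaxOneByteCharCodeU << kSmiTagSize);

  auto smi = [&](uint32_t v) { return v << kSmiTagSize; };
  assert((smi(65) & mask) == 0);        // 'A': smi in range, fast path
  assert((smi(0x100) & mask) != 0);     // too wide for one byte: slow path
  assert(((smi(65) | 1) & mask) != 0);  // tag bit set (not a smi): slow path
}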
@ -3294,7 +3306,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
__ bind(&single_char);
// r0: original string
@ -3481,7 +3493,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
@ -3762,7 +3774,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
__ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
__ bind(&miss);
@ -3814,15 +3826,12 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
void CompareICStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r1, r0);
__ Push(lr, r1, r0);
__ mov(ip, Operand(Smi::FromInt(op())));
__ push(ip);
__ CallExternalReference(miss, 3);
__ CallRuntime(Runtime::kCompareIC_Miss, 3);
// Compute the entry point of the rewritten stub.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@ -3883,7 +3892,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Register entity_name = scratch0;
// Having undefined at this place means the name is not contained.
DCHECK_EQ(kSmiTagSize, 1);
STATIC_ASSERT(kSmiTagSize == 1);
Register tmp = properties;
__ add(tmp, properties, Operand(index, LSL, 1));
__ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
@ -3973,8 +3982,8 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
}
__ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
// Scale the index by multiplying by the element size.
DCHECK(NameDictionary::kEntrySize == 3);
// Scale the index by multiplying by the entry size.
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
// scratch2 = scratch2 * 3.
__ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
@ -4058,10 +4067,10 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ and_(index, mask, Operand(index, LSR, Name::kHashShift));
// Scale the index by multiplying by the entry size.
DCHECK(NameDictionary::kEntrySize == 3);
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ add(index, index, Operand(index, LSL, 1)); // index *= 3.
DCHECK_EQ(kSmiTagSize, 1);
STATIC_ASSERT(kSmiTagSize == 1);
__ add(index, dictionary, Operand(index, LSL, 2));
__ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
@ -4528,7 +4537,7 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
false, receiver, name, feedback,
receiver, name, feedback,
receiver_map, scratch1, r9);
__ bind(&miss);
@ -4667,8 +4676,9 @@ void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
int code_size = masm->CallStubSize(&stub) + 2 * Assembler::kInstrSize;
PredictableCodeSizeScope predictable(masm, code_size);
PredictableCodeSizeScope predictable(masm);
predictable.ExpectSize(masm->CallStubSize(&stub) +
2 * Assembler::kInstrSize);
__ push(lr);
__ CallStub(&stub);
__ pop(lr);
@ -4772,12 +4782,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// sp[0] - last argument
Label normal_sequence;
if (mode == DONT_OVERRIDE) {
DCHECK(FAST_SMI_ELEMENTS == 0);
DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
DCHECK(FAST_ELEMENTS == 2);
DCHECK(FAST_HOLEY_ELEMENTS == 3);
DCHECK(FAST_DOUBLE_ELEMENTS == 4);
DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
// is the low bit set? If so, we are holey and that is good.
__ tst(r3, Operand(1));
@ -5051,6 +5061,158 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
Register context = cp;
Register result = r0;
Register slot = r2;
// Go up the context chain to the script context.
for (int i = 0; i < depth(); ++i) {
__ ldr(result, ContextOperand(context, Context::PREVIOUS_INDEX));
context = result;
}
// Load the PropertyCell value at the specified slot.
__ add(result, context, Operand(slot, LSL, kPointerSizeLog2));
__ ldr(result, ContextOperand(result));
__ ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
// If the result is not the_hole, return. Otherwise, handle in the runtime.
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
__ Ret(ne);
// Fallback to runtime.
__ SmiTag(slot);
__ push(slot);
__ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
}
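
In outline, the stub body is a loop over Context::PREVIOUS_INDEX followed by one cell load and a hole check. A pseudo-C++ reduction follows; the struct layouts, 4-slot array, and sentinel are stand-ins, not V8's.

#include <cstdio>

struct PropertyCell { const void* value; };
struct Context {
  Context* previous;
  PropertyCell* slots[4];
};

static const char kTheHoleStorage = 0;
const void* const kTheHole = &kTheHoleStorage;  // like Heap::kTheHoleValue

// Hop `depth` contexts up to the script context, read the cell in `slot`,
// and treat the_hole as "defer to Runtime_LoadGlobalViaContext".
const void* LoadGlobalViaContext(Context* ctx, int depth, int slot) {
  for (int i = 0; i < depth; ++i) ctx = ctx->previous;
  const void* v = ctx->slots[slot]->value;
  return v == kTheHole ? nullptr : v;
}

int main() {
  PropertyCell hole{kTheHole}, greeting{"hello"};
  Context script{nullptr, {&hole, &greeting}};
  Context inner{&script, {}};
  std::printf("%s\n",
              LoadGlobalViaContext(&inner, 1, 1) ? "fast path" : "runtime");
}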
void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
Register value = r0;
Register slot = r2;
Register cell = r1;
Register cell_details = r4;
Register cell_value = r5;
Register cell_value_map = r6;
Register scratch = r9;
Register context = cp;
Register context_temp = cell;
Label fast_heapobject_case, fast_smi_case, slow_case;
if (FLAG_debug_code) {
__ CompareRoot(value, Heap::kTheHoleValueRootIndex);
__ Check(ne, kUnexpectedValue);
}
// Go up the context chain to the script context.
for (int i = 0; i < depth(); i++) {
__ ldr(context_temp, ContextOperand(context, Context::PREVIOUS_INDEX));
context = context_temp;
}
// Load the PropertyCell at the specified slot.
__ add(cell, context, Operand(slot, LSL, kPointerSizeLog2));
__ ldr(cell, ContextOperand(cell));
// Load PropertyDetails for the cell (actually only the cell_type and kind).
__ ldr(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
__ SmiUntag(cell_details);
__ and_(cell_details, cell_details,
Operand(PropertyDetails::PropertyCellTypeField::kMask |
PropertyDetails::KindField::kMask |
PropertyDetails::kAttributesReadOnlyMask));
// Check if PropertyCell holds mutable data.
Label not_mutable_data;
__ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
PropertyCellType::kMutable) |
PropertyDetails::KindField::encode(kData)));
__ b(ne, &not_mutable_data);
__ JumpIfSmi(value, &fast_smi_case);
__ bind(&fast_heapobject_case);
__ str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
// RecordWriteField clobbers the value register, so we copy it before the
// call.
__ mov(r4, Operand(value));
__ RecordWriteField(cell, PropertyCell::kValueOffset, r4, scratch,
kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Ret();
__ bind(&not_mutable_data);
// Check if PropertyCell value matches the new value (relevant for Constant,
// ConstantType and Undefined cells).
Label not_same_value;
__ ldr(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
__ cmp(cell_value, value);
__ b(ne, &not_same_value);
// Make sure the PropertyCell is not marked READ_ONLY.
__ tst(cell_details, Operand(PropertyDetails::kAttributesReadOnlyMask));
__ b(ne, &slow_case);
if (FLAG_debug_code) {
Label done;
// This can only be true for Constant, ConstantType and Undefined cells,
// because we never store the_hole via this stub.
__ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
PropertyCellType::kConstant) |
PropertyDetails::KindField::encode(kData)));
__ b(eq, &done);
__ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
PropertyCellType::kConstantType) |
PropertyDetails::KindField::encode(kData)));
__ b(eq, &done);
__ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
PropertyCellType::kUndefined) |
PropertyDetails::KindField::encode(kData)));
__ Check(eq, kUnexpectedValue);
__ bind(&done);
}
__ Ret();
__ bind(&not_same_value);
// Check if PropertyCell contains data with constant type (and is not
// READ_ONLY).
__ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
PropertyCellType::kConstantType) |
PropertyDetails::KindField::encode(kData)));
__ b(ne, &slow_case);
// Now either both old and new values must be smis or both must be heap
// objects with same map.
Label value_is_heap_object;
__ JumpIfNotSmi(value, &value_is_heap_object);
__ JumpIfNotSmi(cell_value, &slow_case);
// Old and new values are smis, no need for a write barrier here.
__ bind(&fast_smi_case);
__ str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
__ Ret();
__ bind(&value_is_heap_object);
__ JumpIfSmi(cell_value, &slow_case);
__ ldr(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
__ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
__ cmp(cell_value_map, scratch);
__ b(eq, &fast_heapobject_case);
// Fallback to runtime.
__ bind(&slow_case);
__ SmiTag(slot);
__ Push(slot, value);
__ TailCallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
: Runtime::kStoreGlobalViaContext_Sloppy,
2, 1);
}
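
The store stub's triage condenses to a few rules, sketched below; the cell kinds and the same_shape flag abstract over the details-word masking and map comparison the assembly performs, so this simplifies deliberately.

#include <cassert>

enum class CellType { kMutable, kConstant, kConstantType, kUndefined };
enum class Path { kFastStore, kAlreadyStored, kRuntime };

struct Cell {
  CellType type;
  bool read_only;
  const void* value;
};

// same_shape: both values are smis, or both heap objects with the same map.
Path ClassifyStore(const Cell& cell, const void* new_value, bool same_shape) {
  if (cell.type == CellType::kMutable && !cell.read_only)
    return Path::kFastStore;
  if (cell.value == new_value)  // Constant/ConstantType/Undefined cells
    return cell.read_only ? Path::kRuntime : Path::kAlreadyStored;
  if (cell.type == CellType::kConstantType && !cell.read_only && same_shape)
    return Path::kFastStore;
  return Path::kRuntime;  // everything else transitions in the runtime
}

int main() {
  int old_v = 0, new_v = 0;
  Cell mut{CellType::kMutable, false, &old_v};
  assert(ClassifyStore(mut, &new_v, false) == Path::kFastStore);
  Cell konst{CellType::kConstant, false, &old_v};
  assert(ClassifyStore(konst, &new_v, true) == Path::kRuntime);
}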
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}

2
deps/v8/src/arm/code-stubs-arm.h

@ -5,6 +5,8 @@
#ifndef V8_ARM_CODE_STUBS_ARM_H_
#define V8_ARM_CODE_STUBS_ARM_H_
#include "src/arm/frames-arm.h"
namespace v8 {
namespace internal {

9
deps/v8/src/arm/codegen-arm.cc

@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "src/arm/simulator-arm.h"
@ -888,10 +886,9 @@ CodeAgingHelper::CodeAgingHelper() {
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before ARM simulator ICache is setup.
SmartPointer<CodePatcher> patcher(
new CodePatcher(young_sequence_.start(),
young_sequence_.length() / Assembler::kInstrSize,
CodePatcher::DONT_FLUSH));
base::SmartPointer<CodePatcher> patcher(new CodePatcher(
young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize,
CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->PushFixedFrame(r1);
patcher->masm()->nop(ip.code());

3
deps/v8/src/arm/codegen-arm.h

@ -12,9 +12,6 @@ namespace v8 {
namespace internal {
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
class StringCharLoadGenerator : public AllStatic {
public:
// Generates the code for handling different string types and loading the

2
deps/v8/src/arm/constants-arm.cc

@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "src/arm/constants-arm.h"

6
deps/v8/src/arm/constants-arm.h

@ -5,6 +5,12 @@
#ifndef V8_ARM_CONSTANTS_ARM_H_
#define V8_ARM_CONSTANTS_ARM_H_
#include <stdint.h>
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/globals.h"
// ARM EABI is required.
#if defined(__arm__) && !defined(__ARM_EABI__)
#error ARM EABI support is required.

2
deps/v8/src/arm/cpu-arm.cc

@ -12,8 +12,6 @@
#endif
#endif
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "src/assembler.h"

248
deps/v8/src/arm/debug-arm.cc

@ -1,248 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "src/codegen.h"
#include "src/debug.h"
namespace v8 {
namespace internal {
void BreakLocation::SetDebugBreakAtReturn() {
// Patch the code changing the return from JS function sequence from
// mov sp, fp
// ldmia sp!, {fp, lr}
// add sp, sp, #4
// bx lr
// to a call to the debug break return code.
// ldr ip, [pc, #0]
// blx ip
// <debug break return code entry point address>
// bkpt 0
CodePatcher patcher(pc(), Assembler::kJSReturnSequenceInstructions);
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
patcher.Emit(
debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry());
patcher.masm()->bkpt(0);
}
void BreakLocation::SetDebugBreakAtSlot() {
DCHECK(IsDebugBreakSlot());
// Patch the code changing the debug break slot code from
// mov r2, r2
// mov r2, r2
// mov r2, r2
// to a call to the debug break slot code.
// ldr ip, [pc, #0]
// blx ip
// <debug break slot code entry point address>
CodePatcher patcher(pc(), Assembler::kDebugBreakSlotInstructions);
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
patcher.Emit(
debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry());
}
#define __ ACCESS_MASM(masm)
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Load padding words on stack.
__ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue)));
for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
__ push(ip);
}
__ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
__ push(ip);
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non-object values
// are stored as smis, causing them to be left untouched by GC.
DCHECK((object_regs & ~kJSCallerSaved) == 0);
DCHECK((non_object_regs & ~kJSCallerSaved) == 0);
DCHECK((object_regs & non_object_regs) == 0);
if ((object_regs | non_object_regs) != 0) {
for (int i = 0; i < kNumJSCallerSaved; i++) {
int r = JSCallerSavedCode(i);
Register reg = { r };
if ((non_object_regs & (1 << r)) != 0) {
if (FLAG_debug_code) {
__ tst(reg, Operand(0xc0000000));
__ Assert(eq, kUnableToEncodeValueAsSmi);
}
__ SmiTag(reg);
}
}
__ stm(db_w, sp, object_regs | non_object_regs);
}
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
__ mov(r0, Operand::Zero()); // no arguments
__ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
CEntryStub ceb(masm->isolate(), 1);
__ CallStub(&ceb);
// Restore the register values from the expression stack.
if ((object_regs | non_object_regs) != 0) {
__ ldm(ia_w, sp, object_regs | non_object_regs);
for (int i = 0; i < kNumJSCallerSaved; i++) {
int r = JSCallerSavedCode(i);
Register reg = { r };
if ((non_object_regs & (1 << r)) != 0) {
__ SmiUntag(reg);
}
if (FLAG_debug_code &&
(((object_regs | non_object_regs) & (1 << r)) == 0)) {
__ mov(reg, Operand(kDebugZapValue));
}
}
}
// Don't bother removing padding bytes pushed on the stack
// as the frame is going to be restored right away.
// Leave the internal frame.
}
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller, which was
// overwritten by the address of DebugBreakXXX.
ExternalReference after_break_target =
ExternalReference::debug_after_break_target_address(masm->isolate());
__ mov(ip, Operand(after_break_target));
__ ldr(ip, MemOperand(ip));
__ Jump(ip);
}
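Note: the tst against 0xc0000000 above is what makes this encoding safe: a raw value can only be smi-tagged losslessly when its top two bits are clear, since ARM smis are 31-bit values shifted left by one. Worked example (illustrative arithmetic, not part of the patch):

// Raw value:             0x12345678  (top two bits clear, encodable)
// After SmiTag (lsl #1):  0x2468acf0  (low bit 0 marks a smi, so GC skips it)
// A value such as 0x40000000 sets bit 30 and would fail the tst check.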
void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
// Register state for CallICStub
// ----------- S t a t e -------------
// -- r1 : function
// -- r3 : slot in feedback array (smi)
// -----------------------------------
Generate_DebugBreakCallHelper(masm, r1.bit() | r3.bit(), 0);
}
void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that r0 is TOS which
// is an object - this is not generally the case so this should be used with
// care.
Generate_DebugBreakCallHelper(masm, r0.bit(), 0);
}
void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-arm.cc).
// ----------- S t a t e -------------
// -- r1 : function
// -----------------------------------
Generate_DebugBreakCallHelper(masm, r1.bit(), 0);
}
void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-arm.cc)
// ----------- S t a t e -------------
// -- r0 : number of arguments (not smi)
// -- r1 : constructor function
// -----------------------------------
Generate_DebugBreakCallHelper(masm, r1.bit(), r0.bit());
}
void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-arm.cc)
// ----------- S t a t e -------------
// -- r0 : number of arguments (not smi)
// -- r1 : constructor function
// -- r2 : feedback array
// -- r3 : feedback slot (smi)
// -----------------------------------
Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), r0.bit());
}
void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
// Generate enough nops to make space for a call instruction. Avoid emitting
// the constant pool in the debug break slot code.
Assembler::BlockConstPoolScope block_const_pool(masm);
Label check_codesize;
__ bind(&check_codesize);
__ RecordDebugBreakSlot();
for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
__ nop(MacroAssembler::DEBUG_BREAK_NOP);
}
DCHECK_EQ(Assembler::kDebugBreakSlotInstructions,
masm->InstructionsGeneratedSince(&check_codesize));
}
void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted no registers can contain
// object pointers.
Generate_DebugBreakCallHelper(masm, 0, 0);
}
void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
__ Ret();
}
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
ExternalReference restarter_frame_function_slot =
ExternalReference::debug_restarter_frame_function_pointer_address(
masm->isolate());
__ mov(ip, Operand(restarter_frame_function_slot));
__ mov(r1, Operand::Zero());
__ str(r1, MemOperand(ip, 0));
// Load the function pointer off of our current stack frame.
__ ldr(r1, MemOperand(fp,
StandardFrameConstants::kConstantPoolOffset - kPointerSize));
// Pop return address, frame and constant pool pointer (if
// FLAG_enable_embedded_constant_pool).
__ LeaveFrame(StackFrame::INTERNAL);
{ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
// Load context from the function.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Get function code.
__ ldr(ip, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
__ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
// Re-run JSFunction, r1 is function, cp is context.
__ Jump(ip);
}
}
const bool LiveEdit::kFrameDropperSupported = true;
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM

4
deps/v8/src/arm/deoptimizer-arm.cc

@@ -2,11 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
#include "src/full-codegen/full-codegen.h"
#include "src/safepoint-table.h"
namespace v8 {

2
deps/v8/src/arm/disasm-arm.cc

@@ -28,8 +28,6 @@
#include <stdio.h>
#include <string.h>
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "src/arm/constants-arm.h"

2
deps/v8/src/arm/frames-arm.cc

@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "src/assembler.h"

6
deps/v8/src/arm/frames-arm.h

@@ -128,12 +128,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
inline Object* JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
return Memory::Object_at(fp() + offset);
}
} } // namespace v8::internal
#endif // V8_ARM_FRAMES_ARM_H_

41
deps/v8/src/arm/interface-descriptors-arm.cc

@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "src/interface-descriptors.h"
@@ -36,7 +34,11 @@ const Register VectorStoreICDescriptor::VectorRegister() { return r3; }
const Register StoreTransitionDescriptor::MapRegister() { return r3; }
const Register ElementTransitionAndStoreDescriptor::MapRegister() { return r3; }
const Register LoadGlobalViaContextDescriptor::SlotRegister() { return r2; }
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r2; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r0; }
const Register InstanceofDescriptor::left() { return r0; }
@@ -62,6 +64,14 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
void StoreTransitionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
MapRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2};
@@ -83,6 +93,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
}
// static
const Register ToObjectDescriptor::ReceiverRegister() { return r0; }
void NumberToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r0};
@@ -158,11 +172,11 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// r0 : number of arguments
// r1 : the function to call
// r2 : feedback vector
// r3 : (only if r2 is not the megamorphic symbol) slot in feedback
// vector (Smi)
// r3 : slot in feedback vector (Smi, for RecordCallTarget)
// r4 : original constructor (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {r0, r1, r2};
Register registers[] = {r0, r1, r4, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -353,11 +367,22 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
void MathRoundVariantDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
r1, // math rounding function
r3, // vector slot id
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void MathRoundVariantCallFromOptimizedCodeDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
r1, // math rounding function
r3, // vector slot id
r4, // type vector
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}

52
deps/v8/src/arm/lithium-arm.cc

@@ -4,8 +4,6 @@
#include <sstream>
#include "src/v8.h"
#include "src/arm/lithium-codegen-arm.h"
#include "src/hydrogen-osr.h"
#include "src/lithium-inl.h"
@@ -330,6 +328,11 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
stream->Add("depth:%d slot:%d", depth(), slot_index());
}
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@@ -348,6 +351,12 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
value()->PrintTo(stream);
}
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
@@ -1661,8 +1670,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return result;
} else if (instr->representation().IsExternal()) {
DCHECK(instr->left()->representation().IsExternal());
DCHECK(instr->right()->representation().IsInteger32());
DCHECK(instr->IsConsistentExternalRepresentation());
DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
@@ -2150,6 +2158,15 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
HLoadGlobalViaContext* instr) {
LOperand* context = UseFixed(instr->context(), cp);
DCHECK(instr->slot_index() > 0);
LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
return MarkAsCall(DefineFixed(result, r0), instr);
}
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2218,7 +2235,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LInstruction* result = NULL;
if (!instr->is_typed_elements()) {
if (!instr->is_fixed_typed_array()) {
LOperand* obj = NULL;
if (instr->representation().IsDouble()) {
obj = UseRegister(instr->elements());
@@ -2238,10 +2255,9 @@ }
}
bool needs_environment;
if (instr->is_external() || instr->is_fixed_typed_array()) {
if (instr->is_fixed_typed_array()) {
// see LCodeGen::DoLoadKeyedExternalArray
needs_environment = (elements_kind == EXTERNAL_UINT32_ELEMENTS ||
elements_kind == UINT32_ELEMENTS) &&
needs_environment = elements_kind == UINT32_ELEMENTS &&
!instr->CheckFlag(HInstruction::kUint32);
} else {
// see LCodeGen::DoLoadKeyedFixedDoubleArray and
@@ -2276,7 +2292,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (!instr->is_typed_elements()) {
if (!instr->is_fixed_typed_array()) {
DCHECK(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* object = NULL;
@@ -2308,10 +2324,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
!IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->value()->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(instr->elements_kind())));
DCHECK((instr->is_fixed_typed_array() &&
instr->elements()->representation().IsTagged()) ||
(instr->is_external() &&
instr->elements()->representation().IsExternal()));
DCHECK(instr->elements()->representation().IsExternal());
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* backing_store = UseRegister(instr->elements());
@@ -2437,6 +2450,19 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
HStoreGlobalViaContext* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* value = UseFixed(instr->value(),
StoreGlobalViaContextDescriptor::ValueRegister());
DCHECK(instr->slot_index() > 0);
LStoreGlobalViaContext* result =
new (zone()) LStoreGlobalViaContext(context, value);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), r1);

52
deps/v8/src/arm/lithium-arm.h

@@ -102,6 +102,7 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
V(LoadGlobalViaContext) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
@@ -143,6 +144,7 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
V(StoreGlobalViaContext) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1645,15 +1647,9 @@ class LLoadKeyed final : public LTemplateInstruction<1, 2, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
bool is_external() const {
return hydrogen()->is_external();
}
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
}
bool is_typed_elements() const {
return is_external() || is_fixed_typed_array();
}
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
@@ -1700,7 +1696,23 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
Handle<Object> name() const { return hydrogen()->name(); }
bool for_typeof() const { return hydrogen()->for_typeof(); }
TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
};
class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
public:
explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
void PrintDataTo(StringStream* stream) override;
LOperand* context() { return inputs_[0]; }
int depth() const { return hydrogen()->depth(); }
int slot_index() const { return hydrogen()->slot_index(); }
};
@@ -2205,6 +2217,28 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
};
class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
public:
LStoreGlobalViaContext(LOperand* context, LOperand* value) {
inputs_[0] = context;
inputs_[1] = value;
}
LOperand* context() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
"store-global-via-context")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
void PrintDataTo(StringStream* stream) override;
int depth() { return hydrogen()->depth(); }
int slot_index() { return hydrogen()->slot_index(); }
LanguageMode language_mode() { return hydrogen()->language_mode(); }
};
class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
@@ -2213,13 +2247,9 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
inputs_[2] = value;
}
bool is_external() const { return hydrogen()->is_external(); }
bool is_fixed_typed_array() const {
return hydrogen()->is_fixed_typed_array();
}
bool is_typed_elements() const {
return is_external() || is_fixed_typed_array();
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }

138
deps/v8/src/arm/lithium-codegen-arm.cc

@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/arm/lithium-codegen-arm.h"
#include "src/arm/lithium-gap-resolver-arm.h"
#include "src/base/bits.h"
@@ -106,7 +104,7 @@ bool LCodeGen::GeneratePrologue() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop_at");
}
#endif
@@ -427,6 +425,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
Handle<Object> literal = constant->handle(isolate());
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
AllowDeferredHandleDereference get_number;
DCHECK(literal->IsNumber());
__ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
} else if (r.IsDouble()) {
@@ -648,15 +647,23 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
if (op->IsStackSlot()) {
int index = op->index();
if (index >= 0) {
index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
}
if (is_tagged) {
translation->StoreStackSlot(op->index());
translation->StoreStackSlot(index);
} else if (is_uint32) {
translation->StoreUint32StackSlot(op->index());
translation->StoreUint32StackSlot(index);
} else {
translation->StoreInt32StackSlot(op->index());
translation->StoreInt32StackSlot(index);
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
int index = op->index();
if (index >= 0) {
index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
}
translation->StoreDoubleStackSlot(index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
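Note: the adjustment above rebases non-negative (spill) slot indices past the fixed frame header, while negative indices (incoming parameters) are left untouched. Assuming the usual four-word fixed frame (caller fp, return address, context, function; a fifth word is added when embedded constant pools are enabled), this means for example:

// spill slot 0  -> translation index 4
// spill slot 1  -> translation index 5
// parameter -2  -> stays -2 (negative indices are not rebased)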
@@ -2267,6 +2274,12 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ b(eq, instr->TrueLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
// SIMD value -> true.
__ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
__ b(eq, instr->TrueLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
DwVfpRegister dbl_scratch = double_scratch0();
@@ -2969,13 +2982,31 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
__ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
PREMONOMORPHIC).code();
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
SLOPPY, PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->result()).is(r0));
int const slot = instr->slot_index();
int const depth = instr->depth();
if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
__ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
Handle<Code> stub =
CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
CallCode(stub, RelocInfo::CODE_TARGET, instr);
} else {
__ Push(Smi::FromInt(slot));
__ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
}
}
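Note: depth here counts how many context links must be followed from the current context to reach the script context holding the slot. Only chains up to LoadGlobalViaContextStub::kMaximumDepth get the depth-specialized stub; deeper chains fall back to the runtime with just the smi-tagged slot index as an argument.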
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3068,7 +3099,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(
isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -3162,17 +3193,13 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
? (element_size_shift - kSmiTagSize) : element_size_shift;
int base_offset = instr->base_offset();
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
elements_kind == FLOAT32_ELEMENTS ||
elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
? Operand(constant_key << element_size_shift)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
elements_kind == FLOAT32_ELEMENTS) {
if (elements_kind == FLOAT32_ELEMENTS) {
__ vldr(double_scratch0().low(), scratch0(), base_offset);
__ vcvt_f64_f32(result, double_scratch0().low());
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
@@ -3184,29 +3211,22 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
key, external_pointer, key_is_constant, constant_key,
element_size_shift, shift_size, base_offset);
switch (elements_kind) {
case EXTERNAL_INT8_ELEMENTS:
case INT8_ELEMENTS:
__ ldrsb(result, mem_operand);
break;
case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
case EXTERNAL_UINT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
__ ldrb(result, mem_operand);
break;
case EXTERNAL_INT16_ELEMENTS:
case INT16_ELEMENTS:
__ ldrsh(result, mem_operand);
break;
case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
__ ldrh(result, mem_operand);
break;
case EXTERNAL_INT32_ELEMENTS:
case INT32_ELEMENTS:
__ ldr(result, mem_operand);
break;
case EXTERNAL_UINT32_ELEMENTS:
case UINT32_ELEMENTS:
__ ldr(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
@@ -3216,8 +3236,6 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
@@ -3327,7 +3345,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
if (instr->is_typed_elements()) {
if (instr->is_fixed_typed_array()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
@@ -3570,12 +3588,11 @@ void LCodeGen::DoContext(LContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
__ push(cp); // The context is the first argument.
__ Move(scratch0(), instr->hydrogen()->pairs());
__ push(scratch0());
__ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ push(scratch0());
CallRuntime(Runtime::kDeclareGlobals, 3, instr);
CallRuntime(Runtime::kDeclareGlobals, 2, instr);
}
@@ -4220,6 +4237,30 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->value())
.is(StoreGlobalViaContextDescriptor::ValueRegister()));
int const slot = instr->slot_index();
int const depth = instr->depth();
if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
__ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
isolate(), depth, instr->language_mode())
.code();
CallCode(stub, RelocInfo::CODE_TARGET, instr);
} else {
__ Push(Smi::FromInt(slot));
__ push(StoreGlobalViaContextDescriptor::ValueRegister());
__ CallRuntime(is_strict(instr->language_mode())
? Runtime::kStoreGlobalViaContext_Strict
: Runtime::kStoreGlobalViaContext_Sloppy,
2);
}
}
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
if (instr->index()->IsConstantOperand()) {
@@ -4262,10 +4303,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
? (element_size_shift - kSmiTagSize) : element_size_shift;
int base_offset = instr->base_offset();
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
elements_kind == FLOAT32_ELEMENTS ||
elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
Register address = scratch0();
DwVfpRegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
@@ -4278,8 +4316,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
} else {
__ add(address, external_pointer, Operand(key, LSL, shift_size));
}
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
elements_kind == FLOAT32_ELEMENTS) {
if (elements_kind == FLOAT32_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
__ vstr(double_scratch0().low(), address, base_offset);
} else { // Storing doubles, not floats.
@@ -4292,30 +4329,21 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
element_size_shift, shift_size,
base_offset);
switch (elements_kind) {
case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
case EXTERNAL_INT8_ELEMENTS:
case EXTERNAL_UINT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
case INT8_ELEMENTS:
__ strb(value, mem_operand);
break;
case EXTERNAL_INT16_ELEMENTS:
case EXTERNAL_UINT16_ELEMENTS:
case INT16_ELEMENTS:
case UINT16_ELEMENTS:
__ strh(value, mem_operand);
break;
case EXTERNAL_INT32_ELEMENTS:
case EXTERNAL_UINT32_ELEMENTS:
case INT32_ELEMENTS:
case UINT32_ELEMENTS:
__ str(value, mem_operand);
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -4421,7 +4449,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
// By cases: external, fast double
if (instr->is_typed_elements()) {
if (instr->is_fixed_typed_array()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
@@ -5630,10 +5658,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
} else if (String::Equals(type_name, factory->string_string())) {
__ JumpIfSmi(input, false_label);
__ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
__ b(ge, false_label);
__ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ tst(scratch, Operand(1 << Map::kIsUndetectable));
final_branch_condition = eq;
final_branch_condition = lt;
} else if (String::Equals(type_name, factory->symbol_string())) {
__ JumpIfSmi(input, false_label);
@@ -5680,6 +5705,17 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ tst(scratch, Operand(1 << Map::kIsUndetectable));
final_branch_condition = eq;
// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
} else if (String::Equals(type_name, factory->type##_string())) { \
__ JumpIfSmi(input, false_label); \
__ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
__ CompareRoot(scratch, Heap::k##Type##MapRootIndex); \
final_branch_condition = eq;
SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
// clang-format on
} else {
__ b(false_label);
}
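Note: because each macro body ends without a closing brace, every expansion chains onto the surrounding if/else ladder. Roughly, a single expansion (taking a hypothetical float32x4 entry in SIMD128_TYPES as an example) would read:

} else if (String::Equals(type_name, factory->float32x4_string())) {
  __ JumpIfSmi(input, false_label);
  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ CompareRoot(scratch, Heap::kFloat32x4MapRootIndex);
  final_branch_condition = eq;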
@@ -5800,8 +5836,8 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ cmp(sp, Operand(ip));
__ b(hs, &done);
Handle<Code> stack_check = isolate()->builtins()->StackCheck();
PredictableCodeSizeScope predictable(masm(),
CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
PredictableCodeSizeScope predictable(masm());
predictable.ExpectSize(CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
DCHECK(instr->context()->IsRegister());
DCHECK(ToRegister(instr->context()).is(cp));
CallCode(stack_check, RelocInfo::CODE_TARGET, instr);

2
deps/v8/src/arm/lithium-gap-resolver-arm.cc

@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/arm/lithium-codegen-arm.h"
#include "src/arm/lithium-gap-resolver-arm.h"

2
deps/v8/src/arm/lithium-gap-resolver-arm.h

@@ -5,8 +5,6 @@
#ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
#define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
#include "src/v8.h"
#include "src/lithium.h"
namespace v8 {

47
deps/v8/src/arm/macro-assembler-arm.cc

@@ -4,8 +4,6 @@
#include <limits.h> // For LONG_MIN, LONG_MAX.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "src/base/bits.h"
@@ -13,7 +11,7 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/debug/debug.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -1434,10 +1432,11 @@ void MacroAssembler::IsObjectNameType(Register object,
void MacroAssembler::DebugBreak() {
mov(r0, Operand::Zero());
mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
mov(r1,
Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
CEntryStub ces(isolate(), 1);
DCHECK(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
}
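Note: DebugBreak now enters the runtime through Runtime::kHandleDebuggerStatement with zero JS arguments (hence r0 = 0), and the call site is tagged with the DEBUGGER_STATEMENT reloc mode in place of the old DEBUG_BREAK one, matching the debugger refactoring elsewhere in this patch.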
@@ -1875,26 +1874,6 @@ }
}
void MacroAssembler::UndoAllocationInNewSpace(Register object,
Register scratch) {
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
// Make sure the object has no tag before resetting top.
and_(object, object, Operand(~kHeapObjectTagMask));
#ifdef DEBUG
// Check that the object un-allocated is below the current top.
mov(scratch, Operand(new_space_allocation_top));
ldr(scratch, MemOperand(scratch));
cmp(object, scratch);
Check(lt, kUndoAllocationOfNonAllocatedMemory);
#endif
// Write the address of the object to un-allocate as the current top.
mov(scratch, Operand(new_space_allocation_top));
str(object, MemOperand(scratch));
}
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
Register scratch1,
@@ -3809,23 +3788,35 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
Register scratch1,
Label* found) {
DCHECK(!scratch1.is(scratch0));
Factory* factory = isolate()->factory();
Register current = scratch0;
Label loop_again;
Label loop_again, end;
// scratch contained elements pointer.
mov(current, object);
ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
CompareRoot(current, Heap::kNullValueRootIndex);
b(eq, &end);
// Loop based on the map going up the prototype chain.
bind(&loop_again);
ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
ldrb(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
cmp(scratch1, Operand(JS_OBJECT_TYPE));
b(lo, found);
ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
DecodeField<Map::ElementsKindBits>(scratch1);
cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
b(eq, found);
ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
cmp(current, Operand(factory->null_value()));
CompareRoot(current, Heap::kNullValueRootIndex);
b(ne, &loop_again);
bind(&end);
}
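Note: two behavioral tweaks are folded into this hunk: null is now compared via CompareRoot instead of materializing factory->null_value(), and the null check is hoisted ahead of the loop (with the new end label) so an object whose immediate prototype is null never dereferences a null map. A C++ restatement of the generated walk (an illustrative sketch only, using era-appropriate internal accessors):

// Returns true if any prototype is a proxy/value object or has
// dictionary-mode elements; mirrors the generated code above.
bool PrototypeChainHasDictionaryElements(JSObject* object) {
  for (Object* current = object->map()->prototype(); !current->IsNull();
       current = HeapObject::cast(current)->map()->prototype()) {
    Map* map = HeapObject::cast(current)->map();
    if (map->instance_type() < JS_OBJECT_TYPE) return true;
    if (map->elements_kind() == DICTIONARY_ELEMENTS) return true;
  }
  return false;
}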

78
deps/v8/src/arm/macro-assembler-arm.h

@@ -13,6 +13,19 @@
namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {kRegister_r0_Code};
const Register kReturnRegister1 = {kRegister_r1_Code};
const Register kJSFunctionRegister = {kRegister_r1_Code};
const Register kContextRegister = {kRegister_r7_Code};
const Register kInterpreterAccumulatorRegister = {kRegister_r0_Code};
const Register kInterpreterRegisterFileRegister = {kRegister_r4_Code};
const Register kInterpreterBytecodeOffsetRegister = {kRegister_r5_Code};
const Register kInterpreterBytecodeArrayRegister = {kRegister_r6_Code};
const Register kInterpreterDispatchTableRegister = {kRegister_r8_Code};
const Register kRuntimeCallFunctionRegister = {kRegister_r1_Code};
const Register kRuntimeCallArgCountRegister = {kRegister_r0_Code};
// ----------------------------------------------------------------------------
// Static helper functions
@@ -250,7 +263,7 @@ class MacroAssembler: public Assembler {
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldOperand(reg, off).
// the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
void RecordWriteField(
Register object,
int offset,
@@ -325,9 +338,7 @@ class MacroAssembler: public Assembler {
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Condition cond = al) {
DCHECK(!src1.is(src2));
DCHECK(!src2.is(src3));
DCHECK(!src1.is(src3));
DCHECK(!AreAliased(src1, src2, src3));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
@@ -347,12 +358,7 @@
Register src3,
Register src4,
Condition cond = al) {
DCHECK(!src1.is(src2));
DCHECK(!src2.is(src3));
DCHECK(!src1.is(src3));
DCHECK(!src1.is(src4));
DCHECK(!src2.is(src4));
DCHECK(!src3.is(src4));
DCHECK(!AreAliased(src1, src2, src3, src4));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
@@ -374,6 +380,36 @@ }
}
}
// Push five registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4,
Register src5, Condition cond = al) {
DCHECK(!AreAliased(src1, src2, src3, src4, src5));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
if (src4.code() > src5.code()) {
stm(db_w, sp,
src1.bit() | src2.bit() | src3.bit() | src4.bit() | src5.bit(),
cond);
} else {
stm(db_w, sp, src1.bit() | src2.bit() | src3.bit() | src4.bit(),
cond);
str(src5, MemOperand(sp, 4, NegPreIndex), cond);
}
} else {
stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
Push(src4, src5, cond);
}
} else {
stm(db_w, sp, src1.bit() | src2.bit(), cond);
Push(src3, src4, src5, cond);
}
} else {
str(src1, MemOperand(sp, 4, NegPreIndex), cond);
Push(src2, src3, src4, src5, cond);
}
}
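Note: the code() comparisons exist because stm stores registers in ascending register-number order regardless of how they are listed, so a single stm is only usable when the requested push order (leftmost at the highest address) coincides with descending register numbers; otherwise the helper peels off one str with pre-decrement and recurses into the smaller overloads. For example (illustrative):

// Register numbers already descend left to right, so this folds into a
// single 'stm db_w, sp, {r0-r4}' with r4 ending up at the highest address.
__ Push(r4, r3, r2, r1, r0);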
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Condition cond = al) {
DCHECK(!src1.is(src2));
@@ -387,9 +423,7 @@ class MacroAssembler: public Assembler {
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
DCHECK(!src1.is(src2));
DCHECK(!src2.is(src3));
DCHECK(!src1.is(src3));
DCHECK(!AreAliased(src1, src2, src3));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
@@ -409,12 +443,7 @@
Register src3,
Register src4,
Condition cond = al) {
DCHECK(!src1.is(src2));
DCHECK(!src2.is(src3));
DCHECK(!src1.is(src3));
DCHECK(!src1.is(src4));
DCHECK(!src2.is(src4));
DCHECK(!src3.is(src4));
DCHECK(!AreAliased(src1, src2, src3, src4));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
@@ -745,13 +774,6 @@
Label* gc_required,
AllocationFlags flags);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. The caller must make sure that no pointers
// are left to the object(s) no longer allocated as they would be invalid when
// allocation is undone.
void UndoAllocationInNewSpace(Register object, Register scratch);
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
@@ -1513,7 +1535,7 @@ class CodePatcher {
CodePatcher(byte* address,
int instructions,
FlushICache flush_cache = FLUSH);
virtual ~CodePatcher();
~CodePatcher();
// Macro assembler to emit code.
MacroAssembler* masm() { return &masm_; }
@@ -1539,7 +1561,7 @@ class CodePatcher {
// -----------------------------------------------------------------------------
// Static helper functions.
inline MemOperand ContextOperand(Register context, int index) {
inline MemOperand ContextOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}

17
deps/v8/src/arm/simulator-arm.cc

@@ -6,8 +6,6 @@
#include <stdlib.h>
#include <cmath>
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "src/arm/constants-arm.h"
@@ -1229,9 +1227,15 @@ void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
// Returns the limit of the stack area to enable checking for stack overflows.
uintptr_t Simulator::StackLimit() const {
// Leave a safety margin of 1024 bytes to prevent overrunning the stack when
// pushing values.
uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
// The simulator uses a separate JS stack. If we have exhausted the C stack,
// we also drop down the JS limit to reflect the exhaustion on the JS stack.
if (GetCurrentStackPosition() < c_limit) {
return reinterpret_cast<uintptr_t>(get_sp());
}
// Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes
// to prevent overrunning the stack when pushing values.
return reinterpret_cast<uintptr_t>(stack_) + 1024;
}
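Note: in effect the simulator now reports whichever stack is closer to exhaustion. Returning the current JS sp as the limit when the host C stack is low makes the next stack check in simulated code fail immediately, so C-stack exhaustion surfaces as an ordinary JS stack overflow instead of crashing the host process.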
@@ -4011,6 +4015,9 @@ void Simulator::Execute() {
void Simulator::CallInternal(byte* entry) {
// Adjust JS-based stack limit to C-based stack limit.
isolate_->stack_guard()->AdjustStackLimitForSimulator();
// Prepare to execute the code at entry
set_register(pc, reinterpret_cast<int32_t>(entry));
// Put down marker for end of simulation. The simulator will stop simulation

13
deps/v8/src/arm/simulator-arm.h

@@ -181,12 +181,12 @@ class Simulator {
void set_pc(int32_t value);
int32_t get_pc() const;
Address get_sp() {
Address get_sp() const {
return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
}
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
uintptr_t StackLimit(uintptr_t c_limit) const;
// Executes ARM instructions until the PC reaches end_sim_pc.
void Execute();
@@ -439,15 +439,14 @@
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code. Setting the c_limit to indicate a very small
// stack causes stack overflow errors, since the simulator ignores the input.
// This is unlikely to be an issue in practice, though it might cause testing
// trouble down the line.
// the C-based native code. The JS-based limit normally points near the end of
// the simulator stack. When the C-based limit is exhausted we reflect that by
// lowering the JS-based limit as well, to make stack checks trigger.
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
uintptr_t c_limit) {
return Simulator::current(isolate)->StackLimit();
return Simulator::current(isolate)->StackLimit(c_limit);
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {

63
deps/v8/src/arm64/assembler-arm64-inl.h

@@ -7,7 +7,7 @@
#include "src/arm64/assembler-arm64.h"
#include "src/assembler.h"
#include "src/debug.h"
#include "src/debug/debug.h"
namespace v8 {
@@ -17,7 +17,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
void RelocInfo::apply(intptr_t delta) {
// On arm64 only internal references need extra work.
DCHECK(RelocInfo::IsInternalReference(rmode_));
@@ -611,11 +611,6 @@ Address Assembler::target_address_from_return_address(Address pc) {
}
Address Assembler::break_address_from_return_address(Address pc) {
return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
}
Address Assembler::return_address_from_call_start(Address pc) {
// The call, generated by MacroAssembler::Call, is one of two possible
// sequences:
@@ -825,18 +820,18 @@ }
}
Address RelocInfo::call_address() {
DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Address RelocInfo::debug_call_address() {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
// For the above sequences the Relocinfo points to the load literal loading
// the call address.
STATIC_ASSERT(Assembler::kPatchDebugBreakSlotAddressOffset == 0);
return Assembler::target_address_at(pc_, host_);
}
void RelocInfo::set_call_address(Address target) {
DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
void RelocInfo::set_debug_call_address(Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
STATIC_ASSERT(Assembler::kPatchDebugBreakSlotAddressOffset == 0);
Assembler::set_target_address_at(pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -862,7 +857,7 @@ bool RelocInfo::IsPatchedReturnSequence() {
// The sequence must be:
// ldr ip0, [pc, #offset]
// blr ip0
// See arm64/debug-arm64.cc BreakLocation::SetDebugBreakAtReturn().
// See arm64/debug-arm64.cc DebugCodegen::PatchDebugBreakSlot
Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
Instruction* i2 = i1->following();
return i1->IsLdrLiteralX() && (i1->Rt() == kIp0Code) &&
@@ -888,11 +883,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitExternalReference(this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
visitor->VisitInternalReference(this);
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
isolate->debug()->has_break_points()) {
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
visitor->VisitDebugTarget(this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
@@ -913,11 +905,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitExternalReference(this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
StaticVisitor::VisitInternalReference(this);
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
StaticVisitor::VisitDebugTarget(heap, this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
@@ -973,32 +962,6 @@ LoadStorePairOp Assembler::StorePairOpFor(
}
LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
const CPURegister& rt, const CPURegister& rt2) {
DCHECK(AreSameSizeAndType(rt, rt2));
USE(rt2);
if (rt.IsRegister()) {
return rt.Is64Bits() ? LDNP_x : LDNP_w;
} else {
DCHECK(rt.IsFPRegister());
return rt.Is64Bits() ? LDNP_d : LDNP_s;
}
}
LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
const CPURegister& rt, const CPURegister& rt2) {
DCHECK(AreSameSizeAndType(rt, rt2));
USE(rt2);
if (rt.IsRegister()) {
return rt.Is64Bits() ? STNP_x : STNP_w;
} else {
DCHECK(rt.IsFPRegister());
return rt.Is64Bits() ? STNP_d : STNP_s;
}
}
LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
if (rt.IsRegister()) {
return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;

57
deps/v8/src/arm64/assembler-arm64.cc

@@ -26,13 +26,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#define ARM64_DEFINE_REG_STATICS
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/frames-arm64.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
@@ -1628,37 +1627,6 @@ }
}
void Assembler::ldnp(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& src) {
LoadStorePairNonTemporal(rt, rt2, src,
LoadPairNonTemporalOpFor(rt, rt2));
}
void Assembler::stnp(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& dst) {
LoadStorePairNonTemporal(rt, rt2, dst,
StorePairNonTemporalOpFor(rt, rt2));
}
void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& addr,
LoadStorePairNonTemporalOp op) {
DCHECK(!rt.Is(rt2));
DCHECK(AreSameSizeAndType(rt, rt2));
DCHECK(addr.IsImmediateOffset());
LSDataSize size = CalcLSPairDataSize(
static_cast<LoadStorePairOp>(op & LoadStorePairMask));
DCHECK(IsImmLSPair(addr.offset(), size));
int offset = static_cast<int>(addr.offset());
Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | ImmLSPair(offset, size));
}
// Memory instructions.
void Assembler::ldrb(const Register& rt, const MemOperand& src) {
LoadStore(rt, src, LDRB_w);
@@ -2902,21 +2870,18 @@ void Assembler::GrowBuffer() {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
if (((rmode >= RelocInfo::JS_RETURN) &&
(rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
if (((rmode >= RelocInfo::COMMENT) &&
(rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL)) ||
(rmode == RelocInfo::INTERNAL_REFERENCE) ||
(rmode == RelocInfo::CONST_POOL) ||
(rmode == RelocInfo::VENEER_POOL) ||
(rmode == RelocInfo::DEOPT_REASON)) {
(rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
(rmode == RelocInfo::DEOPT_REASON) ||
(rmode == RelocInfo::GENERATOR_CONTINUATION)) {
// Adjust code for new modes.
DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
|| RelocInfo::IsJSReturn(rmode)
|| RelocInfo::IsComment(rmode)
|| RelocInfo::IsDeoptReason(rmode)
|| RelocInfo::IsPosition(rmode)
|| RelocInfo::IsInternalReference(rmode)
|| RelocInfo::IsConstPool(rmode)
|| RelocInfo::IsVeneerPool(rmode));
DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsComment(rmode) ||
RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsPosition(rmode) ||
RelocInfo::IsInternalReference(rmode) ||
RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode) ||
RelocInfo::IsGeneratorContinuation(rmode));
// These modes do not need an entry in the constant pool.
} else {
constpool_.RecordEntry(data, rmode);

39
deps/v8/src/arm64/assembler-arm64.h

@@ -893,9 +893,6 @@ class Assembler : public AssemblerBase {
// instruction stream that call will return from.
inline static Address return_address_from_call_start(Address pc);
// Return the code target address of the patch debug break slot
inline static Address break_address_from_return_address(Address pc);
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -955,25 +952,13 @@
return SizeOfCodeGeneratedSince(label) / kInstructionSize;
}
// Number of instructions generated for the return sequence in
// FullCodeGenerator::EmitReturnSequence.
static const int kJSReturnSequenceInstructions = 7;
static const int kJSReturnSequenceLength =
kJSReturnSequenceInstructions * kInstructionSize;
// Distance between start of patched return sequence and the emitted address
// to jump to.
static const int kPatchReturnSequenceAddressOffset = 0;
static const int kPatchDebugBreakSlotAddressOffset = 0;
// Number of instructions necessary to be able to later patch it to a call.
// See DebugCodegen::GenerateSlot() and
// BreakLocation::SetDebugBreakAtSlot().
static const int kDebugBreakSlotInstructions = 4;
static const int kDebugBreakSlotInstructions = 5;
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstructionSize;
static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstructionSize;
// Prevent constant pool emission until EndBlockConstPool is called.
// Calls to this function can be nested but must be followed by an equal
// number of calls to EndBlockConstPool.
@@ -1022,11 +1007,11 @@
int buffer_space() const;
// Mark address of the ExitJSFrame code.
void RecordJSReturn();
// Mark generator continuation.
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
void RecordDebugBreakSlot();
void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
// Record the emission of a constant pool.
//
@@ -1507,14 +1492,6 @@
// Load word pair with sign extension.
void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
// Load integer or FP register pair, non-temporal.
void ldnp(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& src);
// Store integer or FP register pair, non-temporal.
void stnp(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& dst);
// Load literal to register from a pc relative address.
void ldr_pcrel(const CPURegister& rt, int imm19);
@@ -2022,10 +1999,6 @@
static inline LoadStoreOp StoreOpFor(const CPURegister& rt);
static inline LoadStorePairOp StorePairOpFor(const CPURegister& rt,
const CPURegister& rt2);
static inline LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
const CPURegister& rt, const CPURegister& rt2);
static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
const CPURegister& rt, const CPURegister& rt2);
static inline LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);
// Remove the specified branch from the unbound label link chain.
@@ -2051,10 +2024,6 @@
const Operand& operand,
FlagsUpdate S,
Instr op);
void LoadStorePairNonTemporal(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& addr,
LoadStorePairNonTemporalOp op);
void ConditionalSelect(const Register& rd,
const Register& rn,
const Register& rm,

442
deps/v8/src/arm64/builtins-arm64.cc

@@ -2,14 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/frames-arm64.h"
#include "src/codegen.h"
#include "src/debug.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
#include "src/full-codegen/full-codegen.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -302,36 +301,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
}
static void Generate_Runtime_NewObject(MacroAssembler* masm,
bool create_memento,
Register original_constructor,
Label* count_incremented,
Label* allocated) {
if (create_memento) {
// Get the cell or allocation site.
__ Peek(x4, 2 * kXRegSize);
__ Push(x4);
__ Push(x1); // Argument for Runtime_NewObject.
__ Push(original_constructor);
__ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
__ Mov(x4, x0);
// If we ended up using the runtime, and we want a memento, then the
// runtime call made it for us, and we shouldn't do create count
// increment.
__ jmp(count_incremented);
} else {
__ Push(x1); // Argument for Runtime_NewObject.
__ Push(original_constructor);
__ CallRuntime(Runtime::kNewObject, 2);
__ Mov(x4, x0);
__ jmp(allocated);
}
}
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
@@ -352,44 +323,35 @@
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
// Preserve the three incoming parameters on the stack.
if (create_memento) {
__ AssertUndefinedOrAllocationSite(x2, x10);
__ Push(x2);
}
// Preserve the four incoming parameters on the stack.
Register argc = x0;
Register constructor = x1;
Register allocation_site = x2;
Register original_constructor = x3;
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(allocation_site, x10);
__ SmiTag(argc);
if (use_new_target) {
__ Push(argc, constructor, original_constructor);
} else {
__ Push(argc, constructor);
}
// sp[0]: new.target (if used)
// sp[0/1]: Constructor function.
// sp[1/2]: number of arguments (smi-tagged)
Label rt_call, count_incremented, allocated, normal_new;
__ Cmp(constructor, original_constructor);
__ B(eq, &normal_new);
Generate_Runtime_NewObject(masm, create_memento, original_constructor,
&count_incremented, &allocated);
__ Bind(&normal_new);
__ Push(allocation_site, argc, constructor, original_constructor);
// sp[0]: new.target
// sp[1]: Constructor function.
// sp[2]: number of arguments (smi-tagged)
// sp[3]: allocation site
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
Label rt_call, allocated;
if (FLAG_inline_new) {
Label undo_allocation;
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(isolate);
__ Mov(x2, Operand(debug_step_in_fp));
__ Ldr(x2, MemOperand(x2));
__ Cbnz(x2, &rt_call);
// Fall back to runtime if the original constructor and function differ.
__ Cmp(constructor, original_constructor);
__ B(ne, &rt_call);
// Load the initial map and verify that it is in fact a map.
Register init_map = x2;
__ Ldr(init_map,
@@ -430,15 +392,18 @@
}
// Now allocate the JSObject on the heap.
Label rt_call_reload_new_target;
Register obj_size = x3;
Register new_obj = x4;
__ Ldrb(obj_size, FieldMemOperand(init_map, Map::kInstanceSizeOffset));
if (create_memento) {
__ Add(x7, obj_size,
Operand(AllocationMemento::kSize / kPointerSize));
__ Allocate(x7, new_obj, x10, x11, &rt_call, SIZE_IN_WORDS);
__ Allocate(x7, new_obj, x10, x11, &rt_call_reload_new_target,
SIZE_IN_WORDS);
} else {
__ Allocate(obj_size, new_obj, x10, x11, &rt_call, SIZE_IN_WORDS);
__ Allocate(obj_size, new_obj, x10, x11, &rt_call_reload_new_target,
SIZE_IN_WORDS);
}
// Allocated the JSObject, now initialize the fields. Map is set to
@@ -460,15 +425,21 @@
// Obtain number of pre-allocated property fields and in-object
// properties.
Register prealloc_fields = x10;
Register unused_props = x10;
Register inobject_props = x11;
Register inst_sizes = x11;
__ Ldr(inst_sizes, FieldMemOperand(init_map, Map::kInstanceSizesOffset));
__ Ubfx(prealloc_fields, inst_sizes,
Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
kBitsPerByte);
__ Ubfx(inobject_props, inst_sizes,
Map::kInObjectPropertiesByte * kBitsPerByte, kBitsPerByte);
Register inst_sizes_or_attrs = x11;
Register prealloc_fields = x10;
__ Ldr(inst_sizes_or_attrs,
FieldMemOperand(init_map, Map::kInstanceAttributesOffset));
__ Ubfx(unused_props, inst_sizes_or_attrs,
Map::kUnusedPropertyFieldsByte * kBitsPerByte, kBitsPerByte);
__ Ldr(inst_sizes_or_attrs,
FieldMemOperand(init_map, Map::kInstanceSizesOffset));
__ Ubfx(
inobject_props, inst_sizes_or_attrs,
Map::kInObjectPropertiesOrConstructorFunctionIndexByte * kBitsPerByte,
kBitsPerByte);
__ Sub(prealloc_fields, inobject_props, unused_props);
// Calculate number of property fields in the object.
Register prop_fields = x6;
@ -511,7 +482,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
__ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
// Load the AllocationSite
__ Peek(x14, 2 * kXRegSize);
__ Peek(x14, 3 * kXRegSize);
__ AssertUndefinedOrAllocationSite(x14, x10);
DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
__ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
first_prop = NoReg;
@ -523,72 +495,44 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
// failures need to undo the allocation, so that the heap is in a
// consistent state and verifiable.
// and jump into the continuation code at any time from now on.
__ Add(new_obj, new_obj, kHeapObjectTag);
// Check if a non-empty properties array is needed. Continue with
// allocated object if not; allocate and initialize a FixedArray if yes.
Register element_count = x3;
__ Ldrb(element_count,
FieldMemOperand(init_map, Map::kUnusedPropertyFieldsOffset));
// The 'instance sizes' field contains both pre-allocated property fields
// and in-object properties.
__ Add(element_count, element_count, prealloc_fields);
__ Subs(element_count, element_count, inobject_props);
// Done if no extra properties are to be allocated.
__ B(eq, &allocated);
__ Assert(pl, kPropertyAllocationCountFailed);
// Scale the number of elements by pointer size and add the header for
// FixedArrays to the start of the next object calculation from above.
Register new_array = x5;
Register array_size = x6;
__ Add(array_size, element_count, FixedArray::kHeaderSize / kPointerSize);
__ Allocate(array_size, new_array, x11, x12, &undo_allocation,
static_cast<AllocationFlags>(RESULT_CONTAINS_TOP |
SIZE_IN_WORDS));
Register array_map = x10;
__ LoadRoot(array_map, Heap::kFixedArrayMapRootIndex);
__ Str(array_map, MemOperand(new_array, FixedArray::kMapOffset));
__ SmiTag(x0, element_count);
__ Str(x0, MemOperand(new_array, FixedArray::kLengthOffset));
// Initialize the fields to undefined.
Register elements = x10;
__ Add(elements, new_array, FixedArray::kHeaderSize);
__ FillFields(elements, element_count, filler);
// Store the initialized FixedArray into the properties field of the
// JSObject.
__ Add(new_array, new_array, kHeapObjectTag);
__ Str(new_array, FieldMemOperand(new_obj, JSObject::kPropertiesOffset));
// Continue with JSObject being successfully allocated.
__ B(&allocated);
// Undo the setting of the new top so that the heap is verifiable. For
// example, the map's unused properties potentially do not match the
// allocated object's unused properties.
__ Bind(&undo_allocation);
__ UndoAllocationInNewSpace(new_obj, x14);
// Reload the original constructor and fall through.
__ Bind(&rt_call_reload_new_target);
__ Peek(x3, 0 * kXRegSize);
}
// Allocate the new receiver object using the runtime call.
// x1: constructor function
// x3: original constructor
__ Bind(&rt_call);
Generate_Runtime_NewObject(masm, create_memento, constructor,
&count_incremented, &allocated);
Label count_incremented;
if (create_memento) {
// Get the cell or allocation site.
__ Peek(x4, 3 * kXRegSize);
__ Push(x4, constructor, original_constructor); // arguments 1-3
__ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
__ Mov(x4, x0);
// If we ended up using the runtime, and we want a memento, then the
// runtime call made it for us, and we should skip the memento
// create-count increment.
__ B(&count_incremented);
} else {
__ Push(constructor, original_constructor); // arguments 1-2
__ CallRuntime(Runtime::kNewObject, 2);
__ Mov(x4, x0);
}
// Receiver for constructor call allocated.
// x4: JSObject
__ Bind(&allocated);
if (create_memento) {
int offset = (use_new_target ? 3 : 2) * kXRegSize;
__ Peek(x10, offset);
__ Peek(x10, 3 * kXRegSize);
__ JumpIfRoot(x10, Heap::kUndefinedValueRootIndex, &count_incremented);
// x10 is an AllocationSite. We are creating a memento from it, so we
// need to increment the memento create count.
@ -601,9 +545,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Restore the parameters.
if (use_new_target) {
__ Pop(original_constructor);
}
__ Pop(original_constructor);
__ Pop(constructor);
// Reload the number of arguments from the stack.
@ -612,11 +554,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Peek(argc, 0); // Load number of arguments.
__ SmiUntag(argc);
if (use_new_target) {
__ Push(original_constructor, x4, x4);
} else {
__ Push(x4, x4);
}
__ Push(original_constructor, x4, x4);
// Set up pointer to last argument.
__ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
@ -628,8 +566,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// x2: address of last argument (caller sp)
// jssp[0]: receiver
// jssp[1]: receiver
// jssp[2]: new.target (if used)
// jssp[2/3]: number of arguments (smi-tagged)
// jssp[2]: new.target
// jssp[3]: number of arguments (smi-tagged)
// Compute the start address of the copy in x3.
__ Add(x3, x2, Operand(argc, LSL, kPointerSizeLog2));
Label loop, entry, done_copying_arguments;
@ -660,17 +598,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
// TODO(arv): Remove the "!use_new_target" before supporting optimization
// of functions that reference new.target
if (!is_api_function && !use_new_target) {
if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore the context from the frame.
// x0: result
// jssp[0]: receiver
// jssp[1]: new.target (if used)
// jssp[1/2]: number of arguments (smi-tagged)
// jssp[1]: new.target
// jssp[2]: number of arguments (smi-tagged)
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid
@ -698,10 +634,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Bind(&exit);
// x0: result
// jssp[0]: receiver (newly allocated object)
// jssp[1]: new.target (if used)
// jssp[1/2]: number of arguments (smi-tagged)
int offset = (use_new_target ? 2 : 1) * kXRegSize;
__ Peek(x1, offset);
// jssp[1]: new.target (original constructor)
// jssp[2]: number of arguments (smi-tagged)
__ Peek(x1, 2 * kXRegSize);
// Leave construct frame.
}
@ -714,17 +649,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true, false, false);
}
void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
Generate_JSConstructStubHelper(masm, true, false);
}
@ -739,18 +669,18 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// -----------------------------------
ASM_LOCATION("Builtins::Generate_JSConstructStubForDerived");
// TODO(dslomov): support pretenuring
CHECK(!FLAG_pretenuring_call_new);
{
FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
__ AssertUndefinedOrAllocationSite(x2, x10);
__ Mov(x4, x0);
__ SmiTag(x4);
__ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
__ Push(x4, x3, x10);
// sp[0]: number of arguments
__ Push(x2, x4, x3, x10);
// sp[0]: receiver (the hole)
// sp[1]: new.target
// sp[2]: receiver (the hole)
// sp[2]: number of arguments
// sp[3]: allocation site
// Set up pointer to last argument.
__ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
@ -964,6 +894,144 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
// count expected by the function.
//
// The live registers are:
// - x1: the JS function object being called.
// - cp: our context.
// - fp: our caller's frame pointer.
// - jssp: stack pointer.
// - lr: return address.
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-arm64.h for its layout.
// TODO(rmcilroy): We will need to include the current bytecode pointer in the
// frame.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ Push(lr, fp, cp, x1);
__ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
// Get the bytecode array from the function object and load the pointer to
// the first entry into kInterpreterBytecodeArrayRegister.
__ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ AssertNotSmi(kInterpreterBytecodeArrayRegister,
kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, x0, x0,
BYTECODE_ARRAY_TYPE);
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Allocate the local and temporary register file on the stack.
{
// Load frame size from the BytecodeArray object.
__ Ldr(w11, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kFrameSizeOffset));
// Do a stack check to ensure we don't go over the limit.
Label ok;
DCHECK(jssp.Is(__ StackPointer()));
__ Sub(x10, jssp, Operand(x11));
__ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
__ B(hs, &ok);
__ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
__ Bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
// Note: there should always be at least one stack slot for the return
// register in the register file.
Label loop_header;
__ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
// TODO(rmcilroy): Ensure we always have an even number of registers to
// allow stack to be 16-byte aligned (and remove need for jssp).
__ Lsr(x11, x11, kPointerSizeLog2);
__ PushMultipleTimes(x10, x11);
__ Bind(&loop_header);
}
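// A minimal standalone C++ sketch (hypothetical names, not the V8 API) of
// what the block above computes: kFrameSizeOffset holds a byte count, the
// shift by kPointerSizeLog2 turns it into a slot count, and every slot in
// the register file starts out holding undefined.
#include <cstdint>
#include <vector>
constexpr uint32_t kSketchPointerSize = 8;  // LP64, as asserted for arm64
std::vector<uintptr_t> AllocateRegisterFile(uint32_t frame_size_bytes,
                                            uintptr_t undefined_sentinel) {
  uint32_t slots = frame_size_bytes / kSketchPointerSize;  // Lsr by 3
  return std::vector<uintptr_t>(slots, undefined_sentinel);
}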
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
// - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Allow simulator stop operations if FLAG_stop_at is set.
// - Deal with sloppy mode functions which need to replace the
// receiver with the global proxy when called as functions (without an
// explicit receiver object).
// - Code aging of the BytecodeArray object.
// - Supporting FLAG_trace.
//
// The following items are also not done here, and will probably be done using
// explicit bytecodes instead:
// - Allocating a new local context if applicable.
// - Setting up a local binding to the this function, which is used in
// derived constructors with super calls.
// - Setting new.target if required.
// - Dealing with REST parameters (only if
// https://codereview.chromium.org/1235153006 doesn't land by then).
// - Dealing with argument objects.
// Perform stack guard check.
{
Label ok;
__ CompareRoot(jssp, Heap::kStackLimitRootIndex);
__ B(hs, &ok);
__ CallRuntime(Runtime::kStackGuard, 0);
__ Bind(&ok);
}
// Load accumulator, register file, bytecode offset, dispatch table into
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
__ Sub(kInterpreterRegisterFileRegister, fp,
Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
__ Mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterDispatchTableRegister,
Heap::kInterpreterTableRootIndex);
__ Add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// Dispatch to the first bytecode handler for the function.
__ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
__ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
// TODO(rmcilroy): Make dispatch table point to code entries to avoid
// untagging and header removal.
__ Add(ip0, ip0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip0);
}
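// A minimal standalone C++ sketch (hypothetical types, not the V8 API) of
// the dispatch performed by the last five instructions above: read one
// bytecode, use it as a table index, fetch the handler, and call it. The
// real table holds tagged code objects, hence the Code::kHeaderSize
// adjustment in the generated code.
#include <cstddef>
#include <cstdint>
using BytecodeHandler = void (*)();
void DispatchFirstBytecode(const uint8_t* bytecode_array, size_t offset,
                           BytecodeHandler const* dispatch_table) {
  uint8_t bytecode = bytecode_array[offset];           // Ldrb
  BytecodeHandler handler = dispatch_table[bytecode];  // scaled Ldr
  handler();                                           // Call(ip0)
}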
void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's EmitReturnSequence.
// - Supporting FLAG_trace for Runtime::TraceExit.
// - Support profiler (specifically decrementing profiling_counter
// appropriately and calling out to HandleInterrupts if necessary).
// The return value is in accumulator, which is already in x0.
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
// Drop receiver + arguments.
// TODO(rmcilroy): Get number of arguments from BytecodeArray.
__ Drop(1, kXRegSize);
__ Ret();
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
@ -1291,8 +1359,10 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(argc);
__ Push(argc, receiver);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ Push(argc);
__ Mov(x0, receiver);
ToObjectStub stub(masm->isolate());
__ CallStub(&stub);
__ Mov(receiver, x0);
__ Pop(argc);
@ -1400,6 +1470,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
static void Generate_PushAppliedArguments(MacroAssembler* masm,
const int vectorOffset,
const int argumentsOffset,
const int indexOffset,
const int limitOffset) {
@ -1417,12 +1488,9 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ Ldr(receiver, MemOperand(fp, argumentsOffset));
// Use inline caching to speed up access to arguments.
FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
Handle<TypeFeedbackVector> feedback_vector =
masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
__ Mov(slot, Smi::FromInt(index));
__ Mov(vector, feedback_vector);
int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
__ Mov(slot, Operand(Smi::FromInt(slot_index)));
__ Ldr(vector, MemOperand(fp, vectorOffset));
Handle<Code> ic =
KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ Call(ic, RelocInfo::CODE_TARGET);
@ -1457,14 +1525,24 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
const int kReceiverOffset = kArgumentsOffset + kPointerSize;
const int kFunctionOffset = kReceiverOffset + kPointerSize;
const int kIndexOffset =
StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
const int kLimitOffset =
StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
const int kVectorOffset =
InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
Register args = x12;
Register receiver = x14;
Register function = x15;
Register apply_function = x1;
// Push the vector.
__ Ldr(
apply_function,
FieldMemOperand(apply_function, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(apply_function,
FieldMemOperand(apply_function,
SharedFunctionInfo::kFeedbackVectorOffset));
__ Push(apply_function);
// Get the length of the arguments via a builtin call.
__ Ldr(function, MemOperand(fp, kFunctionOffset));
@ -1518,8 +1596,9 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
// Call a builtin to convert the receiver to a regular object.
__ Bind(&convert_receiver_to_object);
__ Push(receiver);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ Mov(x0, receiver);
ToObjectStub stub(masm->isolate());
__ CallStub(&stub);
__ Mov(receiver, x0);
__ B(&push_receiver);
@ -1532,8 +1611,8 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ Push(receiver);
// Copy all arguments from the array to the stack.
Generate_PushAppliedArguments(
masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
kIndexOffset, kLimitOffset);
// At the end of the loop, the number of arguments is stored in 'current',
// represented as a smi.
@ -1576,16 +1655,25 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
const int kFunctionOffset = kArgumentsOffset + kPointerSize;
const int kIndexOffset =
StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
const int kLimitOffset =
StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
const int kVectorOffset =
InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
// Is x11 safe to use?
Register newTarget = x11;
Register args = x12;
Register function = x15;
Register construct_function = x1;
// Push the vector.
__ Ldr(construct_function,
FieldMemOperand(construct_function,
JSFunction::kSharedFunctionInfoOffset));
__ Ldr(construct_function,
FieldMemOperand(construct_function,
SharedFunctionInfo::kFeedbackVectorOffset));
__ Push(construct_function);
// If newTarget is not supplied, set it to constructor
Label validate_arguments;
@ -1606,24 +1694,24 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
Generate_CheckStackOverflow(masm, kFunctionOffset, argc, kArgcIsSmiTagged);
// Push current limit and index, constructor & newTarget
// Push current limit and index & constructor function as callee.
__ Mov(x1, 0); // Initial index.
__ Ldr(newTarget, MemOperand(fp, kNewTargetOffset));
__ Push(argc, x1, newTarget, function);
__ Push(argc, x1, function);
// Copy all arguments from the array to the stack.
Generate_PushAppliedArguments(
masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
kIndexOffset, kLimitOffset);
__ Ldr(x1, MemOperand(fp, kFunctionOffset));
// Use undefined feedback vector
__ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
__ Ldr(x1, MemOperand(fp, kFunctionOffset));
__ Ldr(x4, MemOperand(fp, kNewTargetOffset));
// Call the function.
CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
__ Drop(1);
// Leave internal frame.
}
__ Drop(kStackSize);
__ Ret();

287
deps/v8/src/arm64/code-stubs-arm64.cc

@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/frames-arm64.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
@ -13,8 +12,8 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
namespace v8 {
@ -36,7 +35,7 @@ static void InitializeArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
JS_FUNCTION_STUB_MODE);
}
}
@ -70,7 +69,7 @@ static void InitializeInternalArrayConstructorDescriptor(
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
JS_FUNCTION_STUB_MODE);
}
}
@ -227,6 +226,9 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
__ B(eq, slow);
// Call runtime on identical SIMD values since we must throw a TypeError.
__ Cmp(right_type, SIMD128_VALUE_TYPE);
__ B(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics, since
// we need to throw a TypeError. Smis have already been ruled out.
@ -246,6 +248,9 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
__ B(eq, slow);
// Call runtime on identical SIMD values since we must throw a TypeError.
__ Cmp(right_type, SIMD128_VALUE_TYPE);
__ B(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics,
// since we need to throw a TypeError. Smis and heap numbers have
@ -645,26 +650,30 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript native;
if (cond == eq) {
native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
if (cond == eq && strict()) {
__ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
} else {
native =
is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
int ncr; // NaN compare result
if ((cond == lt) || (cond == le)) {
ncr = GREATER;
Builtins::JavaScript native;
if (cond == eq) {
native = Builtins::EQUALS;
} else {
DCHECK((cond == gt) || (cond == ge)); // remaining cases
ncr = LESS;
native =
is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
int ncr; // NaN compare result
if ((cond == lt) || (cond == le)) {
ncr = GREATER;
} else {
DCHECK((cond == gt) || (cond == ge)); // remaining cases
ncr = LESS;
}
__ Mov(x10, Smi::FromInt(ncr));
__ Push(x10);
}
__ Mov(x10, Smi::FromInt(ncr));
__ Push(x10);
}
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(native, JUMP_FUNCTION);
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(native, JUMP_FUNCTION);
}
__ Bind(&miss);
GenerateMiss(masm);
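// A minimal standalone C++ sketch (plain C++, not V8 code) of why the NaN
// compare result (ncr) above is GREATER for lt/le and LESS for gt/ge: the
// generic compare returns -1/0/1, the caller then tests the sign, and any
// comparison involving NaN must come out false.
#include <cassert>
#include <limits>
enum { LESS = -1, EQUAL = 0, GREATER = 1 };
int GenericCompare(double lhs, double rhs, int ncr) {
  if (lhs != lhs || rhs != rhs) return ncr;  // NaN involved: use the fallback
  return lhs < rhs ? LESS : (lhs > rhs ? GREATER : EQUAL);
}
int main() {
  double nan = std::numeric_limits<double>::quiet_NaN();
  // "nan < 1" lowers to GenericCompare(..., GREATER) < 0, which is false.
  assert(!(GenericCompare(nan, 1.0, GREATER) < 0));
  // "nan > 1" lowers to GenericCompare(..., LESS) > 0, also false.
  assert(!(GenericCompare(nan, 1.0, LESS) > 0));
  return 0;
}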
@ -1731,7 +1740,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// the runtime system.
__ Bind(&slow);
__ Push(key);
__ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
__ TailCallRuntime(Runtime::kArguments, 1, 1);
}
@ -2050,10 +2059,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
// Everything is fine, call runtime.
__ Push(receiver, key);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
masm->isolate()),
2, 1);
__ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
__ Bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@ -2451,8 +2457,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Find the code object based on the assumptions above.
// kDataOneByteCodeOffset and kDataUC16CodeOffset are adjacent; add an offset
// of kPointerSize to reach the latter.
DCHECK_EQ(JSRegExp::kDataOneByteCodeOffset + kPointerSize,
JSRegExp::kDataUC16CodeOffset);
STATIC_ASSERT(JSRegExp::kDataOneByteCodeOffset + kPointerSize ==
JSRegExp::kDataUC16CodeOffset);
__ Mov(x10, kPointerSize);
// We will need the encoding later: Latin1 = 0x04
// UC16 = 0x00
@ -2742,18 +2748,26 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
Register argc, Register function,
Register feedback_vector,
Register index) {
Register feedback_vector, Register index,
Register orig_construct, bool is_super) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(argc);
__ Push(argc, function, feedback_vector, index);
if (is_super) {
__ Push(argc, function, feedback_vector, index, orig_construct);
} else {
__ Push(argc, function, feedback_vector, index);
}
DCHECK(feedback_vector.Is(x2) && index.Is(x3));
__ CallStub(stub);
__ Pop(index, feedback_vector, function, argc);
if (is_super) {
__ Pop(orig_construct, index, feedback_vector, function, argc);
} else {
__ Pop(index, feedback_vector, function, argc);
}
__ SmiUntag(argc);
}
@ -2761,17 +2775,19 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
Register function,
Register feedback_vector, Register index,
Register scratch1, Register scratch2,
Register scratch3) {
Register orig_construct, Register scratch1,
Register scratch2, Register scratch3,
bool is_super) {
ASM_LOCATION("GenerateRecordCallTarget");
DCHECK(!AreAliased(scratch1, scratch2, scratch3, argc, function,
feedback_vector, index));
feedback_vector, index, orig_construct));
// Cache the called function in a feedback vector slot. Cache states are
// uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
// argc : number of arguments to the construct function
// function : the function to call
// feedback_vector : the feedback vector
// index : slot in feedback vector (smi)
// orig_construct : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@ -2850,7 +2866,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub, argc, function,
feedback_vector, index);
feedback_vector, index, orig_construct,
is_super);
__ B(&done);
__ Bind(&not_array_function);
@ -2858,7 +2875,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
CreateWeakCellStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub, argc, function,
feedback_vector, index);
feedback_vector, index, orig_construct, is_super);
__ Bind(&done);
}
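// A minimal standalone C++ sketch (hypothetical types, not the V8 API) of
// the cache-state machine this function implements on a feedback vector
// slot: a slot starts uninitialized, records the first callee (monomorphic;
// the real code stores a WeakCell, or an AllocationSite for the Array
// function), and degrades to megamorphic on any mismatch, after which it
// never changes.
enum class SlotState { kUninitialized, kMonomorphic, kMegamorphic };
struct FeedbackSlot {
  SlotState state = SlotState::kUninitialized;
  const void* callee = nullptr;  // stand-in for the cached JSFunction
};
void RecordCallTargetSketch(FeedbackSlot& slot, const void* function) {
  switch (slot.state) {
    case SlotState::kUninitialized:
      slot.state = SlotState::kMonomorphic;  // CreateWeakCellStub path
      slot.callee = function;
      break;
    case SlotState::kMonomorphic:
      if (slot.callee != function) {
        slot.state = SlotState::kMegamorphic;  // the "miss" transition
        slot.callee = nullptr;
      }
      break;
    case SlotState::kMegamorphic:
      break;  // stays megamorphic
  }
}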
@ -2907,8 +2924,10 @@ static void EmitSlowCase(MacroAssembler* masm,
static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
// Wrap the receiver and patch it back onto the stack.
{ FrameScope frame_scope(masm, StackFrame::INTERNAL);
__ Push(x1, x3);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ Push(x1);
__ Mov(x0, x3);
ToObjectStub stub(masm->isolate());
__ CallStub(&stub);
__ Pop(x1);
}
__ Poke(x0, argc * kPointerSize);
@ -2985,7 +3004,8 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// x0 : number of arguments
// x1 : the function to call
// x2 : feedback vector
// x3 : slot in feedback vector (smi) (if r2 is not the megamorphic symbol)
// x3 : slot in feedback vector (Smi, for RecordCallTarget)
// x4 : original constructor (for IsSuperConstructorCall)
Register function = x1;
Label slow, non_function_call;
@ -2997,7 +3017,8 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
&slow);
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11);
GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11, x12,
IsSuperConstructorCall());
__ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
if (FLAG_pretenuring_call_new) {
@ -3020,9 +3041,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
}
if (IsSuperConstructorCall()) {
__ Mov(x4, Operand(1 * kPointerSize));
__ Add(x4, x4, Operand(x0, LSL, kPointerSizeLog2));
__ Peek(x3, x4);
__ Mov(x3, x4);
} else {
__ Mov(x3, function);
}
@ -3299,11 +3318,10 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(x1, x2, x3);
// Call the entry.
IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
: IC::kCallIC_Customization_Miss;
ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
__ CallExternalReference(miss, 3);
Runtime::FunctionId id = GetICState() == DEFAULT
? Runtime::kCallIC_Miss
: Runtime::kCallIC_Customization_Miss;
__ CallRuntime(id, 3);
// Move the result to x1 and exit the internal frame.
__ Mov(x1, x0);
@ -3672,7 +3690,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
__ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
__ Bind(&miss);
@ -3744,9 +3762,6 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
Register stub_entry = x11;
{
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
Register op = x10;
Register left = x1;
@ -3758,7 +3773,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(left, right, op);
// Call the miss handler. This also pops the arguments.
__ CallExternalReference(miss, 3);
__ CallRuntime(Runtime::kCompareIC_Miss, 3);
// Compute the entry point of the rewritten stub.
__ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
@ -4004,7 +4019,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Ret();
__ Bind(&runtime);
__ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
__ bind(&single_char);
// x1: result_length
@ -4212,7 +4227,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime.
// Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
__ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
@ -4655,7 +4670,7 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
false, receiver, name, feedback,
receiver, name, feedback,
receiver_map, scratch1, x7);
__ Bind(&miss);
@ -4930,7 +4945,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(
__ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
// Scale the index by multiplying by the element size.
DCHECK(NameDictionary::kEntrySize == 3);
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
// Check if the key is identical to the name.
@ -4999,7 +5014,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
// Scale the index by multiplying by the entry size.
DCHECK(NameDictionary::kEntrySize == 3);
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
Register entity_name = scratch0;
@ -5090,7 +5105,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ And(index, mask, Operand(index, LSR, Name::kHashShift));
// Scale the index by multiplying by the entry size.
DCHECK(NameDictionary::kEntrySize == 3);
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
__ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
@ -5484,6 +5499,156 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
Register context = cp;
Register result = x0;
Register slot = x2;
Label slow_case;
// Go up the context chain to the script context.
for (int i = 0; i < depth(); ++i) {
__ Ldr(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
context = result;
}
// Load the PropertyCell value at the specified slot.
__ Add(result, context, Operand(slot, LSL, kPointerSizeLog2));
__ Ldr(result, ContextMemOperand(result));
__ Ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
// If the result is not the_hole, return. Otherwise, handle in the runtime.
__ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &slow_case);
__ Ret();
// Fallback to runtime.
__ Bind(&slow_case);
__ SmiTag(slot);
__ Push(slot);
__ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
}
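// A minimal standalone C++ sketch (hypothetical field names, not the V8 API)
// of the fast path above: hop depth() previous-context links, index the
// script context by the slot, and read the PropertyCell's value, deferring
// to the runtime while the cell still holds the hole.
struct PropertyCell { void* value; };
struct Context {
  Context* previous;
  PropertyCell** slots;  // script-context locals
};
void* LoadGlobalViaContextSketch(Context* context, int slot, int depth,
                                 const void* the_hole,
                                 void* (*runtime_fallback)(int)) {
  for (int i = 0; i < depth; ++i) context = context->previous;  // walk chain
  void* result = context->slots[slot]->value;
  if (result == the_hole) return runtime_fallback(slot);  // slow_case
  return result;
}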
void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
Register context = cp;
Register value = x0;
Register slot = x2;
Register context_temp = x10;
Register cell = x10;
Register cell_details = x11;
Register cell_value = x12;
Register cell_value_map = x13;
Register value_map = x14;
Label fast_heapobject_case, fast_smi_case, slow_case;
if (FLAG_debug_code) {
__ CompareRoot(value, Heap::kTheHoleValueRootIndex);
__ Check(ne, kUnexpectedValue);
}
// Go up the context chain to the script context.
for (int i = 0; i < depth(); i++) {
__ Ldr(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
context = context_temp;
}
// Load the PropertyCell at the specified slot.
__ Add(cell, context, Operand(slot, LSL, kPointerSizeLog2));
__ Ldr(cell, ContextMemOperand(cell));
// Load PropertyDetails for the cell (actually only the cell_type and kind).
__ Ldr(cell_details,
UntagSmiFieldMemOperand(cell, PropertyCell::kDetailsOffset));
__ And(cell_details, cell_details,
PropertyDetails::PropertyCellTypeField::kMask |
PropertyDetails::KindField::kMask |
PropertyDetails::kAttributesReadOnlyMask);
// Check if PropertyCell holds mutable data.
Label not_mutable_data;
__ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
PropertyCellType::kMutable) |
PropertyDetails::KindField::encode(kData));
__ B(ne, &not_mutable_data);
__ JumpIfSmi(value, &fast_smi_case);
__ Bind(&fast_heapobject_case);
__ Str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
// RecordWriteField clobbers the value register, so we copy it before the
// call.
__ Mov(x11, value);
__ RecordWriteField(cell, PropertyCell::kValueOffset, x11, x12,
kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Ret();
__ Bind(&not_mutable_data);
// Check if PropertyCell value matches the new value (relevant for Constant,
// ConstantType and Undefined cells).
Label not_same_value;
__ Ldr(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
__ Cmp(cell_value, value);
__ B(ne, &not_same_value);
// Make sure the PropertyCell is not marked READ_ONLY.
__ Tst(cell_details, PropertyDetails::kAttributesReadOnlyMask);
__ B(ne, &slow_case);
if (FLAG_debug_code) {
Label done;
// This can only be true for Constant, ConstantType and Undefined cells,
// because we never store the_hole via this stub.
__ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
PropertyCellType::kConstant) |
PropertyDetails::KindField::encode(kData));
__ B(eq, &done);
__ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
PropertyCellType::kConstantType) |
PropertyDetails::KindField::encode(kData));
__ B(eq, &done);
__ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
PropertyCellType::kUndefined) |
PropertyDetails::KindField::encode(kData));
__ Check(eq, kUnexpectedValue);
__ Bind(&done);
}
__ Ret();
__ Bind(&not_same_value);
// Check if PropertyCell contains data with constant type (and is not
// READ_ONLY).
__ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
PropertyCellType::kConstantType) |
PropertyDetails::KindField::encode(kData));
__ B(ne, &slow_case);
// Now either both old and new values must be smis or both must be heap
// objects with the same map.
Label value_is_heap_object;
__ JumpIfNotSmi(value, &value_is_heap_object);
__ JumpIfNotSmi(cell_value, &slow_case);
// Old and new values are smis, no need for a write barrier here.
__ Bind(&fast_smi_case);
__ Str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
__ Ret();
__ Bind(&value_is_heap_object);
__ JumpIfSmi(cell_value, &slow_case);
__ Ldr(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
__ Ldr(value_map, FieldMemOperand(value, HeapObject::kMapOffset));
__ Cmp(cell_value_map, value_map);
__ B(eq, &fast_heapobject_case);
// Fall back to the runtime.
__ Bind(&slow_case);
__ SmiTag(slot);
__ Push(slot, value);
__ TailCallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
: Runtime::kStoreGlobalViaContext_Sloppy,
2, 1);
}
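// A minimal standalone C++ sketch (hypothetical enums, an approximation of
// the checks above rather than the V8 API) of the triage this stub performs
// on the PropertyCell before storing: mutable data cells take the fast
// store; a matching value in a non-read-only cell needs no store at all;
// constant-type cells stay fast only when old and new values are both smis
// or both heap objects with the same map; anything else defers to the
// runtime.
enum class CellType { kMutable, kConstant, kConstantType, kUndefined };
enum class StorePath { kFastStore, kNoStoreNeeded, kRuntime };
StorePath ClassifyGlobalStore(CellType type, bool read_only, bool same_value,
                              bool same_representation) {
  if (type == CellType::kMutable && !read_only) return StorePath::kFastStore;
  if (same_value && !read_only) return StorePath::kNoStoreNeeded;
  if (type == CellType::kConstantType && !read_only && same_representation)
    return StorePath::kFastStore;
  return StorePath::kRuntime;  // slow_case
}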
// The number of registers that CallApiFunctionAndReturn will need to save on
// the stack. The space for these registers needs to be allocated in the
// ExitFrame before calling CallApiFunctionAndReturn.

2
deps/v8/src/arm64/codegen-arm64.cc

@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/simulator-arm64.h"

16
deps/v8/src/arm64/constants-arm64.h

@ -5,6 +5,8 @@
#ifndef V8_ARM64_CONSTANTS_ARM64_H_
#define V8_ARM64_CONSTANTS_ARM64_H_
#include "src/base/macros.h"
#include "src/globals.h"
// Assert that this is an LP64 system.
STATIC_ASSERT(sizeof(int) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
@ -762,20 +764,6 @@ enum LoadStorePairOffsetOp {
#undef LOAD_STORE_PAIR_OFFSET
};
enum LoadStorePairNonTemporalOp {
LoadStorePairNonTemporalFixed = 0x28000000,
LoadStorePairNonTemporalFMask = 0x3B800000,
LoadStorePairNonTemporalMask = 0xFFC00000,
STNP_w = LoadStorePairNonTemporalFixed | STP_w,
LDNP_w = LoadStorePairNonTemporalFixed | LDP_w,
STNP_x = LoadStorePairNonTemporalFixed | STP_x,
LDNP_x = LoadStorePairNonTemporalFixed | LDP_x,
STNP_s = LoadStorePairNonTemporalFixed | STP_s,
LDNP_s = LoadStorePairNonTemporalFixed | LDP_s,
STNP_d = LoadStorePairNonTemporalFixed | STP_d,
LDNP_d = LoadStorePairNonTemporalFixed | LDP_d
};
// Load literal.
enum LoadLiteralOp {
LoadLiteralFixed = 0x18000000,

3
deps/v8/src/arm64/cpu-arm64.cc

@ -4,12 +4,11 @@
// CPU specific code for arm independent of OS goes here.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/utils-arm64.h"
#include "src/assembler.h"
#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
namespace v8 {
namespace internal {

305
deps/v8/src/arm64/debug-arm64.cc

@ -1,305 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#include "src/codegen.h"
#include "src/debug.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
void BreakLocation::SetDebugBreakAtReturn() {
// Patch the code emitted by FullCodeGenerator::EmitReturnSequence, changing
// the return from JS function sequence from
// mov sp, fp
// ldp fp, lr, [sp] #16
// ldr ip0, [pc, #(3 * kInstructionSize)]
// add sp, sp, ip0
// ret
// <number of parameters ...
// ... plus one (64 bits)>
// to a call to the debug break return code.
// ldr ip0, [pc, #(3 * kInstructionSize)]
// blr ip0
// hlt kHltBadCode @ code should not return, catch if it does.
// <debug break return code ...
// ... entry point address (64 bits)>
// The patching code must not overflow the space occupied by the return
// sequence.
STATIC_ASSERT(Assembler::kJSReturnSequenceInstructions >= 5);
PatchingAssembler patcher(reinterpret_cast<Instruction*>(pc()), 5);
byte* entry =
debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry();
// The first instruction of a patched return sequence must be a load literal
// loading the address of the debug break return code.
patcher.ldr_pcrel(ip0, (3 * kInstructionSize) >> kLoadLiteralScaleLog2);
// TODO(all): check the following is correct.
// The debug break return code will push a frame and call statically compiled
// code. By using blr, even though control will not return after the branch,
// this call site will be registered in the frame (lr being saved as the pc
// of the next instruction to execute for this frame). The debugger can now
// iterate over the frames to find the call to the debug break return code.
patcher.blr(ip0);
patcher.hlt(kHltBadCode);
patcher.dc64(reinterpret_cast<int64_t>(entry));
}
void BreakLocation::SetDebugBreakAtSlot() {
// Patch the code emitted by DebugCodegen::GenerateSlots, changing the debug
// break slot code from
// mov x0, x0 @ nop DEBUG_BREAK_NOP
// mov x0, x0 @ nop DEBUG_BREAK_NOP
// mov x0, x0 @ nop DEBUG_BREAK_NOP
// mov x0, x0 @ nop DEBUG_BREAK_NOP
// to a call to the debug slot code.
// ldr ip0, [pc, #(2 * kInstructionSize)]
// blr ip0
// <debug break slot code ...
// ... entry point address (64 bits)>
// TODO(all): consider adding a hlt instruction after the blr as we don't
// expect control to return here. This implies increasing
// kDebugBreakSlotInstructions to 5 instructions.
// The patching code must not overflow the space occupied by the debug
// break slot sequence.
STATIC_ASSERT(Assembler::kDebugBreakSlotInstructions >= 4);
PatchingAssembler patcher(reinterpret_cast<Instruction*>(pc()), 4);
byte* entry =
debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry();
// The first instruction of a patched debug break slot must be a load literal
// loading the address of the debug break slot code.
patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
// TODO(all): check the following is correct.
// The debug break slot code will push a frame and call statically compiled
// code. By using blr, even though control will not return after the branch,
// this call site will be registered in the frame (lr being saved as the pc
// of the next instruction to execute for this frame). The debugger can now
// iterate over the frames to find the call to the debug break slot code.
patcher.blr(ip0);
patcher.dc64(reinterpret_cast<int64_t>(entry));
}
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs,
Register scratch) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Load padding words on stack.
__ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingValue));
__ PushMultipleTimes(scratch, LiveEdit::kFramePaddingInitialSize);
__ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
__ Push(scratch);
// Any live values (object_regs and non_object_regs) in caller-saved
// registers (or lr) need to be stored on the stack so that their values are
// safely preserved for a call into C code.
//
// Also:
// * object_regs may be modified during the C code by the garbage
// collector. Every object register must be a valid tagged pointer or
// SMI.
//
// * non_object_regs will be converted to SMIs so that the garbage
// collector doesn't try to interpret them as pointers.
//
// TODO(jbramley): Why can't this handle callee-saved registers?
DCHECK((~kCallerSaved.list() & object_regs) == 0);
DCHECK((~kCallerSaved.list() & non_object_regs) == 0);
DCHECK((object_regs & non_object_regs) == 0);
DCHECK((scratch.Bit() & object_regs) == 0);
DCHECK((scratch.Bit() & non_object_regs) == 0);
DCHECK((masm->TmpList()->list() & (object_regs | non_object_regs)) == 0);
STATIC_ASSERT(kSmiValueSize == 32);
CPURegList non_object_list =
CPURegList(CPURegister::kRegister, kXRegSizeInBits, non_object_regs);
while (!non_object_list.IsEmpty()) {
// Store each non-object register as two SMIs.
Register reg = Register(non_object_list.PopLowestIndex());
__ Lsr(scratch, reg, 32);
__ SmiTagAndPush(scratch, reg);
// Stack:
// jssp[12]: reg[63:32]
// jssp[8]: 0x00000000 (SMI tag & padding)
// jssp[4]: reg[31:0]
// jssp[0]: 0x00000000 (SMI tag & padding)
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(static_cast<unsigned>(kSmiShift) == kWRegSizeInBits);
}
if (object_regs != 0) {
__ PushXRegList(object_regs);
}
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
__ Mov(x0, 0); // No arguments.
__ Mov(x1, ExternalReference::debug_break(masm->isolate()));
CEntryStub stub(masm->isolate(), 1);
__ CallStub(&stub);
// Restore the register values from the expression stack.
if (object_regs != 0) {
__ PopXRegList(object_regs);
}
non_object_list =
CPURegList(CPURegister::kRegister, kXRegSizeInBits, non_object_regs);
while (!non_object_list.IsEmpty()) {
// Load each non-object register from two SMIs.
// Stack:
// jssp[12]: reg[63:32]
// jssp[8]: 0x00000000 (SMI tag & padding)
// jssp[4]: reg[31:0]
// jssp[0]: 0x00000000 (SMI tag & padding)
Register reg = Register(non_object_list.PopHighestIndex());
__ Pop(scratch, reg);
__ Bfxil(reg, scratch, 32, 32);
}
// Don't bother removing padding bytes pushed on the stack
// as the frame is going to be restored right away.
// Leave the internal frame.
}
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
ExternalReference after_break_target =
ExternalReference::debug_after_break_target_address(masm->isolate());
__ Mov(scratch, after_break_target);
__ Ldr(scratch, MemOperand(scratch));
__ Br(scratch);
}
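// A minimal standalone C++ sketch of the split used above to keep raw 64-bit
// register values GC-safe: each register is pushed as two 32-bit smis (value
// in the upper word, as the STATIC_ASSERTs on kSmiShift require), then
// recombined on the way out, mirroring the Lsr/SmiTagAndPush and Pop/Bfxil
// pairs. Helper names are hypothetical.
#include <cassert>
#include <cstdint>
uint64_t SmiTag(uint32_t value) { return static_cast<uint64_t>(value) << 32; }
uint32_t SmiUntag(uint64_t smi) { return static_cast<uint32_t>(smi >> 32); }
int main() {
  uint64_t reg = 0x0123456789abcdefULL;
  uint64_t hi = SmiTag(static_cast<uint32_t>(reg >> 32));  // Lsr then SmiTag
  uint64_t lo = SmiTag(static_cast<uint32_t>(reg));        // SmiTagAndPush
  // Restore path: pop both halves, then Bfxil(reg, scratch, 32, 32).
  uint64_t restored =
      (static_cast<uint64_t>(SmiUntag(hi)) << 32) | SmiUntag(lo);
  assert(restored == reg);
  return 0;
}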
void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
// Register state for CallICStub
// ----------- S t a t e -------------
// -- x1 : function
// -- x3 : slot in feedback array
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x1.Bit() | x3.Bit(), 0, x10);
}
void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that x0 is TOS which
// is an object - this is not generally the case so this should be used with
// care.
Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
}
void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x1 : function
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x1.Bit(), 0, x10);
}
void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x0 : number of arguments (not smi)
// -- x1 : constructor function
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x1.Bit(), x0.Bit(), x10);
}
void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x0 : number of arguments (not smi)
// -- x1 : constructor function
// -- x2 : feedback array
// -- x3 : feedback slot (smi)
// -----------------------------------
Generate_DebugBreakCallHelper(
masm, x1.Bit() | x2.Bit() | x3.Bit(), x0.Bit(), x10);
}
void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
// Generate enough nops to make space for a call instruction. Avoid emitting
// the constant pool in the debug break slot code.
InstructionAccurateScope scope(masm, Assembler::kDebugBreakSlotInstructions);
__ RecordDebugBreakSlot();
for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
__ nop(Assembler::DEBUG_BREAK_NOP);
}
}
void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted no registers can contain
// object pointers.
Generate_DebugBreakCallHelper(masm, 0, 0, x10);
}
void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
__ Ret();
}
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
ExternalReference restarter_frame_function_slot =
ExternalReference::debug_restarter_frame_function_pointer_address(
masm->isolate());
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
__ Mov(scratch, restarter_frame_function_slot);
__ Str(xzr, MemOperand(scratch));
// We do not know our frame height, but set sp based on fp.
__ Sub(masm->StackPointer(), fp, kPointerSize);
__ AssertStackConsistency();
__ Pop(x1, fp, lr); // Function, Frame, Return address.
// Load context from the function.
__ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
// Get function code.
__ Ldr(scratch, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(scratch, FieldMemOperand(scratch, SharedFunctionInfo::kCodeOffset));
__ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
// Re-run JSFunction, x1 is function, cp is context.
__ Br(scratch);
}
const bool LiveEdit::kFrameDropperSupported = true;
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64

3
deps/v8/src/arm64/decoder-arm64-inl.h

@ -231,7 +231,8 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
if (instr->Mask(0xC4400000) == 0xC0400000) {
V::VisitUnallocated(instr);
} else {
V::VisitLoadStorePairNonTemporal(instr);
// Nontemporals are unimplemented.
V::VisitUnimplemented(instr);
}
} else {
V::VisitLoadStorePairPostIndex(instr);

2
deps/v8/src/arm64/decoder-arm64.cc

@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/decoder-arm64.h"

1
deps/v8/src/arm64/decoder-arm64.h

@ -33,7 +33,6 @@ namespace internal {
V(LoadStorePairPostIndex) \
V(LoadStorePairOffset) \
V(LoadStorePairPreIndex) \
V(LoadStorePairNonTemporal) \
V(LoadLiteral) \
V(LoadStoreUnscaledOffset) \
V(LoadStorePostIndex) \

2
deps/v8/src/arm64/delayed-masm-arm64.cc

@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/delayed-masm-arm64.h"

5
deps/v8/src/arm64/deoptimizer-arm64.cc

@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/arm64/frames-arm64.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
#include "src/full-codegen/full-codegen.h"
#include "src/safepoint-table.h"

21
deps/v8/src/arm64/disasm-arm64.cc

@ -7,8 +7,6 @@
#include <stdio.h>
#include <string.h>
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/decoder-arm64-inl.h"
@ -917,25 +915,6 @@ void Disassembler::VisitLoadStorePairOffset(Instruction* instr) {
}
void Disassembler::VisitLoadStorePairNonTemporal(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form;
switch (instr->Mask(LoadStorePairNonTemporalMask)) {
case STNP_w: mnemonic = "stnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
case LDNP_w: mnemonic = "ldnp"; form = "'Wt, 'Wt2, ['Xns'ILP4]"; break;
case STNP_x: mnemonic = "stnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
case LDNP_x: mnemonic = "ldnp"; form = "'Xt, 'Xt2, ['Xns'ILP8]"; break;
case STNP_s: mnemonic = "stnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
case LDNP_s: mnemonic = "ldnp"; form = "'St, 'St2, ['Xns'ILP4]"; break;
case STNP_d: mnemonic = "stnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
case LDNP_d: mnemonic = "ldnp"; form = "'Dt, 'Dt2, ['Xns'ILP8]"; break;
default: form = "(LoadStorePairNonTemporal)";
}
Format(instr, mnemonic, form);
}
void Disassembler::VisitFPCompare(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "'Fn, 'Fm";

2
deps/v8/src/arm64/disasm-arm64.h

@ -5,8 +5,6 @@
#ifndef V8_ARM64_DISASM_ARM64_H
#define V8_ARM64_DISASM_ARM64_H
#include "src/v8.h"
#include "src/arm64/decoder-arm64.h"
#include "src/arm64/instructions-arm64.h"
#include "src/globals.h"

2
deps/v8/src/arm64/frames-arm64.cc

@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/assembler-arm64-inl.h"

6
deps/v8/src/arm64/frames-arm64.h

@ -63,12 +63,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
inline Object* JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
return Memory::Object_at(fp() + offset);
}
} } // namespace v8::internal
#endif // V8_ARM64_FRAMES_ARM64_H_

2
deps/v8/src/arm64/instructions-arm64.cc

@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#define ARM64_DEFINE_FP_STATICS

6
deps/v8/src/arm64/instrument-arm64.cc

@ -364,12 +364,6 @@ void Instrument::VisitLoadStorePairPreIndex(Instruction* instr) {
}
void Instrument::VisitLoadStorePairNonTemporal(Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadLiteral(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Load Literal");

40
deps/v8/src/arm64/interface-descriptors-arm64.cc

@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#include "src/interface-descriptors.h"
@ -36,7 +34,11 @@ const Register VectorStoreICDescriptor::VectorRegister() { return x3; }
const Register StoreTransitionDescriptor::MapRegister() { return x3; }
const Register ElementTransitionAndStoreDescriptor::MapRegister() { return x3; }
const Register LoadGlobalViaContextDescriptor::SlotRegister() { return x2; }
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return x2; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return x0; }
const Register InstanceofDescriptor::left() {
@ -68,6 +70,14 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
void StoreTransitionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
MapRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: function info
@ -92,6 +102,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
}
// static
const Register ToObjectDescriptor::ReceiverRegister() { return x0; }
void NumberToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0: value
@ -181,10 +195,11 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// x0 : number of arguments
// x1 : the function to call
// x2 : feedback vector
// x3 : slot in feedback vector (smi) (if r2 is not the megamorphic symbol)
// x3 : slot in feedback vector (Smi, for RecordCallTarget)
// x4 : original constructor (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {x0, x1, x2};
Register registers[] = {x0, x1, x4, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@ -389,11 +404,22 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
void MathRoundVariantDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
x1, // math rounding function
x3, // vector slot id
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void MathRoundVariantCallFromOptimizedCodeDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
x1, // math rounding function
x3, // vector slot id
x4, // type vector
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}

54
deps/v8/src/arm64/lithium-arm64.cc

@ -4,8 +4,6 @@
#include <sstream>
#include "src/v8.h"
#include "src/arm64/lithium-codegen-arm64.h"
#include "src/hydrogen-osr.h"
#include "src/lithium-inl.h"
@ -296,6 +294,11 @@ void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
}
void LLoadGlobalViaContext::PrintDataTo(StringStream* stream) {
stream->Add("depth:%d slot:%d", depth(), slot_index());
}
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
std::ostringstream os;
@ -315,6 +318,12 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
void LStoreGlobalViaContext::PrintDataTo(StringStream* stream) {
stream->Add("depth:%d slot:%d <- ", depth(), slot_index());
value()->PrintTo(stream);
}
void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if string_compare(");
left()->PrintTo(stream);
@ -887,8 +896,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return result;
} else if (instr->representation().IsExternal()) {
DCHECK(instr->left()->representation().IsExternal());
DCHECK(instr->right()->representation().IsInteger32());
DCHECK(instr->IsConsistentExternalRepresentation());
DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
@ -1203,7 +1211,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else if (to.IsSmi()) {
LOperand* value = UseRegisterAtStart(val);
LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
if (val->CheckFlag(HInstruction::kUint32)) {
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
}
return result;
@ -1703,13 +1711,22 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
LInstruction* LChunkBuilder::DoLoadGlobalViaContext(
HLoadGlobalViaContext* instr) {
LOperand* context = UseFixed(instr->context(), cp);
DCHECK(instr->slot_index() > 0);
LLoadGlobalViaContext* result = new (zone()) LLoadGlobalViaContext(context);
return MarkAsCall(DefineFixed(result, x0), instr);
}
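The builder pins each operand of a call-like instruction to the register the stub's descriptor names, then flags the instruction with MarkAsCall so the allocator treats every register as clobbered. A condensed sketch of the pattern, with a hypothetical LFoo/HFoo pair mirroring the code above:

LInstruction* LChunkBuilder::DoFoo(HFoo* instr) {
  LOperand* context = UseFixed(instr->context(), cp);  // context always rides in cp
  LFoo* result = new (zone()) LFoo(context);
  // DefineFixed: the result is produced in x0; MarkAsCall: can GC, clobbers registers.
  return MarkAsCall(DefineFixed(result, x0), instr);
}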
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
DCHECK(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
LOperand* elements = UseRegister(instr->elements());
LOperand* key = UseRegisterOrConstant(instr->key());
if (!instr->is_typed_elements()) {
if (!instr->is_fixed_typed_array()) {
if (instr->representation().IsDouble()) {
LOperand* temp = (!instr->key()->IsConstant() ||
instr->RequiresHoleCheck())
@ -1743,8 +1760,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
LInstruction* result = DefineAsRegister(
new(zone()) LLoadKeyedExternal(elements, key, temp));
if ((elements_kind == EXTERNAL_UINT32_ELEMENTS ||
elements_kind == UINT32_ELEMENTS) &&
if (elements_kind == UINT32_ELEMENTS &&
!instr->CheckFlag(HInstruction::kUint32)) {
result = AssignEnvironment(result);
}
@ -2348,7 +2364,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LOperand* elements = NULL;
LOperand* val = NULL;
if (!instr->is_typed_elements() &&
if (!instr->is_fixed_typed_array() &&
instr->value()->representation().IsTagged() &&
instr->NeedsWriteBarrier()) {
// RecordWrite() will clobber all registers.
@ -2361,15 +2377,12 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
temp = instr->key()->IsConstant() ? NULL : TempRegister();
}
if (instr->is_typed_elements()) {
if (instr->is_fixed_typed_array()) {
DCHECK((instr->value()->representation().IsInteger32() &&
!IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->value()->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(instr->elements_kind())));
DCHECK((instr->is_fixed_typed_array() &&
instr->elements()->representation().IsTagged()) ||
(instr->is_external() &&
instr->elements()->representation().IsExternal()));
DCHECK(instr->elements()->representation().IsExternal());
return new(zone()) LStoreKeyedExternal(elements, key, val, temp);
} else if (instr->value()->representation().IsDouble()) {
@ -2457,6 +2470,19 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
}
LInstruction* LChunkBuilder::DoStoreGlobalViaContext(
HStoreGlobalViaContext* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* value = UseFixed(instr->value(),
StoreGlobalViaContextDescriptor::ValueRegister());
DCHECK(instr->slot_index() > 0);
LStoreGlobalViaContext* result =
new (zone()) LStoreGlobalViaContext(context, value);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* left = UseFixed(instr->left(), x1);

42
deps/v8/src/arm64/lithium-arm64.h

@ -104,6 +104,7 @@ class LCodeGen;
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalGeneric) \
V(LoadGlobalViaContext) \
V(LoadKeyedExternal) \
V(LoadKeyedFixed) \
V(LoadKeyedFixedDouble) \
@ -152,6 +153,7 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
V(StoreGlobalViaContext) \
V(StoreKeyedExternal) \
V(StoreKeyedFixed) \
V(StoreKeyedFixedDouble) \
@ -1673,6 +1675,22 @@ class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
};
class LLoadGlobalViaContext final : public LTemplateInstruction<1, 1, 1> {
public:
explicit LLoadGlobalViaContext(LOperand* context) { inputs_[0] = context; }
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalViaContext, "load-global-via-context")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalViaContext)
void PrintDataTo(StringStream* stream) override;
LOperand* context() { return inputs_[0]; }
int depth() const { return hydrogen()->depth(); }
int slot_index() const { return hydrogen()->slot_index(); }
};
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@ -1748,7 +1766,7 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
Handle<Object> name() const { return hydrogen()->name(); }
bool for_typeof() const { return hydrogen()->for_typeof(); }
TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
};
@ -2455,6 +2473,28 @@ class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
};
class LStoreGlobalViaContext final : public LTemplateInstruction<0, 2, 0> {
public:
LStoreGlobalViaContext(LOperand* context, LOperand* value) {
inputs_[0] = context;
inputs_[1] = value;
}
LOperand* context() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalViaContext,
"store-global-via-context")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalViaContext)
void PrintDataTo(StringStream* stream) override;
int depth() { return hydrogen()->depth(); }
int slot_index() { return hydrogen()->slot_index(); }
LanguageMode language_mode() { return hydrogen()->language_mode(); }
};
template<int T>
class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
public:

140
deps/v8/src/arm64/lithium-codegen-arm64.cc

@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/arm64/frames-arm64.h"
#include "src/arm64/lithium-codegen-arm64.h"
#include "src/arm64/lithium-gap-resolver-arm64.h"
#include "src/base/bits.h"
@ -276,15 +275,23 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
if (op->IsStackSlot()) {
int index = op->index();
if (index >= 0) {
index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
}
if (is_tagged) {
translation->StoreStackSlot(op->index());
translation->StoreStackSlot(index);
} else if (is_uint32) {
translation->StoreUint32StackSlot(op->index());
translation->StoreUint32StackSlot(index);
} else {
translation->StoreInt32StackSlot(op->index());
translation->StoreInt32StackSlot(index);
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
int index = op->index();
if (index >= 0) {
index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
}
translation->StoreDoubleStackSlot(index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
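The adjustment rebases non-negative (spill) slot indices past the fixed frame header before they are recorded in the deopt translation; negative indices address incoming parameters and are left alone. A worked example, assuming a fixed frame of 4 slots (e.g. caller pc, caller fp, context, function; the exact count comes from StandardFrameConstants):

int index = 3;  // third spill slot of the optimized frame
// kFixedFrameSize / kPointerSize == 4 under that assumption:
if (index >= 0) index += 4;  // recorded as translation slot 7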
@ -1476,9 +1483,14 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
void LCodeGen::DoAddE(LAddE* instr) {
Register result = ToRegister(instr->result());
Register left = ToRegister(instr->left());
Operand right = (instr->right()->IsConstantOperand())
? ToInteger32(LConstantOperand::cast(instr->right()))
: Operand(ToRegister32(instr->right()), SXTW);
Operand right = Operand(x0); // Dummy initialization.
if (instr->hydrogen()->external_add_type() == AddOfExternalAndTagged) {
right = Operand(ToRegister(instr->right()));
} else if (instr->right()->IsConstantOperand()) {
right = ToInteger32(LConstantOperand::cast(instr->right()));
} else {
right = Operand(ToRegister32(instr->right()), SXTW);
}
DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
__ Add(result, left, right);
@ -1926,6 +1938,12 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ B(eq, true_label);
}
if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
// SIMD value -> true.
__ CompareInstanceType(map, scratch, SIMD128_VALUE_TYPE);
__ B(eq, true_label);
}
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
Label not_heap_number;
__ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
@ -3362,13 +3380,31 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->result()).Is(x0));
__ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
PREMONOMORPHIC).code();
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
SLOPPY, PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->result()).is(x0));
int const slot = instr->slot_index();
int const depth = instr->depth();
if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
__ Mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
Handle<Code> stub =
CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
CallCode(stub, RelocInfo::CODE_TARGET, instr);
} else {
__ Push(Smi::FromInt(slot));
__ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
}
}
MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
Register key,
Register base,
@ -3426,42 +3462,33 @@ void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
elements_kind,
instr->base_offset());
if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
(elements_kind == FLOAT32_ELEMENTS)) {
if (elements_kind == FLOAT32_ELEMENTS) {
DoubleRegister result = ToDoubleRegister(instr->result());
__ Ldr(result.S(), mem_op);
__ Fcvt(result, result.S());
} else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
(elements_kind == FLOAT64_ELEMENTS)) {
} else if (elements_kind == FLOAT64_ELEMENTS) {
DoubleRegister result = ToDoubleRegister(instr->result());
__ Ldr(result, mem_op);
} else {
Register result = ToRegister(instr->result());
switch (elements_kind) {
case EXTERNAL_INT8_ELEMENTS:
case INT8_ELEMENTS:
__ Ldrsb(result, mem_op);
break;
case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
case EXTERNAL_UINT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
__ Ldrb(result, mem_op);
break;
case EXTERNAL_INT16_ELEMENTS:
case INT16_ELEMENTS:
__ Ldrsh(result, mem_op);
break;
case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
__ Ldrh(result, mem_op);
break;
case EXTERNAL_INT32_ELEMENTS:
case INT32_ELEMENTS:
__ Ldrsw(result, mem_op);
break;
case EXTERNAL_UINT32_ELEMENTS:
case UINT32_ELEMENTS:
__ Ldr(result.W(), mem_op);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
@ -3472,8 +3499,6 @@ void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
@ -3692,7 +3717,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(
isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@ -5017,8 +5042,8 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
// here.
__ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
__ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
__ Push(cp, scratch1, scratch2); // The context is the first argument.
CallRuntime(Runtime::kDeclareGlobals, 3, instr);
__ Push(scratch1, scratch2);
CallRuntime(Runtime::kDeclareGlobals, 2, instr);
}
@ -5148,44 +5173,33 @@ void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
elements_kind,
instr->base_offset());
if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
(elements_kind == FLOAT32_ELEMENTS)) {
if (elements_kind == FLOAT32_ELEMENTS) {
DoubleRegister value = ToDoubleRegister(instr->value());
DoubleRegister dbl_scratch = double_scratch();
__ Fcvt(dbl_scratch.S(), value);
__ Str(dbl_scratch.S(), dst);
} else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
(elements_kind == FLOAT64_ELEMENTS)) {
} else if (elements_kind == FLOAT64_ELEMENTS) {
DoubleRegister value = ToDoubleRegister(instr->value());
__ Str(value, dst);
} else {
Register value = ToRegister(instr->value());
switch (elements_kind) {
case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
case EXTERNAL_INT8_ELEMENTS:
case EXTERNAL_UINT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
case INT8_ELEMENTS:
__ Strb(value, dst);
break;
case EXTERNAL_INT16_ELEMENTS:
case EXTERNAL_UINT16_ELEMENTS:
case INT16_ELEMENTS:
case UINT16_ELEMENTS:
__ Strh(value, dst);
break;
case EXTERNAL_INT32_ELEMENTS:
case EXTERNAL_UINT32_ELEMENTS:
case INT32_ELEMENTS:
case UINT32_ELEMENTS:
__ Str(value.W(), dst);
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@ -5507,6 +5521,30 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->value())
.is(StoreGlobalViaContextDescriptor::ValueRegister()));
int const slot = instr->slot_index();
int const depth = instr->depth();
if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
__ Mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
isolate(), depth, instr->language_mode())
.code();
CallCode(stub, RelocInfo::CODE_TARGET, instr);
} else {
__ Push(Smi::FromInt(slot));
__ Push(StoreGlobalViaContextDescriptor::ValueRegister());
__ CallRuntime(is_strict(instr->language_mode())
? Runtime::kStoreGlobalViaContext_Strict
: Runtime::kStoreGlobalViaContext_Sloppy,
2);
}
}
void LCodeGen::DoStringAdd(LStringAdd* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->left()).Is(x1));
@ -5907,10 +5945,8 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register scratch = ToRegister(instr->temp2());
__ JumpIfSmi(value, false_label);
__ JumpIfObjectType(
value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
__ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
__ CompareObjectType(value, map, scratch, FIRST_NONSTRING_TYPE);
EmitBranch(instr, lt);
} else if (String::Equals(type_name, factory->symbol_string())) {
DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
@ -5962,6 +5998,20 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
__ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
} else if (String::Equals(type_name, factory->type##_string())) { \
DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL)); \
Register map = ToRegister(instr->temp1()); \
\
__ JumpIfSmi(value, false_label); \
__ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); \
__ CompareRoot(map, Heap::k##Type##MapRootIndex); \
EmitBranch(instr, eq);
SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
// clang-format on
} else {
__ B(false_label);
}
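The X-macro above stamps out one else-if arm per SIMD type; each expansion begins with the closing brace of the previous arm. An illustrative expansion for the float32x4 entry (assuming Float32x4 is in SIMD128_TYPES, as elsewhere in this release):

} else if (String::Equals(type_name, factory->float32x4_string())) {
  DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
  Register map = ToRegister(instr->temp1());

  __ JumpIfSmi(value, false_label);
  __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
  __ CompareRoot(map, Heap::kFloat32x4MapRootIndex);
  EmitBranch(instr, eq);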

2
deps/v8/src/arm64/lithium-gap-resolver-arm64.cc

@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/arm64/delayed-masm-arm64-inl.h"
#include "src/arm64/lithium-codegen-arm64.h"
#include "src/arm64/lithium-gap-resolver-arm64.h"

2
deps/v8/src/arm64/lithium-gap-resolver-arm64.h

@ -5,8 +5,6 @@
#ifndef V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
#define V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
#include "src/v8.h"
#include "src/arm64/delayed-masm-arm64.h"
#include "src/lithium.h"

17
deps/v8/src/arm64/macro-assembler-arm64-inl.h

@ -869,15 +869,6 @@ void MacroAssembler::Isb() {
}
void MacroAssembler::Ldnp(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& src) {
DCHECK(allow_macro_instructions_);
DCHECK(!AreAliased(rt, rt2));
ldnp(rt, rt2, src);
}
void MacroAssembler::Ldr(const CPURegister& rt, const Immediate& imm) {
DCHECK(allow_macro_instructions_);
ldr(rt, imm);
@ -1134,14 +1125,6 @@ void MacroAssembler::Umull(const Register& rd, const Register& rn,
}
void MacroAssembler::Stnp(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& dst) {
DCHECK(allow_macro_instructions_);
stnp(rt, rt2, dst);
}
void MacroAssembler::Sxtb(const Register& rd, const Register& rn) {
DCHECK(allow_macro_instructions_);
DCHECK(!rd.IsZero());

62
deps/v8/src/arm64/macro-assembler-arm64.cc

@ -2,16 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/frames-arm64.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/debug/debug.h"
#include "src/runtime/runtime.h"
namespace v8 {
@ -907,6 +906,25 @@ void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
}
void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
const CPURegister& dst2, const CPURegister& dst3,
const CPURegister& dst4, const CPURegister& dst5,
const CPURegister& dst6, const CPURegister& dst7) {
// It is not valid to pop into the same register more than once in one
// instruction, not even into the zero register.
DCHECK(!AreAliased(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
DCHECK(dst0.IsValid());
int count = 5 + dst5.IsValid() + dst6.IsValid() + dst7.IsValid();
int size = dst0.SizeInBytes();
PopHelper(4, size, dst0, dst1, dst2, dst3);
PopHelper(count - 4, size, dst4, dst5, dst6, dst7);
PopPostamble(count, size);
}
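A usage sketch for the new overload (registers chosen only for illustration): the four required destinations are popped first, then the optional tail, so the count arithmetic above yields 5 when the trailing slots default to NoReg:

__ Pop(x0, x1, x2, x3, x4);              // count = 5 + 0 + 0 + 0
__ Pop(x0, x1, x2, x3, x4, x5, x6, x7);  // count = 5 + 1 + 1 + 1 = 8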
void MacroAssembler::Push(const Register& src0, const FPRegister& src1) {
int size = src0.SizeInBytes() + src1.SizeInBytes();
@ -3030,10 +3048,10 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
void MacroAssembler::DebugBreak() {
Mov(x0, 0);
Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate()));
Mov(x1, ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
CEntryStub ces(isolate(), 1);
DCHECK(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
}
@ -3223,26 +3241,6 @@ void MacroAssembler::Allocate(Register object_size,
}
void MacroAssembler::UndoAllocationInNewSpace(Register object,
Register scratch) {
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
// Make sure the object has no tag before resetting top.
Bic(object, object, kHeapObjectTagMask);
#ifdef DEBUG
// Check that the object un-allocated is below the current top.
Mov(scratch, new_space_allocation_top);
Ldr(scratch, MemOperand(scratch));
Cmp(object, scratch);
Check(lt, kUndoAllocationOfNonAllocatedMemory);
#endif
// Write the address of the object to un-allocate as the current top.
Mov(scratch, new_space_allocation_top);
Str(object, MemOperand(scratch));
}
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
Register scratch1,
@ -4417,21 +4415,29 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
Register scratch1,
Label* found) {
DCHECK(!AreAliased(object, scratch0, scratch1));
Factory* factory = isolate()->factory();
Register current = scratch0;
Label loop_again;
Label loop_again, end;
// Scratch contains elements pointer.
Mov(current, object);
Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
CompareAndBranch(current, Heap::kNullValueRootIndex, eq, &end);
// Loop based on the map going up the prototype chain.
Bind(&loop_again);
Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
CompareInstanceType(current, scratch1, JS_OBJECT_TYPE);
B(lo, found);
Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
DecodeField<Map::ElementsKindBits>(scratch1);
CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again);
CompareAndBranch(current, Heap::kNullValueRootIndex, ne, &loop_again);
Bind(&end);
}
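The same walk, restated as plain C++ purely to mirror the control flow of the assembly (hypothetical accessor spellings; not code from this patch):

bool PrototypeChainHasDictionaryElements(JSObject* object) {
  Object* current = object->map()->prototype();
  if (current->IsNull()) return false;  // empty chain: nothing found
  for (;;) {
    Map* map = HeapObject::cast(current)->map();
    // Proxies and JSValues sort below JS_OBJECT_TYPE, so the B(lo, found)
    // above conservatively treats them as a hit.
    if (map->instance_type() < JS_OBJECT_TYPE) return true;
    if (map->elements_kind() == DICTIONARY_ELEMENTS) return true;
    current = map->prototype();
    if (current->IsNull()) return false;
  }
}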

33
deps/v8/src/arm64/macro-assembler-arm64.h

@ -7,11 +7,10 @@
#include <vector>
#include "src/arm64/assembler-arm64.h"
#include "src/bailout-reason.h"
#include "src/globals.h"
#include "src/arm64/assembler-arm64-inl.h"
#include "src/base/bits.h"
#include "src/globals.h"
// Simulator specific helpers.
#if USE_SIMULATOR
@ -34,6 +33,20 @@
namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
// TODO(titzer): arm64 is a pain for aliasing; get rid of these macros
#define kReturnRegister0 x0
#define kReturnRegister1 x1
#define kJSFunctionRegister x1
#define kContextRegister cp
#define kInterpreterAccumulatorRegister x0
#define kInterpreterRegisterFileRegister x18
#define kInterpreterBytecodeOffsetRegister x19
#define kInterpreterBytecodeArrayRegister x20
#define kInterpreterDispatchTableRegister x21
#define kRuntimeCallFunctionRegister x1
#define kRuntimeCallArgCountRegister x0
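These aliases let shared code sequences name the convention instead of the concrete register. An illustration (not a line from this patch); both are equivalent on arm64, but the first survives a change of convention:

__ Mov(kReturnRegister0, Smi::FromInt(0));
__ Mov(x0, Smi::FromInt(0));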
#define LS_MACRO_LIST(V) \
V(Ldrb, Register&, rt, LDRB_w) \
V(Strb, Register&, rt, STRB_w) \
@ -569,6 +582,10 @@ class MacroAssembler : public Assembler {
const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
void Pop(const CPURegister& dst0, const CPURegister& dst1,
const CPURegister& dst2, const CPURegister& dst3,
const CPURegister& dst4, const CPURegister& dst5 = NoReg,
const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg);
void Push(const Register& src0, const FPRegister& src1);
// Alternative forms of Push and Pop, taking a RegList or CPURegList that
@ -1305,12 +1322,6 @@ class MacroAssembler : public Assembler {
Label* gc_required,
AllocationFlags flags);
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. The caller must make sure that no pointers
// are left to the object(s) no longer allocated as they would be invalid when
// allocation is undone.
void UndoAllocationInNewSpace(Register object, Register scratch);
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
@ -1771,7 +1782,7 @@ class MacroAssembler : public Assembler {
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldOperand(reg, off).
// the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
void RecordWriteField(
Register object,
int offset,
@ -2235,7 +2246,7 @@ class UseScratchRegisterScope {
};
inline MemOperand ContextMemOperand(Register context, int index) {
inline MemOperand ContextMemOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}
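With the new default argument, slot 0 can be addressed without naming an index. A usage sketch (the named slot constant is only an example):

__ Ldr(x0, ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX));  // explicit slot
__ Ldr(x1, ContextMemOperand(cp));                                // slot 0 via the default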

21
deps/v8/src/arm64/simulator-arm64.cc

@ -5,7 +5,6 @@
#include <stdlib.h>
#include <cmath>
#include <cstdarg>
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
@ -223,6 +222,9 @@ int64_t Simulator::CallRegExp(byte* entry,
void Simulator::CheckPCSComplianceAndRun() {
// Adjust JS-based stack limit to C-based stack limit.
isolate_->stack_guard()->AdjustStackLimitForSimulator();
#ifdef DEBUG
CHECK_EQ(kNumberOfCalleeSavedRegisters, kCalleeSaved.Count());
CHECK_EQ(kNumberOfCalleeSavedFPRegisters, kCalleeSavedFP.Count());
@ -333,9 +335,15 @@ uintptr_t Simulator::PopAddress() {
// Returns the limit of the stack area to enable checking for stack overflows.
uintptr_t Simulator::StackLimit() const {
// Leave a safety margin of 1024 bytes to prevent overrunning the stack when
// pushing values.
uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
// The simulator uses a separate JS stack. If we have exhausted the C stack,
// we also drop down the JS limit to reflect the exhaustion on the JS stack.
if (GetCurrentStackPosition() < c_limit) {
return reinterpret_cast<uintptr_t>(get_sp());
}
// Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes
// to prevent overrunning the stack when pushing values.
return stack_limit_ + 1024;
}
@ -1676,11 +1684,6 @@ void Simulator::VisitLoadStorePairPostIndex(Instruction* instr) {
}
void Simulator::VisitLoadStorePairNonTemporal(Instruction* instr) {
LoadStorePairHelper(instr, Offset);
}
void Simulator::LoadStorePairHelper(Instruction* instr,
AddrMode addrmode) {
unsigned rt = instr->Rt();

13
deps/v8/src/arm64/simulator-arm64.h

@ -8,8 +8,6 @@
#include <stdarg.h>
#include <vector>
#include "src/v8.h"
#include "src/allocation.h"
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/decoder-arm64.h"
@ -268,7 +266,7 @@ class Simulator : public DecoderVisitor {
uintptr_t PopAddress();
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
uintptr_t StackLimit(uintptr_t c_limit) const;
void ResetState();
@ -403,7 +401,7 @@ class Simulator : public DecoderVisitor {
}
Instruction* lr() { return reg<Instruction*>(kLinkRegCode); }
Address get_sp() { return reg<Address>(31, Reg31IsStackPointer); }
Address get_sp() const { return reg<Address>(31, Reg31IsStackPointer); }
template<typename T>
T fpreg(unsigned code) const {
@ -884,13 +882,14 @@ class Simulator : public DecoderVisitor {
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code.
// See also 'class SimulatorStack' in arm/simulator-arm.h.
// the C-based native code. The JS-based limit normally points near the end of
// the simulator stack. When the C-based limit is exhausted we reflect that by
// lowering the JS-based limit as well, to make stack checks trigger.
class SimulatorStack : public v8::internal::AllStatic {
public:
static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
uintptr_t c_limit) {
return Simulator::current(isolate)->StackLimit();
return Simulator::current(isolate)->StackLimit(c_limit);
}
static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {

1
deps/v8/src/arm64/utils-arm64.h

@ -6,7 +6,6 @@
#define V8_ARM64_UTILS_ARM64_H_
#include <cmath>
#include "src/v8.h"
#include "src/arm64/constants-arm64.h"

12
deps/v8/src/array-iterator.js

@ -45,7 +45,7 @@ function ArrayIterator() {}
// 15.4.5.1 CreateArrayIterator Abstract Operation
function CreateArrayIterator(array, kind) {
var object = $toObject(array);
var object = TO_OBJECT(array);
var iterator = new ArrayIterator;
SET_PRIVATE(iterator, arrayIteratorObjectSymbol, object);
SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, 0);
@ -68,7 +68,7 @@ function ArrayIteratorIterator() {
// 15.4.5.2.2 ArrayIterator.prototype.next( )
function ArrayIteratorNext() {
var iterator = $toObject(this);
var iterator = TO_OBJECT(this);
if (!HAS_DEFINED_PRIVATE(iterator, arrayIteratorNextIndexSymbol)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
@ -138,6 +138,10 @@ utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
'keys', ArrayKeys
]);
// TODO(adam): Remove this call once 'values' is in the above
// InstallFunctions block, as it'll be redundant.
utils.SetFunctionName(ArrayValues, 'values');
%AddNamedProperty(GlobalArray.prototype, symbolIterator, ArrayValues,
DONT_ENUM);
@ -160,4 +164,8 @@ utils.Export(function(to) {
$arrayValues = ArrayValues;
utils.ExportToRuntime(function(to) {
to.ArrayValues = ArrayValues;
});
})

107
deps/v8/src/array.js

@ -19,16 +19,17 @@ var $arrayUnshift;
// -------------------------------------------------------------------
// Imports
var Delete;
var GlobalArray = global.Array;
var InternalArray = utils.InternalArray;
var InternalPackedArray = utils.InternalPackedArray;
var Delete;
var MathMin;
var ObjectHasOwnProperty;
var ObjectIsFrozen;
var ObjectIsSealed;
var ObjectToString;
var ToNumber;
var ToString;
utils.Import(function(from) {
Delete = from.Delete;
@ -37,6 +38,8 @@ utils.Import(function(from) {
ObjectIsFrozen = from.ObjectIsFrozen;
ObjectIsSealed = from.ObjectIsSealed;
ObjectToString = from.ObjectToString;
ToNumber = from.ToNumber;
ToString = from.ToString;
});
// -------------------------------------------------------------------
@ -216,7 +219,7 @@ function ConvertToString(x) {
// Assumes x is a non-string.
if (IS_NUMBER(x)) return %_NumberToString(x);
if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
return (IS_NULL_OR_UNDEFINED(x)) ? '' : $toString($defaultString(x));
return (IS_NULL_OR_UNDEFINED(x)) ? '' : ToString($defaultString(x));
}
@ -227,8 +230,8 @@ function ConvertToLocaleString(e) {
// According to ES5, section 15.4.4.3, the toLocaleString conversion
// must throw a TypeError if ToObject(e).toLocaleString isn't
// callable.
var e_obj = $toObject(e);
return $toString(e_obj.toLocaleString());
var e_obj = TO_OBJECT(e);
return ToString(e_obj.toLocaleString());
}
}
@ -388,7 +391,7 @@ function ArrayToString() {
}
array = this;
} else {
array = $toObject(this);
array = TO_OBJECT(this);
func = array.join;
}
if (!IS_SPEC_FUNCTION(func)) {
@ -406,7 +409,7 @@ function InnerArrayToLocaleString(array, length) {
function ArrayToLocaleString() {
var array = $toObject(this);
var array = TO_OBJECT(this);
var arrayLen = array.length;
return InnerArrayToLocaleString(array, arrayLen);
}
@ -437,7 +440,7 @@ function InnerArrayJoin(separator, array, length) {
function ArrayJoin(separator) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.join");
var array = TO_OBJECT_INLINE(this);
var array = TO_OBJECT(this);
var length = TO_UINT32(array.length);
return InnerArrayJoin(separator, array, length);
@ -466,7 +469,7 @@ function ObservedArrayPop(n) {
function ArrayPop() {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.pop");
var array = TO_OBJECT_INLINE(this);
var array = TO_OBJECT(this);
var n = TO_UINT32(array.length);
if (n == 0) {
array.length = n;
@ -512,7 +515,7 @@ function ArrayPush() {
if (%IsObserved(this))
return ObservedArrayPush.apply(this, arguments);
var array = TO_OBJECT_INLINE(this);
var array = TO_OBJECT(this);
var n = TO_UINT32(array.length);
var m = %_ArgumentsLength();
@ -532,7 +535,7 @@ function ArrayPush() {
function ArrayConcatJS(arg1) { // length == 1
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.concat");
var array = $toObject(this);
var array = TO_OBJECT(this);
var arg_count = %_ArgumentsLength();
var arrays = new InternalArray(1 + arg_count);
arrays[0] = array;
@ -587,14 +590,25 @@ function SparseReverse(array, len) {
}
}
function InnerArrayReverse(array, len) {
function PackedArrayReverse(array, len) {
var j = len - 1;
for (var i = 0; i < j; i++, j--) {
var current_i = array[i];
if (!IS_UNDEFINED(current_i) || i in array) {
var current_j = array[j];
if (!IS_UNDEFINED(current_j) || j in array) {
var current_j = array[j];
array[i] = current_j;
array[j] = current_i;
}
return array;
}
function GenericArrayReverse(array, len) {
var j = len - 1;
for (var i = 0; i < j; i++, j--) {
if (i in array) {
var current_i = array[i];
if (j in array) {
var current_j = array[j];
array[i] = current_j;
array[j] = current_i;
} else {
@ -602,8 +616,8 @@ function InnerArrayReverse(array, len) {
delete array[i];
}
} else {
var current_j = array[j];
if (!IS_UNDEFINED(current_j) || j in array) {
if (j in array) {
var current_j = array[j];
array[i] = current_j;
delete array[j];
}
@ -616,16 +630,19 @@ function InnerArrayReverse(array, len) {
function ArrayReverse() {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reverse");
var array = TO_OBJECT_INLINE(this);
var array = TO_OBJECT(this);
var len = TO_UINT32(array.length);
var isArray = IS_ARRAY(array);
if (UseSparseVariant(array, len, IS_ARRAY(array), len)) {
if (UseSparseVariant(array, len, isArray, len)) {
%NormalizeElements(array);
SparseReverse(array, len);
return array;
} else if (isArray && %_HasFastPackedElements(array)) {
return PackedArrayReverse(array, len);
} else {
return GenericArrayReverse(array, len);
}
return InnerArrayReverse(array, len);
}
@ -648,7 +665,7 @@ function ObservedArrayShift(len) {
function ArrayShift() {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.shift");
var array = TO_OBJECT_INLINE(this);
var array = TO_OBJECT(this);
var len = TO_UINT32(array.length);
if (len === 0) {
@ -702,7 +719,7 @@ function ArrayUnshift(arg1) { // length == 1
if (%IsObserved(this))
return ObservedArrayUnshift.apply(this, arguments);
var array = TO_OBJECT_INLINE(this);
var array = TO_OBJECT(this);
var len = TO_UINT32(array.length);
var num_arguments = %_ArgumentsLength();
@ -726,7 +743,7 @@ function ArrayUnshift(arg1) { // length == 1
function ArraySlice(start, end) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.slice");
var array = TO_OBJECT_INLINE(this);
var array = TO_OBJECT(this);
var len = TO_UINT32(array.length);
var start_i = TO_INTEGER(start);
var end_i = len;
@ -844,7 +861,7 @@ function ArraySplice(start, delete_count) {
return ObservedArraySplice.apply(this, arguments);
var num_arguments = %_ArgumentsLength();
var array = TO_OBJECT_INLINE(this);
var array = TO_OBJECT(this);
var len = TO_UINT32(array.length);
var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len);
var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len,
@ -900,8 +917,8 @@ function InnerArraySort(length, comparefn) {
if (%_IsSmi(x) && %_IsSmi(y)) {
return %SmiLexicographicCompare(x, y);
}
x = $toString(x);
y = $toString(y);
x = ToString(x);
y = ToString(y);
if (x == y) return 0;
else return x < y ? -1 : 1;
};
@ -1176,7 +1193,7 @@ function InnerArraySort(length, comparefn) {
function ArraySort(comparefn) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.sort");
var array = $toObject(this);
var array = TO_OBJECT(this);
var length = TO_UINT32(array.length);
return %_CallFunction(array, length, comparefn, InnerArraySort);
}
@ -1203,7 +1220,7 @@ function InnerArrayFilter(f, receiver, array, length) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
var new_receiver = needs_wrapper ? $toObject(receiver) : receiver;
var new_receiver = needs_wrapper ? TO_OBJECT(receiver) : receiver;
if (%_CallFunction(new_receiver, element, i, array, f)) {
accumulator[accumulator_length++] = element;
}
@ -1217,8 +1234,8 @@ function ArrayFilter(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = $toObject(this);
var length = $toUint32(array.length);
var array = TO_OBJECT(this);
var length = TO_UINT32(array.length);
var accumulator = InnerArrayFilter(f, receiver, array, length);
var result = new GlobalArray();
%MoveArrayContents(accumulator, result);
@ -1241,7 +1258,7 @@ function InnerArrayForEach(f, receiver, array, length) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
var new_receiver = needs_wrapper ? $toObject(receiver) : receiver;
var new_receiver = needs_wrapper ? TO_OBJECT(receiver) : receiver;
%_CallFunction(new_receiver, element, i, array, f);
}
}
@ -1252,7 +1269,7 @@ function ArrayForEach(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = $toObject(this);
var array = TO_OBJECT(this);
var length = TO_UINT32(array.length);
InnerArrayForEach(f, receiver, array, length);
}
@ -1274,7 +1291,7 @@ function InnerArraySome(f, receiver, array, length) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
var new_receiver = needs_wrapper ? $toObject(receiver) : receiver;
var new_receiver = needs_wrapper ? TO_OBJECT(receiver) : receiver;
if (%_CallFunction(new_receiver, element, i, array, f)) return true;
}
}
@ -1289,7 +1306,7 @@ function ArraySome(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = $toObject(this);
var array = TO_OBJECT(this);
var length = TO_UINT32(array.length);
return InnerArraySome(f, receiver, array, length);
}
@ -1311,7 +1328,7 @@ function InnerArrayEvery(f, receiver, array, length) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
var new_receiver = needs_wrapper ? $toObject(receiver) : receiver;
var new_receiver = needs_wrapper ? TO_OBJECT(receiver) : receiver;
if (!%_CallFunction(new_receiver, element, i, array, f)) return false;
}
}
@ -1323,7 +1340,7 @@ function ArrayEvery(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = $toObject(this);
var array = TO_OBJECT(this);
var length = TO_UINT32(array.length);
return InnerArrayEvery(f, receiver, array, length);
}
@ -1346,7 +1363,7 @@ function InnerArrayMap(f, receiver, array, length) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
var new_receiver = needs_wrapper ? $toObject(receiver) : receiver;
var new_receiver = needs_wrapper ? TO_OBJECT(receiver) : receiver;
accumulator[i] = %_CallFunction(new_receiver, element, i, array, f);
}
}
@ -1359,7 +1376,7 @@ function ArrayMap(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = $toObject(this);
var array = TO_OBJECT(this);
var length = TO_UINT32(array.length);
var accumulator = InnerArrayMap(f, receiver, array, length);
var result = new GlobalArray();
@ -1528,8 +1545,8 @@ function ArrayReduce(callback, current) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = $toObject(this);
var length = $toUint32(array.length);
var array = TO_OBJECT(this);
var length = TO_UINT32(array.length);
return InnerArrayReduce(callback, current, array, length,
%_ArgumentsLength());
}
@ -1571,8 +1588,8 @@ function ArrayReduceRight(callback, current) {
// Pull out the length so that side effects are visible before the
// callback function is checked.
var array = $toObject(this);
var length = $toUint32(array.length);
var array = TO_OBJECT(this);
var length = TO_UINT32(array.length);
return InnerArrayReduceRight(callback, current, array, length,
%_ArgumentsLength());
}
@ -1688,10 +1705,10 @@ utils.Export(function(to) {
to.InnerArrayMap = InnerArrayMap;
to.InnerArrayReduce = InnerArrayReduce;
to.InnerArrayReduceRight = InnerArrayReduceRight;
to.InnerArrayReverse = InnerArrayReverse;
to.InnerArraySome = InnerArraySome;
to.InnerArraySort = InnerArraySort;
to.InnerArrayToLocaleString = InnerArrayToLocaleString;
to.PackedArrayReverse = PackedArrayReverse;
});
$arrayConcat = ArrayConcatJS;

3
deps/v8/src/arraybuffer.js

@ -13,13 +13,14 @@
var GlobalArrayBuffer = global.ArrayBuffer;
var GlobalObject = global.Object;
var MathMax;
var MathMin;
var ToNumber;
utils.Import(function(from) {
MathMax = from.MathMax;
MathMin = from.MathMin;
ToNumber = from.ToNumber;
});
// -------------------------------------------------------------------

413
deps/v8/src/assembler.cc

@ -45,14 +45,14 @@
#include "src/codegen.h"
#include "src/counters.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
#include "src/regexp-stack.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/serialize.h"
#include "src/token.h"
@ -80,21 +80,21 @@
// Include native regexp-macro-assembler.
#ifndef V8_INTERPRETED_REGEXP
#if V8_TARGET_ARCH_IA32
#include "src/ia32/regexp-macro-assembler-ia32.h" // NOLINT
#include "src/regexp/ia32/regexp-macro-assembler-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
#include "src/x64/regexp-macro-assembler-x64.h" // NOLINT
#include "src/regexp/x64/regexp-macro-assembler-x64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
#include "src/arm64/regexp-macro-assembler-arm64.h" // NOLINT
#include "src/regexp/arm64/regexp-macro-assembler-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
#include "src/regexp/arm/regexp-macro-assembler-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
#include "src/ppc/regexp-macro-assembler-ppc.h" // NOLINT
#include "src/regexp/ppc/regexp-macro-assembler-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
#include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
#include "src/regexp/mips/regexp-macro-assembler-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/regexp-macro-assembler-mips64.h" // NOLINT
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/regexp-macro-assembler-x87.h" // NOLINT
#include "src/regexp/x87/regexp-macro-assembler-x87.h" // NOLINT
#else // Unknown architecture.
#error "Unknown architecture."
#endif // Target architecture.
@ -158,6 +158,10 @@ AssemblerBase::~AssemblerBase() {
// -----------------------------------------------------------------------------
// Implementation of PredictableCodeSizeScope
PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler)
: PredictableCodeSizeScope(assembler, -1) {}
PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
int expected_size)
: assembler_(assembler),
@ -248,44 +252,22 @@ int Label::pos() const {
// 10: short_data_record: [6-bit pc delta] 10 followed by
// [6-bit data delta] [2-bit data type tag]
//
// 11: long_record [2-bit high tag][4 bit middle_tag] 11
// followed by variable data depending on type.
// 11: long_record [6 bit reloc mode] 11
// followed by pc delta
// followed by optional data depending on type.
//
// 2-bit data type tags, used in short_data_record and data_jump long_record:
// code_target_with_id: 00
// position: 01
// statement_position: 10
// comment: 11 (not used in short_data_record)
// deopt_reason: 11 (not used in long_data_record)
//
// Long record format:
// 4-bit middle_tag:
// 0000 - 1100 : Short record for RelocInfo::Mode middle_tag + 2
// (The middle_tag encodes rmode - RelocInfo::LAST_COMPACT_ENUM,
// and is between 0000 and 1100)
// The format is:
// 00 [4 bit middle_tag] 11 followed by
// 00 [6 bit pc delta]
// deopt_reason: 11
//
// 1101: constant or veneer pool. Used only on ARM and ARM64 for now.
// The format is: [2-bit sub-type] 1101 11
// signed int (size of the pool).
// The 2-bit sub-types are:
// 00: constant pool
// 01: veneer pool
// 1110: long_data_record
// The format is: [2-bit data_type_tag] 1110 11
// signed intptr_t, lowest byte written first
// (except data_type code_target_with_id, which
// is followed by a signed int, not intptr_t.)
//
// 1111: long_pc_jump
// The format is:
// pc-jump: 00 1111 11,
// 00 [6 bits pc delta]
// or
// pc-jump (variable length):
// 01 1111 11,
// If a pc delta exceeds 6 bits, it is split into a remainder that fits into
// 6 bits and a part that does not. The latter is encoded as a long record
// with PC_JUMP as pseudo reloc info mode. The former is encoded as part of
// the following record in the usual way. The long pc jump record has variable
// length:
// pc-jump: [PC_JUMP] 11
// [7 bits data] 0
// ...
// [7 bits data] 1
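A worked example of that split (values chosen for illustration): a pc delta of 0x1234 has more than 6 significant bits, so the high part goes out as a PC_JUMP long record in 7-bit chunks. Here 0x48 fits in one chunk, whose marker bit 1 flags it as the last; the low 6 bits then ride with the record that follows, as usual:

uint32_t pc_delta = 0x1234;                        // too wide for 6 bits
uint32_t pc_jump  = pc_delta >> kSmallPCDeltaBits; // 0x48, sent in 7-bit chunks
uint32_t residue  = pc_delta & kSmallPCDeltaMask;  // 0x34, encoded with the next record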
@ -294,51 +276,37 @@ int Label::pos() const {
const int kTagBits = 2;
const int kTagMask = (1 << kTagBits) - 1;
const int kExtraTagBits = 4;
const int kLocatableTypeTagBits = 2;
const int kSmallDataBits = kBitsPerByte - kLocatableTypeTagBits;
const int kLongTagBits = 6;
const int kShortDataTypeTagBits = 2;
const int kShortDataBits = kBitsPerByte - kShortDataTypeTagBits;
const int kEmbeddedObjectTag = 0;
const int kCodeTargetTag = 1;
const int kLocatableTag = 2;
const int kDefaultTag = 3;
const int kPCJumpExtraTag = (1 << kExtraTagBits) - 1;
const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1;
const int RelocInfo::kMaxSmallPCDelta = kSmallPCDeltaMask;
const int kVariableLengthPCJumpTopTag = 1;
const int kChunkBits = 7;
const int kChunkMask = (1 << kChunkBits) - 1;
const int kLastChunkTagBits = 1;
const int kLastChunkTagMask = 1;
const int kLastChunkTag = 1;
const int kDataJumpExtraTag = kPCJumpExtraTag - 1;
const int kCodeWithIdTag = 0;
const int kNonstatementPositionTag = 1;
const int kStatementPositionTag = 2;
const int kCommentTag = 3;
// Reuse the same value for deopt reason tag in short record format.
// It is possible because we use kCommentTag only for the long record format.
const int kDeoptReasonTag = 3;
const int kPoolExtraTag = kPCJumpExtraTag - 2;
const int kConstPoolTag = 0;
const int kVeneerPoolTag = 1;
uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
uint32_t RelocInfoWriter::WriteLongPCJump(uint32_t pc_delta) {
// Return if the pc_delta can fit in kSmallPCDeltaBits bits.
// Otherwise write a variable length PC jump for the bits that do
// not fit in the kSmallPCDeltaBits bits.
if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
WriteExtraTag(kPCJumpExtraTag, kVariableLengthPCJumpTopTag);
WriteMode(RelocInfo::PC_JUMP);
uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
DCHECK(pc_jump > 0);
// Write kChunkBits size chunks of the pc_jump.
@ -353,55 +321,42 @@ uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
}
void RelocInfoWriter::WriteTaggedPC(uint32_t pc_delta, int tag) {
// Write a byte of tagged pc-delta, possibly preceded by var. length pc-jump.
pc_delta = WriteVariableLengthPCJump(pc_delta);
void RelocInfoWriter::WriteShortTaggedPC(uint32_t pc_delta, int tag) {
// Write a byte of tagged pc-delta, possibly preceded by an explicit pc-jump.
pc_delta = WriteLongPCJump(pc_delta);
*--pos_ = pc_delta << kTagBits | tag;
}
void RelocInfoWriter::WriteTaggedData(intptr_t data_delta, int tag) {
*--pos_ = static_cast<byte>(data_delta << kLocatableTypeTagBits | tag);
void RelocInfoWriter::WriteShortTaggedData(intptr_t data_delta, int tag) {
*--pos_ = static_cast<byte>(data_delta << kShortDataTypeTagBits | tag);
}
void RelocInfoWriter::WriteExtraTag(int extra_tag, int top_tag) {
*--pos_ = static_cast<int>(top_tag << (kTagBits + kExtraTagBits) |
extra_tag << kTagBits |
kDefaultTag);
void RelocInfoWriter::WriteMode(RelocInfo::Mode rmode) {
STATIC_ASSERT(RelocInfo::NUMBER_OF_MODES <= (1 << kLongTagBits));
*--pos_ = static_cast<int>((rmode << kTagBits) | kDefaultTag);
}
void RelocInfoWriter::WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag) {
void RelocInfoWriter::WriteModeAndPC(uint32_t pc_delta, RelocInfo::Mode rmode) {
// Write two-byte tagged pc-delta, possibly preceded by var. length pc-jump.
pc_delta = WriteVariableLengthPCJump(pc_delta);
WriteExtraTag(extra_tag, 0);
pc_delta = WriteLongPCJump(pc_delta);
WriteMode(rmode);
*--pos_ = pc_delta;
}
void RelocInfoWriter::WriteExtraTaggedIntData(int data_delta, int top_tag) {
WriteExtraTag(kDataJumpExtraTag, top_tag);
for (int i = 0; i < kIntSize; i++) {
*--pos_ = static_cast<byte>(data_delta);
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
data_delta = data_delta >> kBitsPerByte;
}
}
void RelocInfoWriter::WriteExtraTaggedPoolData(int data, int pool_type) {
WriteExtraTag(kPoolExtraTag, pool_type);
void RelocInfoWriter::WriteIntData(int number) {
for (int i = 0; i < kIntSize; i++) {
*--pos_ = static_cast<byte>(data);
*--pos_ = static_cast<byte>(number);
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
data = data >> kBitsPerByte;
number = number >> kBitsPerByte;
}
}
void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
WriteExtraTag(kDataJumpExtraTag, top_tag);
void RelocInfoWriter::WriteData(intptr_t data_delta) {
for (int i = 0; i < kIntptrSize; i++) {
*--pos_ = static_cast<byte>(data_delta);
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
@ -415,13 +370,13 @@ void RelocInfoWriter::WritePosition(int pc_delta, int pos_delta,
int pos_type_tag = (rmode == RelocInfo::POSITION) ? kNonstatementPositionTag
: kStatementPositionTag;
// Check if delta is small enough to fit in a tagged byte.
if (is_intn(pos_delta, kSmallDataBits)) {
WriteTaggedPC(pc_delta, kLocatableTag);
WriteTaggedData(pos_delta, pos_type_tag);
if (is_intn(pos_delta, kShortDataBits)) {
WriteShortTaggedPC(pc_delta, kLocatableTag);
WriteShortTaggedData(pos_delta, pos_type_tag);
} else {
// Otherwise, use costly encoding.
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
WriteExtraTaggedIntData(pos_delta, pos_type_tag);
WriteModeAndPC(pc_delta, rmode);
WriteIntData(pos_delta);
}
}
@ -452,28 +407,28 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
// The two most common modes are given small tags, and usually fit in a byte.
if (rmode == RelocInfo::EMBEDDED_OBJECT) {
WriteTaggedPC(pc_delta, kEmbeddedObjectTag);
WriteShortTaggedPC(pc_delta, kEmbeddedObjectTag);
} else if (rmode == RelocInfo::CODE_TARGET) {
WriteTaggedPC(pc_delta, kCodeTargetTag);
WriteShortTaggedPC(pc_delta, kCodeTargetTag);
DCHECK(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
} else if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
// Use signed delta-encoding for id.
DCHECK_EQ(static_cast<int>(rinfo->data()), rinfo->data());
int id_delta = static_cast<int>(rinfo->data()) - last_id_;
// Check if delta is small enough to fit in a tagged byte.
if (is_intn(id_delta, kSmallDataBits)) {
WriteTaggedPC(pc_delta, kLocatableTag);
WriteTaggedData(id_delta, kCodeWithIdTag);
if (is_intn(id_delta, kShortDataBits)) {
WriteShortTaggedPC(pc_delta, kLocatableTag);
WriteShortTaggedData(id_delta, kCodeWithIdTag);
} else {
// Otherwise, use costly encoding.
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
WriteExtraTaggedIntData(id_delta, kCodeWithIdTag);
WriteModeAndPC(pc_delta, rmode);
WriteIntData(id_delta);
}
last_id_ = static_cast<int>(rinfo->data());
} else if (rmode == RelocInfo::DEOPT_REASON) {
DCHECK(rinfo->data() < (1 << kSmallDataBits));
WriteTaggedPC(pc_delta, kLocatableTag);
WriteTaggedData(rinfo->data(), kDeoptReasonTag);
DCHECK(rinfo->data() < (1 << kShortDataBits));
WriteShortTaggedPC(pc_delta, kLocatableTag);
WriteShortTaggedData(rinfo->data(), kDeoptReasonTag);
} else if (RelocInfo::IsPosition(rmode)) {
// Use signed delta-encoding for position.
DCHECK_EQ(static_cast<int>(rinfo->data()), rinfo->data());
@ -492,27 +447,15 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
next_position_candidate_flushed_ = false;
}
last_position_ = static_cast<int>(rinfo->data());
} else if (RelocInfo::IsComment(rmode)) {
// Comments are normally not generated, so we use the costly encoding.
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
WriteExtraTaggedData(rinfo->data(), kCommentTag);
DCHECK(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
} else if (RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode)) {
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
WriteExtraTaggedPoolData(static_cast<int>(rinfo->data()),
RelocInfo::IsConstPool(rmode) ? kConstPoolTag
: kVeneerPoolTag);
} else {
DCHECK(rmode > RelocInfo::LAST_COMPACT_ENUM);
DCHECK(rmode <= RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM);
STATIC_ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM -
RelocInfo::LAST_COMPACT_ENUM <=
kPoolExtraTag);
int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM - 1;
// For all other modes we simply use the mode as the extra tag.
// None of these modes need a data component.
DCHECK(0 <= saved_mode && saved_mode < kPoolExtraTag);
WriteExtraTaggedPC(pc_delta, saved_mode);
WriteModeAndPC(pc_delta, rmode);
if (RelocInfo::IsComment(rmode)) {
WriteData(rinfo->data());
} else if (RelocInfo::IsConstPool(rmode) ||
RelocInfo::IsVeneerPool(rmode) ||
RelocInfo::IsDebugBreakSlotAtCall(rmode)) {
WriteIntData(static_cast<int>(rinfo->data()));
}
}
last_pc_ = rinfo->pc();
last_mode_ = rmode;
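Concretely, a long record under the new scheme is a mode byte, a pc-delta byte, then optional payload, matching the "[6 bit reloc mode] 11" layout in the comment block above. For a veneer pool at pc delta 3 with pool size 40 (illustrative values, small enough that no PC_JUMP record is needed):

byte mode_byte = (RelocInfo::VENEER_POOL << kTagBits) | kDefaultTag;  // [6-bit mode] 11
byte pc_byte   = 3;
// ...then WriteIntData(40) appends the four bytes of the pool size,
// lowest byte first, exactly as AdvanceReadInt later reassembles them.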
@ -527,17 +470,13 @@ inline int RelocIterator::AdvanceGetTag() {
}
inline int RelocIterator::GetExtraTag() {
return (*pos_ >> kTagBits) & ((1 << kExtraTagBits) - 1);
}
inline int RelocIterator::GetTopTag() {
return *pos_ >> (kTagBits + kExtraTagBits);
inline RelocInfo::Mode RelocIterator::GetMode() {
return static_cast<RelocInfo::Mode>((*pos_ >> kTagBits) &
((1 << kLongTagBits) - 1));
}
inline void RelocIterator::ReadTaggedPC() {
inline void RelocIterator::ReadShortTaggedPC() {
rinfo_.pc_ += *pos_ >> kTagBits;
}
@ -557,7 +496,7 @@ void RelocIterator::AdvanceReadId() {
}
void RelocIterator::AdvanceReadPoolData() {
void RelocIterator::AdvanceReadInt() {
int x = 0;
for (int i = 0; i < kIntSize; i++) {
x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
@ -585,7 +524,7 @@ void RelocIterator::AdvanceReadData() {
}
void RelocIterator::AdvanceReadVariableLengthPCJump() {
void RelocIterator::AdvanceReadLongPCJump() {
// Read the 32-kSmallPCDeltaBits most significant bits of the
// pc jump in kChunkBits bit chunks and shift them into place.
// Stop when the last chunk is encountered.
@ -601,28 +540,28 @@ void RelocIterator::AdvanceReadVariableLengthPCJump() {
}
inline int RelocIterator::GetLocatableTypeTag() {
return *pos_ & ((1 << kLocatableTypeTagBits) - 1);
inline int RelocIterator::GetShortDataTypeTag() {
return *pos_ & ((1 << kShortDataTypeTagBits) - 1);
}
inline void RelocIterator::ReadTaggedId() {
inline void RelocIterator::ReadShortTaggedId() {
int8_t signed_b = *pos_;
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
last_id_ += signed_b >> kLocatableTypeTagBits;
last_id_ += signed_b >> kShortDataTypeTagBits;
rinfo_.data_ = last_id_;
}
inline void RelocIterator::ReadTaggedPosition() {
inline void RelocIterator::ReadShortTaggedPosition() {
int8_t signed_b = *pos_;
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
last_position_ += signed_b >> kLocatableTypeTagBits;
last_position_ += signed_b >> kShortDataTypeTagBits;
rinfo_.data_ = last_position_;
}
inline void RelocIterator::ReadTaggedData() {
inline void RelocIterator::ReadShortTaggedData() {
uint8_t unsigned_b = *pos_;
rinfo_.data_ = unsigned_b >> kTagBits;
}
@ -647,79 +586,74 @@ void RelocIterator::next() {
while (pos_ > end_) {
int tag = AdvanceGetTag();
if (tag == kEmbeddedObjectTag) {
ReadTaggedPC();
ReadShortTaggedPC();
if (SetMode(RelocInfo::EMBEDDED_OBJECT)) return;
} else if (tag == kCodeTargetTag) {
ReadTaggedPC();
ReadShortTaggedPC();
if (SetMode(RelocInfo::CODE_TARGET)) return;
} else if (tag == kLocatableTag) {
ReadTaggedPC();
ReadShortTaggedPC();
Advance();
int locatable_tag = GetLocatableTypeTag();
if (locatable_tag == kCodeWithIdTag) {
int data_type_tag = GetShortDataTypeTag();
if (data_type_tag == kCodeWithIdTag) {
if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
ReadTaggedId();
ReadShortTaggedId();
return;
}
} else if (data_type_tag == kDeoptReasonTag) {
if (SetMode(RelocInfo::DEOPT_REASON)) {
ReadShortTaggedData();
return;
}
} else if (locatable_tag == kDeoptReasonTag) {
ReadTaggedData();
if (SetMode(RelocInfo::DEOPT_REASON)) return;
} else {
DCHECK(locatable_tag == kNonstatementPositionTag ||
locatable_tag == kStatementPositionTag);
DCHECK(data_type_tag == kNonstatementPositionTag ||
data_type_tag == kStatementPositionTag);
if (mode_mask_ & RelocInfo::kPositionMask) {
ReadTaggedPosition();
if (SetMode(GetPositionModeFromTag(locatable_tag))) return;
// Always update the position if we are interested in either
// statement positions or non-statement positions.
ReadShortTaggedPosition();
if (SetMode(GetPositionModeFromTag(data_type_tag))) return;
}
}
} else {
DCHECK(tag == kDefaultTag);
int extra_tag = GetExtraTag();
if (extra_tag == kPCJumpExtraTag) {
if (GetTopTag() == kVariableLengthPCJumpTopTag) {
AdvanceReadVariableLengthPCJump();
} else {
AdvanceReadPC();
}
} else if (extra_tag == kDataJumpExtraTag) {
int locatable_tag = GetTopTag();
if (locatable_tag == kCodeWithIdTag) {
if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
RelocInfo::Mode rmode = GetMode();
if (rmode == RelocInfo::PC_JUMP) {
AdvanceReadLongPCJump();
} else {
AdvanceReadPC();
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
if (SetMode(rmode)) {
AdvanceReadId();
return;
}
Advance(kIntSize);
} else if (locatable_tag != kCommentTag) {
DCHECK(locatable_tag == kNonstatementPositionTag ||
locatable_tag == kStatementPositionTag);
} else if (RelocInfo::IsComment(rmode)) {
if (SetMode(rmode)) {
AdvanceReadData();
return;
}
Advance(kIntptrSize);
} else if (RelocInfo::IsPosition(rmode)) {
if (mode_mask_ & RelocInfo::kPositionMask) {
// Always update the position if we are interested in either
// statement positions or non-statement positions.
AdvanceReadPosition();
if (SetMode(GetPositionModeFromTag(locatable_tag))) return;
if (SetMode(rmode)) return;
} else {
Advance(kIntSize);
}
} else {
DCHECK(locatable_tag == kCommentTag);
if (SetMode(RelocInfo::COMMENT)) {
AdvanceReadData();
} else if (RelocInfo::IsConstPool(rmode) ||
RelocInfo::IsVeneerPool(rmode) ||
RelocInfo::IsDebugBreakSlotAtCall(rmode)) {
if (SetMode(rmode)) {
AdvanceReadInt();
return;
}
Advance(kIntptrSize);
}
} else if (extra_tag == kPoolExtraTag) {
int pool_type = GetTopTag();
DCHECK(pool_type == kConstPoolTag || pool_type == kVeneerPoolTag);
RelocInfo::Mode rmode = (pool_type == kConstPoolTag) ?
RelocInfo::CONST_POOL : RelocInfo::VENEER_POOL;
if (SetMode(rmode)) {
AdvanceReadPoolData();
Advance(kIntSize);
} else if (SetMode(static_cast<RelocInfo::Mode>(rmode))) {
return;
}
Advance(kIntSize);
} else {
AdvanceReadPC();
int rmode = extra_tag + RelocInfo::LAST_COMPACT_ENUM + 1;
if (SetMode(static_cast<RelocInfo::Mode>(rmode))) return;
}
}
}
@@ -799,49 +733,56 @@ bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
#ifdef ENABLE_DISASSEMBLER
const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
switch (rmode) {
case RelocInfo::NONE32:
case NONE32:
return "no reloc 32";
case RelocInfo::NONE64:
case NONE64:
return "no reloc 64";
case RelocInfo::EMBEDDED_OBJECT:
case EMBEDDED_OBJECT:
return "embedded object";
case RelocInfo::CONSTRUCT_CALL:
case CONSTRUCT_CALL:
return "code target (js construct call)";
case RelocInfo::DEBUG_BREAK:
return "debug break";
case RelocInfo::CODE_TARGET:
case DEBUGGER_STATEMENT:
return "debugger statement";
case CODE_TARGET:
return "code target";
case RelocInfo::CODE_TARGET_WITH_ID:
case CODE_TARGET_WITH_ID:
return "code target with id";
case RelocInfo::CELL:
case CELL:
return "property cell";
case RelocInfo::RUNTIME_ENTRY:
case RUNTIME_ENTRY:
return "runtime entry";
case RelocInfo::JS_RETURN:
return "js return";
case RelocInfo::COMMENT:
case COMMENT:
return "comment";
case RelocInfo::POSITION:
case POSITION:
return "position";
case RelocInfo::STATEMENT_POSITION:
case STATEMENT_POSITION:
return "statement position";
case RelocInfo::EXTERNAL_REFERENCE:
case EXTERNAL_REFERENCE:
return "external reference";
case RelocInfo::INTERNAL_REFERENCE:
case INTERNAL_REFERENCE:
return "internal reference";
case RelocInfo::INTERNAL_REFERENCE_ENCODED:
case INTERNAL_REFERENCE_ENCODED:
return "encoded internal reference";
case RelocInfo::DEOPT_REASON:
case DEOPT_REASON:
return "deopt reason";
case RelocInfo::CONST_POOL:
case CONST_POOL:
return "constant pool";
case RelocInfo::VENEER_POOL:
case VENEER_POOL:
return "veneer pool";
case RelocInfo::DEBUG_BREAK_SLOT:
return "debug break slot";
case RelocInfo::CODE_AGE_SEQUENCE:
return "code_age_sequence";
case RelocInfo::NUMBER_OF_MODES:
case DEBUG_BREAK_SLOT_AT_POSITION:
return "debug break slot at position";
case DEBUG_BREAK_SLOT_AT_RETURN:
return "debug break slot at return";
case DEBUG_BREAK_SLOT_AT_CALL:
return "debug break slot at call";
case DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL:
return "debug break slot at construct call";
case CODE_AGE_SEQUENCE:
return "code age sequence";
case GENERATOR_CONTINUATION:
return "generator continuation";
case NUMBER_OF_MODES:
case PC_JUMP:
UNREACHABLE();
return "number_of_modes";
}
@@ -899,7 +840,7 @@ void RelocInfo::Verify(Isolate* isolate) {
case CELL:
Object::VerifyPointer(target_cell());
break;
case DEBUG_BREAK:
case DEBUGGER_STATEMENT:
case CONSTRUCT_CALL:
case CODE_TARGET_WITH_ID:
case CODE_TARGET: {
@@ -923,7 +864,6 @@ void RelocInfo::Verify(Isolate* isolate) {
break;
}
case RUNTIME_ENTRY:
case JS_RETURN:
case COMMENT:
case POSITION:
case STATEMENT_POSITION:
@@ -931,11 +871,16 @@ void RelocInfo::Verify(Isolate* isolate) {
case DEOPT_REASON:
case CONST_POOL:
case VENEER_POOL:
case DEBUG_BREAK_SLOT:
case DEBUG_BREAK_SLOT_AT_POSITION:
case DEBUG_BREAK_SLOT_AT_RETURN:
case DEBUG_BREAK_SLOT_AT_CALL:
case DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL:
case GENERATOR_CONTINUATION:
case NONE32:
case NONE64:
break;
case NUMBER_OF_MODES:
case PC_JUMP:
UNREACHABLE();
break;
case CODE_AGE_SEQUENCE:
@@ -946,6 +891,11 @@ void RelocInfo::Verify(Isolate* isolate) {
#endif // VERIFY_HEAP
int RelocInfo::DebugBreakCallArgumentsCount(intptr_t data) {
return static_cast<int>(data);
}
// -----------------------------------------------------------------------------
// Implementation of ExternalReference
@@ -1045,11 +995,6 @@ ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
}
ExternalReference::ExternalReference(const IC_Utility& ic_utility,
Isolate* isolate)
: address_(Redirect(isolate, ic_utility.address())) {}
ExternalReference::ExternalReference(StatsCounter* counter)
: address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
@@ -1551,17 +1496,18 @@ ExternalReference ExternalReference::mod_two_doubles_operation(
}
ExternalReference ExternalReference::debug_break(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(Debug_Break)));
}
ExternalReference ExternalReference::debug_step_in_fp_address(
Isolate* isolate) {
return ExternalReference(isolate->debug()->step_in_fp_addr());
}
ExternalReference ExternalReference::fixed_typed_array_base_data_offset() {
return ExternalReference(reinterpret_cast<void*>(
FixedTypedArrayBase::kDataOffset - kHeapObjectTag));
}
bool operator==(ExternalReference lhs, ExternalReference rhs) {
return lhs.address() == rhs.address();
}
@@ -1860,16 +1806,17 @@ void Assembler::RecordComment(const char* msg) {
}
void Assembler::RecordJSReturn() {
positions_recorder()->WriteRecordedPositions();
void Assembler::RecordGeneratorContinuation() {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::JS_RETURN);
RecordRelocInfo(RelocInfo::GENERATOR_CONTINUATION);
}
void Assembler::RecordDebugBreakSlot() {
void Assembler::RecordDebugBreakSlot(RelocInfo::Mode mode, int call_argc) {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
DCHECK(RelocInfo::IsDebugBreakSlot(mode));
intptr_t data = static_cast<intptr_t>(call_argc);
RecordRelocInfo(mode, data);
}
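
With the new signature, the call argument count rides along as the reloc data word and is recovered later by RelocInfo::DebugBreakCallArgumentsCount (added earlier in this diff). A self-contained model of that round trip, using the helper exactly as the diff defines it:

#include <cstdint>
#include <cstdio>

// Mirror of the helper added in this diff: the argument count travels
// through the reloc data word unchanged.
int DebugBreakCallArgumentsCount(intptr_t data) {
  return static_cast<int>(data);
}

int main() {
  int call_argc = 2;  // what RecordDebugBreakSlot is handed at a call site
  intptr_t data = static_cast<intptr_t>(call_argc);  // the packing it performs
  std::printf("argc=%d\n", DebugBreakCallArgumentsCount(data));  // argc=2
  return 0;
}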

150
deps/v8/src/assembler.h

@@ -35,22 +35,22 @@
#ifndef V8_ASSEMBLER_H_
#define V8_ASSEMBLER_H_
#include "src/v8.h"
#include "src/allocation.h"
#include "src/builtins.h"
#include "src/gdb-jit.h"
#include "src/isolate.h"
#include "src/runtime/runtime.h"
#include "src/token.h"
namespace v8 {
// Forward declarations.
class ApiFunction;
namespace internal {
// Forward declarations.
class StatsCounter;
// -----------------------------------------------------------------------------
// Platform independent assembler base class.
@@ -158,8 +158,10 @@ class DontEmitDebugCodeScope BASE_EMBEDDED {
// snapshot and the running VM.
class PredictableCodeSizeScope {
public:
explicit PredictableCodeSizeScope(AssemblerBase* assembler);
PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size);
~PredictableCodeSizeScope();
void ExpectSize(int expected_size) { expected_size_ = expected_size; }
private:
AssemblerBase* assembler_;
@@ -349,10 +351,9 @@ class RelocInfo {
// we do not normally record relocation info.
static const char* const kFillerCommentString;
// The minimum size of a comment is equal to three bytes for the extra tagged
// pc + the tag for the data, and kPointerSize for the actual pointer to the
// comment.
static const int kMinRelocCommentSize = 3 + kPointerSize;
// The minimum size of a comment is equal to two bytes for the extra tagged
// pc and kPointerSize for the actual pointer to the comment.
static const int kMinRelocCommentSize = 2 + kPointerSize;
// The maximum size for a call instruction including pc-jump.
static const int kMaxCallSize = 6;
@@ -365,23 +366,31 @@ class RelocInfo {
CODE_TARGET, // Code target which is not any of the above.
CODE_TARGET_WITH_ID,
CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
DEBUG_BREAK, // Code target for the debugger statement.
DEBUGGER_STATEMENT, // Code target for the debugger statement.
EMBEDDED_OBJECT,
CELL,
// Everything after runtime_entry (inclusive) is not GC'ed.
RUNTIME_ENTRY,
JS_RETURN, // Marks start of the ExitJSFrame code.
COMMENT,
POSITION, // See comment for kNoPosition above.
STATEMENT_POSITION, // See comment for kNoPosition above.
DEBUG_BREAK_SLOT, // Additional code inserted for debug break slot.
// Additional code inserted for debug break slot.
DEBUG_BREAK_SLOT_AT_POSITION,
DEBUG_BREAK_SLOT_AT_RETURN,
DEBUG_BREAK_SLOT_AT_CALL,
DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL,
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
// Encoded internal reference, used only on MIPS, MIPS64 and PPC.
INTERNAL_REFERENCE_ENCODED,
// Continuation points for a generator yield.
GENERATOR_CONTINUATION,
// Marks constant and veneer pools. Only used on ARM and ARM64.
// They use a custom noncompact encoding.
CONST_POOL,
@@ -389,9 +398,12 @@ class RelocInfo {
DEOPT_REASON, // Deoptimization reason index.
// add more as needed
// This is not an actual reloc mode, but used to encode a long pc jump that
// cannot be encoded as part of another record.
PC_JUMP,
// Pseudo-types
NUMBER_OF_MODES, // There are at most 15 modes with noncompact encoding.
NUMBER_OF_MODES,
NONE32, // never recorded 32-bit value
NONE64, // never recorded 64-bit value
CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explicitly by
@@ -399,15 +411,12 @@ class RelocInfo {
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
LAST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
LAST_CODE_ENUM = DEBUG_BREAK,
LAST_CODE_ENUM = DEBUGGER_STATEMENT,
LAST_GCED_ENUM = CELL,
// Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding.
LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID,
LAST_STANDARD_NONCOMPACT_ENUM = INTERNAL_REFERENCE_ENCODED
};
STATIC_ASSERT(NUMBER_OF_MODES <= kBitsPerInt);
RelocInfo() {}
RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
@@ -418,11 +427,6 @@ class RelocInfo {
return mode >= FIRST_REAL_RELOC_MODE &&
mode <= LAST_REAL_RELOC_MODE;
}
static inline bool IsPseudoRelocMode(Mode mode) {
DCHECK(!IsRealRelocMode(mode));
return mode >= FIRST_PSEUDO_RELOC_MODE &&
mode <= LAST_PSEUDO_RELOC_MODE;
}
static inline bool IsConstructCall(Mode mode) {
return mode == CONSTRUCT_CALL;
}
@@ -440,9 +444,6 @@ class RelocInfo {
static inline bool IsGCRelocMode(Mode mode) {
return mode <= LAST_GCED_ENUM;
}
static inline bool IsJSReturn(Mode mode) {
return mode == JS_RETURN;
}
static inline bool IsComment(Mode mode) {
return mode == COMMENT;
}
@@ -471,10 +472,24 @@ class RelocInfo {
return mode == INTERNAL_REFERENCE_ENCODED;
}
static inline bool IsDebugBreakSlot(Mode mode) {
return mode == DEBUG_BREAK_SLOT;
return IsDebugBreakSlotAtPosition(mode) || IsDebugBreakSlotAtReturn(mode) ||
IsDebugBreakSlotAtCall(mode) ||
IsDebugBreakSlotAtConstructCall(mode);
}
static inline bool IsDebugBreakSlotAtPosition(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_POSITION;
}
static inline bool IsDebugBreakSlotAtReturn(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_RETURN;
}
static inline bool IsDebugBreakSlotAtCall(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_CALL;
}
static inline bool IsDebugBreakSlotAtConstructCall(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL;
}
static inline bool IsDebuggerStatement(Mode mode) {
return mode == DEBUG_BREAK;
return mode == DEBUGGER_STATEMENT;
}
static inline bool IsNone(Mode mode) {
return mode == NONE32 || mode == NONE64;
@@ -482,6 +497,9 @@ class RelocInfo {
static inline bool IsCodeAgeSequence(Mode mode) {
return mode == CODE_AGE_SEQUENCE;
}
static inline bool IsGeneratorContinuation(Mode mode) {
return mode == GENERATOR_CONTINUATION;
}
static inline int ModeMask(Mode mode) { return 1 << mode; }
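ModeMask gives every mode its own bit, which is how the composite filters below (kPositionMask, kDebugBreakSlotMask) are built and how RelocIterator::next() tests its mode_mask_. A minimal sketch of the pattern; the enum values are illustrative only, since V8 assigns them by enum position:

#include <cstdio>

// Illustrative mode values only; V8 derives them from the Mode enum order.
enum Mode { CODE_TARGET = 1, EMBEDDED_OBJECT = 5, POSITION = 9 };

int ModeMask(Mode mode) { return 1 << mode; }

int main() {
  // Build a filter the way callers build RelocIterator mode masks.
  int mode_mask = ModeMask(CODE_TARGET) | ModeMask(EMBEDDED_OBJECT);
  Mode seen = EMBEDDED_OBJECT;
  if (mode_mask & ModeMask(seen)) {
    std::printf("mode %d is wanted\n", static_cast<int>(seen));
  }
  return 0;
}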
// Accessors
@@ -492,10 +510,11 @@ class RelocInfo {
Code* host() const { return host_; }
void set_host(Code* host) { host_ = host; }
// Apply a relocation by delta bytes
INLINE(void apply(intptr_t delta,
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED));
// Apply a relocation by delta bytes. When the code object is moved, PC
// relative addresses have to be updated as well as absolute addresses
// inside the code (internal references).
// Do not forget to flush the icache afterwards!
INLINE(void apply(intptr_t delta));
// Is the pointer this relocation info refers to coded like a plain pointer
// or is it strange in some way (e.g. relative or patched into a series of
@@ -506,6 +525,8 @@ class RelocInfo {
// constant pool, otherwise the pointer is embedded in the instruction stream.
bool IsInConstantPool();
static int DebugBreakCallArgumentsCount(intptr_t data);
// Read/modify the code target in the branch/call instruction
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@@ -579,11 +600,8 @@ class RelocInfo {
// Read/modify the address of a call instruction. This is used to relocate
// the break points where straight-line code is patched with a call
// instruction.
INLINE(Address call_address());
INLINE(void set_call_address(Address target));
INLINE(Object* call_object());
INLINE(void set_call_object(Object* target));
INLINE(Object** call_object_address());
INLINE(Address debug_call_address());
INLINE(void set_debug_call_address(Address target));
// Wipe out a relocation to a fixed value, used for making snapshots
// reproducible.
@@ -622,7 +640,10 @@ class RelocInfo {
static const int kPositionMask = 1 << POSITION | 1 << STATEMENT_POSITION;
static const int kDataMask =
(1 << CODE_TARGET_WITH_ID) | kPositionMask | (1 << COMMENT);
static const int kApplyMask; // Modes affected by apply. Depends on arch.
static const int kDebugBreakSlotMask =
1 << DEBUG_BREAK_SLOT_AT_POSITION | 1 << DEBUG_BREAK_SLOT_AT_RETURN |
1 << DEBUG_BREAK_SLOT_AT_CALL | 1 << DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL;
static const int kApplyMask; // Modes affected by apply. Depends on arch.
private:
// On ARM, note that pc_ is the address of the constant pool entry
@@ -680,21 +701,22 @@ class RelocInfoWriter BASE_EMBEDDED {
void Finish() { FlushPosition(); }
// Max size (bytes) of a written RelocInfo. Longest encoding is
// ExtraTag, VariableLengthPCJump, ExtraTag, pc_delta, ExtraTag, data_delta.
// On ia32 and arm this is 1 + 4 + 1 + 1 + 1 + 4 = 12.
// On x64 this is 1 + 4 + 1 + 1 + 1 + 8 == 16;
// ExtraTag, VariableLengthPCJump, ExtraTag, pc_delta, data_delta.
// On ia32 and arm this is 1 + 4 + 1 + 1 + 4 = 11.
// On x64 this is 1 + 4 + 1 + 1 + 8 == 15;
// Here we use the maximum of the two.
static const int kMaxSize = 16;
static const int kMaxSize = 15;
private:
inline uint32_t WriteVariableLengthPCJump(uint32_t pc_delta);
inline void WriteTaggedPC(uint32_t pc_delta, int tag);
inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
inline void WriteExtraTaggedIntData(int data_delta, int top_tag);
inline void WriteExtraTaggedPoolData(int data, int pool_type);
inline void WriteExtraTaggedData(intptr_t data_delta, int top_tag);
inline void WriteTaggedData(intptr_t data_delta, int tag);
inline void WriteExtraTag(int extra_tag, int top_tag);
inline uint32_t WriteLongPCJump(uint32_t pc_delta);
inline void WriteShortTaggedPC(uint32_t pc_delta, int tag);
inline void WriteShortTaggedData(intptr_t data_delta, int tag);
inline void WriteMode(RelocInfo::Mode rmode);
inline void WriteModeAndPC(uint32_t pc_delta, RelocInfo::Mode rmode);
inline void WriteIntData(int data_delta);
inline void WriteData(intptr_t data_delta);
inline void WritePosition(int pc_delta, int pos_delta, RelocInfo::Mode rmode);
void FlushPosition();
@@ -745,19 +767,21 @@ class RelocIterator: public Malloced {
// *Get* just reads and returns info on current byte.
void Advance(int bytes = 1) { pos_ -= bytes; }
int AdvanceGetTag();
int GetExtraTag();
int GetTopTag();
void ReadTaggedPC();
RelocInfo::Mode GetMode();
void AdvanceReadLongPCJump();
int GetShortDataTypeTag();
void ReadShortTaggedPC();
void ReadShortTaggedId();
void ReadShortTaggedPosition();
void ReadShortTaggedData();
void AdvanceReadPC();
void AdvanceReadId();
void AdvanceReadPoolData();
void AdvanceReadInt();
void AdvanceReadPosition();
void AdvanceReadData();
void AdvanceReadVariableLengthPCJump();
int GetLocatableTypeTag();
void ReadTaggedId();
void ReadTaggedPosition();
void ReadTaggedData();
// If the given mode is wanted, set it in rinfo_ and return true.
// Else return false. Used for efficiently skipping unwanted modes.
@@ -781,7 +805,6 @@ class RelocIterator: public Malloced {
// External function
//----------------------------------------------------------------------------
class IC_Utility;
class SCTableReference;
class Debug_Address;
@@ -851,8 +874,6 @@ class ExternalReference BASE_EMBEDDED {
ExternalReference(const Runtime::Function* f, Isolate* isolate);
ExternalReference(const IC_Utility& ic_utility, Isolate* isolate);
explicit ExternalReference(StatsCounter* counter);
ExternalReference(Isolate::AddressId id, Isolate* isolate);
@@ -967,9 +988,6 @@ class ExternalReference BASE_EMBEDDED {
Address address() const { return reinterpret_cast<Address>(address_); }
// Function Debug::Break()
static ExternalReference debug_break(Isolate* isolate);
// Used to check if single stepping is enabled in generated code.
static ExternalReference debug_step_in_fp_address(Isolate* isolate);
@@ -1002,6 +1020,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference stress_deopt_count(Isolate* isolate);
static ExternalReference fixed_typed_array_base_data_offset();
private:
explicit ExternalReference(void* address)
: address_(address) {}

2
deps/v8/src/assert-scope.cc

@@ -6,7 +6,7 @@
#include "src/base/lazy-instance.h"
#include "src/base/platform/platform.h"
#include "src/debug.h"
#include "src/debug/debug.h"
#include "src/isolate.h"
#include "src/utils.h"

4
deps/v8/src/ast-literal-reindexer.cc

@@ -1,10 +1,10 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/ast.h"
#include "src/ast-literal-reindexer.h"
#include "src/ast.h"
#include "src/scopes.h"
namespace v8 {

2
deps/v8/src/ast-literal-reindexer.h

@@ -5,8 +5,6 @@
#ifndef V8_AST_LITERAL_REINDEXER
#define V8_AST_LITERAL_REINDEXER
#include "src/v8.h"
#include "src/ast.h"
#include "src/scopes.h"

53
deps/v8/src/ast-numbering.cc

@@ -2,19 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/ast-numbering.h"
#include "src/ast.h"
#include "src/ast-numbering.h"
#include "src/scopes.h"
namespace v8 {
namespace internal {
class AstNumberingVisitor final : public AstVisitor {
public:
explicit AstNumberingVisitor(Isolate* isolate, Zone* zone)
AstNumberingVisitor(Isolate* isolate, Zone* zone)
: AstVisitor(),
next_id_(BailoutId::FirstUsable().ToInt()),
properties_(zone),
@@ -33,6 +31,10 @@ class AstNumberingVisitor final : public AstVisitor {
bool Finish(FunctionLiteral* node);
void VisitVariableProxyReference(VariableProxy* node);
void VisitPropertyReference(Property* node);
void VisitReference(Expression* expr);
void VisitStatements(ZoneList<Statement*>* statements) override;
void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
void VisitArguments(ZoneList<Expression*>* arguments);
@@ -46,7 +48,7 @@ class AstNumberingVisitor final : public AstVisitor {
void IncrementNodeCount() { properties_.add_node_count(1); }
void DisableSelfOptimization() {
properties_.flags()->Add(kDontSelfOptimize);
properties_.flags() |= AstProperties::kDontSelfOptimize;
}
void DisableOptimization(BailoutReason reason) {
dont_optimize_reason_ = reason;
@@ -54,10 +56,11 @@ class AstNumberingVisitor final : public AstVisitor {
}
void DisableCrankshaft(BailoutReason reason) {
if (FLAG_turbo_shipping) {
return properties_.flags()->Add(kDontCrankshaft);
properties_.flags() |= AstProperties::kDontCrankshaft;
} else {
dont_optimize_reason_ = reason;
DisableSelfOptimization();
}
dont_optimize_reason_ = reason;
DisableSelfOptimization();
}
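
This hunk is part of AstProperties::flags() changing from an EnumSet with Add() to a plain integer bitfield updated with |=. A small self-contained sketch of the new pattern; the flag values here are illustrative assumptions, not V8's actual ones:

#include <cstdio>

// Illustrative flag values; the real constants live in AstProperties.
struct AstProperties {
  enum Flags { kDontSelfOptimize = 1 << 0, kDontCrankshaft = 1 << 1 };
};

int main() {
  int flags = 0;                            // was an EnumSet with Add()
  flags |= AstProperties::kDontCrankshaft;  // the new |= bitfield update
  bool disabled = (flags & AstProperties::kDontCrankshaft) != 0;
  std::printf("dont_crankshaft=%d\n", disabled ? 1 : 0);
  return 0;
}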
template <typename Node>
@@ -147,16 +150,21 @@ void AstNumberingVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
}
void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node) {
void AstNumberingVisitor::VisitVariableProxyReference(VariableProxy* node) {
IncrementNodeCount();
if (node->var()->IsLookupSlot()) {
DisableCrankshaft(kReferenceToAVariableWhichRequiresDynamicLookup);
}
ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(VariableProxy::num_ids()));
}
void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node) {
VisitVariableProxyReference(node);
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(ThisFunction::num_ids()));
@@ -306,20 +314,35 @@ void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
}
void AstNumberingVisitor::VisitProperty(Property* node) {
void AstNumberingVisitor::VisitPropertyReference(Property* node) {
IncrementNodeCount();
ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(Property::num_ids()));
Visit(node->key());
Visit(node->obj());
}
void AstNumberingVisitor::VisitReference(Expression* expr) {
DCHECK(expr->IsProperty() || expr->IsVariableProxy());
if (expr->IsProperty()) {
VisitPropertyReference(expr->AsProperty());
} else {
VisitVariableProxyReference(expr->AsVariableProxy());
}
}
void AstNumberingVisitor::VisitProperty(Property* node) {
VisitPropertyReference(node);
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitAssignment(Assignment* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(Assignment::num_ids()));
if (node->is_compound()) VisitBinaryOperation(node->binary_operation());
Visit(node->target());
VisitReference(node->target());
Visit(node->value());
ReserveFeedbackSlots(node);
}
@@ -540,10 +563,6 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
}
VisitDeclarations(scope->declarations());
if (scope->is_function_scope() && scope->function() != NULL) {
// Visit the name of the named function expression.
Visit(scope->function());
}
VisitStatements(node->body());
return Finish(node);

10
deps/v8/src/ast-numbering.h

@@ -8,12 +8,18 @@
namespace v8 {
namespace internal {
// Forward declarations.
class FunctionLiteral;
class Isolate;
class Zone;
namespace AstNumbering {
// Assign type feedback IDs and bailout IDs to an AST node tree.
//
bool Renumber(Isolate* isolate, Zone* zone, FunctionLiteral* function);
}
}
} // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_AST_NUMBERING_H_

Some files were not shown because too many files changed in this diff
