deps: upgrade to V8 4.9.385.18

Pick up the current branch head for V8 4.9
https://github.com/v8/v8/commit/1ecba0f

PR-URL: https://github.com/nodejs/node/pull/4722
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Michaël Zasso <mic.besace@gmail.com>
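
For reference, one way to confirm a vendored V8 version after a bump like this is to inspect the version macros in the updated header (a hedged sketch; assumes a standard Node.js checkout layout):

```
# Sketch: print the version #defines from the vendored V8 copy
grep "#define V8_" deps/v8/include/v8-version.h
```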

Author: Ali Ijaz Sheikh (committed by Ali Sheikh)
Commit: 069e02ab47

100 changed files (lines changed per file):

1. deps/v8/.gitignore (2)
2. deps/v8/AUTHORS (5)
3. deps/v8/BUILD.gn (214)
4. deps/v8/ChangeLog (2238)
5. deps/v8/DEPS (16)
6. deps/v8/Makefile (9)
7. deps/v8/PRESUBMIT.py (28)
8. deps/v8/README.md (4)
9. deps/v8/WATCHLISTS (14)
10. deps/v8/build/all.gyp (2)
11. deps/v8/build/features.gypi (9)
12. deps/v8/build/get_landmines.py (1)
13. deps/v8/build/standalone.gypi (56)
14. deps/v8/build/toolchain.gypi (125)
15. deps/v8/docs/README.md (2)
16. deps/v8/docs/arm_debugging_with_the_simulator.md (205)
17. deps/v8/docs/becoming_v8_committer.md (40)
18. deps/v8/docs/building_with_gyp.md (260)
19. deps/v8/docs/contributing.md (32)
20. deps/v8/docs/cross_compiling_for_arm.md (151)
21. deps/v8/docs/d8_on_android.md (101)
22. deps/v8/docs/debugger_protocol.md (934)
23. deps/v8/docs/gdb_jit_interface.md (63)
24. deps/v8/docs/handling_of_ports.md (24)
25. deps/v8/docs/i18n_support.md (44)
26. deps/v8/docs/javascript.md (6)
27. deps/v8/docs/javascript_stack_trace_api.md (161)
28. deps/v8/docs/merging_and_patching.md (67)
29. deps/v8/docs/profiling_chromium_with_v8.md (34)
30. deps/v8/docs/release_process.md (57)
31. deps/v8/docs/runtime_functions.md (7)
32. deps/v8/docs/source.md (41)
33. deps/v8/docs/testing.md (58)
34. deps/v8/docs/triaging_issues.md (22)
35. deps/v8/docs/using_git.md (147)
36. deps/v8/docs/v8_c_plus_plus_styleand_sops.md (3)
37. deps/v8/docs/v8_committers_responsibility.md (39)
38. deps/v8/docs/v8_profiler.md (141)
39. deps/v8/include/v8-debug.h (28)
40. deps/v8/include/v8-experimental.h (53)
41. deps/v8/include/v8-platform.h (47)
42. deps/v8/include/v8-testing.h (2)
43. deps/v8/include/v8-version.h (6)
44. deps/v8/include/v8.h (417)
45. deps/v8/include/v8config.h (3)
46. deps/v8/infra/config/cq.cfg (37)
47. deps/v8/samples/samples.gyp (4)
48. deps/v8/snapshot_toolchain.gni (4)
49. deps/v8/src/DEPS (6)
50. deps/v8/src/OWNERS (2)
51. deps/v8/src/accessors.cc (11)
52. deps/v8/src/allocation-site-scopes.h (2)
53. deps/v8/src/api-experimental.cc (126)
54. deps/v8/src/api-experimental.h (28)
55. deps/v8/src/api-natives.cc (14)
56. deps/v8/src/api.cc (774)
57. deps/v8/src/api.h (14)
58. deps/v8/src/arm/assembler-arm-inl.h (23)
59. deps/v8/src/arm/assembler-arm.cc (96)
60. deps/v8/src/arm/assembler-arm.h (26)
61. deps/v8/src/arm/builtins-arm.cc (1724)
62. deps/v8/src/arm/code-stubs-arm.cc (379)
63. deps/v8/src/arm/code-stubs-arm.h (5)
64. deps/v8/src/arm/codegen-arm.cc (65)
65. deps/v8/src/arm/codegen-arm.h (2)
66. deps/v8/src/arm/deoptimizer-arm.cc (7)
67. deps/v8/src/arm/disasm-arm.cc (8)
68. deps/v8/src/arm/interface-descriptors-arm.cc (59)
69. deps/v8/src/arm/macro-assembler-arm.cc (510)
70. deps/v8/src/arm/macro-assembler-arm.h (131)
71. deps/v8/src/arm/simulator-arm.cc (124)
72. deps/v8/src/arm/simulator-arm.h (44)
73. deps/v8/src/arm64/assembler-arm64-inl.h (24)
74. deps/v8/src/arm64/assembler-arm64.cc (34)
75. deps/v8/src/arm64/assembler-arm64.h (30)
76. deps/v8/src/arm64/builtins-arm64.cc (1817)
77. deps/v8/src/arm64/code-stubs-arm64.cc (435)
78. deps/v8/src/arm64/code-stubs-arm64.h (1)
79. deps/v8/src/arm64/codegen-arm64.cc (32)
80. deps/v8/src/arm64/codegen-arm64.h (2)
81. deps/v8/src/arm64/constants-arm64.h (8)
82. deps/v8/src/arm64/deoptimizer-arm64.cc (3)
83. deps/v8/src/arm64/instructions-arm64.cc (15)
84. deps/v8/src/arm64/instructions-arm64.h (7)
85. deps/v8/src/arm64/interface-descriptors-arm64.cc (67)
86. deps/v8/src/arm64/macro-assembler-arm64-inl.h (34)
87. deps/v8/src/arm64/macro-assembler-arm64.cc (605)
88. deps/v8/src/arm64/macro-assembler-arm64.h (167)
89. deps/v8/src/arm64/simulator-arm64.cc (25)
90. deps/v8/src/arm64/simulator-arm64.h (46)
91. deps/v8/src/assembler.cc (68)
92. deps/v8/src/assembler.h (74)
93. deps/v8/src/ast/OWNERS (7)
94. deps/v8/src/ast/ast-expression-rewriter.cc (409)
95. deps/v8/src/ast/ast-expression-rewriter.h (54)
96. deps/v8/src/ast/ast-expression-visitor.cc (25)
97. deps/v8/src/ast/ast-expression-visitor.h (10)
98. deps/v8/src/ast/ast-literal-reindexer.cc (12)
99. deps/v8/src/ast/ast-literal-reindexer.h (11)
100. deps/v8/src/ast/ast-numbering.cc (27)

deps/v8/.gitignore (2)

@@ -25,6 +25,7 @@
.cproject
.d8_history
.gclient_entries
.gdb_history
.landmines
.project
.pydevproject
@@ -39,6 +40,7 @@ gcsuspects
shell
shell_g
/_*
/base/trace_event/common
/build/Debug
/build/gyp
/build/ipch/

deps/v8/AUTHORS (5)

@@ -32,6 +32,7 @@ StrongLoop, Inc. <*@strongloop.com>
Aaron Bieber <deftly@gmail.com>
Abdulla Kamar <abdulla.kamar@gmail.com>
Akinori MUSHA <knu@FreeBSD.org>
Alex Kodat <akodat@rocketsoftware.com>
Alexander Botero-Lowry <alexbl@FreeBSD.org>
Alexander Karpinsky <homm86@gmail.com>
Alexandre Vassalotti <avassalotti@gmail.com>
@@ -51,6 +52,7 @@ Daniel James <dnljms@gmail.com>
Douglas Crosher <dtc-v8@scieneer.com>
Dusan Milosavljevic <dusan.m.milosavljevic@gmail.com>
Erich Ocean <erich.ocean@me.com>
Evan Lucas <evan.lucas@help.com>
Fedor Indutny <fedor@indutny.com>
Felix Geisendörfer <haimuiba@gmail.com>
Filipe David Manana <fdmanana@gmail.com>
@@ -102,7 +104,8 @@ Stefan Penner <stefan.penner@gmail.com>
Tobias Burnus <burnus@net-b.de>
Victor Costan <costan@gmail.com>
Vlad Burlik <vladbph@gmail.com>
Vladimir Krivosheev <develar@gmail.com>
Vladimir Shutoff <vovan@shutoff.ru>
Yu Yin <xwafish@gmail.com>
Zhongping Wang <kewpie.w.zp@gmail.com>
柳荣一 <admin@web-tinker.com>
柳荣一 <admin@web-tinker.com>

deps/v8/BUILD.gn (214)

@@ -171,12 +171,22 @@ config("toolchain") {
if (v8_target_arch == "mips64el") {
defines += [ "V8_TARGET_ARCH_MIPS64" ]
}
if (v8_target_arch == "s390") {
defines += [ "V8_TARGET_ARCH_S390" ]
}
if (v8_target_arch == "s390x") {
defines += [
"V8_TARGET_ARCH_S390",
"V8_TARGET_ARCH_S390X",
]
}
if (v8_target_arch == "x86") {
defines += [ "V8_TARGET_ARCH_IA32" ]
}
if (v8_target_arch == "x64") {
defines += [ "V8_TARGET_ARCH_X64" ]
}
if (is_win) {
defines += [ "WIN32" ]
# TODO(jochen): Support v8_enable_prof.
@@ -222,7 +232,6 @@ action("js2c") {
"src/js/uri.js",
"src/js/math.js",
"src/third_party/fdlibm/fdlibm.js",
"src/js/date.js",
"src/js/regexp.js",
"src/js/arraybuffer.js",
"src/js/typedarray.js",
@@ -266,40 +275,6 @@
}
}
action("js2c_code_stubs") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
script = "tools/js2c.py"
# The script depends on this other script, this rule causes a rebuild if it
# changes.
inputs = [ "tools/jsmin.py" ]
sources = [
"src/js/macros.py",
"src/messages.h",
"src/js/code-stubs.js"
]
outputs = [
"$target_gen_dir/code-stub-libraries.cc",
]
args = [
rebase_path("$target_gen_dir/code-stub-libraries.cc",
root_build_dir),
"CODE_STUB",
] + rebase_path(sources, root_build_dir)
if (v8_use_external_startup_data) {
outputs += [ "$target_gen_dir/libraries_code_stub.bin" ]
args += [
"--startup_blob",
rebase_path("$target_gen_dir/libraries_code_stub.bin", root_build_dir),
]
}
}
action("js2c_experimental") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
@@ -315,12 +290,14 @@ action("js2c_experimental") {
"src/js/proxy.js",
"src/js/generator.js",
"src/js/harmony-atomics.js",
"src/js/harmony-array-includes.js",
"src/js/harmony-regexp.js",
"src/js/harmony-reflect.js",
"src/js/harmony-object-observe.js",
"src/js/harmony-sharedarraybuffer.js",
"src/js/harmony-simd.js"
"src/js/harmony-simd.js",
"src/js/harmony-species.js",
"src/js/harmony-unicode-regexps.js",
"src/js/promise-extra.js"
]
outputs = [
@@ -439,7 +416,6 @@ if (v8_use_external_startup_data) {
deps = [
":js2c",
":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
":js2c_experimental_extras",
@@ -447,7 +423,6 @@
sources = [
"$target_gen_dir/libraries.bin",
"$target_gen_dir/libraries_code_stub.bin",
"$target_gen_dir/libraries_experimental.bin",
"$target_gen_dir/libraries_extras.bin",
"$target_gen_dir/libraries_experimental_extras.bin",
@@ -535,7 +510,6 @@ source_set("v8_nosnapshot") {
deps = [
":js2c",
":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
":js2c_experimental_extras",
@@ -544,7 +518,6 @@ source_set("v8_nosnapshot") {
sources = [
"$target_gen_dir/libraries.cc",
"$target_gen_dir/code-stub-libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
"$target_gen_dir/experimental-extras-libraries.cc",
@@ -570,7 +543,6 @@ source_set("v8_snapshot") {
deps = [
":js2c",
":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
":js2c_experimental_extras",
@@ -584,7 +556,6 @@ source_set("v8_snapshot") {
sources = [
"$target_gen_dir/libraries.cc",
"$target_gen_dir/code-stub-libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
"$target_gen_dir/experimental-extras-libraries.cc",
@@ -606,7 +577,6 @@ if (v8_use_external_startup_data) {
deps = [
":js2c",
":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
":js2c_experimental_extras",
@@ -636,7 +606,10 @@ source_set("v8_base") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
# TODO(fmeawad): This needs to be updated to support standalone V8 builds.
"../base/trace_event/common/trace_event_common.h",
"include/v8-debug.h",
"include/v8-experimental.h",
"include/v8-platform.h",
"include/v8-profiler.h",
"include/v8-testing.h",
@@ -654,6 +627,8 @@ source_set("v8_base") {
"src/allocation-site-scopes.h",
"src/api.cc",
"src/api.h",
"src/api-experimental.cc",
"src/api-experimental.h",
"src/api-natives.cc",
"src/api-natives.h",
"src/arguments.cc",
@@ -662,16 +637,28 @@ source_set("v8_base") {
"src/assembler.h",
"src/assert-scope.h",
"src/assert-scope.cc",
"src/ast-expression-visitor.cc",
"src/ast-expression-visitor.h",
"src/ast-literal-reindexer.cc",
"src/ast-literal-reindexer.h",
"src/ast-numbering.cc",
"src/ast-numbering.h",
"src/ast-value-factory.cc",
"src/ast-value-factory.h",
"src/ast.cc",
"src/ast.h",
"src/ast/ast-expression-rewriter.cc",
"src/ast/ast-expression-rewriter.h",
"src/ast/ast-expression-visitor.cc",
"src/ast/ast-expression-visitor.h",
"src/ast/ast-literal-reindexer.cc",
"src/ast/ast-literal-reindexer.h",
"src/ast/ast-numbering.cc",
"src/ast/ast-numbering.h",
"src/ast/ast-value-factory.cc",
"src/ast/ast-value-factory.h",
"src/ast/ast.cc",
"src/ast/ast.h",
"src/ast/modules.cc",
"src/ast/modules.h",
"src/ast/prettyprinter.cc",
"src/ast/prettyprinter.h",
"src/ast/scopeinfo.cc",
"src/ast/scopeinfo.h",
"src/ast/scopes.cc",
"src/ast/scopes.h",
"src/ast/variables.cc",
"src/ast/variables.h",
"src/atomic-utils.h",
"src/background-parsing-task.cc",
"src/background-parsing-task.h",
@@ -722,10 +709,10 @@ source_set("v8_base") {
"src/compiler/ast-loop-assignment-analyzer.h",
"src/compiler/basic-block-instrumentor.cc",
"src/compiler/basic-block-instrumentor.h",
"src/compiler/binary-operator-reducer.cc",
"src/compiler/binary-operator-reducer.h",
"src/compiler/branch-elimination.cc",
"src/compiler/branch-elimination.h",
"src/compiler/bytecode-branch-analysis.cc",
"src/compiler/bytecode-branch-analysis.h",
"src/compiler/bytecode-graph-builder.cc",
"src/compiler/bytecode-graph-builder.h",
"src/compiler/change-lowering.cc",
@@ -736,6 +723,8 @@ source_set("v8_base") {
"src/compiler/code-generator-impl.h",
"src/compiler/code-generator.cc",
"src/compiler/code-generator.h",
"src/compiler/code-stub-assembler.cc",
"src/compiler/code-stub-assembler.h",
"src/compiler/common-node-cache.cc",
"src/compiler/common-node-cache.h",
"src/compiler/common-operator-reducer.cc",
@@ -751,6 +740,12 @@ source_set("v8_base") {
"src/compiler/dead-code-elimination.cc",
"src/compiler/dead-code-elimination.h",
"src/compiler/diamond.h",
"src/compiler/escape-analysis.cc",
"src/compiler/escape-analysis.h",
"src/compiler/escape-analysis-reducer.cc",
"src/compiler/escape-analysis-reducer.h",
"src/compiler/fast-accessor-assembler.cc",
"src/compiler/fast-accessor-assembler.h",
"src/compiler/frame.cc",
"src/compiler/frame.h",
"src/compiler/frame-elider.cc",
@@ -772,6 +767,8 @@ source_set("v8_base") {
"src/compiler/greedy-allocator.cc",
"src/compiler/greedy-allocator.h",
"src/compiler/instruction-codes.h",
"src/compiler/instruction-scheduler.cc",
"src/compiler/instruction-scheduler.h",
"src/compiler/instruction-selector-impl.h",
"src/compiler/instruction-selector.cc",
"src/compiler/instruction-selector.h",
@@ -781,6 +778,8 @@ source_set("v8_base") {
"src/compiler/interpreter-assembler.h",
"src/compiler/js-builtin-reducer.cc",
"src/compiler/js-builtin-reducer.h",
"src/compiler/js-call-reducer.cc",
"src/compiler/js-call-reducer.h",
"src/compiler/js-context-relaxation.cc",
"src/compiler/js-context-relaxation.h",
"src/compiler/js-context-specialization.cc",
@@ -822,8 +821,6 @@ source_set("v8_base") {
"src/compiler/machine-operator-reducer.h",
"src/compiler/machine-operator.cc",
"src/compiler/machine-operator.h",
"src/compiler/machine-type.cc",
"src/compiler/machine-type.h",
"src/compiler/move-optimizer.cc",
"src/compiler/move-optimizer.h",
"src/compiler/node-aux-data.h",
@@ -855,6 +852,7 @@ source_set("v8_base") {
"src/compiler/register-allocator.h",
"src/compiler/register-allocator-verifier.cc",
"src/compiler/register-allocator-verifier.h",
"src/compiler/representation-change.cc",
"src/compiler/representation-change.h",
"src/compiler/schedule.cc",
"src/compiler/schedule.h",
@@ -874,12 +872,19 @@ source_set("v8_base") {
"src/compiler/state-values-utils.h",
"src/compiler/tail-call-optimization.cc",
"src/compiler/tail-call-optimization.h",
"src/compiler/type-hint-analyzer.cc",
"src/compiler/type-hint-analyzer.h",
"src/compiler/type-hints.cc",
"src/compiler/type-hints.h",
"src/compiler/typer.cc",
"src/compiler/typer.h",
"src/compiler/value-numbering-reducer.cc",
"src/compiler/value-numbering-reducer.h",
"src/compiler/verifier.cc",
"src/compiler/verifier.h",
"src/compiler/wasm-compiler.cc",
"src/compiler/wasm-compiler.h",
"src/compiler/wasm-linkage.cc",
"src/compiler/zone-pool.cc",
"src/compiler/zone-pool.h",
"src/compiler.cc",
@@ -988,7 +993,6 @@ source_set("v8_base") {
"src/elements.h",
"src/execution.cc",
"src/execution.h",
"src/expression-classifier.h",
"src/extensions/externalize-string-extension.cc",
"src/extensions/externalize-string-extension.h",
"src/extensions/free-buffer-extension.cc",
@@ -1015,8 +1019,6 @@ source_set("v8_base") {
"src/frames.h",
"src/full-codegen/full-codegen.cc",
"src/full-codegen/full-codegen.h",
"src/func-name-inferrer.cc",
"src/func-name-inferrer.h",
"src/futex-emulation.cc",
"src/futex-emulation.h",
"src/gdb-jit.cc",
@@ -1095,7 +1097,11 @@ source_set("v8_base") {
"src/interpreter/bytecode-array-iterator.h",
"src/interpreter/bytecode-generator.cc",
"src/interpreter/bytecode-generator.h",
"src/interpreter/bytecode-register-allocator.cc",
"src/interpreter/bytecode-register-allocator.h",
"src/interpreter/bytecode-traits.h",
"src/interpreter/constant-array-builder.cc",
"src/interpreter/constant-array-builder.h",
"src/interpreter/control-flow-builders.cc",
"src/interpreter/control-flow-builders.h",
"src/interpreter/interpreter.cc",
@@ -1103,7 +1109,6 @@ source_set("v8_base") {
"src/isolate-inl.h",
"src/isolate.cc",
"src/isolate.h",
"src/json-parser.h",
"src/json-stringifier.h",
"src/key-accumulator.h",
"src/key-accumulator.cc",
@@ -1120,11 +1125,13 @@ source_set("v8_base") {
"src/lookup.cc",
"src/lookup.h",
"src/macro-assembler.h",
"src/machine-type.cc",
"src/machine-type.h",
"src/messages.cc",
"src/messages.h",
"src/modules.cc",
"src/modules.h",
"src/msan.h",
"src/objects-body-descriptors-inl.h",
"src/objects-body-descriptors.h",
"src/objects-debug.cc",
"src/objects-inl.h",
"src/objects-printer.cc",
@@ -1134,20 +1141,31 @@ source_set("v8_base") {
"src/optimizing-compile-dispatcher.h",
"src/ostreams.cc",
"src/ostreams.h",
"src/parameter-initializer-rewriter.cc",
"src/parameter-initializer-rewriter.h",
"src/parser.cc",
"src/parser.h",
"src/pattern-rewriter.cc",
"src/parsing/expression-classifier.h",
"src/parsing/func-name-inferrer.cc",
"src/parsing/func-name-inferrer.h",
"src/parsing/json-parser.h",
"src/parsing/parameter-initializer-rewriter.cc",
"src/parsing/parameter-initializer-rewriter.h",
"src/parsing/parser-base.h",
"src/parsing/parser.cc",
"src/parsing/parser.h",
"src/parsing/pattern-rewriter.cc",
"src/parsing/preparse-data-format.h",
"src/parsing/preparse-data.cc",
"src/parsing/preparse-data.h",
"src/parsing/preparser.cc",
"src/parsing/preparser.h",
"src/parsing/rewriter.cc",
"src/parsing/rewriter.h",
"src/parsing/scanner-character-streams.cc",
"src/parsing/scanner-character-streams.h",
"src/parsing/scanner.cc",
"src/parsing/scanner.h",
"src/parsing/token.cc",
"src/parsing/token.h",
"src/pending-compilation-error-handler.cc",
"src/pending-compilation-error-handler.h",
"src/preparse-data-format.h",
"src/preparse-data.cc",
"src/preparse-data.h",
"src/preparser.cc",
"src/preparser.h",
"src/prettyprinter.cc",
"src/prettyprinter.h",
"src/profiler/allocation-tracker.cc",
"src/profiler/allocation-tracker.h",
"src/profiler/circular-queue-inl.h",
@@ -1175,14 +1193,14 @@ source_set("v8_base") {
"src/property.cc",
"src/property.h",
"src/prototype.h",
"src/rewriter.cc",
"src/rewriter.h",
"src/regexp/bytecodes-irregexp.h",
"src/regexp/interpreter-irregexp.cc",
"src/regexp/interpreter-irregexp.h",
"src/regexp/jsregexp-inl.h",
"src/regexp/jsregexp.cc",
"src/regexp/jsregexp.h",
"src/regexp/regexp-ast.cc",
"src/regexp/regexp-ast.h",
"src/regexp/regexp-macro-assembler-irregexp-inl.h",
"src/regexp/regexp-macro-assembler-irregexp.cc",
"src/regexp/regexp-macro-assembler-irregexp.h",
@@ -1190,6 +1208,8 @@ source_set("v8_base") {
"src/regexp/regexp-macro-assembler-tracer.h",
"src/regexp/regexp-macro-assembler.cc",
"src/regexp/regexp-macro-assembler.h",
"src/regexp/regexp-parser.cc",
"src/regexp/regexp-parser.h",
"src/regexp/regexp-stack.cc",
"src/regexp/regexp-stack.h",
"src/register-configuration.cc",
@@ -1232,14 +1252,6 @@ source_set("v8_base") {
"src/runtime/runtime.h",
"src/safepoint-table.cc",
"src/safepoint-table.h",
"src/scanner-character-streams.cc",
"src/scanner-character-streams.h",
"src/scanner.cc",
"src/scanner.h",
"src/scopeinfo.cc",
"src/scopeinfo.h",
"src/scopes.cc",
"src/scopes.h",
"src/signature.h",
"src/simulator.h",
"src/small-pointer-list.h",
@@ -1262,8 +1274,8 @@ source_set("v8_base") {
"src/string-stream.h",
"src/strtod.cc",
"src/strtod.h",
"src/token.cc",
"src/token.h",
"src/tracing/trace-event.cc",
"src/tracing/trace-event.h",
"src/transitions-inl.h",
"src/transitions.cc",
"src/transitions.h",
@@ -1295,12 +1307,28 @@ source_set("v8_base") {
"src/v8memory.h",
"src/v8threads.cc",
"src/v8threads.h",
"src/variables.cc",
"src/variables.h",
"src/version.cc",
"src/version.h",
"src/vm-state-inl.h",
"src/vm-state.h",
"src/wasm/asm-wasm-builder.cc",
"src/wasm/asm-wasm-builder.h",
"src/wasm/ast-decoder.cc",
"src/wasm/ast-decoder.h",
"src/wasm/decoder.h",
"src/wasm/encoder.cc",
"src/wasm/encoder.h",
"src/wasm/module-decoder.cc",
"src/wasm/module-decoder.h",
"src/wasm/wasm-js.cc",
"src/wasm/wasm-js.h",
"src/wasm/wasm-macro-gen.h",
"src/wasm/wasm-module.cc",
"src/wasm/wasm-module.h",
"src/wasm/wasm-opcodes.cc",
"src/wasm/wasm-opcodes.h",
"src/wasm/wasm-result.cc",
"src/wasm/wasm-result.h",
"src/zone.cc",
"src/zone.h",
"src/zone-allocator.h",
@@ -1319,6 +1347,7 @@ source_set("v8_base") {
"src/crankshaft/ia32/lithium-ia32.h",
"src/compiler/ia32/code-generator-ia32.cc",
"src/compiler/ia32/instruction-codes-ia32.h",
"src/compiler/ia32/instruction-scheduler-ia32.cc",
"src/compiler/ia32/instruction-selector-ia32.cc",
"src/debug/ia32/debug-ia32.cc",
"src/full-codegen/ia32/full-codegen-ia32.cc",
@@ -1350,6 +1379,7 @@ source_set("v8_base") {
sources += [
"src/compiler/x64/code-generator-x64.cc",
"src/compiler/x64/instruction-codes-x64.h",
"src/compiler/x64/instruction-scheduler-x64.cc",
"src/compiler/x64/instruction-selector-x64.cc",
"src/crankshaft/x64/lithium-codegen-x64.cc",
"src/crankshaft/x64/lithium-codegen-x64.h",
@@ -1408,6 +1438,7 @@ source_set("v8_base") {
"src/arm/simulator-arm.h",
"src/compiler/arm/code-generator-arm.cc",
"src/compiler/arm/instruction-codes-arm.h",
"src/compiler/arm/instruction-scheduler-arm.cc",
"src/compiler/arm/instruction-selector-arm.cc",
"src/crankshaft/arm/lithium-arm.cc",
"src/crankshaft/arm/lithium-arm.h",
@@ -1460,6 +1491,7 @@ source_set("v8_base") {
"src/arm64/utils-arm64.h",
"src/compiler/arm64/code-generator-arm64.cc",
"src/compiler/arm64/instruction-codes-arm64.h",
"src/compiler/arm64/instruction-scheduler-arm64.cc",
"src/compiler/arm64/instruction-selector-arm64.cc",
"src/crankshaft/arm64/delayed-masm-arm64.cc",
"src/crankshaft/arm64/delayed-masm-arm64.h",
@@ -1484,6 +1516,7 @@ source_set("v8_base") {
sources += [
"src/compiler/mips/code-generator-mips.cc",
"src/compiler/mips/instruction-codes-mips.h",
"src/compiler/mips/instruction-scheduler-mips.cc",
"src/compiler/mips/instruction-selector-mips.cc",
"src/crankshaft/mips/lithium-codegen-mips.cc",
"src/crankshaft/mips/lithium-codegen-mips.h",
@@ -1525,6 +1558,7 @@ source_set("v8_base") {
sources += [
"compiler/mips64/code-generator-mips64.cc",
"compiler/mips64/instruction-codes-mips64.h",
"compiler/mips64/instruction-scheduler-mips64.cc",
"compiler/mips64/instruction-selector-mips64.cc",
"src/crankshaft/mips64/lithium-codegen-mips64.cc",
"src/crankshaft/mips64/lithium-codegen-mips64.h",

deps/v8/ChangeLog (2238)

File diff suppressed because it is too large

deps/v8/DEPS (16)

@@ -8,13 +8,15 @@ vars = {
deps = {
"v8/build/gyp":
Var("git_url") + "/external/gyp.git" + "@" + "2c1e6cced23554ce84806e570acea637f6473afc",
Var("git_url") + "/external/gyp.git" + "@" + "b85ad3e578da830377dbc1843aa4fbc5af17a192",
"v8/third_party/icu":
Var("git_url") + "/chromium/deps/icu.git" + "@" + "42c58d4e49f2250039f0e98d43e0b76e8f5ca024",
Var("git_url") + "/chromium/deps/icu.git" + "@" + "8d342a405be5ae8aacb1e16f0bc31c3a4fbf26a2",
"v8/buildtools":
Var("git_url") + "/chromium/buildtools.git" + "@" + "4a95614772d9bcbd8bc197e1d9bd034e088fc740",
Var("git_url") + "/chromium/buildtools.git" + "@" + "0f8e6e4b126ee88137930a0ae4776c4741808740",
"v8/base/trace_event/common":
Var("git_url") + "/chromium/src/base/trace_event/common.git" + "@" + "d83d44b13d07c2fd0a40101a7deef9b93b841732",
"v8/tools/swarming_client":
Var('git_url') + '/external/swarming.client.git' + '@' + "8fce79620b04bbe5415ace1103db27505bdc4c06",
Var('git_url') + '/external/swarming.client.git' + '@' + "9cdd76171e517a430a72dcd7d66ade67e109aa00",
"v8/testing/gtest":
Var("git_url") + "/external/github.com/google/googletest.git" + "@" + "6f8a66431cb592dad629028a50b3dd418a408c87",
"v8/testing/gmock":
@@ -25,15 +27,15 @@ deps = {
Var("git_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be",
"v8/test/simdjs/data": Var("git_url") + "/external/github.com/tc39/ecmascript_simd.git" + "@" + "c8ef63c728283debc25891123eb00482fee4b8cd",
"v8/test/test262/data":
Var("git_url") + "/external/github.com/tc39/test262.git" + "@" + "ea222fb7d09e334c321b987656315ad4056ded96",
Var("git_url") + "/external/github.com/tc39/test262.git" + "@" + "67ba34b03a46bac4254223ae25f42c7b959540f0",
"v8/tools/clang":
Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "66f5328417331216569e8beb244fd887f62e8997",
Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "24e8c1c92fe54ef8ed7651b5850c056983354a4a",
}
deps_os = {
"android": {
"v8/third_party/android_tools":
Var("git_url") + "/android_tools.git" + "@" + "54492f99c84cab0826a8e656efeb33a1b1bf5a04",
Var("git_url") + "/android_tools.git" + "@" + "f4c36ad89b2696b37d9cd7ca7d984b691888b188",
},
"win": {
"v8/third_party/cygwin":

deps/v8/Makefile (9)

@@ -220,12 +220,6 @@ ifeq ($(arm_test_noprobe), on)
GYPFLAGS += -Darm_test_noprobe=on
endif
# Optionally enable wasm prototype.
# Assume you've placed a link to v8-native-prototype in third_party/wasm.
ifeq ($(wasm), on)
GYPFLAGS += -Dv8_wasm=1
endif
# ----------------- available targets: --------------------
# - "grokdump": rebuilds heap constants lists used by grokdump
# - any arch listed in ARCHES (see below)
@@ -244,7 +238,8 @@ endif
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64 mips64el x87 ppc ppc64
ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64 mips64el x87 ppc ppc64 \
s390 s390x
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
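
With `s390` and `s390x` added to `ARCHES`, the make build can address the new targets like any other architecture/mode pair. A hypothetical invocation (the `<arch>.<mode>` target convention is documented in docs/building_with_gyp.md; working s390 support is assumed from the rest of this commit):

```
# Sketch: build targets enabled by the new ARCHES entries
make s390.release
make s390x.debug
```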

deps/v8/PRESUBMIT.py (28)

@@ -69,6 +69,7 @@ def _V8PresubmitChecks(input_api, output_api):
from presubmit import SourceProcessor
from presubmit import CheckExternalReferenceRegistration
from presubmit import CheckAuthorizedAuthor
from presubmit import CheckStatusFiles
results = []
if not CppLintProcessor().Run(input_api.PresubmitLocalPath()):
@@ -80,6 +81,8 @@ def _V8PresubmitChecks(input_api, output_api):
if not CheckExternalReferenceRegistration(input_api.PresubmitLocalPath()):
results.append(output_api.PresubmitError(
"External references registration check failed"))
if not CheckStatusFiles(input_api.PresubmitLocalPath()):
results.append(output_api.PresubmitError("Status file check failed"))
results.extend(CheckAuthorizedAuthor(input_api, output_api))
return results
@@ -272,28 +275,3 @@ def CheckChangeOnCommit(input_api, output_api):
input_api, output_api,
json_url='http://v8-status.appspot.com/current?format=json'))
return results
def GetPreferredTryMasters(project, change):
return {
'tryserver.v8': {
'v8_linux_rel': set(['defaulttests']),
'v8_linux_dbg': set(['defaulttests']),
'v8_linux_nodcheck_rel': set(['defaulttests']),
'v8_linux_gcc_compile_rel': set(['defaulttests']),
'v8_linux64_rel': set(['defaulttests']),
'v8_linux64_asan_rel': set(['defaulttests']),
'v8_linux64_avx2_rel': set(['defaulttests']),
'v8_win_rel': set(['defaulttests']),
'v8_win_compile_dbg': set(['defaulttests']),
'v8_win_nosnap_shared_compile_rel': set(['defaulttests']),
'v8_win64_rel': set(['defaulttests']),
'v8_mac_rel': set(['defaulttests']),
'v8_linux_arm_rel': set(['defaulttests']),
'v8_linux_arm64_rel': set(['defaulttests']),
'v8_linux_mipsel_compile_rel': set(['defaulttests']),
'v8_linux_mips64el_compile_rel': set(['defaulttests']),
'v8_android_arm_compile_rel': set(['defaulttests']),
'v8_linux_chromium_gn_rel': set(['defaulttests']),
},
}

deps/v8/README.md (4)

@@ -10,7 +10,7 @@ browser from Google.
V8 can run standalone, or can be embedded into any C++ application.
V8 Project page: https://code.google.com/p/v8/
V8 Project page: https://github.com/v8/v8/wiki
Getting the Code
@@ -37,4 +37,4 @@ Contributing
=============
Please follow the instructions mentioned on the
[V8 wiki](https://code.google.com/p/v8-wiki/wiki/Contributing).
[V8 wiki](https://github.com/v8/v8/wiki/Contributing).

deps/v8/WATCHLISTS (14)

@@ -50,6 +50,12 @@
'feature_shipping_status': {
'filepath': 'src/flag-definitions.h',
},
'gc_changes': {
'filepath': 'src/heap/',
},
'merges': {
'filepath': '.',
},
},
'WATCHLISTS': {
@@ -69,5 +75,13 @@
'feature_shipping_status': [
'hablich@chromium.org',
],
'gc_changes': [
'hpayer@chromium.org',
'ulan@chromium.org',
],
'merges': [
# Only enabled on branches created with tools/release/create_release.py
'v8-merges@googlegroups.com',
],
},
}

deps/v8/build/all.gyp (2)

@@ -24,6 +24,7 @@
'../test/bot_default.gyp:*',
'../test/benchmarks/benchmarks.gyp:*',
'../test/default.gyp:*',
'../test/ignition.gyp:*',
'../test/intl/intl.gyp:*',
'../test/message/message.gyp:*',
'../test/mjsunit/mjsunit.gyp:*',
@@ -33,6 +34,7 @@
'../test/simdjs/simdjs.gyp:*',
'../test/test262/test262.gyp:*',
'../test/webkit/webkit.gyp:*',
'../tools/check-static-initializers.gyp:*',
],
}],
]

deps/v8/build/features.gypi (9)

@@ -67,9 +67,6 @@
# Set to 1 to enable DCHECKs in release builds.
'dcheck_always_on%': 0,
# Set to 1 to enable building with wasm prototype.
'v8_wasm%': 0,
# Enable/disable JavaScript API accessors.
'v8_js_accessors%': 0,
},
@@ -111,12 +108,6 @@
['dcheck_always_on!=0', {
'defines': ['DEBUG',],
}],
['v8_wasm!=0', {
'defines': ['V8_WASM',],
}],
['v8_js_accessors!=0', {
'defines': ['V8_JS_ACCESSORS'],
}],
], # conditions
'configurations': {
'DebugBaseCommon': {

deps/v8/build/get_landmines.py (1)

@@ -25,6 +25,7 @@ def main():
print 'Remove build/android.gypi'
print 'Cleanup after windows ninja switch attempt.'
print 'Switching to pinned msvs toolchain.'
print 'Clobbering to hopefully resolve problem with mksnapshot'
return 0

deps/v8/build/standalone.gypi (56)

@@ -42,8 +42,7 @@
'v8_enable_backtrace%': 0,
'v8_enable_i18n_support%': 1,
'v8_deprecation_warnings': 1,
# TODO(jochen): Turn this on.
'v8_imminent_deprecation_warnings%': 0,
'v8_imminent_deprecation_warnings': 1,
'msvs_multi_core_compile%': '1',
'mac_deployment_target%': '10.5',
'release_extra_cflags%': '',
@@ -68,11 +67,15 @@
'host_arch%': '<(host_arch)',
'target_arch%': '<(host_arch)',
'base_dir%': '<!(cd <(DEPTH) && python -c "import os; print os.getcwd()")',
# Instrument for code coverage with gcov.
'coverage%': 0,
},
'base_dir%': '<(base_dir)',
'host_arch%': '<(host_arch)',
'target_arch%': '<(target_arch)',
'v8_target_arch%': '<(target_arch)',
'coverage%': '<(coverage)',
'asan%': 0,
'lsan%': 0,
'msan%': 0,
@@ -106,6 +109,7 @@
# If no gomadir is set, it uses the default gomadir.
'use_goma%': 0,
'gomadir%': '',
'conditions': [
# Set default gomadir.
['OS=="win"', {
@@ -113,10 +117,11 @@
}, {
'gomadir': '<!(/bin/echo -n ${HOME}/goma)',
}],
['host_arch!="ppc" and host_arch!="ppc64" and host_arch!="ppc64le"', {
'host_clang%': '1',
['host_arch!="ppc" and host_arch!="ppc64" and host_arch!="ppc64le" and host_arch!="s390" and host_arch!="s390x" and \
coverage==0', {
'host_clang%': 1,
}, {
'host_clang%': '0',
'host_clang%': 0,
}],
# linux_use_bundled_gold: whether to use the gold linker binary checked
# into third_party/binutils. Force this off via GYP_DEFINES when you
@@ -160,6 +165,7 @@
'cfi_blacklist%': '<(cfi_blacklist)',
'test_isolation_mode%': '<(test_isolation_mode)',
'fastbuild%': '<(fastbuild)',
'coverage%': '<(coverage)',
# Add a simple extras solely for the purpose of the cctests
'v8_extra_library_files': ['../test/cctest/test-extra.js'],
@@ -221,7 +227,7 @@
'v8_enable_gdbjit%': 0,
}],
['(OS=="linux" or OS=="mac") and (target_arch=="ia32" or target_arch=="x64") and \
(v8_target_arch!="x87" and v8_target_arch!="x32")', {
(v8_target_arch!="x87" and v8_target_arch!="x32") and coverage==0', {
'clang%': 1,
}, {
'clang%': 0,
@@ -406,13 +412,16 @@
],
},
'conditions':[
['(clang==1 or host_clang==1) and OS!="win"', {
['clang==0', {
'cflags+': ['-Wno-sign-compare',],
}],
['clang==1 or host_clang==1', {
# This is here so that all files get recompiled after a clang roll and
# when turning clang on or off.
# (defines are passed via the command line, and build systems rebuild
# things when their commandline changes). Nothing should ever read this
# define.
'defines': ['CR_CLANG_REVISION=<!(<(DEPTH)/tools/clang/scripts/update.sh --print-revision)'],
'defines': ['CR_CLANG_REVISION=<!(python <(DEPTH)/tools/clang/scripts/update.py --print-revision)'],
'conditions': [
['host_clang==1', {
'target_conditions': [
@@ -575,9 +584,11 @@
'cflags': [
'-fsanitize=memory',
'-fsanitize-memory-track-origins=<(msan_track_origins)',
'-fPIC',
],
'ldflags': [
'-fsanitize=memory',
'-pie',
],
'defines': [
'MEMORY_SANITIZER',
@@ -675,6 +686,7 @@
'-pedantic',
# Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers',
'-Wno-gnu-zero-variadic-macro-arguments',
],
'cflags_cc': [
'-Wnon-virtual-dtor',
@@ -684,6 +696,16 @@
],
'ldflags': [ '-pthread', ],
'conditions': [
# Don't warn about TRACE_EVENT_* macros with zero arguments passed to
# ##__VA_ARGS__. C99 strict mode prohibits having zero variadic macro
# arguments in gcc.
[ 'clang==0', {
'cflags!' : [
'-pedantic' ,
# Don't warn about unrecognized command line option.
'-Wno-gnu-zero-variadic-macro-arguments',
],
}],
[ 'clang==1 and (v8_target_arch=="x64" or v8_target_arch=="arm64" \
or v8_target_arch=="mips64el")', {
'cflags': [ '-Wshorten-64-to-32' ],
@@ -697,6 +719,11 @@
[ 'component=="shared_library"', {
'cflags': [ '-fPIC', ],
}],
[ 'coverage==1', {
'cflags!': [ '-O3', '-O2', '-O1', ],
'cflags': [ '-fprofile-arcs', '-ftest-coverage', '-O0'],
'ldflags': [ '-fprofile-arcs'],
}],
],
},
}],
@@ -710,6 +737,7 @@
'-Wno-unused-parameter',
# Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers',
'-Wno-gnu-zero-variadic-macro-arguments',
],
'cflags_cc': [
'-Wnon-virtual-dtor',
@@ -817,7 +845,6 @@
4309, # Truncation of constant value
4311, # Pointer truncation from 'type' to 'type'
4312, # Conversion from 'type1' to 'type2' of greater size
4481, # Nonstandard extension used: override specifier 'keyword'
4505, # Unreferenced local function has been removed
4510, # Default constructor could not be generated
4512, # Assignment operator could not be generated
@@ -934,6 +961,7 @@
'-Wno-unused-parameter',
# Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers',
'-Wno-gnu-zero-variadic-macro-arguments',
],
},
'conditions': [
@@ -1215,6 +1243,16 @@
['CC', '<(clang_dir)/bin/clang-cl'],
],
}],
['OS=="linux" and target_arch=="arm" and host_arch!="arm" and clang==0 and "<(GENERATOR)"=="ninja"', {
# Set default ARM cross tools on linux. These can be overridden
# using CC,CXX,CC.host and CXX.host environment variables.
'make_global_settings': [
['CC', '<!(which arm-linux-gnueabihf-gcc)'],
['CXX', '<!(which arm-linux-gnueabihf-g++)'],
['CC.host', '<(host_cc)'],
['CXX.host', '<(host_cxx)'],
],
}],
# TODO(yyanagisawa): supports GENERATOR==make
# make generator doesn't support CC_wrapper without CC
# in make_global_settings yet.

deps/v8/build/toolchain.gypi (125)

@@ -135,6 +135,7 @@
'conditions': [
['host_arch=="ia32" or host_arch=="x64" or \
host_arch=="ppc" or host_arch=="ppc64" or \
host_arch=="s390" or host_arch=="s390x" or \
clang==1', {
'variables': {
'host_cxx_is_biarch%': 1,
@@ -145,8 +146,8 @@
},
}],
['target_arch=="ia32" or target_arch=="x64" or target_arch=="x87" or \
target_arch=="ppc" or target_arch=="ppc64" or \
clang==1', {
target_arch=="ppc" or target_arch=="ppc64" or target_arch=="s390" or \
target_arch=="s390x" or clang==1', {
'variables': {
'target_cxx_is_biarch%': 1,
},
@@ -297,6 +298,23 @@
'V8_TARGET_ARCH_ARM64',
],
}],
['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
'defines': [
'V8_TARGET_ARCH_S390',
],
'conditions': [
['v8_target_arch=="s390x"', {
'defines': [
'V8_TARGET_ARCH_S390X',
],
}],
['v8_host_byteorder=="little"', {
'defines': [
'V8_TARGET_ARCH_S390_LE_SIM',
],
}],
],
}], # s390
['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
'defines': [
'V8_TARGET_ARCH_PPC',
@@ -357,6 +375,9 @@
['ld_r_path!=""', {
'ldflags': ['-Wl,--rpath=<(ld_r_path)'],
}],
[ 'clang==1', {
'cflags': ['-integrated-as'],
}],
],
}],
],
@@ -406,7 +427,12 @@
'FPU_MODE_FP64',
],
'cflags!': ['-mfp32', '-mfpxx'],
'cflags': ['-mips32r6', '-Wa,-mips32r6'],
'conditions': [
[ 'clang==0', {
'cflags': ['-Wa,-mips32r6'],
}],
],
'cflags': ['-mips32r6'],
'ldflags': ['-mips32r6'],
}],
['mips_arch_variant=="r2"', {
@@ -432,8 +458,11 @@
],
'cflags': ['-mfp32'],
}],
[ 'clang==0', {
'cflags': ['-Wa,-mips32r2'],
}],
],
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
'cflags': ['-mips32r2'],
'ldflags': ['-mips32r2'],
}],
['mips_arch_variant=="r1"', {
@@ -441,7 +470,12 @@
'FPU_MODE_FP32',
],
'cflags!': ['-mfp64', '-mfpxx'],
'cflags': ['-mips32', '-Wa,-mips32'],
'conditions': [
[ 'clang==0', {
'cflags': ['-Wa,-mips32'],
}],
],
'cflags': ['-mips32'],
'ldflags': ['-mips32'],
}],
['mips_arch_variant=="rx"', {
@@ -450,7 +484,12 @@
'FPU_MODE_FPXX',
],
'cflags!': ['-mfp64', '-mfp32'],
'cflags': ['-mips32', '-Wa,-mips32', '-mfpxx'],
'conditions': [
[ 'clang==0', {
'cflags': ['-Wa,-mips32'],
}],
],
'cflags': ['-mips32', '-mfpxx'],
'ldflags': ['-mips32'],
}],
],
@@ -589,7 +628,12 @@
'FPU_MODE_FP64',
],
'cflags!': ['-mfp32', '-mfpxx'],
'cflags': ['-mips32r6', '-Wa,-mips32r6'],
'conditions': [
[ 'clang==0', {
'cflags': ['-Wa,-mips32r6'],
}],
],
'cflags': ['-mips32r6'],
'ldflags': ['-mips32r6'],
}],
['mips_arch_variant=="r2"', {
@@ -615,13 +659,21 @@
],
'cflags': ['-mfp32'],
}],
[ 'clang==0', {
'cflags': ['-Wa,-mips32r2'],
}],
],
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
'cflags': ['-mips32r2'],
'ldflags': ['-mips32r2'],
}],
['mips_arch_variant=="r1"', {
'cflags!': ['-mfp64', '-mfpxx'],
'cflags': ['-mips32', '-Wa,-mips32'],
'conditions': [
[ 'clang==0', {
'cflags': ['-Wa,-mips32'],
}],
],
'cflags': ['-mips32'],
'ldflags': ['-mips32'],
}],
['mips_arch_variant=="rx"', {
@@ -630,7 +682,12 @@
'FPU_MODE_FPXX',
],
'cflags!': ['-mfp64', '-mfp32'],
'cflags': ['-mips32', '-Wa,-mips32', '-mfpxx'],
'conditions': [
[ 'clang==0', {
'cflags': ['-Wa,-mips32'],
}],
],
'cflags': ['-mips32', '-mfpxx'],
'ldflags': ['-mips32'],
}],
['mips_arch_variant=="loongson"', {
@@ -639,7 +696,12 @@
'FPU_MODE_FP32',
],
'cflags!': ['-mfp64', '-mfpxx'],
'cflags': ['-mips3', '-Wa,-mips3', '-mfp32'],
'conditions': [
[ 'clang==0', {
'cflags': ['-Wa,-mips3'],
}],
],
'cflags': ['-mips3', '-mfp32'],
}],
],
}, {
@@ -800,12 +862,22 @@
}],
['mips_arch_variant=="r6"', {
'defines': ['_MIPS_ARCH_MIPS64R6',],
'cflags': ['-mips64r6', '-mabi=64', '-Wa,-mips64r6'],
'conditions': [
[ 'clang==0', {
'cflags': ['-Wa,-mips64r6'],
}],
],
'cflags': ['-mips64r6', '-mabi=64'],
'ldflags': ['-mips64r6', '-mabi=64'],
}],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS64R2',],
'cflags': ['-mips64r2', '-mabi=64', '-Wa,-mips64r2'],
'conditions': [
[ 'clang==0', {
'cflags': ['-Wa,-mips64r2'],
}],
],
'cflags': ['-mips64r2', '-mabi=64'],
'ldflags': ['-mips64r2', '-mabi=64'],
}],
],
@@ -925,13 +997,21 @@
or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
(v8_target_arch=="arm" or v8_target_arch=="ia32" or \
v8_target_arch=="x87" or v8_target_arch=="mips" or \
v8_target_arch=="mipsel" or v8_target_arch=="ppc")', {
v8_target_arch=="mipsel" or v8_target_arch=="ppc" or \
v8_target_arch=="s390")', {
'target_conditions': [
['_toolset=="host"', {
'conditions': [
['host_cxx_is_biarch==1', {
'cflags': [ '-m32' ],
'ldflags': [ '-m32' ]
'conditions': [
['host_arch=="s390" or host_arch=="s390x"', {
'cflags': [ '-m31' ],
'ldflags': [ '-m31' ]
},{
'cflags': [ '-m32' ],
'ldflags': [ '-m32' ]
}],
],
}],
],
'xcode_settings': {
@@ -941,8 +1021,15 @@
['_toolset=="target"', {
'conditions': [
['target_cxx_is_biarch==1 and nacl_target_arch!="nacl_x64"', {
'cflags': [ '-m32' ],
'ldflags': [ '-m32' ],
'conditions': [
['host_arch=="s390" or host_arch=="s390x"', {
'cflags': [ '-m31' ],
'ldflags': [ '-m31' ]
},{
'cflags': [ '-m32' ],
'ldflags': [ '-m32' ],
}],
],
}],
],
'xcode_settings': {
@@ -953,7 +1040,7 @@
}],
['(OS=="linux" or OS=="android") and \
(v8_target_arch=="x64" or v8_target_arch=="arm64" or \
v8_target_arch=="ppc64")', {
v8_target_arch=="ppc64" or v8_target_arch=="s390x")', {
'target_conditions': [
['_toolset=="host"', {
'conditions': [

deps/v8/docs/README.md (2)

@@ -0,0 +1,2 @@
The documentation for V8 can be found at the
[V8 Wiki](https://github.com/v8/v8/wiki).

deps/v8/docs/arm_debugging_with_the_simulator.md (205)

@@ -1,205 +0,0 @@
# ARM debugging with the simulator
The simulator and debugger can be very helpful when working with v8 code generation.
* It is convenient as it allows you to test code generation without access to actual hardware.
* No cross or native compilation is needed.
* The simulator fully supports the debugging of generated code.
Please note that this simulator is designed for v8 purposes. Only the features used by v8 are implemented, and you might encounter unimplemented features or instructions. In this case, feel free to implement them and submit the code!
## Details on the ARM Debugger
Compile the ARM simulator shell with:
```
make arm.debug
```
on an x86 host using your regular compiler.
### Starting the Debugger
There are different ways of starting the debugger:
```
$ out/arm.debug/d8 --stop_sim_at <n>
```
The simulator will start the debugger after executing n instructions.
```
$ out/arm.debug/d8 --stop_at <function name>
```
The simulator will stop at the given JavaScript function.
Also you can directly generate 'stop' instructions in the ARM code. Stops are generated with
```
Assembler::stop(const char* msg, Condition cond, int32_t code)
```
When the Simulator hits a stop, it will print msg and start the debugger.
### Debugging commands.
**Usual commands:**
Enter `help` in the debugger prompt to get details on available commands. These include usual gdb-like commands, such as stepi, cont, disasm, etc. If the Simulator is run under gdb, the “gdb” debugger command will give control to gdb. You can then use cont from gdb to go back to the debugger.
**Debugger specific commands:**
Here's a list of the ARM debugger specific commands, along with examples.
The JavaScript file “func.js” used below contains:
```
function test() {
print("In function test.");
}
test();
```
* **printobject** `<register>` (alias po), will describe an object held in a register.
```
$ out/arm.debug/d8 func.js --stop_at test
Simulator hit stop-at
0xb544d6a8 e92d4902 stmdb sp!, {r1, r8, fp, lr}
sim> print r0
r0: 0xb547ec15 -1253577707
sim> printobject r0
r0:
0xb547ec15: [Function]
- map = 0x0xb540ff01
- initial_map =
- shared_info = 0xb547eb2d <SharedFunctionInfo>
- name = #test
- context = 0xb60083f1 <FixedArray[52]>
- code = 0xb544d681 <Code>
#arguments: 0xb545a15d <Proxy> (callback)
#length: 0xb545a14d <Proxy> (callback)
#name: 0xb545a155 <Proxy> (callback)
#prototype: 0xb545a145 <Proxy> (callback)
#caller: 0xb545a165 <Proxy> (callback)
```
* **break** `<address>`, will insert a breakpoint at the specified address.
* **del**, will delete the current breakpoint.
You can have only one such breakpoint. This is useful if you want to insert a breakpoint at runtime.
```
$ out/arm.debug/d8 func.js --stop_at test
Simulator hit stop-at
0xb53a1ee8 e92d4902 stmdb sp!, {r1, r8, fp, lr}
sim> disasm 5
0xb53a1ee8 e92d4902 stmdb sp!, {r1, r8, fp, lr}
0xb53a1eec e28db008 add fp, sp, #8
0xb53a1ef0 e59a200c ldr r2, [r10, #+12]
0xb53a1ef4 e28fe004 add lr, pc, #4
0xb53a1ef8 e15d0002 cmp sp, r2
sim> break 0xb53a1ef8
sim> cont
0xb53a1ef8 e15d0002 cmp sp, r2
sim> disasm 5
0xb53a1ef8 e15d0002 cmp sp, r2
0xb53a1efc 359ff034 ldrcc pc, [pc, #+52]
0xb53a1f00 e5980017 ldr r0, [r8, #+23]
0xb53a1f04 e59f1030 ldr r1, [pc, #+48]
0xb53a1f08 e52d0004 str r0, [sp, #-4]!
sim> break 0xb53a1f08
setting breakpoint failed
sim> del
sim> break 0xb53a1f08
sim> cont
0xb53a1f08 e52d0004 str r0, [sp, #-4]!
sim> del
sim> cont
In function test.
```
* Generated `stop` instructions will work as breakpoints with a few additional features.
The first argument is a help message, the second is the condition, and the third is the stop code. If a code is specified, and is less than 256, the stop is said to be “watched”, and can be disabled/enabled; a counter also keeps track of how many times the Simulator hits this code.
Suppose we are working on this v8 C++ code, which is reached when running our JavaScript file:
```
__ stop("My stop.", al, 123);
__ mov(r0, r0);
__ mov(r0, r0);
__ mov(r0, r0);
__ mov(r0, r0);
__ mov(r0, r0);
__ stop("My second stop.", al, 0x1);
__ mov(r1, r1);
__ mov(r1, r1);
__ mov(r1, r1);
__ mov(r1, r1);
__ mov(r1, r1);
```
Here's a sample debugging session:
We hit the first stop.
```
Simulator hit My stop.
0xb53559e8 e1a00000 mov r0, r0
```
We can see the following stop using disasm. The address of the message string is inlined in the code after the svc stop instruction.
```
sim> disasm
0xb53559e8 e1a00000 mov r0, r0
0xb53559ec e1a00000 mov r0, r0
0xb53559f0 e1a00000 mov r0, r0
0xb53559f4 e1a00000 mov r0, r0
0xb53559f8 e1a00000 mov r0, r0
0xb53559fc ef800001 stop 1 - 0x1
0xb5355a00 08338a97 stop message: My second stop
0xb5355a04 e1a00000 mov r1, r1
0xb5355a08 e1a00000 mov r1, r1
0xb5355a0c e1a00000 mov r1, r1
```
Information can be printed for all (watched) stops which were hit at least once.
```
sim> stop info all
Stop information:
stop 123 - 0x7b: Enabled, counter = 1, My stop.
sim> cont
Simulator hit My second stop
0xb5355a04 e1a00000 mov r1, r1
sim> stop info all
Stop information:
stop 1 - 0x1: Enabled, counter = 1, My second stop
stop 123 - 0x7b: Enabled, counter = 1, My stop.
```
Stops can be disabled or enabled. (Only available for watched stops.)
```
sim> stop disable 1
sim> cont
Simulator hit My stop.
0xb5356808 e1a00000 mov r0, r0
sim> cont
Simulator hit My stop.
0xb5356c28 e1a00000 mov r0, r0
sim> stop info all
Stop information:
stop 1 - 0x1: Disabled, counter = 2, My second stop
stop 123 - 0x7b: Enabled, counter = 3, My stop.
sim> stop enable 1
sim> cont
Simulator hit My second stop
0xb5356c44 e1a00000 mov r1, r1
sim> stop disable all
sim> con
In function test.
```

deps/v8/docs/becoming_v8_committer.md (40)

@@ -1,40 +0,0 @@
# Becoming a V8 committer
## What is a committer?
Technically, a committer is someone who has write access to the V8 Git repository. A committer can submit his or her own patches or patches from others.
This privilege is granted with some expectation of responsibility: committers are people who care about the V8 project and want to help meet its goals. A committer is not just someone who can make changes, but someone who has demonstrated his or her ability to collaborate with the team, get the most knowledgeable people to review code, contribute high-quality code, and follow through to fix issues (in code or tests).
A committer is a contributor to the V8 projects' success and a citizen helping the projects succeed. See V8CommittersResponsibility.
## How do I become a committer?
In a nutshell, contribute 20 non-trivial patches and get at least three different people to review them (you'll need three people to support you). Then ask someone to nominate you. You're demonstrating your:
* commitment to the project (20 good patches requires a lot of your valuable time),
* ability to collaborate with the team,
* understanding of how the team works (policies, processes for testing and code review, etc),
* understanding of the projects' code base and coding style, and
* ability to write good code (last but certainly not least)
A current committer nominates you by sending email to v8-committers@googlegroups.com containing:
* your first and last name
* your Google Code email address
* an explanation of why you should be a committer,
* embedded list of links to revisions (about top 10) containing your patches
Two other committers need to second your nomination. If no one objects in 5 working days (U.S.), you're a committer. If anyone objects or wants more information, the committers discuss and usually come to a consensus (within the 5 working days). If issues cannot be resolved, there's a vote among current committers.
Once you get approval from the existing committers, we'll send you instructions for write access to SVN or Git. You'll also be added to v8-committers@googlegroups.com.
In the worst case, this can drag out for two weeks. Keep writing patches! Even in the rare cases where a nomination fails, the objection is usually something easy to address like "more patches" or "not enough people are familiar with this person's work."
## Maintaining committer status
You don't really need to do much to maintain committer status: just keep being awesome and helping the V8 project!
In the unhappy event that a committer continues to disregard good citizenship (or actively disrupts the project), we may need to revoke that person's status. The process is the same as for nominating a new committer: someone suggests the revocation with a good reason, two people second the motion, and a vote may be called if consensus cannot be reached. I hope that's simple enough, and that we never have to test it in practice.
(Source: inspired by http://dev.chromium.org/getting-involved/become-a-committer )

deps/v8/docs/building_with_gyp.md (260)

@@ -1,260 +0,0 @@
**Build issues? File a bug at code.google.com/p/v8/issues or ask for help on v8-users@googlegroups.com.**
# Building V8
V8 is built with the help of [GYP](http://code.google.com/p/gyp/). GYP is a meta build system of sorts, as it generates build files for a number of other build systems. How you build therefore depends on what "back-end" build system and compiler you're using.
The instructions below assume that you already have a [checkout of V8](using_git.md) but haven't yet installed the build dependencies.
If you intend to develop on V8, i.e., send patches and work with changelists, you will need to install the dependencies as described [here](using_git.md).
## Prerequisite: Installing GYP
First, you need GYP itself. GYP is fetched together with the other dependencies by running:
```
gclient sync
```
## Building
### GCC + make
Requires GNU make 3.81 or later. Should work with any GCC >= 4.8 or any recent clang (3.5 highly recommended).
#### Build instructions
The top-level Makefile defines a number of targets for each target architecture (`ia32`, `x64`, `arm`, `arm64`) and mode (`debug`, `optdebug`, or `release`). So your basic command for building is:
```
make ia32.release
```
or analogously for the other architectures and modes. You can build both debug and release binaries with just one command:
```
make ia32
```
To automatically build in release mode for the host architecture:
```
make native
```
You can also build all architectures in a given mode at once:
```
make release
```
Or everything:
```
make
```
#### Optional parameters
* `-j` specifies the number of parallel build processes. Set it (roughly) to the number of CPU cores your machine has. The GYP/make based V8 build also supports distcc, so you can compile with `-j100` or so, provided you have enough machines around.
* `OUTDIR=foo` specifies where the compiled binaries go. It defaults to `./out/`. In this directory, a subdirectory will be created for each architecture and mode. You will find the d8 shell's binary in `foo/ia32.release/d8`, for example.
* `library=shared` or `component=shared_library` (the two are completely equivalent) builds V8 as a shared library (`libv8.so`).
* `soname_version=1.2.3` is only relevant for shared library builds and configures the SONAME of the library. Both the SONAME and the filename of the library will be `libv8.so.1.2.3` if you specify this. Due to a peculiarity in GYP, if you specify a custom SONAME, the library's path will no longer be encoded in the binaries, so you'll have to run d8 as follows:
```
LD_LIBRARY_PATH=out/ia32.release/lib.target out/ia32.release/d8
```
* `console=readline` enables readline support for the d8 shell. You need readline development headers for this (`libreadline-dev` on Ubuntu).
* `disassembler=on` enables the disassembler for release mode binaries (it's always enabled for debug binaries). This is useful if you want to inspect generated machine code.
* `snapshot=off` disables building with a heap snapshot. Compiling will be a little faster, but V8’s start up will be slightly slower.
* `gdbjit=on` enables GDB JIT support.
* `liveobjectlist=on` enables the Live Object List feature.
* `vfp3=off` is only relevant for ARM builds with snapshot and disables the use of VFP3 instructions in the snapshot.
* `debuggersupport=off` disables the javascript debugger.
* `werror=no` omits the -Werror flag. This is especially useful for not officially supported C++ compilers (e.g. newer versions of the GCC) so that compile warnings are ignored.
* `strictaliasing=off` passes the -fno-strict-aliasing flag to GCC. This may help to work around build failures on officially unsupported platforms and/or GCC versions.
* `regexp=interpreted` chooses the interpreted mode of the irregexp regular expression engine instead of the native code mode.
* `hardfp=on` creates "hardfp" binaries on ARM.
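
Several of these parameters can be combined in a single invocation. A hypothetical example using only the flags documented above (not an officially blessed configuration):

```
# Sketch: x64 release build as a shared library, with readline and the
# disassembler enabled, using 8 parallel jobs
make x64.release library=shared console=readline disassembler=on -j8
```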
### Ninja
To build d8:
```
export GYP_GENERATORS=ninja
build/gyp_v8
ninja -C out/Debug d8
```
Specify `out/Release` for a release build. I recommend setting up an alias so that you don't need to type out that build directory path.
If you want to build all targets, use `ninja -C out/Debug all`. It's faster to build only the target you're working on, like `d8` or `unittests`.
Note: You need to set `v8_target_arch` if you want a non-native build, i.e. either
```
export GYP_DEFINES="v8_target_arch=arm"
build/gyp_v8 ...
```
or
```
build/gyp_v8 -Dv8_target_arch=arm ...
```
#### Using goma (Googlers only)
To use goma you need to set the `use_goma` gyp define, either by passing it to `gyp_v8`, i.e.
```
build/gyp_v8 -Duse_goma=1
```
or by setting the environment variable `$GYP_DEFINES` appropriately:
```
export GYP_DEFINES="use_goma=1"
```
Note: You may need to also set `gomadir` to point to the directory where you installed goma, if it's not in the default location.
If you are using goma, you'll also want to bump the job limit, i.e.
```
ninja -j 100 -C out/Debug d8
```
### Cross-compiling
Similar to building with Clang, you can also use a cross-compiler. Just export your toolchain (`CXX`/`LINK` environment variables should be enough) and compile. For example:
```
export CXX=/path/to/cross-compile-g++
export LINK=/path/to/cross-compile-g++
make arm.release
```
### Xcode
From the root of your V8 checkout, run either of:
```
build/gyp_v8 -Dtarget_arch=ia32
build/gyp_v8 -Dtarget_arch=x64
```
This will generate Xcode project files in `build/` that you can then either open with Xcode or compile directly from the command line:
```
xcodebuild -project build/all.xcodeproj -configuration Release
xcodebuild -project build/all.xcodeproj
```
Note: If you have configured your `GYP_GENERATORS` environment variable, either unset it, or set it to `xcode` for this to work.
#### Custom build settings
You can export the `GYP_DEFINES` environment variable in your shell to configure custom build options. The syntax is `GYP_DEFINES="-Dvariable1=value1 -Dvariable2=value2"` and so on for as many variables as you wish. Possibly interesting options include:
* `-Dcomponent=shared_library` (see `library=shared` in the [GCC + make](#Optional_parameters.md) section above)
* `-Dconsole=readline` (see `console=readline`)
* `-Dv8_enable_disassembler=1` (see `disassembler=on`)
* `-Dv8_use_snapshot='false'` (see `snapshot=off`)
* `-Dv8_enable_gdbjit=1` (see `gdbjit=on`)
* `-Dv8_use_liveobjectlist=true` (see `liveobjectlist=on`)
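
As a concrete (hypothetical) example combining two of the options above before regenerating the project files:

```
# Sketch: shared-library component build with the disassembler enabled
export GYP_DEFINES="-Dcomponent=shared_library -Dv8_enable_disassembler=1"
build/gyp_v8 -Dtarget_arch=x64
```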
### Visual Studio
You need Visual Studio 2013; older versions might still work at the moment, but this will probably change soon because we intend to use C++11 features.
#### Prerequisites
After you have created a [checkout of V8](using_git.md), all dependencies will already be installed.
If you are getting errors during the build mentioning that 'python' could not be found, add 'python.exe' to your PATH.
If you have Visual Studio 2013 and 2015 installed side-by-side, set the environment variable GYP\_MSVS\_VERSION to '2013' so that the right project files are created.
#### Building
* If you use the command prompt:
1. Generate project files:
```
python build\gyp_v8
```
Specify the path to `python.exe` if you don't have it in your PATH.
Append `-Dtarget_arch=x64` if you want to build 64bit binaries. If you switch between ia32 and x64 targets, you may have to manually delete the generated .vcproj/.sln files before regenerating them.
Example:
```
third_party/python_26/python.exe build\gyp_v8 -Dtarget_arch=x64
```
1. Build:
Either open `build\All.sln` in Visual Studio, or compile on the command line as follows (adapt the path as necessary, or simply put `devenv.com` in your PATH):
```
"c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\IDE\devenv.com" /build Release build\All.sln
```
Replace `Release` with `Debug` to build in Debug mode.
The built binaries will be in build\Release\ or build\Debug\.
* If you use cygwin, the workflow is the same, but the syntax is slightly different:
1. Generate project files:
```
build/gyp_v8
```
This will spit out a bunch of warnings about missing input files, but it seems to be OK to ignore them. (If you have time to figure this out, we'd happily accept a patch that makes the warnings go away!)
1. Build:
```
/cygdrive/c/Program\ Files\ (x86)/Microsoft\ Visual\ Studio\ 9.0/Common7/IDE/devenv.com /build Release build/all.sln
```
#### Custom build settings
See the "custom build settings" section for [Xcode](#Xcode) above.
#### Running tests
You can abuse the test driver's --buildbot flag to make it find the executables where MSVC puts them:
```
python tools/run-tests.py --buildbot --outdir build --arch ia32 --mode Release
```
### MinGW
Building on MinGW is not officially supported, but it is possible. You even have two options:
#### Option 1: With Cygwin Installed
Requirements:
* MinGW
* Cygwin, including Python
* Python from www.python.org _(yes, you need two Python installations!)_
Building:
1. Open a MinGW shell
1. `export PATH=$PATH:/c/cygwin/bin` _(or wherever you installed Cygwin)_
1. `make ia32.release -j8`
Running tests:
1. Open a MinGW shell
1. `export PATH=/c/Python27:$PATH` _(or wherever you installed Python)_
1. `make ia32.release.check -j8`
#### Option 2: Without Cygwin, just MinGW
Requirements:
* MinGW
* Python from www.python.org
Building and testing:
1. Open a MinGW shell
1. `tools/mingw-generate-makefiles.sh` _(re-run this any time a `*.gyp*` file changes, such as after updating your checkout)_
1. `make ia32.release` _(unfortunately -jX doesn't seem to work here)_
1. `make ia32.release.check -j8`
# Final Note
<font color='darkred'><b>If you have problems or questions, please file bugs at code.google.com/p/v8/issues or send mail to v8-users@googlegroups.com. Comments on this page are likely to go unnoticed and unanswered.</b></font>

32
deps/v8/docs/contributing.md

@ -1,32 +0,0 @@
Here you will find information that you'll need to be able to contribute to V8. Be sure to read the whole thing before sending us a contribution, including the small print at the end.
## Before you contribute
Before you start working on a larger contribution to V8 you should get in touch with us first through the V8 [contributor mailing list](http://groups.google.com/group/v8-dev) so we can help out and possibly guide you; coordinating up front makes it much easier to avoid frustration later on.
## Getting the code
See [UsingGit](using_git.md).
## Submitting code
The source code of V8 follows the [Google C++ Style Guide](http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml), so you should familiarize yourself with those guidelines. Before submitting code you must pass all our [tests](http://code.google.com/p/v8-wiki/wiki/Testing) and successfully run the presubmit checks:
> `tools/presubmit.py`
The presubmit script uses a linter from Google, `cpplint.py`. External contributors can get this from [here](http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py) and place it in their path.
All submissions, including submissions by project members, require review. We use the same code-review tools and process as the chromium project. In order to submit a patch, you need to get the [depot\_tools](http://dev.chromium.org/developers/how-tos/install-depot-tools) and follow these instructions on [requesting a review](http://dev.chromium.org/developers/contributing-code) (using your V8 workspace instead of a chromium workspace).
### Look out for breakage or regressions
Before submitting your code please check the [buildbot console](http://build.chromium.org/p/client.v8/console) to see that the columns are mostly green before checking in your changes. Otherwise you will not know if your changes break the build or not. When your change is committed watch the [buildbot console](http://build.chromium.org/p/client.v8/console) until the bots turn green after your change.
## The small print
Before we can use your code you have to sign the [Google Individual Contributor License Agreement](http://code.google.com/legal/individual-cla-v1.0.html), which you can do online. This is mainly because you own the copyright to your changes, even after your contribution becomes part of our codebase, so we need your permission to use and distribute your code. We also need to be sure of various other things, for instance that you'll tell us if you know that your code infringes on other people's patents. You don't have to do this until after you've submitted your code for review and a member has approved it, but you will have to do it before we can put your code into our codebase.
Contributions made by corporations are covered by a different agreement than the one above, the [Software Grant and Corporate Contributor License Agreement](http://code.google.com/legal/corporate-cla-v1.0.html).
Sign them online [here](https://cla.developers.google.com/)

151
deps/v8/docs/cross_compiling_for_arm.md

@ -1,151 +0,0 @@
<font color='darkred'><b><h2>Building V8 with SCons is no longer supported. See <a href='https://code.google.com/p/v8-wiki/wiki/BuildingWithGYP'>BuildingWithGYP</a>.</h2></b></font>
---
# Using Sourcery G++ Lite
The Sourcery G++ Lite cross compiler suite is a free version of Sourcery G++ from [CodeSourcery](http://www.codesourcery.com). There is a page for the [GNU Toolchain for ARM Processors](http://www.codesourcery.com/sgpp/lite/arm). Determine the version you need for your host/target combination.
The following instructions use [2009q1-203 for ARM GNU/Linux](http://www.codesourcery.com/sgpp/lite/arm/portal/release858); if you use a different version, please change the URLs and `TOOL_PREFIX` below accordingly.
## Installing on host and target
The simplest way of setting this up is to install the full Sourcery G++ Lite package on both the host and target at the same location. This will ensure that all the libraries required are available on both sides. If you want to use the default libraries on the host there is no need to install anything on the target.
The following script will install in `/opt/codesourcery`:
```
#!/bin/sh
sudo mkdir /opt/codesourcery
cd /opt/codesourcery
sudo chown $USERNAME .
chmod g+ws .
umask 2
wget http://www.codesourcery.com/sgpp/lite/arm/portal/package4571/public/arm-none-linux-gnueabi/arm-2009q1-203-arm-none-linux-gnueabi-i686-pc-linux-gnu.tar.bz2
tar -xvf arm-2009q1-203-arm-none-linux-gnueabi-i686-pc-linux-gnu.tar.bz2
```
## Building using scons without snapshot
The simplest way to build is without snapshot, as that does not involve using the simulator to generate the snapshot. The following script will build the sample shell without snapshot for ARM v7.
```
#!/bin/sh
export TOOL_PREFIX=/opt/codesourcery/arm-2009q1/bin/arm-none-linux-gnueabi
export CXX=$TOOL_PREFIX-g++
export AR=$TOOL_PREFIX-ar
export RANLIB=$TOOL_PREFIX-ranlib
export CC=$TOOL_PREFIX-gcc
export LD=$TOOL_PREFIX-ld
export CCFLAGS="-march=armv7-a -mtune=cortex-a8 -mfpu=vfp"
export ARM_TARGET_LIB=/opt/codesourcery/arm-2009q1/arm-none-linux-gnueabi/libc
scons wordsize=32 snapshot=off arch=arm sample=shell
```
If the processor is not Cortex A8 or does not have VFP enabled the `-mtune=cortex-a8` and `-mfpu=vfp` part of `CCFLAGS` needs to be changed accordingly. By default the V8 SCons build adds `-mfloat-abi=softfp`.
If you are using the default libraries on the target, just leave out the setting of `ARM_TARGET_LIB`; if the target libraries are in a different location, `ARM_TARGET_LIB` needs to be adjusted accordingly.
The default for Sourcery G++ Lite is ARM v5te with software floating point emulation, so if testing building for ARM v5te the setting of `CCFLAGS` and `ARM_TARGET_LIB` should be changed to:
```
CCFLAGS=""
ARM_TARGET_LIB=/opt/codesourcery/arm-2009q1/arm-none-linux-gnueabi/libc
scons armeabi=soft ...
```
Relying on defaults in the tool chain might lead to surprises, so for ARM v5te with software floating point emulation the following is more explicit:
```
CCFLAGS="-march=armv5te"
ARM_TARGET_LIB=/opt/codesourcery/arm-2009q1/arm-none-linux-gnueabi/libc
scons armeabi=soft ...
```
If the target has a VFP unit use the following:
```
CCFLAGS="-mfpu=vfpv3"
ARM_TARGET_LIB=/opt/codesourcery/arm-2009q1/arm-none-linux-gnueabi/libc
```
To allow G++ to use Thumb2 instructions and the VFP unit when compiling the C/C++ code use:
```
CCFLAGS="-mthumb -mfpu=vfpv3"
ARM_TARGET_LIB=/opt/codesourcery/arm-2009q1/arm-none-linux-gnueabi/libc/thumb2
```
_Note:_ V8 will not use Thumb2 instructions in its generated code; it always uses the full ARM instruction set.
For other ARM versions please check the Sourcery G++ Lite documentation.
As mentioned above, the default for the Sourcery G++ Lite version used here is ARM v5te with software floating point emulation. However, beware that this default might change between versions and that there are no unique defaults for ARM tool chains in general, so always passing `-march` and possibly `-mfpu` is recommended. Passing `-mfloat-abi` is not required as this is controlled by the SCons option `armeabi`.
## Building using scons with snapshot
When building with snapshot, the simulator is used to generate the snapshot on the host, and the build for the target then uses that snapshot. The following script will accomplish that (using both Thumb2 and VFP instructions):
```
#!/bin/sh
V8DIR=..
cd host
scons -Y$V8DIR simulator=arm snapshot=on
mv obj/release/snapshot.cc $V8DIR/src/snapshot.cc
cd ..
export TOOL_PREFIX=/opt/codesourcery/arm-2010.09-103/bin/arm-none-linux-gnueabi
export CXX=$TOOL_PREFIX-g++
export AR=$TOOL_PREFIX-ar
export RANLIB=$TOOL_PREFIX-ranlib
export CC=$TOOL_PREFIX-gcc
export LD=$TOOL_PREFIX-ld
export CCFLAGS="-mthumb -march=armv7-a -mfpu=vfpv3"
export ARM_TARGET_LIB=/opt/codesourcery/arm-2010.09-103/arm-none-linux-gnueabi/libc/thumb2
cd target
scons -Y$V8DIR wordsize=32 snapshot=nobuild arch=arm sample=shell
rm $V8DIR/src/snapshot.cc
cd ..
```
This script requires the two subdirectories `host` and `target`. V8 is first built for the host with the ARM simulator, which supports running ARM code on the host. This build is used to generate the snapshot file, which is then used for the actual cross-compilation of V8.
## Building for target which supports unaligned access
The default when building V8 for an ARM target (either cross compiling or compiling on an ARM machine) is to disable unaligned memory access. However, in some situations (most notably the handling of regular expressions) performance will be better if unaligned memory access is used on processors which support it. To enable unaligned memory access set `unalignedaccesses` to `on` when building:
```
scons unalignedaccesses=on ...
```
When running in the simulator the default is to enable unaligned memory access, so to test in the simulator with unaligned memory access disabled set `unalignedaccesses` to `off` when building:
```
scons unalignedaccesses=off simulator=arm ...
```
## Using V8 with hardfp calling convention
By default V8 uses the softfp calling convention when calling C functions from generated code. However it is possible to use hardfp as well. To enable this set `armeabi` to `hardfp` when building:
```
scons armeabi=hardfp ...
```
Passing `armeabi=hardfp` to SCons will automatically set the compiler flag `-mfloat-abi=hardfp`. If using snapshots remember to pass `armeabi=hardfp` when building V8 on the host for generating the snapshot as well.

101
deps/v8/docs/d8_on_android.md

@ -1,101 +0,0 @@
# Prerequisites
* a Linux/Mac workstation
* v8 r12178 (on Google Code) or later
* an Android emulator or device with matching USB cable
* make sure [building with GYP](http://code.google.com/p/v8-wiki/wiki/BuildingWithGYP) works
# Get the code
* Use the instructions from https://code.google.com/p/v8-wiki/wiki/UsingGit to get the code
* You then need to add the Android dependencies:
```
v8$ echo "target_os = ['android']" >> ../.gclient && gclient sync --nohooks
```
* The sync will take a while the first time as it downloads the Android NDK to v8/third\_party
* If you want to use a different NDK, you need to set the gyp variable android\_ndk\_root
# Get the Android SDK
* tested version: `r15`
* download the SDK from http://developer.android.com/sdk/index.html
* extract it
* install the "Platform tools" using the SDK manager that you can start by running `tools/android`
* now you have a `platform-tools/adb` binary which will be used later; put it in your `PATH` or remember where it is
# Set up your device
* Enable USB debugging (Gingerbread: Settings > Applications > Development > USB debugging; Ice Cream Sandwich: Settings > Developer Options > USB debugging)
* connect your device to your workstation
* make sure `adb devices` shows it; you may have to edit `udev` rules to give yourself proper permissions
* run `adb shell` to get an ssh-like shell on the device. In that shell, do:
```
cd /data/local/tmp
mkdir v8
cd v8
```
# Push stuff onto the device
* make sure your device is connected
* from your workstation's shell:
```
adb push /file/you/want/to/push /data/local/tmp/v8/
```
# Compile V8 for Android
Currently two architectures (`android_arm` and `android_ia32`) are supported, each in `debug` or `release` mode. The following steps work equally well for both ARM and ia32, on either the emulator or real devices.
* compile:
```
make android_arm.release -j16
```
* push the resulting binary to the device:
```
adb push out/android_arm.release/d8 /data/local/tmp/v8/d8
```
* the most comfortable way to run it is from your workstation's shell as a one-off command (rather than starting an interactive shell session on the device); that way you can use pipes or whatever to process the output as necessary:
```
adb shell /data/local/tmp/v8/d8 <parameters>
```
* warning: when you cancel such an "adb shell whatever" command using Ctrl+C, the process on the phone will sometimes keep running.
* Alternatively, use the `.check` suffix to automatically push test binaries and test cases onto the device and run them.
```
make android_arm.release.check
```
# Profile
* compile a binary, push it to the device, keep a copy of it on the host
```
make android_arm.release -j16
adb push out/android_arm.release/d8 /data/local/tmp/v8/d8-version.under.test
cp out/android_arm.release/d8 ./d8-version.under.test
```
* get a profiling log and copy it to the host:
```
adb shell /data/local/tmp/v8/d8-version.under.test benchmark.js --prof
adb pull /data/local/tmp/v8/v8.log ./
```
* open `v8.log` in your favorite editor and edit the first line to match the full path of the `d8-version.under.test` binary on your workstation (instead of the `/data/local/tmp/v8/` path it had on the device)
* run the tick processor with the host's `d8` and an appropriate `nm` binary:
```
cp out/ia32.release/d8 ./d8 # only required once
tools/linux-tick-processor --nm=$ANDROID_NDK_ROOT/toolchain/bin/arm-linux-androideabi-nm
```
# Compile SpiderMonkey for Lollipop
```
cd firefox/js/src
autoconf2.13
./configure \
--target=arm-linux-androideabi \
--with-android-ndk=$ANDROID_NDK_ROOT \
--with-android-version=21 \
--without-intl-api \
--disable-tests \
--enable-android-libstdcxx \
--enable-pie
make
adb push -p js/src/shell/js /data/local/tmp/js
```

934
deps/v8/docs/debugger_protocol.md

@ -1,934 +0,0 @@
# Introduction
V8 has support for debugging the JavaScript code running in it. There are two APIs for this: a function-based API using JavaScript objects, and a message-based API using a JSON-based protocol. The function-based API can be used by an in-process debugger agent, whereas the message-based API can be used out of process as well.
**The message-based API is no longer maintained. Please ask on v8-users@googlegroups.com if you want to attach a debugger to the runtime.**
The debugger protocol is based on [JSON](http://www.json.org/). Each protocol packet is defined in terms of JSON and is transmitted as a string value. All packets have two basic elements: `seq` and `type`.
```
{ "seq" : <number>,
"type" : <type>,
...
}
```
The element `seq` holds the sequence number of the packet, and the element `type` is the type of the packet. The type is a string value with one of the following values: `"request"`, `"response"` or `"event"`.
A `"request"` packet has the following structure:
```
{ "seq" : <number>,
"type" : "request",
"command" : <command>
"arguments" : ...
}
```
A `"response"` packet has the following structure. If `success` is true `body` will contain the response data. If `success` is false `message` will contain an error message.
```
{ "seq" : <number>,
"type" : "response",
"request_seq" : <number>,
"command" : <command>
"body" : ...
"running" : <is the VM running after sending this response>
"success" : <boolean indicating success>
"message" : <if command failed this property contains an error message>
}
```
An `"event"` packet has the following structure:
```
{ "seq" : <number>,
"type" : "event",
"event" : <event name>
body : ...
}
```
# Request/response pairs
## Request `continue`
The request `continue` is a request from the debugger to start the VM running again. As part of the `continue` request the debugger can specify if it wants the VM to perform a single step action.
```
{ "seq" : <number>,
"type" : "request",
"command" : "continue",
"arguments" : { "stepaction" : <"in", "next" or "out">,
"stepcount" : <number of steps (default 1)>
}
}
```
In the response the property `running` will always be true as the VM will be running after executing the `continue` command. If a single step action is requested the VM will respond with a `break` event after running the step.
```
{ "seq" : <number>,
"type" : "response",
"request_seq" : <number>,
"command" : "continue",
"running" : true
"success" : true
}
```
Here are a couple of examples.
```
{"seq":117,"type":"request","command":"continue"}
{"seq":118,"type":"request","command":"continue","arguments":{"stepaction":"out"}}
{"seq":119,"type":"request","command":"continue","arguments":{"stepaction":"next","stepcount":5}}
```
## Request `evaluate`
The request `evaluate` is used to evaluate an expression. The body of the result is as described in response object serialization below.
```
{ "seq" : <number>,
"type" : "request",
"command" : "evaluate",
"arguments" : { "expression" : <expression to evaluate>,
"frame" : <number>,
"global" : <boolean>,
"disable_break" : <boolean>,
"additional_context" : [
{ "name" : <name1>, "handle" : <handle1> },
{ "name" : <name2>, "handle" : <handle2> },
...
]
}
}
```
Optional argument `additional_context` specifies handles that will be visible from the expression under corresponding names (see example below).
Response:
```
{ "seq" : <number>,
"type" : "response",
"request_seq" : <number>,
"command" : "evaluate",
"body" : ...
"running" : <is the VM running after sending this response>
"success" : true
}
```
Here are a couple of examples.
```
{"seq":117,"type":"request","command":"evaluate","arguments":{"expression":"1+2"}}
{"seq":118,"type":"request","command":"evaluate","arguments":{"expression":"a()","frame":3,"disable_break":false}}
{"seq":119,"type":"request","command":"evaluate","arguments":{"expression":"[o.a,o.b,o.c]","global":true,"disable_break":true}}
{"seq":120,"type":"request","command":"evaluate","arguments":{"expression":"obj.toString()", "additional_context": [{ "name":"obj","handle":25 }] }}
```
## Request `lookup`
The request `lookup` is used to look up objects based on their handle. The individual array elements of the body of the result are as described in response object serialization below.
```
{ "seq" : <number>,
"type" : "request",
"command" : "lookup",
"arguments" : { "handles" : <array of handles>,
"includeSource" : <boolean indicating whether the source will be included when script objects are returned>,
}
}
```
Response:
```
{ "seq" : <number>,
"type" : "response",
"request_seq" : <number>,
"command" : "lookup",
"body" : <array of serialized objects indexed using their handle>
"running" : <is the VM running after sending this response>
"success" : true
}
```
Here are a couple of examples.
```
{"seq":117,"type":"request","command":"lookup","arguments":{"handles":"[1]"}}
{"seq":118,"type":"request","command":"lookup","arguments":{"handles":"[7,12]"}}
```
## Request `backtrace`
The request `backtrace` returns a backtrace (or stacktrace) from the current execution state. When issuing a request a range of frames can be supplied. The top frame is frame number 0. If no frame range is supplied data for 10 frames will be returned.
```
{ "seq" : <number>,
"type" : "request",
"command" : "backtrace",
"arguments" : { "fromFrame" : <number>
"toFrame" : <number>
"bottom" : <boolean, set to true if the bottom of the stack is requested>
}
}
```
The response contains the frame data together with the actual frames returned and the total frame count.
```
{ "seq" : <number>,
"type" : "response",
"request_seq" : <number>,
"command" : "backtrace",
"body" : { "fromFrame" : <number>
"toFrame" : <number>
"totalFrames" : <number>
"frames" : <array of frames - see frame request for details>
}
"running" : <is the VM running after sending this response>
"success" : true
}
```
If there are no stack frames the result body only contains `totalFrames` with a value of `0`. When an exception event is generated due to compilation failures it is possible that there are no stack frames.
Here are a couple of examples.
```
{"seq":117,"type":"request","command":"backtrace"}
{"seq":118,"type":"request","command":"backtrace","arguments":{"toFrame":2}}
{"seq":119,"type":"request","command":"backtrace","arguments":{"fromFrame":0,"toFrame":9}}
```
## Request `frame`
The request `frame` selects a new selected frame and returns information for that frame. If no frame number is specified the selected frame is returned.
```
{ "seq" : <number>,
"type" : "request",
"command" : "frame",
"arguments" : { "number" : <frame number>
}
}
```
Response:
```
{ "seq" : <number>,
"type" : "response",
"request_seq" : <number>,
"command" : "frame",
"body" : { "index" : <frame number>,
"receiver" : <frame receiver>,
"func" : <function invoked>,
"script" : <script for the function>,
"constructCall" : <boolean indicating whether the function was called as constructor>,
"debuggerFrame" : <boolean indicating whether this is an internal debugger frame>,
"arguments" : [ { name: <name of the argument - missing of anonymous argument>,
value: <value of the argument>
},
... <the array contains all the arguments>
],
"locals" : [ { name: <name of the local variable>,
value: <value of the local variable>
},
... <the array contains all the locals>
],
"position" : <source position>,
"line" : <source line>,
"column" : <source column within the line>,
"sourceLineText" : <text for current source line>,
"scopes" : [ <array of scopes, see scope request below for format> ],
}
"running" : <is the VM running after sending this response>
"success" : true
}
```
Here are a couple of examples.
```
{"seq":117,"type":"request","command":"frame"}
{"seq":118,"type":"request","command":"frame","arguments":{"number":1}}
```
## Request `scope`
The request scope returns information on a given scope for a given frame. If no frame number is specified the selected frame is used.
```
{ "seq" : <number>,
"type" : "request",
"command" : "scope",
"arguments" : { "number" : <scope number>
"frameNumber" : <frame number, optional uses selected frame if missing>
}
}
```
Response:
```
{ "seq" : <number>,
"type" : "response",
"request_seq" : <number>,
"command" : "scope",
"body" : { "index" : <index of this scope in the scope chain. Index 0 is the top scope
and the global scope will always have the highest index for a
frame>,
"frameIndex" : <index of the frame>,
"type" : <type of the scope:
0: Global
1: Local
2: With
3: Closure
4: Catch >,
"object" : <the scope object defining the content of the scope.
For local and closure scopes this is transient objects,
which has a negative handle value>
}
"running" : <is the VM running after sending this response>
"success" : true
}
```
Here are a couple of examples.
```
{"seq":117,"type":"request","command":"scope"}
{"seq":118,"type":"request","command":"scope","arguments":{"frameNumber":1,"number":1}}
```
## Request `scopes`
The request scopes returns all the scopes for a given frame. If no frame number is specified the selected frame is used.
```
{ "seq" : <number>,
"type" : "request",
"command" : "scopes",
"arguments" : { "frameNumber" : <frame number, optional uses selected frame if missing>
}
}
```
Response:
```
{ "seq" : <number>,
"type" : "response",
"request_seq" : <number>,
"command" : "scopes",
"body" : { "fromScope" : <number of first scope in response>,
"toScope" : <number of last scope in response>,
"totalScopes" : <total number of scopes for this frame>,
"scopes" : [ <array of scopes, see scope request above for format> ],
}
"running" : <is the VM running after sending this response>
"success" : true
}
```
Here are a couple of examples.
```
{"seq":117,"type":"request","command":"scopes"}
{"seq":118,"type":"request","command":"scopes","arguments":{"frameNumber":1}}
```
## Request `scripts`
The request `scripts` retrieves active scripts from the VM. An active script is source code from which there are still live objects in the VM. This request will always force a full garbage collection in the VM.
```
{ "seq" : <number>,
"type" : "request",
"command" : "scripts",
"arguments" : { "types" : <types of scripts to retrieve
set bit 0 for native scripts
set bit 1 for extension scripts
set bit 2 for normal scripts
(default is 4 for normal scripts)>
"ids" : <array of id's of scripts to return. If this is not specified all scripts are requrned>
"includeSource" : <boolean indicating whether the source code should be included for the scripts returned>
"filter" : <string or number: filter string or script id.
If a number is specified, then only the script with the same number as its script id will be retrieved.
If a string is specified, then only scripts whose names contain the filter string will be retrieved.>
}
}
```
The response contains an array of the scripts in the VM. This information includes the relative location of the script within the containing resource.
```
{ "seq" : <number>,
"type" : "response",
"request_seq" : <number>,
"command" : "scripts",
"body" : [ { "name" : <name of the script>,
"id" : <id of the script>
"lineOffset" : <line offset within the containing resource>
"columnOffset" : <column offset within the containing resource>
"lineCount" : <number of lines in the script>
"data" : <optional data object added through the API>
"source" : <source of the script if includeSource was specified in the request>
"sourceStart" : <first 80 characters of the script if includeSource was not specified in the request>
"sourceLength" : <total length of the script in characters>
"scriptType" : <script type (see request for values)>
"compilationType" : < How was this script compiled:
0 if script was compiled through the API
1 if script was compiled through eval
>
"evalFromScript" : <if "compilationType" is 1 this is the script from where eval was called>
"evalFromLocation" : { line : < if "compilationType" is 1 this is the line in the script from where eval was called>
column : < if "compilationType" is 1 this is the column in the script from where eval was called>
]
"running" : <is the VM running after sending this response>
"success" : true
}
```
Here are a couple of examples.
```
{"seq":117,"type":"request","command":"scripts"}
{"seq":118,"type":"request","command":"scripts","arguments":{"types":7}}
```
## Request `source`
The request `source` retrieves source code for a frame. It returns a number of source lines running from the `fromLine` to but not including the `toLine`, that is the interval is open on the "to" end. For example, requesting source from line 2 to 4 returns two lines (2 and 3). Also note that the line numbers are 0 based: the first line is line 0.
```
{ "seq" : <number>,
"type" : "request",
"command" : "source",
"arguments" : { "frame" : <frame number (default selected frame)>
"fromLine" : <from line within the source default is line 0>
"toLine" : <to line within the source this line is not included in
the result default is the number of lines in the script>
}
}
```
Response:
```
{ "seq" : <number>,
"type" : "response",
"request_seq" : <number>,
"command" : "source",
"body" : { "source" : <the source code>
"fromLine" : <actual from line within the script>
"toLine" : <actual to line within the script this line is not included in the source>
"fromPosition" : <actual start position within the script>
"toPosition" : <actual end position within the script>
"totalLines" : <total lines in the script>
}
"running" : <is the VM running after sending this response>
"success" : true
}
```
Here are a couple of examples.
```
{"seq":117,"type":"request","command":"source","arguments":{"fromLine":10,"toLine":20}}
{"seq":118,"type":"request","command":"source","arguments":{"frame":2,"fromLine":10,"toLine":20}}
```
## Request `setbreakpoint`
The request `setbreakpoint` creates a new break point. This request can be used to set both function and script break points. A function break point sets a break point in an existing function whereas a script break point sets a break point in a named script. A script break point can be set even if the named script is not found.
```
{ "seq" : <number>,
"type" : "request",
"command" : "setbreakpoint",
"arguments" : { "type" : <"function" or "script" or "scriptId" or "scriptRegExp">
"target" : <function expression or script identification>
"line" : <line in script or function>
"column" : <character position within the line>
"enabled" : <initial enabled state. True or false, default is true>
"condition" : <string with break point condition>
"ignoreCount" : <number specifying the number of break point hits to ignore, default value is 0>
}
}
```
The result of the `setbreakpoint` request is a response with the number of the newly created break point. This break point number is used in the `changebreakpoint` and `clearbreakpoint` requests.
```
{ "seq" : <number>,
"type" : "response",
"request_seq" : <number>,
"command" : "setbreakpoint",
"body" : { "type" : <"function" or "script">
"breakpoint" : <break point number of the new break point>
}
"running" : <is the VM running after sending this response>
"success" : true
}
```
Here are a couple of examples.
```
{"seq":117,"type":"request","command":"setbreakpoint","arguments":{"type":"function,"target":"f"}}
{"seq":118,"type":"request","command":"setbreakpoint","arguments":{type:"script","target":"test.js","line":100}}
{"seq":119,"type":"request","command":"setbreakpoint","arguments":{"type":"function,"target":"f","condition":"i > 7"}}
```
## Request `changebreakpoint`
The request `changebreakpoint` changes the status of a break point.
```
{ "seq" : <number>,
"type" : "request",
"command" : "changebreakpoint",
"arguments" : { "breakpoint" : <number of the break point to clear>
"enabled" : <initial enabled state. True or false, default is true>
"condition" : <string with break point condition>
"ignoreCount" : <number specifying the number of break point hits }
}
```
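Here is a hypothetical example following the schema above (the breakpoint number and settings are illustrative):
```
{"seq":117,"type":"request","command":"changebreakpoint","arguments":{"breakpoint":1,"enabled":false}}
```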
## Request `clearbreakpoint`
The request `clearbreakpoint` clears a break point.
```
{ "seq" : <number>,
"type" : "request",
"command" : "clearbreakpoint",
"arguments" : { "breakpoint" : <number of the break point to clear>
}
}
```
Response:
```
{ "seq" : <number>,
"type" : "response",
"request_seq" : <number>,
"command" : "clearbreakpoint",
"body" : { "type" : <"function" or "script">
"breakpoint" : <number of the break point cleared>
}
"running" : <is the VM running after sending this response>
"success" : true
}
```
Here are a couple of examples.
```
{"seq":117,"type":"request","command":"clearbreakpoint","arguments":{"type":"function,"breakpoint":1}}
{"seq":118,"type":"request","command":"clearbreakpoint","arguments":{"type":"script","breakpoint":2}}
```
## Request `setexceptionbreak`
The request `setexceptionbreak` is a request to enable/disable breaks on all / uncaught exceptions. If the "enabled" argument is not specified, the debuggee will toggle the state of the specified break type.
```
{ "seq" : <number>,
"type" : "request",
"command" : "setexceptionbreak",
"arguments" : { "type" : <string: "all", or "uncaught">,
"enabled" : <optional bool: enables the break type if true>
}
}
```
In response, the break on exception property of the debuggee will be set accordingly, and the following response message will be dispatched to the debugger.
```
{ "seq" : <number>,
"type" : "response",
"request_seq" : <number>,
"command" : "setexceptionbreak",
"body" : { "type" : <string: "all" or "uncaught" corresponding to the request.>,
"enabled" : <bool: true if the break type is currently enabled as a result of the request>
}
"running" : true
"success" : true
}
```
Here are a few examples.
```
{"seq":117,"type":"request","command":"setexceptionbreak","arguments":{"type":"all"}}
{"seq":118,"type":"request","command":" setexceptionbreak","arguments":{"type":"all",”enabled”:false}}
{"seq":119,"type":"request","command":" setexceptionbreak","arguments":{"type":"uncaught","enabled":true}}
```
## Request `v8flags`
The request v8flags is a request to apply the specified v8 flags (analogous to how they are specified on the command line).
```
{ "seq" : <number>,
"type" : "request",
"command" : "v8flags",
"arguments" : { "flags" : <string: a sequence of v8 flags just like those used on the command line>
}
}
```
In response, the specified flags will be applied in the debuggee if they are legal flags. Their effects vary depending on the implementation of the flag.
```
{ "seq" : <number>,
"type" : "response",
"request_seq" : <number>,
"command" : "v8flags",
"running" : true
"success" : true
}
```
Here are a few examples.
```
{"seq":117,"type":"request","command":"v8flags","arguments":{"flags":"--trace_gc —always_compact"}}
{"seq":118,"type":"request","command":" v8flags","arguments":{"flags":"--notrace_gc"}}
```
## Request `version`
The request `version` reports the version of the running V8.
```
{ "seq" : <number>,
"type" : "request",
"command" : "version",
}
```
Response:
```
{ "seq" : <number>,
"type" : "response",
"request_seq" : <number>,
"type" : "request",
"body" : { "V8Version": <string, version of V8>
}
"running" : <is the VM running after sending this response>
"success" : true
}
```
Here is an example.
```
{"seq":1,"type":"request","command":"version"}
{"seq":134,"request_seq":1,"type":"response","command":"version","success":true,"body":{"V8Version":"1.3.19 (candidate)"},"refs":[],"running":false}
```
## Request `disconnect`
The request `disconnect` is used to detach the remote debugger from the debuggee. This will trigger the debuggee to disable all active breakpoints and resume execution if the debuggee was previously stopped at a break.
```
{ "seq" : <number>,
"type" : "request",
"command" : "disconnect",
}
```
The only response for the `disconnect` request is the response to a connect request if the debugger is still able to get a response before the debuggee successfully disconnects.
Here is an example:
```
{"seq":117,"type":"request","command":"disconnect"}
```
## Request `gc`
The request `gc` is a request to run the garbage collector in the debuggee.
```
{ "seq" : <number>,
"type" : "request",
"command" : "gc",
"arguments" : { "type" : <string: "all">,
}
}
```
In response, the debuggee will run the specified GC type and send the following response message:
```
{ "seq" : <number>,
"type" : "response",
"request_seq" : <number>,
"command" : "gc",
"body" : { "before" : <int: total heap usage in bytes before the GC>,
"after" : <int: total heap usage in bytes after the GC>
}
"running" : true
"success" : true
}
```
Here is an example.
```
{"seq":117,"type":"request","command":"gc","arguments":{"type":"all"}}
```
## Request `listbreakpoints`
The request `listbreakpoints` is used to get information on breakpoints that may have been set by the debugger.
```
{ "seq" : <number>,
"type" : "request",
"command" : "listbreakpoints",
}
```
Response:
```
{ "seq" : <number>,
"type" : "response",
"request_seq" : <number>,
"command" : "listbreakpoints",
"body" : { "breakpoints": [ { "type" : <string: "scriptId" or "scriptName".>,
"script_id" : <int: script id. Only defined if type is scriptId.>,
"script_name" : <string: script name. Only defined if type is scriptName.>,
"number" : <int: breakpoint number. Starts from 1.>,
"line" : <int: line number of this breakpoint. Starts from 0.>,
"column" : <int: column number of this breakpoint. Starts from 0.>,
"groupId" : <int: group id of this breakpoint.>,
"hit_count" : <int: number of times this breakpoint has been hit. Starts from 0.>,
"active" : <bool: true if this breakpoint is enabled.>,
"ignoreCount" : <int: remaining number of times to ignore breakpoint. Starts from 0.>,
"actual_locations" : <actual locations of the breakpoint.>,
}
],
"breakOnExceptions" : <true if break on all exceptions is enabled>,
"breakOnUncaughtExceptions" : <true if break on uncaught exceptions is enabled>
}
"running" : <is the VM running after sending this response>
"success" : true
}
```
Here is an example:
```
{"seq":117,"type":"request","command":"listbreakpoints"}
```
## Request `setvariablevalue`
This request sets the value of a variable in the specified scope.
Request:
```
{ "seq" : <number>,
"type" : "request",
"command" : "setvariablevalue",
"arguments : { "name" : <string: variable name>,
"scope" : { "number" : <scope number>
"frameNumber" : <frame number, optional uses selected frame if missing>
}
}
}
```
Response:
```
{ "seq" : <number>,
"type" : "response",
"request_seq" : <number>,
"type" : "request",
"body" : { "newValue": <object: mirror object of the new value> }
"running" : <is the VM running after sending this response>
"success" : true
}
```
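Here is a hypothetical request following the schema above (the variable name and numbers are illustrative):
```
{"seq":117,"type":"request","command":"setvariablevalue","arguments":{"name":"x","scope":{"number":0,"frameNumber":1}}}
```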
# Events
## Event `break`
The event `break` indicates that execution in the VM has stopped due to a break condition. This can be caused by an unconditional break request, by a break point previously set, by a stepping action having completed, or by executing the `debugger` statement in JavaScript.
```
{ "seq" : <number>,
"type" : "event",
"event" : "break",
"body" : { "invocationText" : <text representation of the stack frame>,
"sourceLine" : <source line where execution is stopped>,
"sourceColumn" : <column within the source line where execution is stopped>,
"sourceLineText" : <text for the source line where execution is stopped>,
"script" : { name : <resource name of the origin of the script>
lineOffset : <line offset within the origin of the script>
columnOffset : <column offset within the origin of the script>
lineCount : <number of lines in the script>
"breakpoints" : <array of break point numbers hit if any>
}
}
```
Here are a couple of examples.
```
{"seq":117,"type":"event","event":"break","body":{"functionName":"f","sourceLine":1,"sourceColumn":14}}
{"seq":117,"type":"event","event":"break","body":{"functionName":"g","scriptData":"test.js","sourceLine":12,"sourceColumn":22,"breakpoints":[1]}}
{"seq":117,"type":"event","event":"break","body":{"functionName":"h","sourceLine":100,"sourceColumn":12,"breakpoints":[3,5,7]}}
```
## Event `exception`
The event `exception` indicates that execution in the VM has stopped due to an exception.
```
{ "seq" : <number>,
"type" : "event",
"event" : "exception",
"body" : { "uncaught" : <boolean>,
"exception" : ...
"sourceLine" : <source line where the exception was thrown>,
"sourceColumn" : <column within the source line from where the exception was thrown>,
"sourceLineText" : <text for the source line from where the exception was thrown>,
"script" : { "name" : <name of script>
"lineOffset" : <number>
"columnOffset" : <number>
"lineCount" : <number>
}
}
}
```
# Response object serialization
Some responses contain objects as part of the body, e.g. the response to the evaluate request contains the result of the expression evaluated.
All objects exposed through the debugger are assigned an ID called a handle. This handle is serialized and can be used to identify objects. A handle has a certain lifetime after which it will no longer refer to the same object. Currently the lifetime of handles matches the processing of a debug event; for each debug event handles are recycled.
An object can be serialized either as a reference to a given handle or as a value representation containing the object content.
An object serialized as a reference follows this format, where `<handle>` is an integer.
```
{"ref":<handle>}
```
Objects serialized as values all contain the handle and the type of the object.
```
{ "handle" : <handle>,
"type" : <"undefined", "null", "boolean", "number", "string", "object", "function" or "frame">
}
```
In some situations special transient objects are created by the debugger. These objects are not really visible from JavaScript, but are created to materialize something inside the VM as an object visible to the debugger. One example of this is the local scope object returned from the `scope` and `scopes` requests. Transient objects are identified by having a negative handle. A transient object can never be retrieved using the `lookup` request, so all transient objects referenced will be in the `refs` part of the response. The lifetime of transient objects is basically the request they are involved in.
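For instance, a reference to such a transient object simply carries a negative handle (the value is illustrative):
```
{"ref":-1}
```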
For the primitive JavaScript types `undefined` and `null` the type describes the value fully.
```
{"handle":<handle>,"type":"undefined"}
```
```
{"handle":<handle>,"type":"null"}
```
For the rest of the primitive types `boolean`, `number` and `string` the value is part of the result.
```
{ "handle":<handle>,
"type" : <"boolean", "number" or "string">
"value" : <JSON encoded value>
}
```
Boolean value.
```
{"handle":7,"type":"boolean","value":true}
```
Number value.
```
{"handle":8,"type":"number","value":42}
```
String value.
```
{"handle":9,"type":"string","value":"a string"}
```
An object is encoded with additional information.
```
{ "handle" : <handle>,
"type" : "object",
"className" : <Class name, ECMA-262 property [[Class]]>,
"constructorFunction" : {"ref":<handle>},
"protoObject" : {"ref":<handle>},
"prototypeObject" : {"ref":<handle>},
"properties" : [ {"name" : <name>,
"ref" : <handle>
},
...
]
}
```
The difference between the `protoObject` and the `prototypeObject` is that the `protoObject` contains a reference to the actual prototype object (for which accessibility is not defined in ECMA-262, but in V8 it is accessible using the `__proto__` property) whereas the `prototypeObject` is the value of the `prototype` property.
Here is an example.
```
{"handle":3,"type":"object","className":"Object","constructorFunction":{"ref":4},"protoObject":{"ref":5},"prototypeObject":{"ref":6},"properties":[{"name":"a","ref:7},{"name":"b","ref":8}]}
```
A function is encoded as an object but with additional information in the properties `name`, `inferredName`, `source` and `script`.
```
{ "handle" : <handle>,
"type" : "function",
"className" : "Function",
"constructorFunction" : {"ref":<handle>},
"protoObject" : {"ref":<handle>},
"prototypeObject" : {"ref":<handle>},
"name" : <function name>,
"inferredName" : <inferred function name for anonymous functions>
"source" : <function source>,
"script" : <reference to function script>,
"scriptId" : <id of function script>,
"position" : <function begin position in script>,
"line" : <function begin source line in script>,
"column" : <function begin source column in script>,
"properties" : [ {"name" : <name>,
"ref" : <handle>
},
...
]
}
```

63
deps/v8/docs/gdb_jit_interface.md

@ -1,63 +0,0 @@
# Prerequisites
* V8 3.0.9 or newer
* GDB 7.0 or newer
* Linux OS
* CPU with Intel-compatible architecture (ia32 or x64)
# Introduction
GDB JIT interface integration allows V8 to provide GDB with symbol and debugging information for native code emitted at runtime.
When the GDB JIT interface is disabled a typical backtrace in GDB will contain frames marked with ??. These frames correspond to dynamically generated code:
```
#8 0x08281674 in v8::internal::Runtime_SetProperty (args=...) at src/runtime.cc:3758
#9 0xf5cae28e in ?? ()
#10 0xf5cc3a0a in ?? ()
#11 0xf5cc38f4 in ?? ()
#12 0xf5cbef19 in ?? ()
#13 0xf5cb09a2 in ?? ()
#14 0x0809e0a5 in v8::internal::Invoke (construct=false, func=..., receiver=..., argc=0, args=0x0,
has_pending_exception=0xffffd46f) at src/execution.cc:97
```
However, enabling GDB JIT integration allows GDB to produce a more informative stack trace:
```
#6 0x082857fc in v8::internal::Runtime_SetProperty (args=...) at src/runtime.cc:3758
#7 0xf5cae28e in ?? ()
#8 0xf5cc3a0a in loop () at test.js:6
#9 0xf5cc38f4 in test.js () at test.js:13
#10 0xf5cbef19 in ?? ()
#11 0xf5cb09a2 in ?? ()
#12 0x0809e1f9 in v8::internal::Invoke (construct=false, func=..., receiver=..., argc=0, args=0x0,
has_pending_exception=0xffffd44f) at src/execution.cc:97
```
Frames still unknown to GDB correspond to native code without source information. See [GDBJITInterface#KnownLimitations](GDBJITInterface#KnownLimitations.md) for more details.
GDB JIT interface is specified in the GDB documentation: http://sourceware.org/gdb/current/onlinedocs/gdb/JIT-Interface.html
# Enabling GDB JIT integration
GDB JIT is currently excluded from the compilation by default and disabled at runtime. To enable it:
1. Build V8 library with `ENABLE_GDB_JIT_INTERFACE` defined. If you are using scons to build V8 run it with `gdbjit=on`.
1. Pass `--gdbjit` flag when starting V8.
To check that you have enabled GDB JIT integration correctly, try setting a breakpoint on `__jit_debug_register_code`. This function will be invoked to notify GDB about new code objects.
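For example, a minimal GDB session sketch for this check (the binary path and script name are illustrative):
```
$ gdb out/ia32.release/d8
(gdb) break __jit_debug_register_code
(gdb) run --gdbjit test.js
```
If the breakpoint is hit, the integration is active.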
# Known Limitations
* GDB side of JIT Interface currently (as of GDB 7.2) does not handle registration of code objects very effectively. Each next registration takes more time: with 500 registered objects each next registration takes more than 50ms, with 1000 registered code objects - more than 300 ms. This problem was reported to GDB developers (http://sourceware.org/ml/gdb/2011-01/msg00002.html) but currently there is no solution available. To reduce pressure on GDB current implementation of GDB JIT integration operates in two modes: _default_ and _full_ (enabled by `--gdbjit-full` flag). In _default_ mode V8 notifies GDB only about code objects that have source information attached (this usually includes all user scripts). In _full_ - about all generated code objects (stubs, ICs, trampolines).
* On x64 GDB is unable to properly unwind stack without `.eh_frame` section (Issue 1053 (on Google Code))
* GDB is not notified about code deserialized from the snapshot (Issue 1054 (on Google Code))
* Only Linux OS on Intel-compatible CPUs is supported. For different OSes either a different ELF-header should be generated or a completely different object format should be used.
* Enabling GDB JIT interface disables compacting GC. This is done to reduce pressure on GDB as unregistering and registering each moved code object will incur considerable overhead.
* GDB JIT integration provides only _approximate_ source information. It does not provide any information about local variables, function arguments, stack layout etc. It does not enable stepping through JavaScript code or setting a breakpoint on a given line. However, one can set a breakpoint on a function by its name.

24
deps/v8/docs/handling_of_ports.md

@ -1,24 +0,0 @@
# General
This article describes how ports should be handled.
# MIPS
## Straight-forward MIPS ports
1. Do them yourself.
## More complicated MIPS ports
1. CC the MIPS team in the CL. Use the mailing list v8-mips-ports.at.googlegroups.com for that purpose.
1. The MIPS team will provide you with a patch which you need to merge into your CL.
1. Then land the CL.
# PPC (not officially supported)
1. Contact/CC the PPC team in the CL if needed. Use the mailing list v8-ppc-ports.at.googlegroups.com for that purpose.
# x87 (not officially supported)
1. Contact/CC the x87 team in the CL if needed. Use the mailing list v8-x87-ports.at.googlegroups.com for that purpose.
# ARM
## Straight-forward ARM ports
1. Do them yourself.
## When you are lost
1. CC the ARM team in the CL. Use the mailing list v8-arm-ports.at.googlegroups.com for that purpose.

44
deps/v8/docs/i18n_support.md

@ -1,44 +0,0 @@
# ECMAScript 402
V8 optionally implements the [ECMAScript 402](http://www.ecma-international.org/ecma-402/1.0/) API. The API is enabled by default, but can be turned off at compile time.
## Prerequisites
The i18n implementation adds a dependency on ICU. If you run
```
make dependencies
```
a suitable version of ICU is checked out into `third_party/icu`.
### Alternative ICU checkout
You can check out the ICU sources at a different location and define the gyp variable `icu_gyp_path` to point at the `icu.gyp` file.
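One way this might look, assuming the variable is passed to gyp through `GYP_DEFINES` (the checkout path is illustrative):
```
export GYP_DEFINES="$GYP_DEFINES -Dicu_gyp_path=../icu-custom/icu.gyp"
make x64.release
```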
### System ICU
Last but not least, you can compile V8 against a version of ICU installed in your system. To do so, specify the gyp variable `use_system_icu=1`. If you also have `want_separate_host_toolset` enabled, the bundled ICU will still be compiled to generate the V8 snapshot. The system ICU will only be used for the target architecture.
## Embedding V8
If you embed V8 in your application, but your application itself doesn't use ICU, you will need to initialize ICU before calling into V8 by executing:
```
v8::V8::InitializeICU();
```
It is safe to invoke this method even if ICU was not compiled in; in that case it does nothing.
## Compiling without i18n support
To build V8 without i18n support use
```
make i18nsupport=off native
```

6
deps/v8/docs/javascript.md

@ -1,6 +0,0 @@
# Introduction
JavaScript is a dynamically typed scripting language universally used to
script web content in browsers.
Its specification by ECMA can be found [here](http://www.ecma-international.org/publications/standards/Ecma-262.htm).

161
deps/v8/docs/javascript_stack_trace_api.md

@ -1,161 +0,0 @@
All internal errors thrown in V8 capture a stack trace when they are created that can be accessed from JavaScript through the error.stack property. V8 also has various hooks for controlling how stack traces are collected and formatted, and for allowing custom errors to also collect stack traces. This document outlines V8's JavaScript stack trace API.
### Basic stack traces
By default, almost all errors thrown by V8 have a `stack` property that holds the topmost 10 stack frames, formatted as a string. Here's an example of a fully formatted stack trace:
```
ReferenceError: FAIL is not defined
at Constraint.execute (deltablue.js:525:2)
at Constraint.recalculate (deltablue.js:424:21)
at Planner.addPropagate (deltablue.js:701:6)
at Constraint.satisfy (deltablue.js:184:15)
at Planner.incrementalAdd (deltablue.js:591:21)
at Constraint.addConstraint (deltablue.js:162:10)
at Constraint.BinaryConstraint (deltablue.js:346:7)
at Constraint.EqualityConstraint (deltablue.js:515:38)
at chainTest (deltablue.js:807:6)
at deltaBlue (deltablue.js:879:2)
```
The stack trace is collected when the error is created and is the same regardless of where or how many times the error is thrown. We collect 10 frames because it is usually enough to be useful but not so many that it has a noticeable performance impact. You can control how many stack frames are collected by setting the variable
```
Error.stackTraceLimit
```
Setting it to 0 will disable stack trace collection. Any finite integer value will be used as the maximum number of frames to collect. Setting it to `Infinity` means that all frames will be collected. This variable only affects the current context; it has to be set explicitly for each context that needs a different value. (Note that what is known as a "context" in V8 terminology corresponds to a page or iframe in Google Chrome.) To set a different default value that affects all contexts use the
```
--stack-trace-limit <value>
```
command-line flag to V8. To pass this flag to V8 when running Google Chrome use
```
--js-flags="--stack-trace-limit <value>"
```
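For instance, raising the limit for the current context from script is a one-line sketch:
```
Error.stackTraceLimit = 20;  // collect up to 20 frames from now on
```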
### Stack trace collection for custom exceptions
The stack trace mechanism used for built-in errors is implemented using a general stack trace collection API that is also available to user scripts. The function
```
Error.captureStackTrace(error, constructorOpt)
```
adds a stack property to the given `error` object that will yield the stack trace at the time captureStackTrace was called. The reason for not just returning the formatted stack trace directly is that this way we can postpone the formatting of the stack trace until the stack property is accessed and avoid formatting completely if it never is.
The optional `constructorOpt` parameter allows you to pass in a function value. When collecting the stack trace all frames above the topmost call to this function, including that call, will be left out of the stack trace. This can be useful to hide implementation details that won't be useful to the user. The usual way of defining a custom error that captures a stack trace would be:
```
function MyError() {
Error.captureStackTrace(this, MyError);
// any other initialization
}
```
Passing in MyError as a second argument means that the constructor call to MyError won't show up in the stack trace.
### Customizing stack traces
Unlike Java where the stack trace of an exception is a structured value that allows inspection of the stack state, the stack property in V8 just holds a flat string containing the formatted stack trace. This is for no other reason than compatibility with other browsers. However, this is not hardcoded but only the default behavior and can be overridden by user scripts.
For efficiency stack traces are not formatted when they are captured but on demand, the first time the stack property is accessed. A stack trace is formatted by calling
```
Error.prepareStackTrace(error, structuredStackTrace)
```
and using whatever this call returns as the value of the `stack` property. If you assign a different function value to `Error.prepareStackTrace` that function will be used to format stack traces. It will be passed the error object that it is preparing a stack trace for and a structured representation of the stack. User stack trace formatters are free to format the stack trace however they want and even return non-string values. It is safe to retain references to the structured stack trace object after a call to prepareStackTrace completes so that it is also a valid return value. Note that the custom prepareStackTrace function is immediately called at the point when the error object is created (e.g. with `new Error()`).
The structured stack trace is an Array of CallSite objects, each of which represents a stack frame. A CallSite object defines the following methods
* **getThis**: returns the value of this
* **getTypeName**: returns the type of this as a string. This is the name of the function stored in the constructor field of this, if available, otherwise the object's `[[Class]]` internal property.
* **getFunction**: returns the current function
* **getFunctionName**: returns the name of the current function, typically its name property. If a name property is not available an attempt will be made to infer a name from the function's context.
* **getMethodName**: returns the name of the property of this or one of its prototypes that holds the current function
* **getFileName**: if this function was defined in a script returns the name of the script
* **getLineNumber**: if this function was defined in a script returns the current line number
* **getColumnNumber**: if this function was defined in a script returns the current column number
* **getEvalOrigin**: if this function was created using a call to eval returns a CallSite object representing the location where eval was called
* **isToplevel**: is this a toplevel invocation, that is, is this the global object?
* **isEval**: does this call take place in code defined by a call to eval?
* **isNative**: is this call in native V8 code?
* **isConstructor**: is this a constructor call?
The default stack trace is created using the CallSite API so any information that is available there is also available through this API.
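As an illustration, here is a minimal sketch of a custom formatter built on this hook and the CallSite methods above (the output format is arbitrary, and `print` is d8's output function; substitute your environment's equivalent):
```
Error.prepareStackTrace = function(error, structuredStackTrace) {
  // Each frame is a CallSite object; turn it into a one-line summary.
  // Any other format, or even a non-string value, could be returned instead.
  return structuredStackTrace.map(function(frame) {
    return '  at ' + (frame.getFunctionName() || '<anonymous>') +
           ' (' + frame.getFileName() + ':' + frame.getLineNumber() + ')';
  }).join('\n');
};

function fail() { throw new Error('boom'); }

try {
  fail();
} catch (e) {
  print(e.stack);  // formatted by the custom prepareStackTrace above
}
```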
To maintain restrictions imposed on strict mode functions, frames that have a strict mode function and all frames below (its caller etc.) are not allowed to access their receiver and function objects. For those frames, `getFunction()` and `getThis()` will return `undefined`.
### Compatibility
The API described here is specific to V8 and is not supported by any other JavaScript implementations. Most implementations do provide an `error.stack` property but the format of the stack trace is likely to be different from the format described here. The recommended use of this API is
* Only rely on the layout of the formatted stack trace if you know your code is running in v8.
* It is safe to set `Error.stackTraceLimit` and `Error.prepareStackTrace` regardless of which implementation is running your code but be aware that it will only have an effect if your code is running in V8.
### Appendix: Stack trace format
The default stack trace format used by V8 can for each stack frame give the following information:
* Whether the call is a construct call.
* The type of the this value (Type).
* The name of the function called (functionName).
* The name of the property of this or one of its prototypes that holds the function (methodName).
* The current location within the source (location).
Any of these may be unavailable, and different formats for stack frames are used depending on how much of this information is available. If all of the above information is available, a formatted stack frame will look like this:
```
at Type.functionName [as methodName] (location)
```
or, in the case of a construct call
```
at new functionName (location)
```
If only one of functionName and methodName is available, or if they are both available but the same, the format will be:
```
at Type.name (location)
```
If neither is available `<anonymous>` will be used as the name.
The Type value is the name of the function stored in the constructor field of `this`. In V8 all constructor calls set this property to the constructor function, so unless this field has been actively changed after the object was created, it will hold the name of the function it was created by. If it is unavailable, the `[[Class]]` property of the object will be used.
One special case is the global object, where the Type is not shown. In that case the stack frame will be formatted as
```
at functionName [as methodName] (location)
```
The location itself has several possible formats. Most common is the file name, line and column number within the script that defined the current function
```
fileName:lineNumber:columnNumber
```
If the current function was created using eval the format will be
```
eval at position
```
where position is the full position where the call to eval occurred. Note that this means that positions can be nested if there are nested calls to eval, for instance:
```
eval at Foo.a (eval at Bar.z (myscript.js:10:3))
```
If a stack frame is within V8's libraries the location will be
```
native
```
and if the location is unavailable it will be
```
unknown location
```

67
deps/v8/docs/merging_and_patching.md

@ -1,67 +0,0 @@
# Introduction
If you have a patch to the master branch (e.g. an important bug fix) that needs to be merged into one of the production V8 branches, read on.
For the examples, a branched 2.4 version of V8 will be used. Replace "2.4" with your version number.
**An associated issue on Chromium's or V8's issue tracker is mandatory if a patch is merged. This helps keep track of merges.
You can use [a template](https://code.google.com/p/v8/issues/entry?template=Merge%20request) to create an issue.**
# Merge process outlined
The merge process in the Chromium and V8 tracker is driven by labels in the form of
```
Merge-[Status]-[Branch]
```
The currently important labels for V8 are:
1. Merge-Request-## initiates the process => this fix should be merged into M-##
1. Merge-Review-## => the merge is not yet approved for M-##, e.g. because Canary coverage is missing
1. Merge-Approved-## => the Chrome TPMs have signed off on the merge
1. Merge-Merged-$BRANCHNUMBER$ => when the merge is done, the Merge-Approved label is swapped for this one. $BRANCHNUMBER$ is the name/number of the V8 branch, e.g. 4.3 for M-43.
# Instructions for git using the automated script
## How to check if a commit was already merged/reverted
Use mergeinfo.py to get all the commits that are connected to the given HASH according to Git.
```
tools/release/mergeinfo.py HASH
```
## Step 1: Run the script
Let's assume you're merging revision af3cf11 to branch 2.4 (please specify full git hashes - abbreviations are used here for simplicity).
```
tools/release/merge_to_branch.py --branch 2.4 af3cf11
```
Run the script with `-h` to display its help message, which includes more options (e.g. you can specify a file containing your patch, reverse a patch, specify a custom commit message, or resume a merging process you've canceled before). Note that the script will use a temporary checkout of V8 - it won't touch your work space.
You can also merge more than one revision at once; just list them all.
```
tools/release/merge_to_branch.py --branch 2.4 af3cf11 cf33f1b sf3cf09
```
## Step 2: Send a notification letter to hablich@chromium.org
Saying something like this:
```
_Subject:_ Regression fix merged into V8 2.4 branch (Chrome 8)
_Body:_ We have merged a fix to the V8 version 2.4 branch (the version used in Chrome 8)
Version 2.4.9.10: Issue xxx: The parser doesn't parse.
```
# FAQ
## I get an error during merge that is related to tagging. What should I do?
When two people are merging at the same time, a race condition can occur in the merge scripts. If this is the case, contact machenbach@chromium.org and hablich@chromium.org.
## Is there a TL;DR?
1. [Create issue on issue tracker](https://code.google.com/p/v8/issues/entry?template=Merge%20request)
1. Add Merge-Request-{Branch} to the issue
1. Wait until somebody adds Merge-Approved-{Branch}
1. Merge

34
deps/v8/docs/profiling_chromium_with_v8.md

@ -1,34 +0,0 @@
# Introduction
V8's CPU & heap profilers are trivial to use from V8's shells (see V8Profiler), but using them with Chromium can appear confusing. This page should help you with it.
# Instructions
## Why is using V8's profilers with Chromium different from using them with V8 shells?
Chromium is a complex application, unlike V8 shells. Below is the list of Chromium features that affect profiler usage:
* each renderer is a separate process (OK, not actually each, but let's omit this detail), so they can't share the same log file;
* the sandbox built around the renderer process prevents it from writing to disk;
* Developer Tools configure profilers for their own purposes;
* V8's logging code contains some optimizations to simplify logging state checks.
## So, how to run Chromium to get a CPU profile?
Here is how to run Chromium in order to get a CPU profile from the start of the process:
```
./Chromium --no-sandbox --js-flags="--logfile=%t.log --prof"
```
Please note that you won't see profiles in Developer Tools, because all the data is logged to a file, not to Developer Tools.
### Flags description
* **--no-sandbox** - turns off the renderer sandbox; obviously a must-have;
* **--js-flags** - the container for flags passed to V8:
* **--logfile=%t.log** - specifies a name pattern for log files; **%t** gets expanded into current time in milliseconds, so each process gets its own log file; you can use prefixes and suffixes if you want, like this: **prefix-%t-suffix.log**;
* **--prof** - tells V8 to write statistical profiling information into the log file.
## Notes
Under Windows, be sure to turn on .MAP file creation for **chrome.dll**, but not for **chrome.exe**.

57
deps/v8/docs/release_process.md

@ -1,57 +0,0 @@
# Introduction
The V8 release process is tightly connected to [Chrome's](https://www.chromium.org/getting-involved/dev-channel). The V8 team is using all four Chrome release channels to push new versions to the users.
If you want to look up which V8 version is in a Chrome release, you can check [OmahaProxy](https://omahaproxy.appspot.com/). For each Chrome release a separate branch is created in the V8 repository to make tracing back easier, e.g. for [Chrome 45.0.2413.0](https://chromium.googlesource.com/v8/v8.git/+/chromium/2413).
# Canary releases
Every day a new Canary build is pushed to the users via [Chrome's Canary channel](https://www.google.com/chrome/browser/canary.html?platform=win64). Normally the deliverable is the latest, stable enough version from [master](https://chromium.googlesource.com/v8/v8.git/+/roll).
Branches for a Canary normally look like this
```
remotes/origin/4.5.35
```
# Dev releases
Every week a new Dev build is pushed to the users via [Chrome's Dev channel](https://www.google.com/chrome/browser/desktop/index.html?extra=devchannel&platform=win64). Normally the deliverable includes the latest stable enough V8 version on the Canary channel.
Branches for a Dev normally look like this
```
remotes/origin/4.5.35
```
# Beta releases
Roughly every 6 weeks a new major branch is created, e.g. [for Chrome 44](https://chromium.googlesource.com/v8/v8.git/+log/branch-heads/4.4). This happens in sync with the creation of [Chrome's Beta channel](https://www.google.com/chrome/browser/beta.html?platform=win64). The Chrome Beta is pinned to the head of V8's branch. After approx. 6 weeks the branch is promoted to Stable.
Changes are only cherry-picked onto the branch in order to stabilize the version.
Branches for a Beta normally look like this
```
remotes/branch-heads/4.5
```
They are based on a Canary branch.
# Stable releases
Roughly every 6 weeks a new major Stable release is done. No special branch is created as the latest Beta branch is simply promoted to Stable. This version is pushed to the users via [Chrome's Stable channel](https://www.google.com/chrome/browser/desktop/index.html?platform=win64).
Branches for a Stable normally look like this
```
remotes/branch-heads/4.5
```
They are promoted (reused) Beta branches.
# Which version should I embed in my application?
The tip of the same branch that Chrome's Stable channel uses.
We often backmerge important bug fixes to a stable branch, so if you care about stability, security, and correctness, you should include those updates too -- that's why we recommend "the tip of the branch", as opposed to an exact version.
As soon as a new branch is promoted to Stable, we stop maintaining the previous stable branch. This happens every six weeks, so you should be prepared to update at least this often.
Example: The current stable Chrome release is [44.0.2403.125](https://omahaproxy.appspot.com), with V8 4.4.63.25. So you should embed [branch-heads/4.4](https://chromium.googlesource.com/v8/v8.git/+/branch-heads/4.4). And you should update to branch-heads/4.5 when Chrome 45 is released on the Stable channel.

7
deps/v8/docs/runtime_functions.md

@ -1,7 +0,0 @@
# Runtime functions
Much of the JavaScript library is implemented in JavaScript code itself, using a minimal set of C++ runtime functions callable from JavaScript. Some of these can be called from JavaScript using names that start with %, provided V8 is run with the flag `--allow-natives-syntax`. Others are only called by code generated by the code generators and are not visible in JS, even using the % syntax.
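For illustration, a tiny script of the kind you could run with `d8 --allow-natives-syntax script.js`. The exact set of runtime functions varies between V8 versions; `%OptimizeFunctionOnNextCall` is one commonly used in V8's own tests:
```
function add(a, b) { return a + b; }
add(1, 2);                         // Warm up to collect type feedback.
%OptimizeFunctionOnNextCall(add);  // A runtime function, via % syntax.
add(1, 2);                         // This call triggers optimized compilation.
```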

41
deps/v8/docs/source.md

@ -1,41 +0,0 @@
# Source
**Quick links:** [browse](https://chromium.googlesource.com/v8/v8/) | [browse bleeding edge](https://chromium.googlesource.com/v8/v8/+/master) | [changes](https://chromium.googlesource.com/v8/v8/+log/master).
## Command-Line Access
### Git
See [UsingGit](using_git.md).
### Subversion (deprecated)
Use this command to anonymously check out the up-to-date stable version of the project source code:
> `svn checkout http://v8.googlecode.com/svn/trunk/ v8`
If you plan to contribute to V8 but are not a member, use this command to anonymously check out a read-only version of the development branch:
> `svn checkout http://v8.googlecode.com/svn/branches/bleeding_edge/ v8`
If you're a member of the project, use this command to check out a writable development branch as yourself using HTTPS:
> `svn checkout https://v8.googlecode.com/svn/branches/bleeding_edge/ v8 --username <your username>`
When prompted, enter your generated [googlecode.com](http://code.google.com/hosting/settings) password.
## Source Code Branches
There are several different branches of V8; if you're unsure which version to get, you most likely want the up-to-date stable version in `trunk/`. Here's an overview of the different branches:
* The bleeding edge, `branches/bleeding_edge/`, is where active development takes place. If you're considering contributing to V8, this is the branch to get.
* Under `trunk/` is the "stable edge", which is updated a few times per week. It is a copy of the bleeding edge that has been successfully tested. Use this if you want to be almost up to date and don't want your code to break whenever we accidentally forget to add a file on the bleeding edge. Some of the trunk revisions are tagged with X.Y.Z.T version labels. When we decide which X.Y.*.* is the "most stable", it becomes the X.Y branch in subversion.
* If you want a well-tested version that doesn't change except for bugfixes, use one of the versioned branches (e.g. `branches/3.16/` at the time of this writing). Note that usually only the last two branches are actively maintained; any older branches could have unfixed security holes. You may want to follow the V8 version that Chrome is shipping on its stable (or beta) channels, see http://omahaproxy.appspot.com.
## V8 public API compatibility
The V8 public API (basically the files under the include/ directory) may change over time. New types/methods may be added without breaking existing functionality. When we decide that we want to drop some existing class/method, we first mark it with the [V8\_DEPRECATED](https://code.google.com/p/chromium/codesearch#search/&q=V8_DEPRECATED&sq=package:chromium&type=cs) macro, which causes compile-time warnings when the deprecated method is called by the embedder. We keep the deprecated method for one branch and then remove it. E.g. if `v8::CpuProfiler::FindCpuProfile` was not deprecated in the _3.17_ branch and was marked as `V8_DEPRECATED` in _3.18_, it may well be removed in the _3.19_ branch.
## GUI and IDE Access
This project's Subversion repository may be accessed using many different client programs and plug-ins. See your client's documentation for more information.

58
deps/v8/docs/testing.md

@ -1,58 +0,0 @@
V8 includes a test framework that allows you to test the engine. The framework lets you run both our own test suites that are included with the source code and others, currently the Mozilla and Test262 test suites.
## Running the V8 tests
Before you run the tests, you will have to build V8 with GYP using the instructions [here](http://code.google.com/p/v8-wiki/wiki/BuildingWithGYP).
You can append `.check` to any build target to have tests run for it, e.g.
```
make ia32.release.check
make ia32.check
make release.check
make check # builds and tests everything (no dot before "check"!)
```
Before submitting patches, you should always run the quickcheck target, which builds a fast debug build and runs only the most relevant tests:
```
make quickcheck
```
You can also run tests manually:
```
tools/run-tests.py --arch-and-mode=ia32.release [--outdir=foo]
```
Or you can run individual tests:
```
tools/run-tests.py --arch=ia32 cctest/test-heap/SymbolTable mjsunit/delete-in-eval
```
Run the script with `--help` to find out about its other options; `--outdir` defaults to `out`. Also note that using the `cctest` binary to run multiple tests in one process is not supported.
## Running the Mozilla and Test262 tests
The V8 test framework comes with support for running the Mozilla as well as the Test262 test suite. To download the test suites and then run them for the first time, do the following:
```
tools/run-tests.py --download-data mozilla
tools/run-tests.py --download-data test262
```
To run the tests subsequently, you may omit the flag that downloads the test suite:
```
tools/run-tests.py mozilla
tools/run-tests.py test262
```
Note that V8 fails a number of Mozilla tests because they require Firefox-specific extensions.
## Running the WebKit tests
Sometimes all of the above tests pass but WebKit build bots fail. To make sure the WebKit tests pass, run:
```
tools/run-tests.py --progress=verbose --outdir=out --arch=ia32 --mode=release webkit --timeout=200
```
Replace `--arch` and other parameters with values that match your build options.

22
deps/v8/docs/triaging_issues.md

@ -1,22 +0,0 @@
# How to get an issue triaged
* *V8 tracker*: Set the state to `Untriaged`
* *Chromium tracker*: Set the state to `Untriaged` and add the label `Cr-Blink-JavaScript`
# How to assign V8 issues in the Chromium tracker
Please assign issues to the V8 specialty sheriffs of one of the following categories:
* Stability: jkummerow@c....org, adamk@c....org
* Performance: bmeurer@c....org, mvstanton@c....org
* Clusterfuzz: Set the bug to the following state:
* `label:ClusterFuzz label:Cr-Blink-JavaScript status:Available -has:owner`
* Will show up in [this](https://code.google.com/p/chromium/issues/list?can=2&q=label%3AClusterFuzz+label%3ACr-Blink-JavaScript+status%3AAvailable+-has%3Aowner&colspec=ID+Pri+M+Week+ReleaseBlock+Cr+Status+Owner+Summary+OS+Modified&x=m&y=releaseblock&cells=tiles) query.
* CC mstarzinger@ and ishell@
Please CC hablich@c....org on all issues.
Assign remaining issues to hablich@c....org.
Use the label Cr-Blink-JavaScript on all issues.
**Please note that this only applies to issues tracked in the Chromium issue tracker.**

147
deps/v8/docs/using_git.md

@ -1,147 +0,0 @@
# Git repository
V8's git repository is located at https://chromium.googlesource.com/v8/v8.git
V8's master branch also has an official git mirror on GitHub: http://github.com/v8/v8-git-mirror.
**Don't just `git-clone` either of these URLs** if you want to build V8 from your checkout; instead, follow the instructions below to get everything set up correctly.
## Prerequisites
1. **Git**. To install using `apt-get`:
```
apt-get install git
```
1. **depot\_tools**. See [instructions](http://dev.chromium.org/developers/how-tos/install-depot-tools).
1. For **push access**, you need to set up a .netrc file with your git password:
1. Go to https://chromium.googlesource.com/new-password and log in with your committer account (e.g. @chromium.org account; non-chromium.org ones work too). Note: creating a new password doesn't automatically revoke any previously created passwords.
1. Follow the instructions in the "Staying Authenticated" section. It will ask you to copy-paste two lines into your ~/.netrc file.
1. In the end, ~/.netrc should have two lines that look like:
```
machine chromium.googlesource.com login git-yourusername.chromium.org password <generated pwd>
machine chromium-review.googlesource.com login git-yourusername.chromium.org password <generated pwd>
```
1. Make sure that the ~/.netrc file's permissions are 0600, as many programs refuse to read .netrc files that are readable by anyone other than you.
## How to start
Make sure depot\_tools are up-to-date by typing once:
```
gclient
```
Then get V8, including all branches and dependencies:
```
fetch v8
cd v8
```
After that you're intentionally in a detached head state.
Optionally you can specify how new branches should be tracked:
```
git config branch.autosetupmerge always
git config branch.autosetuprebase always
```
Alternatively, you can create new local branches like this (recommended):
```
git new-branch mywork
```
## Staying up-to-date
Update your current branch with `git pull`. Note that if you're not on a branch, `git pull` won't work, and you'll need to use `git fetch` instead.
```
git pull
```
Sometimes the dependencies of V8 are updated. You can synchronize those by running
```
gclient sync
```
## Sending code for review
```
git cl upload
```
## Committing
You can use the CQ checkbox on codereview for committing (preferred). See also the [chromium instructions](http://www.chromium.org/developers/testing/commit-queue) for CQ flags and troubleshooting.
If you need more trybots than the default, add the following to your commit message on rietveld (e.g. for adding a nosnap bot):
```
CQ_INCLUDE_TRYBOTS=tryserver.v8:v8_linux_nosnap_rel
```
To land manually, update your branch:
```
git pull --rebase origin
```
Then commit using
```
git cl land
```
# For project members
## Try jobs
### Creating a try job from codereview
1. Upload a CL to rietveld.
```
git cl upload
```
1. Try the CL by sending a try job to the try bots like this:
```
git cl try
```
1. Wait for the try bots to build; you will get an e-mail with the result. You can also check the try state of your patch on codereview.
1. If applying the patch fails, you either need to rebase your patch or specify the V8 revision to sync to:
```
git cl try --revision=1234
```
### Creating a try job from a local branch
1. Commit some changes to a git branch in the local repo.
1. Try the change by sending a try job to the try bots like this:
```
git try
```
1. Wait for the try bots to build; you will get an e-mail with the result. Note: there are issues with some of the slaves at the moment. Sending try jobs from codereview is recommended.
### Useful arguments
The revision argument tells the try bot which revision of the code base your local changes will be applied to. Without a revision, our LKGR revision is used as the base (http://v8-status.appspot.com/lkgr).
```
git try --revision=1234
```
To avoid running your try job on all bots, use the `--bot` flag with a comma-separated list of builder names. Example:
```
git try --bot=v8_mac_rel
```
### Viewing the try server
http://build.chromium.org/p/tryserver.v8/waterfall
### Access credentials
If asked for access credentials, use your @chromium.org email address and your generated password from [googlecode.com](http://code.google.com/hosting/settings).

3
deps/v8/docs/v8_c_plus_plus_styleand_sops.md

@ -1,3 +0,0 @@
# V8 C++ Style Guide
In general, new V8 code should conform to Google's/Chrome's C++ Style Guide as much as possible. There will always be cases where Google/Chrome Style Guide conformity or Google/Chrome best practices are extremely cumbersome or underspecified for our use cases. We document these exceptions here.

39
deps/v8/docs/v8_committers_responsibility.md

@ -1,39 +0,0 @@
# V8 committer's responsibility
## Basic commit guidelines
When you're committing to the V8 repositories, ensure that you follow these guidelines:
1. Find the right reviewer for your changes and for patches you're asked to review.
1. Be available on IM and/or email before and after you land the change.
1. Watch the [waterfall](http://build.chromium.org/p/client.v8/console) until all bots turn green after your change.
1. When landing a TBR change (To Be Reviewed), make sure to notify the people whose code you're changing. Usually just send the review e-mail.
In short, do the right thing for the project, not the easiest thing to get code committed, and above all: use your best judgement.
**Don't be afraid to ask questions. There is always someone who will immediately read messages sent to the v8-committers mailing list who can help you.**
## Changes with multiple reviewers
There are occasionally changes with a lot of reviewers on them, since sometimes several people might need to be in the loop for a change because of multiple areas of responsibility and expertise.
The problem is that without some guidelines, there's no clear responsibility given in these reviews.
If you're the sole reviewer on a change, you know you have to do a good job. When there are three other people, you sometimes assume that somebody else must have looked carefully at some part of the review. Sometimes all the reviewers think this and the change isn't reviewed properly.
In other cases, some reviewers say "LGTM" for a patch, while others are still expecting changes. The author can get confused as to the status of the review, and some patches have been checked in where at least one reviewer expected further changes before committing.
At the same time, we want to encourage many people to participate in the review process and keep tabs on what's going on.
So, here are some guidelines to help clarify the process:
1. When a patch author requests more than one reviewer, they should make clear in the review request email what they expect the responsibility of each reviewer to be. For example, you could write this in the email:
* larry: bitmap changes
* sergey: process hacks
* everybody else: FYI
1. In this case, you might be on the review list because you've asked to be in the loop for multiprocess changes, but you wouldn't be the primary reviewer and the author and other reviewers wouldn't be expecting you to review all the diffs in detail.
1. If you get a review that includes many other people, and the author didn't do (1), please ask them what part you're responsible for if you don't want to review the whole thing in detail.
1. The author should wait for approval from everybody on the reviewer list before checking in.
1. People who are on a review without clear review responsibility (i.e. drive-by reviews) should be super responsive and not hold up the review. The patch author should feel free to ping them mercilessly if they are holding it up.
1. If you're an "FYI" person on a review and you didn't actually review in detail (or at all), but don't have a problem with the patch, note this. You could say something like "rubber stamp" or "ACK" instead of "LGTM." This way the real reviewers know not to trust that you did their work for them, but the author of the patch knows they don't have to wait for further feedback from you. Hopefully we can still keep everybody in the loop but have clear ownership and detailed reviews. It might even speed up some changes since you can quickly "ACK" changes you don't care about, and the author knows they don't have to wait for feedback from you.
(Adapted from: http://dev.chromium.org/developers/committers-responsibility )

141
deps/v8/docs/v8_profiler.md

@ -1,141 +0,0 @@
# Introduction
V8 has built-in sample-based profiling. Profiling is turned off by default, but can be enabled via the `--prof` command line option. The sampler records stacks of both JavaScript and C/C++ code.
# Build
Build the d8 shell following the instructions at [BuildingWithGYP](BuildingWithGYP.md).
# Command Line
To start profiling, use the `--prof` option. When profiling, V8 generates a `v8.log` file which contains profiling data.
Windows:
```
build\Release\d8 --prof script.js
```
Other platforms (replace "ia32" with "x64" if you want to profile the x64 build):
```
out/ia32.release/d8 --prof script.js
```
# Process the Generated Output
Log file processing is done using JS scripts run by the d8 shell. For this to work, a `d8` binary (or symlink, or `d8.exe` on Windows) must be in the root of your V8 checkout, or in the path specified by the environment variable `D8_PATH`. Note: this binary is only used to process the log, not for the actual profiling, so it doesn't matter which version it is.
Windows:
```
tools\windows-tick-processor.bat v8.log
```
Linux:
```
tools/linux-tick-processor v8.log
```
Mac OS X:
```
tools/mac-tick-processor v8.log
```
## Snapshot-based VM build and builtins reporting
When a snapshot-based VM build is being used, code objects from a snapshot that don't correspond to functions are reported with generic names like _"A builtin from the snapshot"_, because their real names are not stored in the snapshot. To see the names, the following steps must be taken:
* the `--log-snapshot-positions` flag must be passed to the VM (along with `--prof`); this way, for deserialized objects, the `(memory address, snapshot offset)` pairs are emitted into the profiler log;
* the `--snapshot-log=<log file from mksnapshot>` flag must be passed to the tick processor script; a log file from the `mksnapshot` program (a snapshot log) contains address-offset pairs for serialized objects, and their names; using the snapshot log, names can be mapped onto deserialized objects during profiler log processing; the snapshot log file is called `snapshot.log` and resides alongside V8's compiled files.
An example of usage:
```
out/ia32.release/d8 --prof --log-snapshot-positions script.js
tools/linux-tick-processor --snapshot-log=out/ia32.release/obj.target/v8_snapshot/geni/snapshot.log v8.log
```
# Programmatic Control of Profiling
If you would like to control when profile samples are collected in your application, you can do so.
First you'll probably want to use the `--noprof-auto` command line switch, which prevents the profiler from automatically starting to record profile ticks.
Profile ticks will not be recorded until your application specifically invokes these APIs:
* `V8::ResumeProfiler()` - start/resume collection of data
* `V8::PauseProfiler()` - pause collection of data
# Example Output
```
Statistical profiling result from benchmarks\v8.log, (4192 ticks, 0 unaccounted, 0 excluded).
[Shared libraries]:
ticks total nonlib name
9 0.2% 0.0% C:\WINDOWS\system32\ntdll.dll
2 0.0% 0.0% C:\WINDOWS\system32\kernel32.dll
[JavaScript]:
ticks total nonlib name
741 17.7% 17.7% LazyCompile: am3 crypto.js:108
113 2.7% 2.7% LazyCompile: Scheduler.schedule richards.js:188
103 2.5% 2.5% LazyCompile: rewrite_nboyer earley-boyer.js:3604
103 2.5% 2.5% LazyCompile: TaskControlBlock.run richards.js:324
96 2.3% 2.3% Builtin: JSConstructCall
...
[C++]:
ticks total nonlib name
94 2.2% 2.2% v8::internal::ScavengeVisitor::VisitPointers
33 0.8% 0.8% v8::internal::SweepSpace
32 0.8% 0.8% v8::internal::Heap::MigrateObject
30 0.7% 0.7% v8::internal::Heap::AllocateArgumentsObject
...
[GC]:
ticks total nonlib name
458 10.9%
[Bottom up (heavy) profile]:
Note: percentage shows a share of a particular caller in the total
amount of its parent calls.
Callers occupying less than 2.0% are not shown.
ticks parent name
741 17.7% LazyCompile: am3 crypto.js:108
449 60.6% LazyCompile: montReduce crypto.js:583
393 87.5% LazyCompile: montSqrTo crypto.js:603
212 53.9% LazyCompile: bnpExp crypto.js:621
212 100.0% LazyCompile: bnModPowInt crypto.js:634
212 100.0% LazyCompile: RSADoPublic crypto.js:1521
181 46.1% LazyCompile: bnModPow crypto.js:1098
181 100.0% LazyCompile: RSADoPrivate crypto.js:1628
...
```
# Timeline plot
The timeline plot visualizes where V8 is spending time. This can be used to find bottlenecks and spot things that are unexpected (for example, too much time spent in the garbage collector). Data for the plot are gathered by both sampling and instrumentation. Linux with gnuplot 4.6 is required.
To create a timeline plot, run V8 as described above, with the option `--log-timer-events` in addition to `--prof`:
```
out/ia32.release/d8 --prof --log-timer-events script.js
```
The output is then passed to a plot script, similar to the tick-processor:
```
tools/plot-timer-events v8.log
```
This creates `timer-events.png` in the working directory, which can be opened with most image viewers.
# Options
Since recording log output comes with a certain performance overhead, the script attempts to correct this using a distortion factor. If not specified, it tries to determine the factor automatically. You can, however, also specify the distortion factor manually.
```
tools/plot-timer-events --distortion=4500 v8.log
```
You can also manually specify a certain range for which to create the plot or statistical profile, expressed in milliseconds:
```
tools/plot-timer-events --distortion=4500 --range=1000,2000 v8.log
tools/linux-tick-processor --distortion=4500 --range=1000,2000 v8.log
```
# HTML 5 version
Both the statistical profile and the timeline plot are available [in the browser](http://v8.googlecode.com/svn/branches/bleeding_edge/tools/profviz/profviz.html). However, the statistical profile lacks C++ symbol resolution and the JavaScript port of gnuplot performs an order of magnitude slower than the native one.

28
deps/v8/include/v8-debug.h

@ -155,8 +155,11 @@ class V8_EXPORT Debug {
*/
typedef void (*DebugMessageDispatchHandler)();
static bool SetDebugEventListener(EventCallback that,
static bool SetDebugEventListener(Isolate* isolate, EventCallback that,
Local<Value> data = Local<Value>());
V8_DEPRECATED("Use version with an Isolate",
static bool SetDebugEventListener(
EventCallback that, Local<Value> data = Local<Value>()));
// Schedule a debugger break to happen when JavaScript code is run
// in the given isolate.
@ -170,7 +173,9 @@ class V8_EXPORT Debug {
static bool CheckDebugBreak(Isolate* isolate);
// Message based interface. The message protocol is JSON.
static void SetMessageHandler(MessageHandler handler);
static void SetMessageHandler(Isolate* isolate, MessageHandler handler);
V8_DEPRECATED("Use version with an Isolate",
static void SetMessageHandler(MessageHandler handler));
static void SendCommand(Isolate* isolate,
const uint16_t* command, int length,
@ -194,10 +199,9 @@ class V8_EXPORT Debug {
* }
* \endcode
*/
static V8_DEPRECATE_SOON(
"Use maybe version",
Local<Value> Call(v8::Local<v8::Function> fun,
Local<Value> data = Local<Value>()));
static V8_DEPRECATED("Use maybe version",
Local<Value> Call(v8::Local<v8::Function> fun,
Local<Value> data = Local<Value>()));
// TODO(dcarney): data arg should be a MaybeLocal
static MaybeLocal<Value> Call(Local<Context> context,
v8::Local<v8::Function> fun,
@ -206,8 +210,8 @@ class V8_EXPORT Debug {
/**
* Returns a mirror object for the given object.
*/
static V8_DEPRECATE_SOON("Use maybe version",
Local<Value> GetMirror(v8::Local<v8::Value> obj));
static V8_DEPRECATED("Use maybe version",
Local<Value> GetMirror(v8::Local<v8::Value> obj));
static MaybeLocal<Value> GetMirror(Local<Context> context,
v8::Local<v8::Value> obj);
@ -242,7 +246,9 @@ class V8_EXPORT Debug {
* "Evaluate" debug command behavior currently is not specified in scope
* of this method.
*/
static void ProcessDebugMessages();
static void ProcessDebugMessages(Isolate* isolate);
V8_DEPRECATED("Use version with an Isolate",
static void ProcessDebugMessages());
/**
* Debugger is running in its own context which is entered while debugger
@ -251,7 +257,9 @@ class V8_EXPORT Debug {
* to change. The Context exists only when the debugger is active, i.e. at
* least one DebugEventListener or MessageHandler is set.
*/
static Local<Context> GetDebugContext();
static Local<Context> GetDebugContext(Isolate* isolate);
V8_DEPRECATED("Use version with an Isolate",
static Local<Context> GetDebugContext());
/**

53
deps/v8/include/v8-experimental.h

@ -0,0 +1,53 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/**
* This header contains a set of experimental V8 APIs. We hope these will
* become a part of standard V8, but they may also be removed if we deem the
* experiment to not be successful.
*/
#ifndef V8_INCLUDE_V8_EXPERIMENTAL_H_
#define V8_INCLUDE_V8_EXPERIMENTAL_H_
#include "include/v8.h"
namespace v8 {
namespace experimental {
// Allow the embedder to construct accessors that V8 can compile and use
// directly, without jumping into the runtime.
class V8_EXPORT FastAccessorBuilder {
public:
struct ValueId {
size_t value_id;
};
struct LabelId {
size_t label_id;
};
static FastAccessorBuilder* New(Isolate* isolate);
ValueId IntegerConstant(int int_constant);
ValueId GetReceiver();
ValueId LoadInternalField(ValueId value_id, int field_no);
ValueId LoadValue(ValueId value_id, int offset);
ValueId LoadObject(ValueId value_id, int offset);
void ReturnValue(ValueId value_id);
void CheckFlagSetOrReturnNull(ValueId value_id, int mask);
void CheckNotZeroOrReturnNull(ValueId value_id);
LabelId MakeLabel();
void SetLabel(LabelId label_id);
void CheckNotZeroOrJump(ValueId value_id, LabelId label_id);
private:
FastAccessorBuilder() = delete;
FastAccessorBuilder(const FastAccessorBuilder&) = delete;
~FastAccessorBuilder() = delete;
void operator=(const FastAccessorBuilder&) = delete;
};
} // namespace experimental
} // namespace v8
#endif // V8_INCLUDE_V8_EXPERIMENTAL_H_

47
deps/v8/include/v8-platform.h

@ -5,6 +5,8 @@
#ifndef V8_V8_PLATFORM_H_
#define V8_V8_PLATFORM_H_
#include <stdint.h>
namespace v8 {
class Isolate;
@ -107,6 +109,51 @@ class Platform {
* the epoch.
**/
virtual double MonotonicallyIncreasingTime() = 0;
/**
* Called by TRACE_EVENT* macros, don't call this directly.
* The name parameter is a category group for example:
* TRACE_EVENT0("v8,parse", "V8.Parse")
* The pointer returned points to a value with zero or more of the bits
* defined in CategoryGroupEnabledFlags.
**/
virtual const uint8_t* GetCategoryGroupEnabled(const char* name) {
static uint8_t no = 0;
return &no;
}
/**
* Gets the category group name of the given category_enabled_flag pointer.
* Usually used while serializing TRACE_EVENTs.
**/
virtual const char* GetCategoryGroupName(
const uint8_t* category_enabled_flag) {
static const char dummy[] = "dummy";
return dummy;
}
/**
* Adds a trace event to the platform tracing system. This function call is
* usually the result of a TRACE_* macro from trace_event_common.h when
* tracing and the category of the particular trace are enabled. It is not
* advisable to call this function on its own; it is really only meant to be
* used by the trace macros. The returned handle can be used by
* UpdateTraceEventDuration to update the duration of COMPLETE events.
*/
virtual uint64_t AddTraceEvent(
char phase, const uint8_t* category_enabled_flag, const char* name,
uint64_t id, uint64_t bind_id, int32_t num_args, const char** arg_names,
const uint8_t* arg_types, const uint64_t* arg_values,
unsigned int flags) {
return 0;
}
/**
* Sets the duration field of a COMPLETE trace event. It must be called with
* the handle returned from AddTraceEvent().
**/
virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
const char* name, uint64_t handle) {}
};
} // namespace v8

2
deps/v8/include/v8-testing.h

@ -39,7 +39,7 @@ class V8_EXPORT Testing {
/**
* Force deoptimization of all functions.
*/
static void DeoptimizeAll();
static void DeoptimizeAll(Isolate* isolate);
};

6
deps/v8/include/v8-version.h

@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 4
#define V8_MINOR_VERSION 8
#define V8_BUILD_NUMBER 271
#define V8_PATCH_LEVEL 17
#define V8_MINOR_VERSION 9
#define V8_BUILD_NUMBER 385
#define V8_PATCH_LEVEL 18
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)

417
deps/v8/include/v8.h

@ -12,8 +12,8 @@
* For other documentation see http://code.google.com/apis/v8/
*/
#ifndef V8_H_
#define V8_H_
#ifndef INCLUDE_V8_H_
#define INCLUDE_V8_H_
#include <stddef.h>
#include <stdint.h>
@ -92,6 +92,7 @@ class ObjectTemplate;
class Platform;
class Primitive;
class Promise;
class Proxy;
class RawOperationDescriptor;
class Script;
class SharedArrayBuffer;
@ -136,6 +137,10 @@ class CallHandlerHelper;
class EscapableHandleScope;
template<typename T> class ReturnValue;
namespace experimental {
class FastAccessorBuilder;
} // namespace experimental
namespace internal {
class Arguments;
class Heap;
@ -420,12 +425,12 @@ class WeakCallbackInfo {
V8_INLINE T* GetParameter() const { return parameter_; }
V8_INLINE void* GetInternalField(int index) const;
V8_INLINE V8_DEPRECATE_SOON("use indexed version",
void* GetInternalField1() const) {
V8_INLINE V8_DEPRECATED("use indexed version",
void* GetInternalField1() const) {
return internal_fields_[0];
}
V8_INLINE V8_DEPRECATE_SOON("use indexed version",
void* GetInternalField2() const) {
V8_INLINE V8_DEPRECATED("use indexed version",
void* GetInternalField2() const) {
return internal_fields_[1];
}
@ -551,13 +556,13 @@ template <class T> class PersistentBase {
* critical form of resource management!
*/
template <typename P>
V8_INLINE V8_DEPRECATE_SOON(
V8_INLINE V8_DEPRECATED(
"use WeakCallbackInfo version",
void SetWeak(P* parameter,
typename WeakCallbackData<T, P>::Callback callback));
template <typename S, typename P>
V8_INLINE V8_DEPRECATE_SOON(
V8_INLINE V8_DEPRECATED(
"use WeakCallbackInfo version",
void SetWeak(P* parameter,
typename WeakCallbackData<S, P>::Callback callback));
@ -569,7 +574,7 @@ template <class T> class PersistentBase {
// specify a parameter for the callback or the location of two internal
// fields in the dying object.
template <typename P>
V8_INLINE V8_DEPRECATE_SOON(
V8_INLINE V8_DEPRECATED(
"use SetWeak",
void SetPhantom(P* parameter,
typename WeakCallbackInfo<P>::Callback callback,
@ -1313,10 +1318,10 @@ class V8_EXPORT ScriptCompiler {
* \return Compiled script object (context independent; for running it must be
* bound to a context).
*/
static V8_DEPRECATE_SOON("Use maybe version",
Local<UnboundScript> CompileUnbound(
Isolate* isolate, Source* source,
CompileOptions options = kNoCompileOptions));
static V8_DEPRECATED("Use maybe version",
Local<UnboundScript> CompileUnbound(
Isolate* isolate, Source* source,
CompileOptions options = kNoCompileOptions));
static V8_WARN_UNUSED_RESULT MaybeLocal<UnboundScript> CompileUnboundScript(
Isolate* isolate, Source* source,
CompileOptions options = kNoCompileOptions);
@ -1332,7 +1337,7 @@ class V8_EXPORT ScriptCompiler {
* when this function was called. When run it will always use this
* context.
*/
static V8_DEPRECATE_SOON(
static V8_DEPRECATED(
"Use maybe version",
Local<Script> Compile(Isolate* isolate, Source* source,
CompileOptions options = kNoCompileOptions));
@ -1362,11 +1367,11 @@ class V8_EXPORT ScriptCompiler {
* (ScriptStreamingTask has been run). V8 doesn't construct the source string
* during streaming, so the embedder needs to pass the full source here.
*/
static V8_DEPRECATE_SOON(
"Use maybe version",
Local<Script> Compile(Isolate* isolate, StreamedSource* source,
Local<String> full_source_string,
const ScriptOrigin& origin));
static V8_DEPRECATED("Use maybe version",
Local<Script> Compile(Isolate* isolate,
StreamedSource* source,
Local<String> full_source_string,
const ScriptOrigin& origin));
static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
Local<Context> context, StreamedSource* source,
Local<String> full_source_string, const ScriptOrigin& origin);
@ -1492,7 +1497,7 @@ class V8_EXPORT Message {
* Returns the index within the line of the last character where
* the error occurred.
*/
V8_DEPRECATE_SOON("Use maybe version", int GetEndColumn() const);
V8_DEPRECATED("Use maybe version", int GetEndColumn() const);
V8_WARN_UNUSED_RESULT Maybe<int> GetEndColumn(Local<Context> context) const;
/**
@ -1604,8 +1609,7 @@ class V8_EXPORT StackFrame {
/**
* Returns the name of the resource that contains the script for the
* function for this StackFrame or sourceURL value if the script name
* is undefined and its source ends with //# sourceURL=... string or
* deprecated //@ sourceURL=... string.
* is undefined and its source ends with //# sourceURL=... string.
*/
Local<String> GetScriptNameOrSourceURL() const;
@ -1661,8 +1665,8 @@ class V8_EXPORT JSON {
* \param json_string The string to parse.
* \return The corresponding value if successfully parsed.
*/
static V8_DEPRECATE_SOON("Use maybe version",
Local<Value> Parse(Local<String> json_string));
static V8_DEPRECATED("Use maybe version",
Local<Value> Parse(Local<String> json_string));
static V8_WARN_UNUSED_RESULT MaybeLocal<Value> Parse(
Isolate* isolate, Local<String> json_string);
};
@ -1737,7 +1741,8 @@ class V8_EXPORT Value : public Data {
bool IsFunction() const;
/**
* Returns true if this value is an array.
* Returns true if this value is an array. Note that it will return false for
* a Proxy for an array.
*/
bool IsArray() const;
@ -1950,6 +1955,11 @@ class V8_EXPORT Value : public Data {
*/
bool IsSharedArrayBuffer() const;
/**
* Returns true if this value is a JavaScript Proxy.
*/
bool IsProxy() const;
V8_WARN_UNUSED_RESULT MaybeLocal<Boolean> ToBoolean(
Local<Context> context) const;
@ -1973,34 +1983,34 @@ class V8_EXPORT Value : public Data {
Local<Number> ToNumber(Isolate* isolate) const);
V8_DEPRECATE_SOON("Use maybe version",
Local<String> ToString(Isolate* isolate) const);
V8_DEPRECATE_SOON("Use maybe version",
Local<String> ToDetailString(Isolate* isolate) const);
V8_DEPRECATED("Use maybe version",
Local<String> ToDetailString(Isolate* isolate) const);
V8_DEPRECATE_SOON("Use maybe version",
Local<Object> ToObject(Isolate* isolate) const);
V8_DEPRECATE_SOON("Use maybe version",
Local<Integer> ToInteger(Isolate* isolate) const);
V8_DEPRECATE_SOON("Use maybe version",
Local<Uint32> ToUint32(Isolate* isolate) const);
V8_DEPRECATED("Use maybe version",
Local<Uint32> ToUint32(Isolate* isolate) const);
V8_DEPRECATE_SOON("Use maybe version",
Local<Int32> ToInt32(Isolate* isolate) const);
inline V8_DEPRECATE_SOON("Use maybe version",
Local<Boolean> ToBoolean() const);
inline V8_DEPRECATE_SOON("Use maybe version", Local<Number> ToNumber() const);
inline V8_DEPRECATED("Use maybe version", Local<Number> ToNumber() const);
inline V8_DEPRECATE_SOON("Use maybe version", Local<String> ToString() const);
inline V8_DEPRECATE_SOON("Use maybe version",
Local<String> ToDetailString() const);
inline V8_DEPRECATED("Use maybe version",
Local<String> ToDetailString() const);
inline V8_DEPRECATE_SOON("Use maybe version", Local<Object> ToObject() const);
inline V8_DEPRECATE_SOON("Use maybe version",
Local<Integer> ToInteger() const);
inline V8_DEPRECATE_SOON("Use maybe version", Local<Uint32> ToUint32() const);
inline V8_DEPRECATE_SOON("Use maybe version", Local<Int32> ToInt32() const);
inline V8_DEPRECATED("Use maybe version", Local<Uint32> ToUint32() const);
inline V8_DEPRECATED("Use maybe version", Local<Int32> ToInt32() const);
/**
* Attempts to convert a string to an array index.
* Returns an empty handle if the conversion fails.
*/
V8_DEPRECATE_SOON("Use maybe version", Local<Uint32> ToArrayIndex() const);
V8_DEPRECATED("Use maybe version", Local<Uint32> ToArrayIndex() const);
V8_WARN_UNUSED_RESULT MaybeLocal<Uint32> ToArrayIndex(
Local<Context> context) const;
@ -2188,6 +2198,8 @@ class V8_EXPORT String : public Name {
public:
virtual ~ExternalStringResourceBase() {}
virtual bool IsCompressible() const { return false; }
protected:
ExternalStringResourceBase() {}
@ -2304,7 +2316,7 @@ class V8_EXPORT String : public Name {
int length = -1);
/** Allocates a new string from Latin-1 data.*/
static V8_DEPRECATE_SOON(
static V8_DEPRECATED(
"Use maybe version",
Local<String> NewFromOneByte(Isolate* isolate, const uint8_t* data,
NewStringType type = kNormalString,
@ -2343,10 +2355,9 @@ class V8_EXPORT String : public Name {
* should the underlying buffer be deallocated or modified except through the
* destructor of the external string resource.
*/
static V8_DEPRECATE_SOON(
"Use maybe version",
Local<String> NewExternal(Isolate* isolate,
ExternalStringResource* resource));
static V8_DEPRECATED("Use maybe version",
Local<String> NewExternal(
Isolate* isolate, ExternalStringResource* resource));
static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewExternalTwoByte(
Isolate* isolate, ExternalStringResource* resource);
@ -2456,8 +2467,8 @@ class V8_EXPORT Symbol : public Name {
Local<Value> Name() const;
// Create a symbol. If name is not empty, it will be used as the description.
static Local<Symbol> New(
Isolate *isolate, Local<String> name = Local<String>());
static Local<Symbol> New(Isolate* isolate,
Local<String> name = Local<String>());
// Access global symbol registry.
// Note that symbols created this way are never collected, so
@ -2663,13 +2674,13 @@ class V8_EXPORT Object : public Value {
// will only be returned if the interceptor doesn't return a value.
//
// Note also that this only works for named properties.
V8_DEPRECATE_SOON("Use CreateDataProperty",
bool ForceSet(Local<Value> key, Local<Value> value,
PropertyAttribute attribs = None));
V8_DEPRECATE_SOON("Use CreateDataProperty",
Maybe<bool> ForceSet(Local<Context> context,
Local<Value> key, Local<Value> value,
PropertyAttribute attribs = None));
V8_DEPRECATED("Use CreateDataProperty / DefineOwnProperty",
bool ForceSet(Local<Value> key, Local<Value> value,
PropertyAttribute attribs = None));
V8_DEPRECATED("Use CreateDataProperty / DefineOwnProperty",
Maybe<bool> ForceSet(Local<Context> context, Local<Value> key,
Local<Value> value,
PropertyAttribute attribs = None));
V8_DEPRECATE_SOON("Use maybe version", Local<Value> Get(Local<Value> key));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
@ -2684,16 +2695,16 @@ class V8_EXPORT Object : public Value {
* any combination of ReadOnly, DontEnum and DontDelete. Returns
* None when the property doesn't exist.
*/
V8_DEPRECATE_SOON("Use maybe version",
PropertyAttribute GetPropertyAttributes(Local<Value> key));
V8_DEPRECATED("Use maybe version",
PropertyAttribute GetPropertyAttributes(Local<Value> key));
V8_WARN_UNUSED_RESULT Maybe<PropertyAttribute> GetPropertyAttributes(
Local<Context> context, Local<Value> key);
/**
* Returns Object.getOwnPropertyDescriptor as per ES5 section 15.2.3.3.
*/
V8_DEPRECATE_SOON("Use maybe version",
Local<Value> GetOwnPropertyDescriptor(Local<String> key));
V8_DEPRECATED("Use maybe version",
Local<Value> GetOwnPropertyDescriptor(Local<String> key));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetOwnPropertyDescriptor(
Local<Context> context, Local<String> key);
@ -2705,27 +2716,27 @@ class V8_EXPORT Object : public Value {
// TODO(dcarney): mark V8_WARN_UNUSED_RESULT
Maybe<bool> Delete(Local<Context> context, Local<Value> key);
V8_DEPRECATE_SOON("Use maybe version", bool Has(uint32_t index));
V8_DEPRECATED("Use maybe version", bool Has(uint32_t index));
V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context, uint32_t index);
V8_DEPRECATE_SOON("Use maybe version", bool Delete(uint32_t index));
V8_DEPRECATED("Use maybe version", bool Delete(uint32_t index));
// TODO(dcarney): mark V8_WARN_UNUSED_RESULT
Maybe<bool> Delete(Local<Context> context, uint32_t index);
V8_DEPRECATE_SOON("Use maybe version",
bool SetAccessor(Local<String> name,
AccessorGetterCallback getter,
AccessorSetterCallback setter = 0,
Local<Value> data = Local<Value>(),
AccessControl settings = DEFAULT,
PropertyAttribute attribute = None));
V8_DEPRECATE_SOON("Use maybe version",
bool SetAccessor(Local<Name> name,
AccessorNameGetterCallback getter,
AccessorNameSetterCallback setter = 0,
Local<Value> data = Local<Value>(),
AccessControl settings = DEFAULT,
PropertyAttribute attribute = None));
V8_DEPRECATED("Use maybe version",
bool SetAccessor(Local<String> name,
AccessorGetterCallback getter,
AccessorSetterCallback setter = 0,
Local<Value> data = Local<Value>(),
AccessControl settings = DEFAULT,
PropertyAttribute attribute = None));
V8_DEPRECATED("Use maybe version",
bool SetAccessor(Local<Name> name,
AccessorNameGetterCallback getter,
AccessorNameSetterCallback setter = 0,
Local<Value> data = Local<Value>(),
AccessControl settings = DEFAULT,
PropertyAttribute attribute = None));
// TODO(dcarney): mark V8_WARN_UNUSED_RESULT
Maybe<bool> SetAccessor(Local<Context> context, Local<Name> name,
AccessorNameGetterCallback getter,
@ -2782,8 +2793,7 @@ class V8_EXPORT Object : public Value {
* be skipped by __proto__ and it does not consult the security
* handler.
*/
V8_DEPRECATE_SOON("Use maybe version",
bool SetPrototype(Local<Value> prototype));
V8_DEPRECATED("Use maybe version", bool SetPrototype(Local<Value> prototype));
V8_WARN_UNUSED_RESULT Maybe<bool> SetPrototype(Local<Context> context,
Local<Value> prototype);
@ -2798,7 +2808,7 @@ class V8_EXPORT Object : public Value {
* This is different from Value::ToString() that may call
* user-defined toString function. This one does not.
*/
V8_DEPRECATE_SOON("Use maybe version", Local<String> ObjectProtoToString());
V8_DEPRECATED("Use maybe version", Local<String> ObjectProtoToString());
V8_WARN_UNUSED_RESULT MaybeLocal<String> ObjectProtoToString(
Local<Context> context);
@ -2843,8 +2853,7 @@ class V8_EXPORT Object : public Value {
void SetAlignedPointerInInternalField(int index, void* value);
// Testers for local properties.
V8_DEPRECATE_SOON("Use maybe version",
bool HasOwnProperty(Local<String> key));
V8_DEPRECATED("Use maybe version", bool HasOwnProperty(Local<String> key));
V8_WARN_UNUSED_RESULT Maybe<bool> HasOwnProperty(Local<Context> context,
Local<Name> key);
V8_DEPRECATE_SOON("Use maybe version",
@ -2864,7 +2873,7 @@ class V8_EXPORT Object : public Value {
* If result.IsEmpty() no real property was located in the prototype chain.
* This means interceptors in the prototype chain are not called.
*/
V8_DEPRECATE_SOON(
V8_DEPRECATED(
"Use maybe version",
Local<Value> GetRealNamedPropertyInPrototypeChain(Local<String> key));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetRealNamedPropertyInPrototypeChain(
@ -2875,7 +2884,7 @@ class V8_EXPORT Object : public Value {
* which can be None or any combination of ReadOnly, DontEnum and DontDelete.
* Interceptors in the prototype chain are not called.
*/
V8_DEPRECATE_SOON(
V8_DEPRECATED(
"Use maybe version",
Maybe<PropertyAttribute> GetRealNamedPropertyAttributesInPrototypeChain(
Local<String> key));
@ -2888,8 +2897,8 @@ class V8_EXPORT Object : public Value {
* in the prototype chain.
* This means interceptors in the prototype chain are not called.
*/
V8_DEPRECATE_SOON("Use maybe version",
Local<Value> GetRealNamedProperty(Local<String> key));
V8_DEPRECATED("Use maybe version",
Local<Value> GetRealNamedProperty(Local<String> key));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetRealNamedProperty(
Local<Context> context, Local<Name> key);
@ -2898,9 +2907,9 @@ class V8_EXPORT Object : public Value {
* None or any combination of ReadOnly, DontEnum and DontDelete.
* Interceptors in the prototype chain are not called.
*/
V8_DEPRECATE_SOON("Use maybe version",
Maybe<PropertyAttribute> GetRealNamedPropertyAttributes(
Local<String> key));
V8_DEPRECATED("Use maybe version",
Maybe<PropertyAttribute> GetRealNamedPropertyAttributes(
Local<String> key));
V8_WARN_UNUSED_RESULT Maybe<PropertyAttribute> GetRealNamedPropertyAttributes(
Local<Context> context, Local<Name> key);
@ -2919,12 +2928,12 @@ class V8_EXPORT Object : public Value {
*/
int GetIdentityHash();
V8_DEPRECATE_SOON("Use v8::Object::SetPrivate instead.",
bool SetHiddenValue(Local<String> key, Local<Value> value));
V8_DEPRECATE_SOON("Use v8::Object::GetHidden instead.",
Local<Value> GetHiddenValue(Local<String> key));
V8_DEPRECATE_SOON("Use v8::Object::DeletePrivate instead.",
bool DeleteHiddenValue(Local<String> key));
V8_DEPRECATED("Use v8::Object::SetPrivate instead.",
bool SetHiddenValue(Local<String> key, Local<Value> value));
V8_DEPRECATED("Use v8::Object::GetPrivate instead.",
Local<Value> GetHiddenValue(Local<String> key));
V8_DEPRECATED("Use v8::Object::DeletePrivate instead.",
bool DeleteHiddenValue(Local<String> key));
/**
* Clone this object with a fast but shallow copy. Values will point
@ -2949,9 +2958,9 @@ class V8_EXPORT Object : public Value {
* Call an Object as a function if a callback is set by the
* ObjectTemplate::SetCallAsFunctionHandler method.
*/
V8_DEPRECATE_SOON("Use maybe version",
Local<Value> CallAsFunction(Local<Value> recv, int argc,
Local<Value> argv[]));
V8_DEPRECATED("Use maybe version",
Local<Value> CallAsFunction(Local<Value> recv, int argc,
Local<Value> argv[]));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> CallAsFunction(Local<Context> context,
Local<Value> recv,
int argc,
@ -2962,9 +2971,8 @@ class V8_EXPORT Object : public Value {
* ObjectTemplate::SetCallAsFunctionHandler method.
* Note: This method behaves like the Function::NewInstance method.
*/
V8_DEPRECATE_SOON("Use maybe version",
Local<Value> CallAsConstructor(int argc,
Local<Value> argv[]));
V8_DEPRECATED("Use maybe version",
Local<Value> CallAsConstructor(int argc, Local<Value> argv[]));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> CallAsConstructor(
Local<Context> context, int argc, Local<Value> argv[]);
@ -2996,10 +3004,11 @@ class V8_EXPORT Array : public Object {
* Clones an element at index |index|. Returns an empty
* handle if cloning fails (for any reason).
*/
V8_DEPRECATE_SOON("Use maybe version",
Local<Object> CloneElementAt(uint32_t index));
V8_WARN_UNUSED_RESULT MaybeLocal<Object> CloneElementAt(
Local<Context> context, uint32_t index);
V8_DEPRECATED("Cloning is not supported.",
Local<Object> CloneElementAt(uint32_t index));
V8_DEPRECATED("Cloning is not supported.",
MaybeLocal<Object> CloneElementAt(Local<Context> context,
uint32_t index));
/**
* Creates a JavaScript array with the given length. If the length
@ -3042,15 +3051,6 @@ class V8_EXPORT Map : public Object {
*/
static Local<Map> New(Isolate* isolate);
/**
* Creates a new Map containing the elements of array, which must be formatted
* in the same manner as the array returned from AsArray().
* Guaranteed to be side-effect free if the array contains no holes.
*/
static V8_WARN_UNUSED_RESULT V8_DEPRECATED(
"Use mutation methods instead",
MaybeLocal<Map> FromArray(Local<Context> context, Local<Array> array));
V8_INLINE static Map* Cast(Value* obj);
private:
@ -3083,14 +3083,6 @@ class V8_EXPORT Set : public Object {
*/
static Local<Set> New(Isolate* isolate);
/**
* Creates a new Set containing the items in array.
* Guaranteed to be side-effect free if the array contains no holes.
*/
static V8_WARN_UNUSED_RESULT V8_DEPRECATED(
"Use mutation methods instead",
MaybeLocal<Set> FromArray(Local<Context> context, Local<Array> array));
V8_INLINE static Set* Cast(Value* obj);
private:
@ -3238,13 +3230,12 @@ class V8_EXPORT Function : public Object {
Local<Function> New(Isolate* isolate, FunctionCallback callback,
Local<Value> data = Local<Value>(), int length = 0));
V8_DEPRECATE_SOON("Use maybe version",
Local<Object> NewInstance(int argc, Local<Value> argv[])
const);
V8_DEPRECATED("Use maybe version",
Local<Object> NewInstance(int argc, Local<Value> argv[]) const);
V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(
Local<Context> context, int argc, Local<Value> argv[]) const;
V8_DEPRECATE_SOON("Use maybe version", Local<Object> NewInstance() const);
V8_DEPRECATED("Use maybe version", Local<Object> NewInstance() const);
V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(
Local<Context> context) const {
return NewInstance(context, 0, nullptr);
@ -3268,6 +3259,12 @@ class V8_EXPORT Function : public Object {
*/
Local<Value> GetInferredName() const;
/**
* displayName if it is set, otherwise name if it is configured, otherwise
* function name, otherwise inferred name.
*/
Local<Value> GetDebugName() const;
/**
* User-defined name assigned to the "displayName" property of this function.
* Used to facilitate debugging and profiling of JavaScript code.
@ -3357,18 +3354,19 @@ class V8_EXPORT Promise : public Object {
* an argument. If the promise is already resolved/rejected, the handler is
* invoked at the end of turn.
*/
V8_DEPRECATE_SOON("Use maybe version",
Local<Promise> Chain(Local<Function> handler));
V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Chain(Local<Context> context,
Local<Function> handler);
V8_DEPRECATED("Use maybe version of Then",
Local<Promise> Chain(Local<Function> handler));
V8_DEPRECATED("Use Then",
V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Chain(
Local<Context> context, Local<Function> handler));
V8_DEPRECATE_SOON("Use maybe version",
Local<Promise> Catch(Local<Function> handler));
V8_DEPRECATED("Use maybe version",
Local<Promise> Catch(Local<Function> handler));
V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Catch(Local<Context> context,
Local<Function> handler);
V8_DEPRECATE_SOON("Use maybe version",
Local<Promise> Then(Local<Function> handler));
V8_DEPRECATED("Use maybe version",
Local<Promise> Then(Local<Function> handler));
V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Then(Local<Context> context,
Local<Function> handler);
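Migration to the maybe versions is mechanical: pass the context and unwrap the MaybeLocal. A sketch for Then (Catch is analogous):

// Deprecated: v8::Local<v8::Promise> p2 = promise->Then(handler);
v8::Local<v8::Promise> p2;
if (promise->Then(context, handler).ToLocal(&p2)) {
  // use |p2|; an empty result means an exception is pending
}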
@ -3386,6 +3384,32 @@ class V8_EXPORT Promise : public Object {
};
/**
* An instance of the built-in Proxy constructor (ECMA-262, 6th Edition,
* 26.2.1).
*/
class V8_EXPORT Proxy : public Object {
public:
Local<Object> GetTarget();
Local<Value> GetHandler();
bool IsRevoked();
void Revoke();
/**
* Creates a new Proxy for the given target object with the given handler.
*/
static MaybeLocal<Proxy> New(Local<Context> context,
Local<Object> local_target,
Local<Object> local_handler);
V8_INLINE static Proxy* Cast(Value* obj);
private:
Proxy();
static void CheckCast(Value* obj);
};
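A minimal sketch of the new Proxy surface, assuming |target| and |handler| are Local<Object> handles in a live, entered context:

v8::Local<v8::Proxy> proxy =
    v8::Proxy::New(context, target, handler).ToLocalChecked();
v8::Local<v8::Object> t = proxy->GetTarget();  // the wrapped target
proxy->Revoke();
// proxy->IsRevoked() now returns true; further trap invocations throw in JS.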
#ifndef V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT
// The number of required internal fields can be defined by embedder.
#define V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT 2
@ -3943,7 +3967,8 @@ class V8_EXPORT NumberObject : public Object {
*/
class V8_EXPORT BooleanObject : public Object {
public:
static Local<Value> New(bool value);
static Local<Value> New(Isolate* isolate, bool value);
V8_DEPRECATED("Pass an isolate", static Local<Value> New(bool value));
bool ValueOf() const;
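As elsewhere in this header, construction now takes the isolate explicitly:

// Deprecated: v8::BooleanObject::New(true);
v8::Local<v8::Value> wrapped = v8::BooleanObject::New(isolate, true);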
@ -4422,6 +4447,16 @@ class V8_EXPORT FunctionTemplate : public Template {
Local<Value> data = Local<Value>(),
Local<Signature> signature = Local<Signature>(), int length = 0);
/**
* Creates a function template with a fast handler. If a fast handler is set,
* the callback cannot be null.
*/
static Local<FunctionTemplate> NewWithFastHandler(
Isolate* isolate, FunctionCallback callback,
experimental::FastAccessorBuilder* fast_handler = nullptr,
Local<Value> data = Local<Value>(),
Local<Signature> signature = Local<Signature>(), int length = 0);
/** Returns the unique function instance in the current execution context.*/
V8_DEPRECATE_SOON("Use maybe version", Local<Function> GetFunction());
V8_WARN_UNUSED_RESULT MaybeLocal<Function> GetFunction(
@ -4432,8 +4467,9 @@ class V8_EXPORT FunctionTemplate : public Template {
* callback is called whenever the function created from this
* FunctionTemplate is called.
*/
void SetCallHandler(FunctionCallback callback,
Local<Value> data = Local<Value>());
void SetCallHandler(
FunctionCallback callback, Local<Value> data = Local<Value>(),
experimental::FastAccessorBuilder* fast_handler = nullptr);
/** Set the predefined length property for the FunctionTemplate. */
void SetLength(int length);
@ -4584,7 +4620,7 @@ class V8_EXPORT ObjectTemplate : public Template {
static Local<ObjectTemplate> New(
Isolate* isolate,
Local<FunctionTemplate> constructor = Local<FunctionTemplate>());
static V8_DEPRECATE_SOON("Use isolate version", Local<ObjectTemplate> New());
static V8_DEPRECATED("Use isolate version", Local<ObjectTemplate> New());
/** Creates a new instance of this template.*/
V8_DEPRECATE_SOON("Use maybe version", Local<Object> NewInstance());
@ -4717,7 +4753,7 @@ class V8_EXPORT ObjectTemplate : public Template {
void SetAccessCheckCallback(AccessCheckCallback callback,
Local<Value> data = Local<Value>());
V8_DEPRECATE_SOON(
V8_DEPRECATED(
"Use SetAccessCheckCallback instead",
void SetAccessCheckCallbacks(NamedSecurityCallback named_handler,
IndexedSecurityCallback indexed_handler,
@ -4772,21 +4808,6 @@ class V8_EXPORT AccessorSignature : public Data {
};
/**
* A utility for determining the type of objects based on the template
* they were constructed from.
*/
class V8_EXPORT TypeSwitch : public Data {
public:
static Local<TypeSwitch> New(Local<FunctionTemplate> type);
static Local<TypeSwitch> New(int argc, Local<FunctionTemplate> types[]);
int match(Local<Value> value);
private:
TypeSwitch();
};
// --- Extensions ---
class V8_EXPORT ExternalOneByteStringResourceImpl
@ -4932,7 +4953,9 @@ class V8_EXPORT Exception {
* Will try to reconstruct the original stack trace from the exception value,
* or capture the current stack trace if not available.
*/
static Local<Message> CreateMessage(Local<Value> exception);
static Local<Message> CreateMessage(Isolate* isolate, Local<Value> exception);
V8_DEPRECATED("Use version with an Isolate*",
static Local<Message> CreateMessage(Local<Value> exception));
/**
* Returns the original stack trace that was captured at the creation time
@ -4997,8 +5020,10 @@ class PromiseRejectMessage {
V8_INLINE PromiseRejectEvent GetEvent() const { return event_; }
V8_INLINE Local<Value> GetValue() const { return value_; }
// DEPRECATED. Use v8::Exception::CreateMessage(GetValue())->GetStackTrace()
V8_INLINE Local<StackTrace> GetStackTrace() const { return stack_trace_; }
V8_DEPRECATED("Use v8::Exception::CreateMessage(GetValue())->GetStackTrace()",
V8_INLINE Local<StackTrace> GetStackTrace() const) {
return stack_trace_;
}
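Inside a PromiseRejectCallback, the replacement named in the deprecation message looks roughly like this sketch (OnPromiseReject is a hypothetical embedder callback; note CreateMessage itself now prefers the Isolate* overload):

void OnPromiseReject(v8::PromiseRejectMessage message) {
  v8::Isolate* isolate = v8::Isolate::GetCurrent();
  v8::Local<v8::Message> msg =
      v8::Exception::CreateMessage(isolate, message.GetValue());
  v8::Local<v8::StackTrace> stack = msg->GetStackTrace();
  // ... report |stack| ...
}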
private:
Local<Promise> promise_;
@ -5050,12 +5075,6 @@ enum GCCallbackFlags {
kGCCallbackFlagSynchronousPhantomCallbackProcessing = 1 << 3
};
V8_DEPRECATE_SOON("Use GCCallBack instead",
typedef void (*GCPrologueCallback)(GCType type,
GCCallbackFlags flags));
V8_DEPRECATE_SOON("Use GCCallBack instead",
typedef void (*GCEpilogueCallback)(GCType type,
GCCallbackFlags flags));
typedef void (*GCCallback)(GCType type, GCCallbackFlags flags);
typedef void (*InterruptCallback)(Isolate* isolate, void* data);
@ -5426,6 +5445,15 @@ class V8_EXPORT Isolate {
kSloppyMode = 8,
kStrictMode = 9,
kStrongMode = 10,
kRegExpPrototypeStickyGetter = 11,
kRegExpPrototypeToString = 12,
kRegExpPrototypeUnicodeGetter = 13,
kIntlV8Parse = 14,
kIntlPattern = 15,
kIntlResolved = 16,
kPromiseChain = 17,
kPromiseAccept = 18,
kPromiseDefer = 19,
kUseCounterFeatureCount // This enum value must be last.
};
@ -5492,6 +5520,15 @@ class V8_EXPORT Isolate {
*/
void Dispose();
/**
* Discards all V8 thread-specific data for the Isolate. Should be used
* if a thread is terminating and it has used an Isolate that will outlive
* the thread -- all thread-specific data for an Isolate is discarded when
* an Isolate is disposed so this call is pointless if an Isolate is about
* to be Disposed.
*/
void DiscardThreadSpecificMetadata();
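A sketch of the intended use, for a worker thread that borrows a long-lived isolate:

void WorkerThreadBody(v8::Isolate* isolate) {
  {
    v8::Locker locker(isolate);
    v8::Isolate::Scope isolate_scope(isolate);
    // ... run some JavaScript ...
  }
  // The isolate outlives this thread; drop this thread's TLS for it.
  isolate->DiscardThreadSpecificMetadata();
}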
/**
* Associate embedder-specific data with the isolate. |slot| has to be
* between 0 and GetNumberOfDataSlots() - 1.
@ -5656,14 +5693,6 @@ class V8_EXPORT Isolate {
template<typename T, typename S>
void SetReference(const Persistent<T>& parent, const Persistent<S>& child);
V8_DEPRECATE_SOON("Use GCCallBack instead",
typedef void (*GCPrologueCallback)(Isolate* isolate,
GCType type,
GCCallbackFlags flags));
V8_DEPRECATE_SOON("Use GCCallBack instead",
typedef void (*GCEpilogueCallback)(Isolate* isolate,
GCType type,
GCCallbackFlags flags));
typedef void (*GCCallback)(Isolate* isolate, GCType type,
GCCallbackFlags flags);
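The per-isolate callback type carries the isolate explicitly, so a prologue hook registered through the isolate looks like this sketch:

void OnGCPrologue(v8::Isolate* isolate, v8::GCType type,
                  v8::GCCallbackFlags flags) {
  // e.g. note that a collection of |type| is starting
}
// ...
isolate->AddGCPrologueCallback(OnGCPrologue);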
@ -5850,8 +5879,8 @@ class V8_EXPORT Isolate {
*/
bool IdleNotificationDeadline(double deadline_in_seconds);
V8_DEPRECATE_SOON("use IdleNotificationDeadline()",
bool IdleNotification(int idle_time_in_ms));
V8_DEPRECATED("use IdleNotificationDeadline()",
bool IdleNotification(int idle_time_in_ms));
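The deadline variant expects an absolute time in seconds on the platform's monotonic clock; a sketch, assuming |platform| is the embedder's v8::Platform:

// Deprecated: isolate->IdleNotification(100);  // milliseconds
double deadline = platform->MonotonicallyIncreasingTime() + 0.1;  // +100 ms
bool finished = isolate->IdleNotificationDeadline(deadline);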
/**
* Optional notification that the system is running low on memory.
@ -6072,7 +6101,7 @@ typedef uintptr_t (*ReturnAddressLocationResolver)(
class V8_EXPORT V8 {
public:
/** Set the callback to invoke in case of fatal errors. */
V8_INLINE static V8_DEPRECATE_SOON(
V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void SetFatalErrorHandler(FatalErrorCallback that));
@ -6080,7 +6109,7 @@ class V8_EXPORT V8 {
* Set the callback to invoke to check if code generation from
* strings should be allowed.
*/
V8_INLINE static V8_DEPRECATE_SOON(
V8_INLINE static V8_DEPRECATED(
"Use isolate version", void SetAllowCodeGenerationFromStringsCallback(
AllowCodeGenerationFromStringsCallback that));
@ -6088,7 +6117,7 @@ class V8_EXPORT V8 {
* Check if V8 is dead and therefore unusable. This is the case after
* fatal errors such as out-of-memory situations.
*/
V8_INLINE static V8_DEPRECATE_SOON("no alternative", bool IsDead());
V8_INLINE static V8_DEPRECATED("Use isolate version", bool IsDead());
/**
* Hand startup data to V8, in case the embedder has chosen to build
@ -6124,7 +6153,7 @@ class V8_EXPORT V8 {
* If data is specified, it will be passed to the callback when it is called.
* Otherwise, the exception object will be passed to the callback instead.
*/
V8_INLINE static V8_DEPRECATE_SOON(
V8_INLINE static V8_DEPRECATED(
"Use isolate version",
bool AddMessageListener(MessageCallback that,
Local<Value> data = Local<Value>()));
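Each of these static entry points has a per-isolate equivalent; the message-listener pair, for example, migrates as follows (OnMessage is a hypothetical MessageCallback):

// Deprecated: v8::V8::AddMessageListener(OnMessage);
isolate->AddMessageListener(OnMessage);
// ... and later ...
isolate->RemoveMessageListeners(OnMessage);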
@ -6132,14 +6161,14 @@ class V8_EXPORT V8 {
/**
* Remove all message listeners from the specified callback function.
*/
V8_INLINE static V8_DEPRECATE_SOON(
V8_INLINE static V8_DEPRECATED(
"Use isolate version", void RemoveMessageListeners(MessageCallback that));
/**
* Tells V8 to capture current stack trace when uncaught exception occurs
* and report it to the message listeners. The option is off by default.
*/
V8_INLINE static V8_DEPRECATE_SOON(
V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void SetCaptureStackTraceForUncaughtExceptions(
bool capture, int frame_limit = 10,
@ -6161,7 +6190,7 @@ class V8_EXPORT V8 {
static const char* GetVersion();
/** Callback function for reporting failed access checks.*/
V8_INLINE static V8_DEPRECATE_SOON(
V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback));
@ -6175,7 +6204,7 @@ class V8_EXPORT V8 {
* register the same callback function two times with different
* GCType filters.
*/
static V8_DEPRECATE_SOON(
static V8_DEPRECATED(
"Use isolate version",
void AddGCPrologueCallback(GCCallback callback,
GCType gc_type_filter = kGCTypeAll));
@ -6184,7 +6213,7 @@ class V8_EXPORT V8 {
* This function removes callback which was installed by
* AddGCPrologueCallback function.
*/
V8_INLINE static V8_DEPRECATE_SOON(
V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void RemoveGCPrologueCallback(GCCallback callback));
@ -6198,7 +6227,7 @@ class V8_EXPORT V8 {
* register the same callback function two times with different
* GCType filters.
*/
static V8_DEPRECATE_SOON(
static V8_DEPRECATED(
"Use isolate version",
void AddGCEpilogueCallback(GCCallback callback,
GCType gc_type_filter = kGCTypeAll));
@ -6207,7 +6236,7 @@ class V8_EXPORT V8 {
* This function removes callback which was installed by
* AddGCEpilogueCallback function.
*/
V8_INLINE static V8_DEPRECATE_SOON(
V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void RemoveGCEpilogueCallback(GCCallback callback));
@ -6215,7 +6244,7 @@ class V8_EXPORT V8 {
* Enables the host application to provide a mechanism to be notified
* and perform custom logging when V8 Allocates Executable Memory.
*/
V8_INLINE static V8_DEPRECATE_SOON(
V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
ObjectSpace space,
@ -6224,7 +6253,7 @@ class V8_EXPORT V8 {
/**
* Removes callback that was installed by AddMemoryAllocationCallback.
*/
V8_INLINE static V8_DEPRECATE_SOON(
V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback));
@ -6256,8 +6285,8 @@ class V8_EXPORT V8 {
*
* \param isolate The isolate in which to terminate the current JS execution.
*/
V8_INLINE static V8_DEPRECATE_SOON("Use isolate version",
void TerminateExecution(Isolate* isolate));
V8_INLINE static V8_DEPRECATED("Use isolate version",
void TerminateExecution(Isolate* isolate));
/**
* Is V8 terminating JavaScript execution.
@ -6269,7 +6298,7 @@ class V8_EXPORT V8 {
*
* \param isolate The isolate in which to check.
*/
V8_INLINE static V8_DEPRECATE_SOON(
V8_INLINE static V8_DEPRECATED(
"Use isolate version",
bool IsExecutionTerminating(Isolate* isolate = NULL));
@ -6289,7 +6318,7 @@ class V8_EXPORT V8 {
*
* \param isolate The isolate in which to resume execution capability.
*/
V8_INLINE static V8_DEPRECATE_SOON(
V8_INLINE static V8_DEPRECATED(
"Use isolate version", void CancelTerminateExecution(Isolate* isolate));
/**
@ -6308,15 +6337,15 @@ class V8_EXPORT V8 {
* heap. GC is not invoked prior to iterating, therefore there is no
* guarantee that visited objects are still alive.
*/
V8_INLINE static V8_DEPRECATE_SOON(
"Use isoalte version",
V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void VisitExternalResources(ExternalResourceVisitor* visitor));
/**
* Iterates through all the persistent handles in the current isolate's heap
* that have class_ids.
*/
V8_INLINE static V8_DEPRECATE_SOON(
V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void VisitHandlesWithClassIds(PersistentHandleVisitor* visitor));
@ -6324,7 +6353,7 @@ class V8_EXPORT V8 {
* Iterates through all the persistent handles in isolate's heap that have
* class_ids.
*/
V8_INLINE static V8_DEPRECATE_SOON(
V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void VisitHandlesWithClassIds(Isolate* isolate,
PersistentHandleVisitor* visitor));
@ -6336,7 +6365,7 @@ class V8_EXPORT V8 {
* garbage collection but is free to visit an arbitrary superset of these
* objects.
*/
V8_INLINE static V8_DEPRECATE_SOON(
V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void VisitHandlesForPartialDependence(Isolate* isolate,
PersistentHandleVisitor* visitor));
@ -6494,7 +6523,7 @@ class V8_EXPORT TryCatch {
* all TryCatch blocks should be stack allocated because the memory
* location itself is compared against JavaScript try/catch blocks.
*/
V8_DEPRECATE_SOON("Use isolate version", TryCatch());
V8_DEPRECATED("Use isolate version", TryCatch());
/**
* Creates a new try/catch block and registers it with v8. Note that
@ -7180,7 +7209,7 @@ class Internals {
V8_INLINE static void SetEmbedderData(v8::Isolate* isolate,
uint32_t slot,
void* data) {
uint8_t *addr = reinterpret_cast<uint8_t *>(isolate) +
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
kIsolateEmbedderDataOffset + slot * kApiPointerSize;
*reinterpret_cast<void**>(addr) = data;
}
@ -8059,6 +8088,14 @@ Promise* Promise::Cast(v8::Value* value) {
}
Proxy* Proxy::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
#endif
return static_cast<Proxy*>(value);
}
Promise::Resolver* Promise::Resolver::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
@ -8483,4 +8520,4 @@ void V8::VisitHandlesForPartialDependence(Isolate* isolate,
#undef TYPE_CHECK
#endif // V8_H_
#endif // INCLUDE_V8_H_

3
deps/v8/include/v8config.h

@ -163,7 +163,6 @@
//
// V8_HAS_CXX11_ALIGNAS - alignas specifier supported
// V8_HAS_CXX11_ALIGNOF - alignof(type) operator supported
// V8_HAS_CXX11_STATIC_ASSERT - static_assert() supported
//
// Compiler-specific feature detection
//
@ -230,7 +229,6 @@
# define V8_HAS_BUILTIN_UADD_OVERFLOW (__has_builtin(__builtin_uadd_overflow))
# define V8_HAS_CXX11_ALIGNAS (__has_feature(cxx_alignas))
# define V8_HAS_CXX11_STATIC_ASSERT (__has_feature(cxx_static_assert))
#elif defined(__GNUC__)
@ -277,7 +275,6 @@
# if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L
# define V8_HAS_CXX11_ALIGNAS (V8_GNUC_PREREQ(4, 8, 0))
# define V8_HAS_CXX11_ALIGNOF (V8_GNUC_PREREQ(4, 8, 0))
# define V8_HAS_CXX11_STATIC_ASSERT (V8_GNUC_PREREQ(4, 3, 0))
# endif
#endif

37
deps/v8/infra/config/cq.cfg

@ -28,22 +28,49 @@ verifiers {
builders { name: "v8_android_arm_compile_rel" }
builders { name: "v8_linux64_asan_rel" }
builders { name: "v8_linux64_avx2_rel" }
builders { name: "v8_linux64_rel" }
builders { name: "v8_linux64_rel_ng" }
builders {
name: "v8_linux64_rel_ng_triggered"
triggered_by: "v8_linux64_rel_ng"
}
builders { name: "v8_linux_arm64_rel" }
builders { name: "v8_linux_arm_rel" }
builders { name: "v8_linux_chromium_gn_rel" }
builders { name: "v8_linux_dbg" }
builders { name: "v8_linux_dbg_ng" }
builders {
name: "v8_linux_dbg_ng_triggered"
triggered_by: "v8_linux_dbg_ng"
}
builders { name: "v8_linux_gcc_compile_rel" }
builders { name: "v8_linux_mipsel_compile_rel" }
builders { name: "v8_linux_mips64el_compile_rel" }
builders { name: "v8_linux_nodcheck_rel" }
builders { name: "v8_linux_rel" }
builders { name: "v8_linux_rel_ng" }
builders {
name: "v8_linux_rel_ng_triggered"
triggered_by: "v8_linux_rel_ng"
}
builders { name: "v8_mac_rel" }
builders { name: "v8_presubmit" }
builders { name: "v8_win64_rel" }
builders { name: "v8_win64_rel_ng" }
builders {
name: "v8_win64_rel_ng_triggered"
triggered_by: "v8_win64_rel_ng"
}
builders { name: "v8_win_compile_dbg" }
builders { name: "v8_win_nosnap_shared_compile_rel" }
builders { name: "v8_win_rel" }
builders { name: "v8_win_rel_ng" }
builders {
name: "v8_win_rel_ng_triggered"
triggered_by: "v8_win_rel_ng"
}
}
buckets {
name: "tryserver.blink"
builders {
name: "linux_blink_rel"
experiment_percentage: 20
}
}
}

4
deps/v8/samples/samples.gyp

@ -40,10 +40,6 @@
'include_dirs': [
'..',
],
'defines': [
# TODO(jochen): Remove again after this is globally turned on.
'V8_IMMINENT_DEPRECATION_WARNINGS',
],
'conditions': [
['v8_enable_i18n_support==1', {
'dependencies': [

4
deps/v8/snapshot_toolchain.gni

@ -34,10 +34,10 @@
if (host_cpu == "x64" && host_os == "linux") {
if (target_cpu == "arm" || target_cpu == "mipsel" || target_cpu == "x86") {
snapshot_toolchain = "//build/toolchain/linux:clang_x86"
} else if (target_cpu == "x64") {
} else if (target_cpu == "x64" || target_cpu == "arm64" || target_cpu == "mips64el") {
snapshot_toolchain = "//build/toolchain/linux:clang_x64"
} else {
assert(false, "Need environment for this arch")
assert(false, "Need environment for this arch: $target_cpu")
}
} else {
snapshot_toolchain = default_toolchain

6
deps/v8/src/DEPS

@ -1,7 +1,10 @@
include_rules = [
"+base/trace_event/common/trace_event_common.h",
"+src",
"-src/compiler",
"+src/compiler/pipeline.h",
"+src/compiler/code-stub-assembler.h",
"+src/compiler/wasm-compiler.h",
"-src/heap",
"+src/heap/heap.h",
"+src/heap/heap-inl.h",
@ -24,4 +27,7 @@ specific_include_rules = {
"d8\.cc": [
"+include/libplatform/libplatform.h",
],
"api-experimental\.cc": [
"+src/compiler/fast-accessor-assembler.h",
],
}

2
deps/v8/src/OWNERS

@ -1,2 +1,4 @@
per-file i18n.*=cira@chromium.org
per-file i18n.*=mnita@google.com
per-file typing-asm.*=aseemgarg@chromium.org
per-file typing-asm.*=bradnelson@chromium.org

11
deps/v8/src/accessors.cc

@ -161,7 +161,8 @@ void Accessors::ArgumentsIteratorSetter(
const v8::PropertyCallbackInfo<void>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<JSObject> object_handle = Utils::OpenHandle(*info.This());
Handle<JSObject> object_handle =
Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
Handle<Object> value_handle = Utils::OpenHandle(*val);
Handle<Name> name_handle = Utils::OpenHandle(*name);
@ -205,7 +206,7 @@ void Accessors::ArrayLengthSetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<JSObject> object = Utils::OpenHandle(*info.This());
Handle<JSReceiver> object = Utils::OpenHandle(*info.This());
Handle<JSArray> array = Handle<JSArray>::cast(object);
Handle<Object> length_obj = Utils::OpenHandle(*val);
@ -1328,12 +1329,6 @@ MaybeHandle<JSFunction> FindCaller(Isolate* isolate,
if (!caller->shared()->native() && potential_caller != NULL) {
caller = potential_caller;
}
// If caller is bound, return null. This is compatible with JSC, and
// allows us to make bound functions use the strict function map
// and its associated throwing caller and arguments.
if (caller->shared()->bound()) {
return MaybeHandle<JSFunction>();
}
// Censor if the caller is not a sloppy mode function.
// Change from ES5, which used to throw, see:
// https://bugs.ecmascript.org/show_bug.cgi?id=310

2
deps/v8/src/allocation-site-scopes.h

@ -5,7 +5,7 @@
#ifndef V8_ALLOCATION_SITE_SCOPES_H_
#define V8_ALLOCATION_SITE_SCOPES_H_
#include "src/ast.h"
#include "src/ast/ast.h"
#include "src/handles.h"
#include "src/objects.h"
#include "src/zone.h"

126
deps/v8/src/api-experimental.cc

@ -0,0 +1,126 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/**
* Implementation for v8-experimental.h.
*/
#include "src/api-experimental.h"
#include "include/v8.h"
#include "include/v8-experimental.h"
#include "src/api.h"
#include "src/compiler/fast-accessor-assembler.h"
namespace {
v8::internal::compiler::FastAccessorAssembler* FromApi(
v8::experimental::FastAccessorBuilder* builder) {
return reinterpret_cast<v8::internal::compiler::FastAccessorAssembler*>(
builder);
}
v8::experimental::FastAccessorBuilder* FromInternal(
v8::internal::compiler::FastAccessorAssembler* fast_accessor_assembler) {
return reinterpret_cast<v8::experimental::FastAccessorBuilder*>(
fast_accessor_assembler);
}
} // namespace
namespace v8 {
namespace internal {
namespace experimental {
MaybeHandle<Code> BuildCodeFromFastAccessorBuilder(
v8::experimental::FastAccessorBuilder* fast_handler) {
i::MaybeHandle<i::Code> code;
if (fast_handler != nullptr) {
auto faa = FromApi(fast_handler);
code = faa->Build();
CHECK(!code.is_null());
delete faa;
}
return code;
}
} // namespace experimental
} // namespace internal
namespace experimental {
FastAccessorBuilder* FastAccessorBuilder::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
internal::compiler::FastAccessorAssembler* faa =
new internal::compiler::FastAccessorAssembler(i_isolate);
return FromInternal(faa);
}
FastAccessorBuilder::ValueId FastAccessorBuilder::IntegerConstant(
int const_value) {
return FromApi(this)->IntegerConstant(const_value);
}
FastAccessorBuilder::ValueId FastAccessorBuilder::GetReceiver() {
return FromApi(this)->GetReceiver();
}
FastAccessorBuilder::ValueId FastAccessorBuilder::LoadInternalField(
ValueId value, int field_no) {
return FromApi(this)->LoadInternalField(value, field_no);
}
FastAccessorBuilder::ValueId FastAccessorBuilder::LoadValue(ValueId value_id,
int offset) {
return FromApi(this)->LoadValue(value_id, offset);
}
FastAccessorBuilder::ValueId FastAccessorBuilder::LoadObject(ValueId value_id,
int offset) {
return FromApi(this)->LoadObject(value_id, offset);
}
void FastAccessorBuilder::ReturnValue(ValueId value) {
FromApi(this)->ReturnValue(value);
}
void FastAccessorBuilder::CheckFlagSetOrReturnNull(ValueId value_id, int mask) {
FromApi(this)->CheckFlagSetOrReturnNull(value_id, mask);
}
void FastAccessorBuilder::CheckNotZeroOrReturnNull(ValueId value_id) {
FromApi(this)->CheckNotZeroOrReturnNull(value_id);
}
FastAccessorBuilder::LabelId FastAccessorBuilder::MakeLabel() {
return FromApi(this)->MakeLabel();
}
void FastAccessorBuilder::SetLabel(LabelId label_id) {
FromApi(this)->SetLabel(label_id);
}
void FastAccessorBuilder::CheckNotZeroOrJump(ValueId value_id,
LabelId label_id) {
FromApi(this)->CheckNotZeroOrJump(value_id, label_id);
}
} // namespace experimental
} // namespace v8
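Together with FunctionTemplate::NewWithFastHandler in v8.h, the intended flow is roughly the following sketch (the whole v8-experimental.h surface is unstable; SlowPathCallback is a hypothetical FunctionCallback):

// Build a fast accessor that returns internal field 0 of the receiver.
v8::experimental::FastAccessorBuilder* builder =
    v8::experimental::FastAccessorBuilder::New(isolate);
auto receiver = builder->GetReceiver();
auto field = builder->LoadInternalField(receiver, 0);
builder->ReturnValue(field);
// V8 takes ownership when code is built from the template; see
// BuildCodeFromFastAccessorBuilder above, which deletes the builder.
v8::Local<v8::FunctionTemplate> tmpl = v8::FunctionTemplate::NewWithFastHandler(
    isolate, SlowPathCallback, builder);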

28
deps/v8/src/api-experimental.h

@ -0,0 +1,28 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_API_EXPERIMENTAL_H_
#define V8_API_EXPERIMENTAL_H_
#include "src/handles.h"
namespace v8 {
namespace internal {
class Code;
} // namespace internal
namespace experimental {
class FastAccessorBuilder;
} // namespace experimental
namespace internal {
namespace experimental {
v8::internal::MaybeHandle<v8::internal::Code> BuildCodeFromFastAccessorBuilder(
v8::experimental::FastAccessorBuilder* fast_handler);
} // namespace experimental
} // namespace internal
} // namespace v8
#endif // V8_API_EXPERIMENTAL_H_

14
deps/v8/src/api-natives.cc

@ -438,8 +438,16 @@ void ApiNatives::AddNativeDataProperty(Isolate* isolate,
Handle<JSFunction> ApiNatives::CreateApiFunction(
Isolate* isolate, Handle<FunctionTemplateInfo> obj,
Handle<Object> prototype, ApiInstanceType instance_type) {
Handle<Code> code = isolate->builtins()->HandleApiCall();
Handle<Code> construct_stub = isolate->builtins()->JSConstructStubApi();
Handle<Code> code;
if (obj->call_code()->IsCallHandlerInfo() &&
CallHandlerInfo::cast(obj->call_code())->fast_handler()->IsCode()) {
code = isolate->builtins()->HandleFastApiCall();
} else {
code = isolate->builtins()->HandleApiCall();
}
Handle<Code> construct_stub =
prototype.is_null() ? isolate->builtins()->ConstructedNonConstructable()
: isolate->builtins()->JSConstructStubApi();
obj->set_instantiated(true);
Handle<JSFunction> result;
@ -540,7 +548,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
// Mark instance as callable in the map.
if (!obj->instance_call_handler()->IsUndefined()) {
map->set_is_callable();
map->set_is_constructor(true);
map->set_is_constructor();
}
// Recursively copy parent instance templates' accessors,

774
deps/v8/src/api.cc

File diff suppressed because it is too large

14
deps/v8/src/api.h

@ -142,10 +142,9 @@ class RegisteredExtension {
V(ObjectTemplate, ObjectTemplateInfo) \
V(Signature, FunctionTemplateInfo) \
V(AccessorSignature, FunctionTemplateInfo) \
V(TypeSwitch, TypeSwitchInfo) \
V(Data, Object) \
V(RegExp, JSRegExp) \
V(Object, JSObject) \
V(Object, JSReceiver) \
V(Array, JSArray) \
V(Map, JSMap) \
V(Set, JSSet) \
@ -174,6 +173,7 @@ class RegisteredExtension {
V(External, Object) \
V(StackTrace, JSArray) \
V(StackFrame, JSObject) \
V(Proxy, JSProxy) \
V(NativeWeakMap, JSWeakMap)
class Utils {
@ -200,6 +200,8 @@ class Utils {
v8::internal::Handle<v8::internal::Symbol> obj);
static inline Local<RegExp> ToLocal(
v8::internal::Handle<v8::internal::JSRegExp> obj);
static inline Local<Object> ToLocal(
v8::internal::Handle<v8::internal::JSReceiver> obj);
static inline Local<Object> ToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<Array> ToLocal(
@ -208,13 +210,14 @@ class Utils {
v8::internal::Handle<v8::internal::JSMap> obj);
static inline Local<Set> ToLocal(
v8::internal::Handle<v8::internal::JSSet> obj);
static inline Local<Proxy> ToLocal(
v8::internal::Handle<v8::internal::JSProxy> obj);
static inline Local<ArrayBuffer> ToLocal(
v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
static inline Local<ArrayBufferView> ToLocal(
v8::internal::Handle<v8::internal::JSArrayBufferView> obj);
static inline Local<DataView> ToLocal(
v8::internal::Handle<v8::internal::JSDataView> obj);
static inline Local<TypedArray> ToLocal(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Uint8Array> ToLocalUint8Array(
@ -261,8 +264,6 @@ class Utils {
v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
static inline Local<AccessorSignature> AccessorSignatureToLocal(
v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
static inline Local<TypeSwitch> ToLocal(
v8::internal::Handle<v8::internal::TypeSwitchInfo> obj);
static inline Local<External> ExternalToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<NativeWeakMap> NativeWeakMapToLocal(
@ -353,10 +354,12 @@ MAKE_TO_LOCAL(ToLocal, Name, Name)
MAKE_TO_LOCAL(ToLocal, String, String)
MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
MAKE_TO_LOCAL(ToLocal, JSReceiver, Object)
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)
MAKE_TO_LOCAL(ToLocal, JSMap, Map)
MAKE_TO_LOCAL(ToLocal, JSSet, Set)
MAKE_TO_LOCAL(ToLocal, JSProxy, Proxy)
MAKE_TO_LOCAL(ToLocal, JSArrayBuffer, ArrayBuffer)
MAKE_TO_LOCAL(ToLocal, JSArrayBufferView, ArrayBufferView)
MAKE_TO_LOCAL(ToLocal, JSDataView, DataView)
@ -369,7 +372,6 @@ MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
MAKE_TO_LOCAL(SignatureToLocal, FunctionTemplateInfo, Signature)
MAKE_TO_LOCAL(AccessorSignatureToLocal, FunctionTemplateInfo, AccessorSignature)
MAKE_TO_LOCAL(ToLocal, TypeSwitchInfo, TypeSwitch)
MAKE_TO_LOCAL(MessageToLocal, Object, Message)
MAKE_TO_LOCAL(PromiseToLocal, JSObject, Promise)
MAKE_TO_LOCAL(StackTraceToLocal, JSArray, StackTrace)

23
deps/v8/src/arm/assembler-arm-inl.h

@ -104,7 +104,8 @@ void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
Assembler::set_target_address_at(isolate_, pc_, host_, target,
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@ -131,7 +132,7 @@ void RelocInfo::set_target_object(Object* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, host_,
Assembler::set_target_address_at(isolate_, pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
@ -257,7 +258,7 @@ void RelocInfo::WipeOut() {
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = NULL;
} else {
Assembler::set_target_address_at(pc_, host_, NULL);
Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
}
}
@ -472,9 +473,9 @@ Address Assembler::return_address_from_call_start(Address pc) {
void Assembler::deserialization_set_special_target_at(
Address constant_pool_entry, Code* code, Address target) {
Isolate* isolate, Address constant_pool_entry, Code* code, Address target) {
if (FLAG_enable_embedded_constant_pool) {
set_target_address_at(constant_pool_entry, code, target);
set_target_address_at(isolate, constant_pool_entry, code, target);
} else {
Memory::Address_at(constant_pool_entry) = target;
}
@ -482,7 +483,7 @@ void Assembler::deserialization_set_special_target_at(
void Assembler::deserialization_set_target_internal_reference_at(
Address pc, Address target, RelocInfo::Mode mode) {
Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
@ -572,15 +573,15 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
}
void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
void Assembler::set_target_address_at(Isolate* isolate, Address pc,
Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode) {
if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Update the entry in the constant pool.
Memory::Address_at(constant_pool_entry_address(pc, constant_pool)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
// Assembler::FlushICacheWithoutIsolate(pc, sizeof(target));
// Assembler::FlushICache(isolate, pc, sizeof(target));
// However, on ARM, no instruction is actually patched in the case
// of embedded constants of the form:
// ldr ip, [pp, #...]
@ -598,7 +599,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
DCHECK(IsMovW(Memory::int32_at(pc)));
DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICacheWithoutIsolate(pc, 2 * kInstrSize);
Assembler::FlushICache(isolate, pc, 2 * kInstrSize);
}
} else {
// This is an mov / orr immediate load. Patch the immediate embedded in
@ -618,7 +619,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICacheWithoutIsolate(pc, 4 * kInstrSize);
Assembler::FlushICache(isolate, pc, 4 * kInstrSize);
}
}
}

96
deps/v8/src/arm/assembler-arm.cc

@ -843,8 +843,7 @@ void Assembler::target_at_put(int pos, int target_pos) {
if (is_uint8(target24)) {
// If the target fits in a byte then only patch with a mov
// instruction.
CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
1,
CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 1,
CodePatcher::DONT_FLUSH);
patcher.masm()->mov(dst, Operand(target24));
} else {
@ -853,14 +852,12 @@ void Assembler::target_at_put(int pos, int target_pos) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Patch with movw/movt.
if (target16_1 == 0) {
CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
1,
CodePatcher::DONT_FLUSH);
CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
1, CodePatcher::DONT_FLUSH);
patcher.masm()->movw(dst, target16_0);
} else {
CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
2,
CodePatcher::DONT_FLUSH);
CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
2, CodePatcher::DONT_FLUSH);
patcher.masm()->movw(dst, target16_0);
patcher.masm()->movt(dst, target16_1);
}
@ -870,15 +867,13 @@ void Assembler::target_at_put(int pos, int target_pos) {
uint8_t target8_1 = target16_0 >> 8;
uint8_t target8_2 = target16_1 & kImm8Mask;
if (target8_2 == 0) {
CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
2,
CodePatcher::DONT_FLUSH);
CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
2, CodePatcher::DONT_FLUSH);
patcher.masm()->mov(dst, Operand(target8_0));
patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
} else {
CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
3,
CodePatcher::DONT_FLUSH);
CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
3, CodePatcher::DONT_FLUSH);
patcher.masm()->mov(dst, Operand(target8_0));
patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
@ -3362,6 +3357,20 @@ void Assembler::vmrs(Register dst, Condition cond) {
}
void Assembler::vrinta(const SwVfpRegister dst, const SwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
// M(5) | 0(4) | Vm(3-0)
DCHECK(CpuFeatures::IsSupported(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | vd * B12 |
0x5 * B9 | B6 | m * B5 | vm);
}
void Assembler::vrinta(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
@ -3376,6 +3385,20 @@ void Assembler::vrinta(const DwVfpRegister dst, const DwVfpRegister src) {
}
void Assembler::vrintn(const SwVfpRegister dst, const SwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
// M(5) | 0(4) | Vm(3-0)
DCHECK(CpuFeatures::IsSupported(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x1 * B16 |
vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
}
void Assembler::vrintn(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
@ -3390,6 +3413,20 @@ void Assembler::vrintn(const DwVfpRegister dst, const DwVfpRegister src) {
}
void Assembler::vrintp(const SwVfpRegister dst, const SwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=10(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
// M(5) | 0(4) | Vm(3-0)
DCHECK(CpuFeatures::IsSupported(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x2 * B16 |
vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
}
void Assembler::vrintp(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=10(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
@ -3404,6 +3441,20 @@ void Assembler::vrintp(const DwVfpRegister dst, const DwVfpRegister src) {
}
void Assembler::vrintm(const SwVfpRegister dst, const SwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=11(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
// M(5) | 0(4) | Vm(3-0)
DCHECK(CpuFeatures::IsSupported(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x3 * B16 |
vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
}
void Assembler::vrintm(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=11(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
@ -3418,6 +3469,20 @@ void Assembler::vrintm(const DwVfpRegister dst, const DwVfpRegister src) {
}
void Assembler::vrintz(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond) {
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
// Vd(15-12) | 101(11-9) | sz=0(8) | op=1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
DCHECK(CpuFeatures::IsSupported(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x3 * B17 | vd * B12 |
0x5 * B9 | B7 | B6 | m * B5 | vm);
}
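The new single-precision overloads complete the ARMv8 rounding set; code generation still has to guard on the feature, along the lines of this sketch:

// Sketch only: round s1 to nearest (ties to even) into s0 on ARMv8.
if (CpuFeatures::IsSupported(ARMv8)) {
  CpuFeatureScope scope(masm, ARMv8);
  masm->vrintn(s0, s1);
} else {
  // fall back to a runtime call or a vcvt-based sequence
}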
void Assembler::vrintz(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond) {
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
@ -3594,6 +3659,7 @@ void Assembler::GrowBuffer() {
desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
desc.origin = this;
// Copy the data.
int pc_delta = desc.buffer - buffer_;
@ -3669,7 +3735,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
data = RecordedAstId().ToInt();
ClearRecordedAstId();
}
RelocInfo rinfo(pc_, rmode, data, NULL);
RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
}

26
deps/v8/src/arm/assembler-arm.h

@ -671,19 +671,18 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
INLINE(static Address target_address_at(Address pc, Address constant_pool));
INLINE(static void set_target_address_at(
Address pc, Address constant_pool, Address target,
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(static Address target_address_at(Address pc, Code* code)) {
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
INLINE(static void set_target_address_at(Address pc,
Code* code,
Address target,
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED)) {
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Code* code, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
Address constant_pool = code ? code->constant_pool() : NULL;
set_target_address_at(pc, constant_pool, target, icache_flush_mode);
set_target_address_at(isolate, pc, constant_pool, target,
icache_flush_mode);
}
// Return the code target address at a call site from the return address
@ -697,11 +696,12 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
Address constant_pool_entry, Code* code, Address target);
Isolate* isolate, Address constant_pool_entry, Code* code,
Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
Address pc, Address target,
Isolate* isolate, Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Here we are patching the address in the constant pool, not the actual call
@ -1211,10 +1211,16 @@ class Assembler : public AssemblerBase {
const Condition cond = al);
// ARMv8 rounding instructions.
void vrinta(const SwVfpRegister dst, const SwVfpRegister src);
void vrinta(const DwVfpRegister dst, const DwVfpRegister src);
void vrintn(const SwVfpRegister dst, const SwVfpRegister src);
void vrintn(const DwVfpRegister dst, const DwVfpRegister src);
void vrintm(const SwVfpRegister dst, const SwVfpRegister src);
void vrintm(const DwVfpRegister dst, const DwVfpRegister src);
void vrintp(const SwVfpRegister dst, const SwVfpRegister src);
void vrintp(const DwVfpRegister dst, const DwVfpRegister src);
void vrintz(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond = al);
void vrintz(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond = al);
@ -1308,7 +1314,7 @@ class Assembler : public AssemblerBase {
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.

1724
deps/v8/src/arm/builtins-arm.cc

File diff suppressed because it is too large

379
deps/v8/src/arm/code-stubs-arm.cc

@ -250,7 +250,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Smis. If it's not a heap number, then return equal.
if (cond == lt || cond == gt) {
// Call runtime on identical JSObjects.
__ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(r0, r4, r4, FIRST_JS_RECEIVER_TYPE);
__ b(ge, slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
@ -271,7 +271,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ b(eq, &heap_number);
// Comparing JS objects with <=, >= is complicated.
if (cond != eq) {
__ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
__ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
__ b(ge, slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
@ -436,11 +436,11 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Label first_non_object;
// Get the type of the first operand into r2 and compare it with
// FIRST_SPEC_OBJECT_TYPE.
__ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
// FIRST_JS_RECEIVER_TYPE.
__ CompareObjectType(rhs, r2, r2, FIRST_JS_RECEIVER_TYPE);
__ b(lt, &first_non_object);
// Return non-zero (r0 is not zero)
@ -453,7 +453,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
__ cmp(r2, Operand(ODDBALL_TYPE));
__ b(eq, &return_not_equal);
__ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(lhs, r3, r3, FIRST_JS_RECEIVER_TYPE);
__ b(ge, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
@ -520,9 +520,9 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ Ret();
__ bind(&object_test);
__ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
__ cmp(r2, Operand(FIRST_JS_RECEIVER_TYPE));
__ b(lt, not_both_strings);
__ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(lhs, r2, r3, FIRST_JS_RECEIVER_TYPE);
__ b(lt, not_both_strings);
// If both objects are undetectable, they are equal. Otherwise, they
// are not equal, since they are different objects and an object is not
@ -682,8 +682,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
if (cc == eq) {
__ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
1);
__ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
} else {
int ncr; // NaN compare result
if (cc == lt || cc == le) {
@ -697,9 +696,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ TailCallRuntime(
is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
1);
__ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
: Runtime::kCompare);
}
__ bind(&miss);
@ -901,7 +899,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
__ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
__ TailCallRuntime(Runtime::kMathPowRT);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
@ -1346,16 +1344,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
__ b(ne, &slow_case);
// Ensure that {function} is not bound.
Register const shared_info = scratch;
__ ldr(shared_info,
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
__ ldr(scratch, FieldMemOperand(shared_info,
SharedFunctionInfo::kCompilerHintsOffset));
__ tst(scratch,
Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
__ b(ne, &slow_case);
// Get the "prototype" (or initial map) of the {function}.
__ ldr(function_prototype,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@ -1380,27 +1368,47 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Loop through the prototype chain looking for the {function} prototype.
// Assume true, and change to false if not found.
Register const object_prototype = object_map;
Register const object_instance_type = function_map;
Register const map_bit_field = function_map;
Register const null = scratch;
Label done, loop;
__ LoadRoot(r0, Heap::kTrueValueRootIndex);
Register const result = r0;
Label done, loop, fast_runtime_fallback;
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ LoadRoot(null, Heap::kNullValueRootIndex);
__ bind(&loop);
__ ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
__ cmp(object_prototype, function_prototype);
// Check if the object needs to be access checked.
__ ldrb(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
__ tst(map_bit_field, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &fast_runtime_fallback);
// Check if the current object is a Proxy.
__ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
__ b(eq, &fast_runtime_fallback);
__ ldr(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
__ cmp(object, function_prototype);
__ b(eq, &done);
__ cmp(object_prototype, null);
__ ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
__ cmp(object, null);
__ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
__ b(ne, &loop);
__ LoadRoot(r0, Heap::kFalseValueRootIndex);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
__ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
__ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret();
// Slow-case: Call the runtime function.
// Found Proxy or access check needed: Call the runtime
__ bind(&fast_runtime_fallback);
__ Push(object, function_prototype);
// Invalidate the instanceof cache.
__ Move(scratch, Smi::FromInt(0));
__ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
__ TailCallRuntime(Runtime::kHasInPrototypeChain);
// Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
__ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
__ TailCallRuntime(Runtime::kInstanceOf);
}
@ -1502,7 +1510,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ push(r1);
__ TailCallRuntime(Runtime::kArguments, 1, 1);
__ TailCallRuntime(Runtime::kArguments);
}
@ -1529,7 +1537,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(r1, r3, r2);
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
__ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@ -1590,7 +1598,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ add(r9, r9, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ Allocate(r9, r0, r4, r9, &runtime, TAG_OBJECT);
__ Allocate(r9, r0, r9, r4, &runtime, TAG_OBJECT);
// r0 = address of new object(s) (tagged)
// r2 = argument count (smi-tagged)
@ -1600,8 +1608,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
const int kAliasedOffset =
Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
__ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ldr(r4, FieldMemOperand(r4, JSGlobalObject::kNativeContextOffset));
__ ldr(r4, NativeContextMemOperand());
__ cmp(r6, Operand::Zero());
__ ldr(r4, MemOperand(r4, kNormalOffset), eq);
__ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
@ -1730,7 +1737,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// r5 = argument count (tagged)
__ bind(&runtime);
__ Push(r1, r3, r5);
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
__ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@ -1749,7 +1756,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ Push(receiver, key); // Receiver, key.
// Perform tail call to the entry.
__ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
__ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@ -1793,10 +1800,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current native context.
__ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ldr(r4, FieldMemOperand(r4, JSGlobalObject::kNativeContextOffset));
__ ldr(r4, MemOperand(
r4, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
__ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r4);
__ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
__ LoadRoot(r5, Heap::kEmptyFixedArrayRootIndex);
@ -1845,7 +1849,29 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
__ Push(r1, r3, r2);
__ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
__ TailCallRuntime(Runtime::kNewStrictArguments);
}
void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// r2 : number of parameters (tagged)
// r3 : parameters pointer
// r4 : rest parameter index (tagged)
Label runtime;
__ ldr(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(r0, MemOperand(r5, StandardFrameConstants::kContextOffset));
__ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &runtime);
// Patch the arguments.length and the parameters pointer.
__ ldr(r2, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ add(r3, r5, Operand::PointerOffsetFromSmiKey(r2));
__ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
__ bind(&runtime);
__ Push(r2, r3, r4);
__ TailCallRuntime(Runtime::kNewRestParam);
}
@ -1854,7 +1880,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
__ TailCallRuntime(Runtime::kRegExpExec);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@ -2125,7 +2151,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ b(eq, &runtime);
// For exception, throw the exception again.
__ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
__ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ bind(&failure);
// For failure and exception return null.
@ -2220,7 +2246,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
__ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@ -2263,33 +2289,25 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
bool is_super) {
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// r0 : number of arguments to the construct function
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi)
// r4 : original constructor (for IsSuperConstructorCall)
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(r0);
__ Push(r3, r2, r1, r0);
if (is_super) {
__ Push(r4);
}
__ CallStub(stub);
if (is_super) {
__ Pop(r4);
}
__ Pop(r3, r2, r1, r0);
__ SmiUntag(r0);
}
static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
@ -2297,7 +2315,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi)
// r4 : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@ -2338,7 +2355,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ b(ne, &miss);
// Make sure the function is the Array() function
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
__ cmp(r1, r5);
__ b(ne, &megamorphic);
__ jmp(&done);
@ -2361,7 +2378,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ bind(&initialize);
// Make sure the function is the Array() function
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
__ cmp(r1, r5);
__ b(ne, &not_array_function);
@ -2369,12 +2386,12 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub, is_super);
CallStubInRecordCallTarget(masm, &create_stub);
__ b(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
}
@ -2384,7 +2401,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi, for RecordCallTarget)
// r4 : original constructor (for IsSuperConstructorCall)
Label non_function;
// Check that the function is not a smi.
@ -2393,28 +2409,22 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ CompareObjectType(r1, r5, r5, JS_FUNCTION_TYPE);
__ b(ne, &non_function);
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm, IsSuperConstructorCall());
GenerateRecordCallTarget(masm);
__ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into r2, or undefined.
__ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize));
__ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
__ b(eq, &feedback_register_initialized);
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ bind(&feedback_register_initialized);
__ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into r2, or undefined.
__ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize));
__ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
__ b(eq, &feedback_register_initialized);
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(r2, r5);
}
__ AssertUndefinedOrAllocationSite(r2, r5);
// Pass function as original constructor.
if (IsSuperConstructorCall()) {
__ mov(r3, r4);
} else {
__ mov(r3, r1);
}
// Pass function as new target.
__ mov(r3, r1);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@ -2433,7 +2443,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// r3 - slot id
// r2 - vector
// r4 - allocation site (loaded from vector[slot])
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
__ cmp(r1, r5);
__ b(ne, miss);
@ -2457,11 +2467,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// r1 - function
// r3 - slot id (Smi)
// r2 - vector
const int with_types_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
Label extra_checks_or_miss, call;
Label extra_checks_or_miss, call, call_function;
int argc = arg_count();
ParameterCount actual(argc);
@ -2498,9 +2504,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ str(r3, FieldMemOperand(r2, 0));
__ bind(&call);
__ bind(&call_function);
__ mov(r0, Operand(argc));
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
@ -2535,14 +2542,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
__ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
// We have to update statistics for runtime profiling.
__ ldr(r4, FieldMemOperand(r2, with_types_offset));
__ sub(r4, r4, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(r2, with_types_offset));
__ ldr(r4, FieldMemOperand(r2, generic_offset));
__ add(r4, r4, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(r2, generic_offset));
__ jmp(&call);
__ bind(&call);
__ mov(r0, Operand(argc));
__ Jump(masm->isolate()->builtins()->Call(convert_mode()),
RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@ -2555,14 +2559,16 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Make sure the function is not the Array() function, which requires special
// behavior on MISS.
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r4);
__ cmp(r1, r4);
__ b(eq, &miss);
// Update stats.
__ ldr(r4, FieldMemOperand(r2, with_types_offset));
__ add(r4, r4, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(r2, with_types_offset));
// Make sure the function belongs to the same native context.
__ ldr(r4, FieldMemOperand(r1, JSFunction::kContextOffset));
__ ldr(r4, ContextMemOperand(r4, Context::NATIVE_CONTEXT_INDEX));
__ ldr(ip, NativeContextMemOperand());
__ cmp(r4, ip);
__ b(ne, &miss);
// Initialize the call counter.
__ Move(r5, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
@ -2581,7 +2587,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(r1);
}
__ jmp(&call);
__ jmp(&call_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
@ -2599,7 +2605,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(r1, r2, r3);
// Call the entry.
__ CallRuntime(Runtime::kCallIC_Miss, 3);
__ CallRuntime(Runtime::kCallIC_Miss);
// Move result to edi and exit the internal frame.
__ mov(r1, r0);
@ -2664,11 +2670,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Push(object_, index_);
}
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
} else {
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
__ CallRuntime(Runtime::kNumberToSmi, 1);
__ CallRuntime(Runtime::kNumberToSmi);
}
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
@ -2695,7 +2701,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ SmiTag(index_);
__ Push(object_, index_);
__ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
__ CallRuntime(Runtime::kStringCharCodeAtRT);
__ Move(result_, r0);
call_helper.AfterCall(masm);
__ jmp(&exit_);
@ -2734,7 +2740,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
__ CallRuntime(Runtime::kStringCharFromCode, 1);
__ CallRuntime(Runtime::kStringCharFromCode);
__ Move(result_, r0);
call_helper.AfterCall(masm);
__ jmp(&exit_);
@ -2990,7 +2996,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
__ TailCallRuntime(Runtime::kSubString);
__ bind(&single_char);
// r0: original string
@ -3030,7 +3036,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ Ret();
__ bind(&slow_string);
__ push(r0); // Push argument.
__ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
__ TailCallRuntime(Runtime::kStringToNumber);
__ bind(&not_string);
Label not_oddball;
@ -3041,7 +3047,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(r0); // Push argument.
__ TailCallRuntime(Runtime::kToNumber, 1, 1);
__ TailCallRuntime(Runtime::kToNumber);
}
@ -3056,7 +3062,7 @@ void ToLengthStub::Generate(MacroAssembler* masm) {
__ bind(&not_smi);
__ push(r0); // Push argument.
__ TailCallRuntime(Runtime::kToLength, 1, 1);
__ TailCallRuntime(Runtime::kToLength);
}
@ -3086,7 +3092,7 @@ void ToStringStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(r0); // Push argument.
__ TailCallRuntime(Runtime::kToString, 1, 1);
__ TailCallRuntime(Runtime::kToString);
}
@ -3216,7 +3222,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// tagged as a small integer.
__ bind(&runtime);
__ Push(r1, r0);
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
__ TailCallRuntime(Runtime::kStringCompare);
}
@ -3258,7 +3264,7 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(r1, r2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(r0, r3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
if (op() != Token::EQ_STRICT && is_strong(strength())) {
__ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
__ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (!Token::IsEqualityOp(op())) {
__ ldr(r1, FieldMemOperand(r1, Oddball::kToNumberOffset));
@ -3519,9 +3525,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(left, right);
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
__ TailCallRuntime(Runtime::kStringEquals);
} else {
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
__ TailCallRuntime(Runtime::kStringCompare);
}
__ bind(&miss);
@ -3529,16 +3535,17 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
}
void CompareICStub::GenerateObjects(MacroAssembler* masm) {
DCHECK(state() == CompareICState::OBJECT);
void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
DCHECK_EQ(CompareICState::RECEIVER, state());
Label miss;
__ and_(r2, r1, Operand(r0));
__ JumpIfSmi(r2, &miss);
__ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
__ b(ne, &miss);
__ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
__ b(ne, &miss);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
__ CompareObjectType(r0, r2, r2, FIRST_JS_RECEIVER_TYPE);
__ b(lt, &miss);
__ CompareObjectType(r1, r2, r2, FIRST_JS_RECEIVER_TYPE);
__ b(lt, &miss);
DCHECK(GetCondition() == eq);
__ sub(r0, r0, Operand(r1));
@ -3549,7 +3556,7 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
}
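// [Editor's note: a hedged sketch of the range-check trick the rewritten
// GenerateReceivers relies on; the enum below is invented, and only the
// ordering guarantee mirrors the STATIC_ASSERT(LAST_TYPE ==
// LAST_JS_RECEIVER_TYPE) above. Because receiver types sit at the top of
// the instance-type enumeration, "is a JS receiver" collapses to a single
// comparison, which is why the JS_OBJECT_TYPE equality tests became
// FIRST_JS_RECEIVER_TYPE comparisons guarded by b(lt, &miss).]
enum InstanceType : int {
  kHeapNumberType,                        // non-receivers first...
  kStringType,
  FIRST_JS_RECEIVER_TYPE,                 // ...receivers last
  kJSProxyType = FIRST_JS_RECEIVER_TYPE,
  kJSObjectType,
  LAST_JS_RECEIVER_TYPE = kJSObjectType,
  LAST_TYPE = LAST_JS_RECEIVER_TYPE,
};
inline bool IsJSReceiver(InstanceType t) {
  static_assert(LAST_TYPE == LAST_JS_RECEIVER_TYPE, "receivers must be last");
  return t >= FIRST_JS_RECEIVER_TYPE;     // the single b(lt, &miss) guard
}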
void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
Label miss;
Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
__ and_(r2, r1, Operand(r0));
@ -3566,7 +3573,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ sub(r0, r0, Operand(r1));
__ Ret();
} else if (is_strong(strength())) {
__ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
__ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (op() == Token::LT || op() == Token::LTE) {
__ mov(r2, Operand(Smi::FromInt(GREATER)));
@ -3574,7 +3581,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ mov(r2, Operand(Smi::FromInt(LESS)));
}
__ Push(r1, r0, r2);
__ TailCallRuntime(Runtime::kCompare, 3, 1);
__ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@ -3590,7 +3597,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(lr, r1, r0);
__ mov(ip, Operand(Smi::FromInt(op())));
__ push(ip);
__ CallRuntime(Runtime::kCompareIC_Miss, 3);
__ CallRuntime(Runtime::kCompareIC_Miss);
// Compute the entry point of the rewritten stub.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@ -4035,11 +4042,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.object(), regs_.address());
__ EnsureNotWhite(regs_.scratch0(), // The value.
regs_.scratch1(), // Scratch.
regs_.object(), // Scratch.
regs_.address(), // Scratch.
&need_incremental_pop_scratch);
__ JumpIfWhite(regs_.scratch0(), // The value.
regs_.scratch1(), // Scratch.
regs_.object(), // Scratch.
regs_.address(), // Scratch.
&need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
regs_.Restore(masm);
@ -4059,68 +4066,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : element value to store
// -- r3 : element index as smi
// -- sp[0] : array literal index in function as smi
// -- sp[4] : array literal
// clobbers r1, r2, r4
// -----------------------------------
Label element_done;
Label double_elements;
Label smi_element;
Label slow_elements;
Label fast_elements;
// Get array literal index, array literal and its map.
__ ldr(r4, MemOperand(sp, 0 * kPointerSize));
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
__ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
__ CheckFastElements(r2, r5, &double_elements);
// FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
__ JumpIfSmi(r0, &smi_element);
__ CheckFastSmiElements(r2, r5, &fast_elements);
// Store into the array literal requires an elements transition. Call into
// the runtime.
__ bind(&slow_elements);
// call.
__ Push(r1, r3, r0);
__ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
__ Push(r5, r4);
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
// Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
__ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
__ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ str(r0, MemOperand(r6, 0));
// Update the write barrier for the array store.
__ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
// Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
// and value is Smi.
__ bind(&smi_element);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
__ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
__ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
__ Ret();
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
__ StoreNumberToDoubleElements(r0, r3, r5, r6, d0, &slow_elements);
__ Ret();
}
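// [Editor's note: a hypothetical summary, in plain C++, of the dispatch the
// deleted StoreArrayLiteralElementStub performed; the names are invented.
// The stub picked one of three store paths by elements kind: a raw store
// for smi values, a store plus write barrier for heap objects going into
// fast-object arrays, and an unboxed double store for double arrays, with
// the runtime as the fallback whenever an elements-kind transition is needed.]
enum class Kind { kFastSmi, kFastObject, kFastDouble };
enum class StorePath { kRaw, kWithWriteBarrier, kUnboxedDouble, kRuntime };
StorePath PickStorePath(Kind kind, bool value_is_smi) {
  if (kind == Kind::kFastDouble) return StorePath::kUnboxedDouble;      // double_elements
  if (value_is_smi) return StorePath::kRaw;                             // smi_element
  if (kind == Kind::kFastObject) return StorePath::kWithWriteBarrier;   // fast_elements
  return StorePath::kRuntime;  // slow_elements: transition required
}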
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
@ -4835,7 +4780,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- r0 : argc (only if argument_count() == ANY)
// -- r1 : constructor
// -- r2 : AllocationSite or undefined
// -- r3 : original constructor
// -- r3 : new target
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
@ -4856,6 +4801,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(r2, r4);
}
// Enter the context of the Array function.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
Label subclassing;
__ cmp(r3, r1);
__ b(ne, &subclassing);
@ -4875,25 +4823,23 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
__ bind(&subclassing);
__ push(r1);
__ push(r3);
// Adjust argc.
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
__ add(r0, r0, Operand(2));
__ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
__ add(r0, r0, Operand(3));
break;
case NONE:
__ mov(r0, Operand(2));
__ str(r1, MemOperand(sp, 0 * kPointerSize));
__ mov(r0, Operand(3));
break;
case ONE:
__ mov(r0, Operand(3));
__ str(r1, MemOperand(sp, 1 * kPointerSize));
__ mov(r0, Operand(4));
break;
}
__ JumpToExternalReference(
ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
__ Push(r3, r2);
__ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
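// [Editor's note: a minimal sketch of the argc bookkeeping in the new
// subclassing path, under the assumption (read from the pushes above) that
// three extra values ride on top of the user's arguments; names invented.
// The constructor (r1), new target (r3) and allocation site (r2) are pushed
// before the tail call to Runtime::kNewArray, so every switch case grows
// argc by 3 relative to the real argument count (NONE -> 3, ONE -> 4,
// N -> N + 3).]
#include <vector>
struct Arg {};  // stand-in for a tagged value
size_t ArgcForNewArrayCall(std::vector<Arg>& stack, size_t user_argc,
                           Arg constructor, Arg new_target, Arg site) {
  stack.push_back(constructor);  // __ push(r1)
  stack.push_back(new_target);   // __ Push(r3, r2)
  stack.push_back(site);
  return user_argc + 3;          // matches add(r0, r0, Operand(3)) and friends
}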
@ -4979,13 +4925,13 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Go up the context chain to the script context.
for (int i = 0; i < depth(); ++i) {
__ ldr(result, ContextOperand(context, Context::PREVIOUS_INDEX));
__ ldr(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
context = result;
}
// Load the PropertyCell value at the specified slot.
__ add(result, context, Operand(slot, LSL, kPointerSizeLog2));
__ ldr(result, ContextOperand(result));
__ ldr(result, ContextMemOperand(result));
__ ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
// If the result is not the_hole, return. Otherwise, handle in the runtime.
@ -4995,7 +4941,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Fallback to runtime.
__ SmiTag(slot);
__ push(slot);
__ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
__ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}
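// [Editor's note: the stub's unrolled loop above, rewritten as a hedged
// standalone C++ sketch; the types are hypothetical models, not V8's
// GC-managed Context objects. depth() hops follow the PREVIOUS_INDEX link
// to reach the script context, then the slot index selects the PropertyCell.]
struct Context {
  Context* previous;  // models ContextMemOperand(ctx, Context::PREVIOUS_INDEX)
  void** slots;       // models the context's variable slots
};
void* LoadGlobalViaContext(Context* ctx, int depth, int slot) {
  for (int i = 0; i < depth; ++i) ctx = ctx->previous;  // unrolled in the stub
  return ctx->slots[slot];  // the PropertyCell; its value field is read next
}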
@ -5021,13 +4967,13 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Go up the context chain to the script context.
for (int i = 0; i < depth(); i++) {
__ ldr(context_temp, ContextOperand(context, Context::PREVIOUS_INDEX));
__ ldr(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
context = context_temp;
}
// Load the PropertyCell at the specified slot.
__ add(cell, context, Operand(slot, LSL, kPointerSizeLog2));
__ ldr(cell, ContextOperand(cell));
__ ldr(cell, ContextMemOperand(cell));
// Load PropertyDetails for the cell (actually only the cell_type and kind).
__ ldr(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
@ -5119,8 +5065,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Push(slot, value);
__ TailCallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
: Runtime::kStoreGlobalViaContext_Sloppy,
2, 1);
: Runtime::kStoreGlobalViaContext_Sloppy);
}
@ -5247,7 +5192,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
__ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
__ TailCallRuntime(Runtime::kPromoteScheduledException);
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);

5  deps/v8/src/arm/code-stubs-arm.h

@ -109,9 +109,8 @@ class RecordWriteStub: public PlatformCodeStub {
}
static void Patch(Code* stub, Mode mode) {
MacroAssembler masm(NULL,
stub->instruction_start(),
stub->instruction_size());
MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
stub->instruction_size(), CodeObjectRequired::kNo);
switch (mode) {
case STORE_BUFFER_ONLY:
DCHECK(GetMode(stub) == INCREMENTAL ||

65  deps/v8/src/arm/codegen-arm.cc

@ -18,23 +18,23 @@ namespace internal {
#if defined(USE_SIMULATOR)
byte* fast_exp_arm_machine_code = NULL;
double fast_exp_simulator(double x) {
return Simulator::current(Isolate::Current())->CallFPReturnsDouble(
fast_exp_arm_machine_code, x, 0);
byte* fast_exp_arm_machine_code = nullptr;
double fast_exp_simulator(double x, Isolate* isolate) {
return Simulator::current(isolate)
->CallFPReturnsDouble(fast_exp_arm_machine_code, x, 0);
}
#endif
UnaryMathFunction CreateExpFunction() {
if (!FLAG_fast_math) return &std::exp;
UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &std::exp;
if (buffer == nullptr) return nullptr;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
{
DwVfpRegister input = d0;
@ -67,11 +67,11 @@ UnaryMathFunction CreateExpFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunction>(buffer);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#else
fast_exp_arm_machine_code = buffer;
return &fast_exp_simulator;
@ -79,7 +79,8 @@ UnaryMathFunction CreateExpFunction() {
}
#if defined(V8_HOST_ARCH_ARM)
MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
@ -87,9 +88,10 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return stub;
if (buffer == nullptr) return stub;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
Register dest = r0;
Register src = r1;
@ -227,7 +229,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
@ -236,7 +238,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
// Convert 8 to 16. The number of characters to copy must be at least 8.
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
MemCopyUint16Uint8Function stub) {
Isolate* isolate, MemCopyUint16Uint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
@ -244,9 +246,10 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return stub;
if (buffer == nullptr) return stub;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
Register dest = r0;
Register src = r1;
@ -314,7 +317,7 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
CodeDesc desc;
masm.GetCode(&desc);
Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
@ -322,16 +325,17 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
}
#endif
UnaryMathFunction CreateSqrtFunction() {
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return &std::sqrt;
return nullptr;
#else
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &std::sqrt;
if (buffer == nullptr) return nullptr;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
__ MovFromFloatParameter(d0);
__ vsqrt(d0, d0);
@ -342,9 +346,9 @@ UnaryMathFunction CreateSqrtFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
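// [Editor's note: all three generators above follow one pipeline: allocate
// a writable buffer, assemble machine code into it, flush the instruction
// cache for that range, make the page executable, and cast the buffer to a
// function pointer. Below is a generic POSIX rendering of that pattern,
// assuming a GCC/Clang toolchain for __builtin___clear_cache; it is a
// sketch, not V8 code.]
#include <sys/mman.h>
#include <cstddef>
#include <cstring>
typedef double (*UnaryFn)(double);
UnaryFn InstallCode(const unsigned char* code, size_t size) {
  void* buf = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (buf == MAP_FAILED) return nullptr;  // mirrors the "return nullptr" fallback
  memcpy(buf, code, size);                // the masm.GetCode(&desc) stage
  __builtin___clear_cache(static_cast<char*>(buf),
                          static_cast<char*>(buf) + size);  // FlushICache
  mprotect(buf, size, PROT_READ | PROT_EXEC);               // ProtectCode
  return reinterpret_cast<UnaryFn>(buf);                    // FUNCTION_CAST
}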
@ -882,15 +886,17 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
#endif
CodeAgingHelper::CodeAgingHelper() {
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// Since patcher is a large object, allocate it dynamically when needed,
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before ARM simulator ICache is setup.
base::SmartPointer<CodePatcher> patcher(new CodePatcher(
young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize,
CodePatcher::DONT_FLUSH));
base::SmartPointer<CodePatcher> patcher(
new CodePatcher(isolate, young_sequence_.start(),
young_sequence_.length() / Assembler::kInstrSize,
CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->PushFixedFrame(r1);
patcher->masm()->nop(ip.code());
@ -937,7 +943,8 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
CodePatcher patcher(isolate, sequence,
young_length / Assembler::kInstrSize);
patcher.masm()->add(r0, pc, Operand(-8));
patcher.masm()->ldr(pc, MemOperand(pc, -4));
patcher.masm()->emit_code_stub_address(stub);

2  deps/v8/src/arm/codegen-arm.h

@ -5,7 +5,7 @@
#ifndef V8_ARM_CODEGEN_ARM_H_
#define V8_ARM_CODEGEN_ARM_H_
#include "src/ast.h"
#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {

7  deps/v8/src/arm/deoptimizer-arm.cc

@ -40,14 +40,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
} else {
pointer = code->instruction_start();
}
CodePatcher patcher(pointer, 1);
CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->bkpt(0);
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
1);
osr_patcher.masm()->bkpt(0);
}
}
@ -72,7 +73,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
DCHECK(call_size_in_bytes <= patch_size());
CodePatcher patcher(call_address, call_size_in_words);
CodePatcher patcher(isolate, call_address, call_size_in_words);
patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());

8  deps/v8/src/arm/disasm-arm.cc

@ -1781,28 +1781,28 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
if (dp_operation) {
Format(instr, "vrinta.f64.f64 'Dd, 'Dm");
} else {
Unknown(instr);
Format(instr, "vrinta.f32.f32 'Sd, 'Sm");
}
break;
case 0x1:
if (dp_operation) {
Format(instr, "vrintn.f64.f64 'Dd, 'Dm");
} else {
Unknown(instr);
Format(instr, "vrintn.f32.f32 'Sd, 'Sm");
}
break;
case 0x2:
if (dp_operation) {
Format(instr, "vrintp.f64.f64 'Dd, 'Dm");
} else {
Unknown(instr);
Format(instr, "vrintp.f32.f32 'Sd, 'Sm");
}
break;
case 0x3:
if (dp_operation) {
Format(instr, "vrintm.f64.f64 'Dd, 'Dm");
} else {
Unknown(instr);
Format(instr, "vrintm.f32.f32 'Sd, 'Sm");
}
break;
default:

59  deps/v8/src/arm/interface-descriptors-arm.cc

@ -65,6 +65,11 @@ const Register ArgumentsAccessNewDescriptor::parameter_count() { return r2; }
const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return r3; }
const Register RestParamAccessDescriptor::parameter_count() { return r2; }
const Register RestParamAccessDescriptor::parameter_pointer() { return r3; }
const Register RestParamAccessDescriptor::rest_parameter_index() { return r4; }
const Register ApiGetterDescriptor::function_address() { return r2; }
@ -127,6 +132,13 @@ void TypeofDescriptor::InitializePlatformSpecific(
}
void FastCloneRegExpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2, r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2, r1};
@ -189,7 +201,7 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi, for RecordCallTarget)
// r4 : original constructor (for IsSuperConstructorCall)
// r4 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {r0, r1, r4, r2};
@ -206,6 +218,27 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
}
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments
// r1 : the target to call
// r3 : the new target
// r2 : allocation site or undefined
Register registers[] = {r1, r3, r0, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments
// r1 : the target to call
// r3 : the new target
Register registers[] = {r1, r3, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2, r1, r0};
@ -358,6 +391,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
Register registers[] = {
r1, // JSFunction
r3, // the new target
r0, // actual number of arguments
r2, // expected number of arguments
};
@ -399,27 +433,6 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
r1, // math rounding function
r3, // vector slot id
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void MathRoundVariantCallFromOptimizedCodeDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
r1, // math rounding function
r3, // vector slot id
r4, // type vector
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@ -435,7 +448,7 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r0, // argument count (not including receiver)
r3, // original constructor
r3, // new target
r1, // constructor to call
r2 // address of the first argument
};

510  deps/v8/src/arm/macro-assembler-arm.cc

@ -19,11 +19,12 @@
namespace v8 {
namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
has_frame_(false) {
if (isolate() != NULL) {
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
@ -1236,8 +1237,6 @@ void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
Register code_reg,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@ -1257,7 +1256,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// passed in registers.
DCHECK(actual.is_immediate() || actual.reg().is(r0));
DCHECK(expected.is_immediate() || expected.reg().is(r2));
DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@ -1289,11 +1287,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
if (!definitely_matches) {
if (!code_constant.is_null()) {
mov(r3, Operand(code_constant));
add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
}
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
@ -1311,20 +1304,79 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_flooding;
ExternalReference step_in_enabled =
ExternalReference::debug_step_in_enabled_address(isolate());
mov(r4, Operand(step_in_enabled));
ldrb(r4, MemOperand(r4));
cmp(r4, Operand(0));
b(eq, &skip_flooding);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
if (expected.is_reg()) {
SmiTag(expected.reg());
Push(expected.reg());
}
if (actual.is_reg()) {
SmiTag(actual.reg());
Push(actual.reg());
}
if (new_target.is_valid()) {
Push(new_target);
}
Push(fun);
Push(fun);
CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
}
if (actual.is_reg()) {
Pop(actual.reg());
SmiUntag(actual.reg());
}
if (expected.is_reg()) {
Pop(expected.reg());
SmiUntag(expected.reg());
}
}
bind(&skip_flooding);
}
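// [Editor's note: a hedged model of the save/restore protocol inside
// FloodFunctionIfStepping above. Raw integer registers cannot survive a
// GC-triggering runtime call, so the counts are smi-tagged and pushed, then
// popped and untagged in exact mirror order. Hypothetical sketch, not V8's
// smi implementation.]
#include <cstdint>
#include <vector>
using Tagged = intptr_t;
inline Tagged SmiTag(intptr_t v) { return v * 2; }    // smi tag bit is 0
inline intptr_t SmiUntag(Tagged t) { return t / 2; }  // exact: tagged values are even
void RoundTripAcrossRuntimeCall(std::vector<Tagged>& stack,
                                intptr_t& expected, intptr_t& actual) {
  stack.push_back(SmiTag(expected));  // push order: expected, then actual
  stack.push_back(SmiTag(actual));
  // ... runtime call here; the GC may move objects, but smis are immune ...
  actual = SmiUntag(stack.back()); stack.pop_back();    // pop in reverse order
  expected = SmiUntag(stack.back()); stack.pop_back();
}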
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
DCHECK(function.is(r1));
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r3));
if (call_wrapper.NeedsDebugStepCheck()) {
FloodFunctionIfStepping(function, new_target, expected, actual);
}
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
LoadRoot(r3, Heap::kUndefinedValueRootIndex);
}
Label done;
bool definitely_mismatches = false;
InvokePrologue(expected, actual, Handle<Code>::null(), code,
&done, &definitely_mismatches, flag,
InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
call_wrapper);
if (!definitely_mismatches) {
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Register code = r4;
ldr(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
Call(code);
@ -1342,6 +1394,7 @@ void MacroAssembler::InvokeCode(Register code,
void MacroAssembler::InvokeFunction(Register fun,
Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@ -1352,19 +1405,17 @@ void MacroAssembler::InvokeFunction(Register fun,
DCHECK(fun.is(r1));
Register expected_reg = r2;
Register code_reg = r3;
Register temp_reg = r4;
ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
ldr(temp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
ldr(expected_reg,
FieldMemOperand(code_reg,
FieldMemOperand(temp_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
SmiUntag(expected_reg);
ldr(code_reg,
FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
InvokeCode(code_reg, expected, actual, flag, call_wrapper);
InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
}
@ -1382,11 +1433,7 @@ void MacroAssembler::InvokeFunction(Register function,
// Get the function and setup the context.
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
InvokeCode(r3, expected, actual, flag, call_wrapper);
InvokeFunctionCode(r1, no_reg, expected, actual, flag, call_wrapper);
}
@ -1474,10 +1521,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
#endif
// Load the native context of the current context.
int offset =
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
ldr(scratch, FieldMemOperand(scratch, offset));
ldr(scratch, FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
ldr(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@ -1661,11 +1705,7 @@ void MacroAssembler::Allocate(int object_size,
return;
}
DCHECK(!result.is(scratch1));
DCHECK(!result.is(scratch2));
DCHECK(!scratch1.is(scratch2));
DCHECK(!scratch1.is(ip));
DCHECK(!scratch2.is(ip));
DCHECK(!AreAliased(result, scratch1, scratch2, ip));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
@ -1682,48 +1722,46 @@ void MacroAssembler::Allocate(int object_size,
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
intptr_t top =
reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit =
reinterpret_cast<intptr_t>(allocation_limit.address());
intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
DCHECK(result.code() < ip.code());
// Set up allocation top address register.
Register topaddr = scratch1;
mov(topaddr, Operand(allocation_top));
Register top_address = scratch1;
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
Register alloc_limit = ip;
Register result_end = scratch2;
mov(top_address, Operand(allocation_top));
if ((flags & RESULT_CONTAINS_TOP) == 0) {
// Load allocation top into result and allocation limit into ip.
ldm(ia, topaddr, result.bit() | ip.bit());
// Load allocation top into result and allocation limit into alloc_limit.
ldm(ia, top_address, result.bit() | alloc_limit.bit());
} else {
if (emit_debug_code()) {
// Assert that result actually contains top on entry. ip is used
// immediately below so this use of ip does not cause difference with
// respect to register content between debug and release mode.
ldr(ip, MemOperand(topaddr));
cmp(result, ip);
// Assert that result actually contains top on entry.
ldr(alloc_limit, MemOperand(top_address));
cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
// Load allocation limit into ip. Result already contains allocation top.
ldr(ip, MemOperand(topaddr, limit - top));
// Load allocation limit. Result already contains allocation top.
ldr(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
b(eq, &aligned);
if ((flags & PRETENURE) != 0) {
cmp(result, Operand(ip));
cmp(result, Operand(alloc_limit));
b(hs, gc_required);
}
mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
bind(&aligned);
}
@ -1743,15 +1781,15 @@ void MacroAssembler::Allocate(int object_size,
shift += 8;
Operand bits_operand(bits);
DCHECK(bits_operand.instructions_required(this) == 1);
add(scratch2, source, bits_operand, SetCC, cond);
source = scratch2;
add(result_end, source, bits_operand, SetCC, cond);
source = result_end;
cond = cc;
}
}
b(cs, gc_required);
cmp(scratch2, Operand(ip));
cmp(result_end, Operand(alloc_limit));
b(hi, gc_required);
str(scratch2, MemOperand(topaddr));
str(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@ -1760,32 +1798,25 @@ void MacroAssembler::Allocate(int object_size,
}
void MacroAssembler::Allocate(Register object_size,
Register result,
Register scratch1,
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
void MacroAssembler::Allocate(Register object_size, Register result,
Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags) {
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
mov(result, Operand(0x7091));
mov(scratch1, Operand(0x7191));
mov(scratch2, Operand(0x7291));
mov(scratch, Operand(0x7191));
mov(result_end, Operand(0x7291));
}
jmp(gc_required);
return;
}
// Assert that the register arguments are different and that none of
// them are ip. ip is used explicitly in the code generated below.
DCHECK(!result.is(scratch1));
DCHECK(!result.is(scratch2));
DCHECK(!scratch1.is(scratch2));
DCHECK(!object_size.is(ip));
DCHECK(!result.is(ip));
DCHECK(!scratch1.is(ip));
DCHECK(!scratch2.is(ip));
// |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
// is not specified. Other registers must not overlap.
DCHECK(!AreAliased(object_size, result, scratch, ip));
DCHECK(!AreAliased(result_end, result, scratch, ip));
DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
// Check relative positions of allocation top and limit addresses.
// The values must be adjacent in memory to allow the use of LDM.
@ -1795,48 +1826,45 @@ void MacroAssembler::Allocate(Register object_size,
AllocationUtils::GetAllocationTopReference(isolate(), flags);
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
intptr_t top =
reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit =
reinterpret_cast<intptr_t>(allocation_limit.address());
intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
DCHECK(result.code() < ip.code());
// Set up allocation top address.
Register topaddr = scratch1;
mov(topaddr, Operand(allocation_top));
// Set up allocation top address and allocation limit registers.
Register top_address = scratch;
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
Register alloc_limit = ip;
mov(top_address, Operand(allocation_top));
if ((flags & RESULT_CONTAINS_TOP) == 0) {
// Load allocation top into result and allocation limit into ip.
ldm(ia, topaddr, result.bit() | ip.bit());
// Load allocation top into result and allocation limit into alloc_limit.
ldm(ia, top_address, result.bit() | alloc_limit.bit());
} else {
if (emit_debug_code()) {
// Assert that result actually contains top on entry. ip is used
// immediately below so this use of ip does not cause difference with
// respect to register content between debug and release mode.
ldr(ip, MemOperand(topaddr));
cmp(result, ip);
// Assert that result actually contains top on entry.
ldr(alloc_limit, MemOperand(top_address));
cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
// Load allocation limit into ip. Result already contains allocation top.
ldr(ip, MemOperand(topaddr, limit - top));
// Load allocation limit. Result already contains allocation top.
ldr(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
b(eq, &aligned);
if ((flags & PRETENURE) != 0) {
cmp(result, Operand(ip));
cmp(result, Operand(alloc_limit));
b(hs, gc_required);
}
mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
bind(&aligned);
}
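// [Editor's note: both Allocate overloads implement the same bump-pointer
// fast path. A minimal standalone sketch with invented types: load top and
// limit (adjacent in memory, hence the single ldm), bump top by the object
// size, bail to the GC on carry or when the limit is crossed, then publish
// the new top.]
#include <cstdint>
struct AllocationSpace {
  uintptr_t top;    // next free address
  uintptr_t limit;  // end of space; sits one word after top, enabling ldm
};
// Returns 0 when the caller must take the gc_required path.
uintptr_t BumpAllocate(AllocationSpace* space, uintptr_t size_in_bytes) {
  uintptr_t result = space->top;
  uintptr_t result_end = result + size_in_bytes;
  if (result_end < result) return 0;        // b(cs, gc_required): overflow
  if (result_end > space->limit) return 0;  // cmp + b(hi, gc_required)
  space->top = result_end;                  // str result_end, [top_address]
  return result;
}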
@ -1844,20 +1872,20 @@ void MacroAssembler::Allocate(Register object_size,
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
add(result_end, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
} else {
add(scratch2, result, Operand(object_size), SetCC);
add(result_end, result, Operand(object_size), SetCC);
}
b(cs, gc_required);
cmp(scratch2, Operand(ip));
cmp(result_end, Operand(alloc_limit));
b(hi, gc_required);
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
tst(scratch2, Operand(kObjectAlignmentMask));
tst(result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace);
}
str(scratch2, MemOperand(topaddr));
str(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@ -2063,6 +2091,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
LowDwVfpRegister double_scratch,
Label* fail,
int elements_offset) {
DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
Label smi_value, store;
// Handle smi values specially.
@ -2452,24 +2481,17 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
}
void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
int num_arguments,
int result_size) {
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
mov(r0, Operand(num_arguments));
JumpToExternalReference(ext);
}
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
TailCallExternalReference(ExternalReference(fid, isolate()),
num_arguments,
result_size);
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
if (function->nargs >= 0) {
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
mov(r0, Operand(function->nargs));
}
JumpToExternalReference(ExternalReference(fid, isolate()));
}
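// [Editor's note: this rewrite explains the file-wide pattern of
// TailCallRuntime/CallRuntime call sites losing their "(..., nargs,
// result_size)" arguments: both counts now come from the runtime function's
// static descriptor. A hedged sketch with an invented table type follows.]
struct RuntimeFunctionDesc {
  const char* name;
  int nargs;        // negative means variadic: leave r0 (argc) untouched
  int result_size;  // the new code DCHECKs this is 1
};
int ArgCountRegisterValue(const RuntimeFunctionDesc& f, int current_r0) {
  return f.nargs >= 0 ? f.nargs : current_r0;  // mirrors the mov(r0, ...) guard
}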
@ -2489,35 +2511,10 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
GetBuiltinEntry(r2, native_context_index);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(r2));
Call(r2);
call_wrapper.AfterCall();
} else {
DCHECK(flag == JUMP_FUNCTION);
Jump(r2);
}
}
void MacroAssembler::GetBuiltinFunction(Register target,
int native_context_index) {
// Load the builtins object into target register.
ldr(target,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
ldr(target, FieldMemOperand(target, JSGlobalObject::kNativeContextOffset));
// Load the JavaScript builtin function from the builtins object.
ldr(target, ContextOperand(target, native_context_index));
}
void MacroAssembler::GetBuiltinEntry(Register target,
int native_context_index) {
DCHECK(!target.is(r1));
GetBuiltinFunction(r1, native_context_index);
// Load the code entry point from the builtins object.
ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
// Fake a parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
LoadNativeContextSlot(native_context_index, r1);
InvokeFunctionCode(r1, no_reg, expected, expected, flag, call_wrapper);
}
@ -2651,49 +2648,30 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
void MacroAssembler::LoadGlobalProxy(Register dst) {
ldr(dst, GlobalObjectOperand());
ldr(dst, FieldMemOperand(dst, JSGlobalObject::kGlobalProxyOffset));
}
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
ldr(scratch,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
ldr(scratch, FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
DCHECK(IsFastElementsKind(expected_kind));
DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
ldr(scratch,
MemOperand(scratch,
Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
size_t offset = expected_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
ldr(ip, FieldMemOperand(scratch, offset));
ldr(scratch, NativeContextMemOperand());
ldr(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
cmp(map_in_out, ip);
b(ne, no_map_match);
// Use the transitioned cached map.
offset = transitioned_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
ldr(map_in_out, FieldMemOperand(scratch, offset));
ldr(map_in_out,
ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
ldr(function,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
ldr(function,
FieldMemOperand(function, JSGlobalObject::kNativeContextOffset));
// Load the function from the native context.
ldr(function, MemOperand(function, Context::SlotOffset(index)));
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
ldr(dst, NativeContextMemOperand());
ldr(dst, ContextMemOperand(dst, index));
}
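// [Editor's note: a hypothetical model of the new accessor. The removed
// LoadGlobalFunction path chained three loads (context -> global object ->
// native context -> slot); with a direct native-context operand only one
// indexed load remains once the native context register is in hand.]
struct NativeContext { void** slots; };
void* LoadNativeContextSlot(NativeContext* native, int index) {
  return native->slots[index];  // ldr dst, ContextMemOperand(dst, index)
}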
@ -2831,6 +2809,19 @@ void MacroAssembler::AssertFunction(Register object) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, kOperandIsASmiAndNotABoundFunction);
push(object);
CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
pop(object);
Check(eq, kOperandIsNotABoundFunction);
}
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@ -2945,27 +2936,25 @@ void MacroAssembler::AllocateHeapNumberWithValue(Register result,
}
// Copies a fixed number of fields of heap objects from src to dst.
void MacroAssembler::CopyFields(Register dst,
Register src,
LowDwVfpRegister double_scratch,
int field_count) {
int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize);
for (int i = 0; i < double_count; i++) {
vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes));
vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes));
}
void MacroAssembler::AllocateJSValue(Register result, Register constructor,
Register value, Register scratch1,
Register scratch2, Label* gc_required) {
DCHECK(!result.is(constructor));
DCHECK(!result.is(scratch1));
DCHECK(!result.is(scratch2));
DCHECK(!result.is(value));
STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize);
STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes);
// Allocate JSValue in new space.
Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize);
if (remain != 0) {
vldr(double_scratch.low(),
FieldMemOperand(src, (field_count - 1) * kPointerSize));
vstr(double_scratch.low(),
FieldMemOperand(dst, (field_count - 1) * kPointerSize));
}
// Initialize the JSValue.
LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
str(value, FieldMemOperand(result, JSValue::kValueOffset));
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
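// [Editor's note: a sketch of the object layout AllocateJSValue writes, with
// invented field names; only the four-pointer shape and store order (map,
// properties, elements, value) are taken from the code above.]
struct JSValueLayout {
  void* map;         // constructor's initial map
  void* properties;  // empty fixed array
  void* elements;    // empty fixed array
  void* value;       // the wrapped primitive, e.g. the x in new Number(x)
};
static_assert(sizeof(JSValueLayout) == 4 * sizeof(void*),
              "mirrors STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize)");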
@ -3022,15 +3011,15 @@ void MacroAssembler::CopyBytes(Register src,
}
void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
Register end_offset,
void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Register end_address,
Register filler) {
Label loop, entry;
b(&entry);
bind(&loop);
str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
str(filler, MemOperand(current_address, kPointerSize, PostIndex));
bind(&entry);
cmp(start_offset, end_offset);
cmp(current_address, end_address);
b(lo, &loop);
}
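// [Editor's note: the renamed parameters pin down the contract; as a plain
// C++ sketch (hypothetical, but equivalent in effect): store filler at
// current_address, advance one pointer at a time, and stop at end_address,
// which is exclusive.]
void InitializeFieldsWithFiller(void** current_address, void** end_address,
                                void* filler) {
  while (current_address < end_address) {
    *current_address++ = filler;  // str filler, [current_address], post-indexed
  }
}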
@ -3281,8 +3270,8 @@ void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black) {
HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}
@ -3315,27 +3304,6 @@ void MacroAssembler::HasColor(Register object,
}
// Detect some, but not all, common pointer-free objects. This is used by the
// incremental write barrier which doesn't care about oddballs (they are always
// marked black immediately so this code is not hit).
void MacroAssembler::JumpIfDataObject(Register value,
Register scratch,
Label* not_data_object) {
Label is_data_object;
ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
b(eq, &is_data_object);
DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
b(ne, not_data_object);
bind(&is_data_object);
}
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
@ -3350,96 +3318,23 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
}
void MacroAssembler::EnsureNotWhite(
Register value,
Register bitmap_scratch,
Register mask_scratch,
Register load_scratch,
Label* value_is_white_and_not_data) {
void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
Register mask_scratch, Register load_scratch,
Label* value_is_white) {
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
Label done;
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
tst(mask_scratch, load_scratch);
b(ne, &done);
if (emit_debug_code()) {
// Check for impossible bit pattern.
Label ok;
// LSL may overflow, making the check conservative.
tst(load_scratch, Operand(mask_scratch, LSL, 1));
b(eq, &ok);
stop("Impossible marking bit pattern");
bind(&ok);
}
// Value is white. We check whether it is data that doesn't need scanning.
// Currently only checks for HeapNumber and non-cons strings.
Register map = load_scratch; // Holds map while checking type.
Register length = load_scratch; // Holds length of object after testing type.
Label is_data_object;
// Check for heap-number
ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
CompareRoot(map, Heap::kHeapNumberMapRootIndex);
mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
b(eq, &is_data_object);
// Check for strings.
DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
Register instance_type = load_scratch;
ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
b(ne, value_is_white_and_not_data);
// It's a non-indirect (non-cons and non-slice) string.
// If it's external, the length is just ExternalString::kSize.
// Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
// External strings are the only ones with the kExternalStringTag bit
// set.
DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
tst(instance_type, Operand(kExternalStringTag));
mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
b(ne, &is_data_object);
// Sequential string, either Latin1 or UC16.
// For Latin1 (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
ldr(ip, FieldMemOperand(value, String::kLengthOffset));
tst(instance_type, Operand(kStringEncodingMask));
mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
and_(length, length, Operand(~kObjectAlignmentMask));
bind(&is_data_object);
// Value is a data object, and it is white. Mark it black. Since we know
// that the object is white we can make it black by flipping one bit.
ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
orr(ip, ip, Operand(mask_scratch));
str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
add(ip, ip, Operand(length));
str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
bind(&done);
b(eq, value_is_white);
}
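// A minimal standalone sketch (not V8 code) of the bit test JumpIfWhite
// performs above. With the new encoding, white "00", grey "10", black "11",
// both live colors have a 1 in the first bit position, so a single tst of
// the mark bitmap against the object's mask decides white vs. live.
#include <cassert>
#include <cstdint>

bool IsWhite(uint32_t bitmap_word, uint32_t first_mark_bit_mask) {
  // Mirrors the ldr/tst pair: a clear first mark bit means white, and the
  // generated code takes the b(eq, value_is_white) branch.
  return (bitmap_word & first_mark_bit_mask) == 0;
}

int main() {
  uint32_t mask = 1u << 4;             // first mark bit of some object
  assert(IsWhite(0x00u << 4, mask));   // white "00"
  assert(!IsWhite(0x01u << 4, mask));  // grey "10" (first bit set)
  assert(!IsWhite(0x03u << 4, mask));  // black "11"
}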
@@ -3661,12 +3556,11 @@ bool AreAliased(Register reg1,
#endif
CodePatcher::CodePatcher(byte* address,
int instructions,
CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache)
: address_(address),
size_(instructions * Assembler::kInstrSize),
masm_(NULL, address, size_ + Assembler::kGap),
masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
flush_cache_(flush_cache) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
@@ -3678,7 +3572,7 @@ CodePatcher::CodePatcher(byte* address,
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
if (flush_cache_ == FLUSH) {
Assembler::FlushICacheWithoutIsolate(address_, size_);
Assembler::FlushICache(masm_.isolate(), address_, size_);
}
// Check that the code was patched as expected.
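// A compilable sketch (stand-in types, not V8's) of the RAII shape
// CodePatcher now has: the Isolate* comes in through the constructor and is
// kept by the embedded assembler, so the destructor can flush the
// instruction cache without the removed Isolate::Current() lookup.
#include <cstddef>
#include <cstdint>

struct Isolate {};  // stand-in for v8::internal::Isolate

inline void FlushICache(Isolate* /*isolate*/, void* /*start*/, size_t /*size*/) {
  // A real implementation would invalidate [start, start + size); a portable
  // stand-in could call __builtin___clear_cache on GCC/Clang.
}

class ScopedPatcher {
 public:
  ScopedPatcher(Isolate* isolate, uint8_t* address, size_t size)
      : isolate_(isolate), address_(address), size_(size) {}
  ~ScopedPatcher() { FlushICache(isolate_, address_, size_); }  // as in ~CodePatcher

 private:
  Isolate* isolate_;
  uint8_t* address_;
  size_t size_;
};

int main() {
  Isolate isolate;
  uint8_t code[16] = {0};
  ScopedPatcher patcher(&isolate, code, sizeof(code));
  // ... patch instructions here; the flush happens on scope exit ...
}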

131
deps/v8/src/arm/macro-assembler-arm.h

@@ -24,6 +24,7 @@ const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r5};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r6};
const Register kInterpreterDispatchTableRegister = {Register::kCode_r8};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_r0};
const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r3};
const Register kRuntimeCallFunctionRegister = {Register::kCode_r1};
const Register kRuntimeCallArgCountRegister = {Register::kCode_r0};
@@ -87,11 +88,8 @@ enum TargetAddressStorageMode {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
// The isolate parameter can be NULL if the macro assembler should
// not use isolate-dependent functionality. In this case, it's the
// responsibility of the caller to never invoke such a function on the
// macro assembler.
MacroAssembler(Isolate* isolate, void* buffer, int size);
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
// Returns the size of a call in instructions. Note, the value returned is
@@ -244,22 +242,10 @@ class MacroAssembler: public Assembler {
Register scratch1,
Label* on_black);
// Checks the color of an object. If the object is already grey or black
// then we just fall through, since it is already live. If it is white and
// we can determine that it doesn't need to be scanned, then we just mark it
// black and fall through. For the rest we jump to the label so the
// incremental marker can fix its assumptions.
void EnsureNotWhite(Register object,
Register scratch1,
Register scratch2,
Register scratch3,
Label* object_is_white_and_not_data);
// Detects conservatively whether an object is data-only, i.e. it does not
// need to be scanned by the garbage collector.
void JumpIfDataObject(Register value,
Register scratch,
Label* not_data_object);
// Checks the color of an object. If the object is white we jump to the
// incremental marker.
void JumpIfWhite(Register value, Register scratch1, Register scratch2,
Register scratch3, Label* value_is_white);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
@@ -610,8 +596,15 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
// Load the global object from the current context.
void LoadGlobalObject(Register dst) {
LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
}
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst);
void LoadGlobalProxy(Register dst) {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
}
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
@@ -624,7 +617,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
void LoadGlobalFunction(int index, Register function);
void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -642,15 +635,19 @@ class MacroAssembler: public Assembler {
// JavaScript invokes
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper);
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
void FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper);
@@ -763,12 +760,8 @@ class MacroAssembler: public Assembler {
Label* gc_required,
AllocationFlags flags);
void Allocate(Register object_size,
Register result,
Register scratch1,
Register scratch2,
Label* gc_required,
AllocationFlags flags);
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
void AllocateTwoByteString(Register result,
Register length,
@@ -813,11 +806,11 @@ class MacroAssembler: public Assembler {
Register heap_number_map,
Label* gc_required);
// Copies a fixed number of fields of heap objects from src to dst.
void CopyFields(Register dst,
Register src,
LowDwVfpRegister double_scratch,
int field_count);
// Allocate and initialize a JSValue wrapper with the specified {constructor}
// and {value}.
void AllocateJSValue(Register result, Register constructor, Register value,
Register scratch1, Register scratch2,
Label* gc_required);
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
@@ -827,12 +820,11 @@ class MacroAssembler: public Assembler {
Register length,
Register scratch);
// Initialize fields with filler values. Fields starting at |start_offset|
// not including end_offset are overwritten with the value in |filler|. At
// the end of the loop, |start_offset| takes the value of |end_offset|.
void InitializeFieldsWithFiller(Register start_offset,
Register end_offset,
Register filler);
// Initialize fields with filler values. Fields starting at |current_address|
// not including |end_address| are overwritten with the value in |filler|. At
// the end of the loop, |current_address| takes the value of |end_address|.
void InitializeFieldsWithFiller(Register current_address,
Register end_address, Register filler);
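// A C-level model (not the ARM emitter) of the renamed
// InitializeFieldsWithFiller contract documented above: fields from
// current_address up to but not including end_address get the filler, and on
// exit current_address has taken the value of end_address.
#include <cassert>
#include <cstdint>

void FillFields(uint64_t*& current_address, uint64_t* end_address,
                uint64_t filler) {
  while (current_address < end_address) *current_address++ = filler;
}

int main() {
  uint64_t fields[4] = {0, 0, 0, 0};
  uint64_t* cur = fields;
  FillFields(cur, fields + 4, 0xdeadbeefu);
  assert(cur == fields + 4);  // post-condition from the comment above
  assert(fields[3] == 0xdeadbeefu);
}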
// ---------------------------------------------------------------------------
// Support functions.
@@ -1079,33 +1071,30 @@ class MacroAssembler: public Assembler {
void CallRuntime(const Runtime::Function* f,
int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
void CallRuntime(Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
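// A standalone illustration (stand-in table, not V8's) of why the new
// CallRuntime(fid) overload above can drop num_arguments: each runtime
// function already records its arity, so the macro assembler can look it up
// rather than trust every call site to repeat it.
#include <cassert>

struct RuntimeFunction { const char* name; int nargs; };
enum FunctionId { kStringCompare, kSubString, kNumIds };

const RuntimeFunction* FunctionForId(FunctionId id) {
  static const RuntimeFunction kTable[kNumIds] = {
      {"StringCompare", 2}, {"SubString", 3}};  // illustrative arities
  return &kTable[id];
}

int main() {
  // The one-argument overload effectively expands to this lookup.
  assert(FunctionForId(kStringCompare)->nargs == 2);
  assert(FunctionForId(kSubString)->nargs == 3);
}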
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
int num_arguments);
// Tail call of a runtime routine (jump).
// Like JumpToExternalReference, but also takes care of passing the number
// of parameters.
void TailCallExternalReference(const ExternalReference& ext,
int num_arguments,
int result_size);
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size);
void TailCallRuntime(Runtime::FunctionId fid);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
@@ -1158,13 +1147,6 @@ class MacroAssembler: public Assembler {
void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
// Store the code object for the given builtin in the target register and
// setup the function in r1.
void GetBuiltinEntry(Register target, int native_context_index);
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, int native_context_index);
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
@@ -1312,6 +1294,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
void AssertBoundFunction(Register object);
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1462,8 +1448,6 @@ class MacroAssembler: public Assembler {
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
Register code_reg,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -1516,8 +1500,7 @@ class CodePatcher {
DONT_FLUSH
};
CodePatcher(byte* address,
int instructions,
CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache = FLUSH);
~CodePatcher();
@@ -1545,13 +1528,13 @@ class CodePatcher {
// -----------------------------------------------------------------------------
// Static helper functions.
inline MemOperand ContextOperand(Register context, int index = 0) {
inline MemOperand ContextMemOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}
inline MemOperand GlobalObjectOperand() {
return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
inline MemOperand NativeContextMemOperand() {
return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
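// The slot arithmetic behind ContextMemOperand above, as a compilable
// sketch; the header size and tag below are assumptions for illustration,
// not V8's authoritative layout constants.
#include <cassert>

constexpr int kPointerSize = 8;    // assumed 64-bit target
constexpr int kHeaderSize = 16;    // assumed FixedArray header size
constexpr int kHeapObjectTag = 1;  // tagged pointers are offset by one

constexpr int SlotOffset(int index) {
  return kHeaderSize + index * kPointerSize - kHeapObjectTag;
}

int main() {
  assert(SlotOffset(0) == 15);  // first slot, untagging folded into the offset
  assert(SlotOffset(2) == 31);
}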

124
deps/v8/src/arm/simulator-arm.cc

@@ -390,7 +390,7 @@ void ArmDebugger::Debug() {
reinterpret_cast<intptr_t>(cur), *cur, *cur);
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int value = *cur;
Heap* current_heap = v8::internal::Isolate::Current()->heap();
Heap* current_heap = sim_->isolate_->heap();
if (((value & 1) == 0) || current_heap->Contains(obj)) {
PrintF(" (");
if ((value & 1) == 0) {
@@ -785,12 +785,12 @@ Simulator::~Simulator() { free(stack_); }
// offset from the svc instruction so the simulator knows what to call.
class Redirection {
public:
Redirection(void* external_function, ExternalReference::Type type)
Redirection(Isolate* isolate, void* external_function,
ExternalReference::Type type)
: external_function_(external_function),
swi_instruction_(al | (0xf*B24) | kCallRtRedirected),
swi_instruction_(al | (0xf * B24) | kCallRtRedirected),
type_(type),
next_(NULL) {
Isolate* isolate = Isolate::Current();
next_ = isolate->simulator_redirection();
Simulator::current(isolate)->
FlushICache(isolate->simulator_i_cache(),
@@ -806,9 +805,8 @@ class Redirection {
void* external_function() { return external_function_; }
ExternalReference::Type type() { return type_; }
static Redirection* Get(void* external_function,
static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
Isolate* isolate = Isolate::Current();
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) {
@@ -816,7 +815,7 @@ class Redirection {
return current;
}
}
return new Redirection(external_function, type);
return new Redirection(isolate, external_function, type);
}
static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
@@ -861,9 +860,10 @@ void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
}
void* Simulator::RedirectExternalReference(void* external_function,
void* Simulator::RedirectExternalReference(Isolate* isolate,
void* external_function,
ExternalReference::Type type) {
Redirection* redirection = Redirection::Get(external_function, type);
Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address_of_swi_instruction();
}
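// A minimal model (not V8 code) of the redirection lookup above: a
// per-isolate singly linked list is scanned for an existing entry before a
// new one is created, with the Isolate now passed in explicitly instead of
// fetched through Isolate::Current().
#include <cassert>

struct Redirection {
  void* external_function;
  Redirection* next;
};
struct Isolate { Redirection* redirection_list = nullptr; };

Redirection* Get(Isolate* isolate, void* fn) {
  for (Redirection* r = isolate->redirection_list; r != nullptr; r = r->next) {
    if (r->external_function == fn) return r;  // reuse the existing redirect
  }
  Redirection* r = new Redirection{fn, isolate->redirection_list};
  isolate->redirection_list = r;
  return r;
}

int main() {
  Isolate isolate;
  int target;
  Redirection* first = Get(&isolate, &target);
  assert(Get(&isolate, &target) == first);  // memoized per isolate
}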
@@ -3157,14 +3157,15 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
DecodeVCMP(instr);
} else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
// vsqrt
lazily_initialize_fast_sqrt(isolate_);
if (instr->SzValue() == 0x1) {
double dm_value = get_double_from_d_register(vm);
double dd_value = fast_sqrt(dm_value);
double dd_value = fast_sqrt(dm_value, isolate_);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sm_value = get_float_from_s_register(m);
float sd_value = fast_sqrt(sm_value);
float sd_value = fast_sqrt(sm_value, isolate_);
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
}
@@ -3177,10 +3178,17 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
}
} else if (((instr->Opc2Value() == 0x6)) && (instr->Opc3Value() == 0x3)) {
// vrintz - truncate
double dm_value = get_double_from_d_register(vm);
double dd_value = trunc(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
if (instr->SzValue() == 0x1) {
double dm_value = get_double_from_d_register(vm);
double dd_value = trunc(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sm_value = get_float_from_s_register(m);
float sd_value = truncf(sm_value);
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
}
} else {
UNREACHABLE(); // Not used by V8.
}
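// A tiny standalone check of the vrintz ("round towards zero") semantics the
// hunk above adds for single precision: trunc and truncf drop the fraction
// regardless of sign.
#include <cassert>
#include <cmath>

int main() {
  assert(std::trunc(-1.7) == -1.0);   // double path (SzValue == 1)
  assert(std::truncf(2.9f) == 2.0f);  // new single-precision path
}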
@@ -3869,44 +3877,60 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
break;
case 0x1D:
if (instr->Opc1Value() == 0x7 && instr->Opc3Value() == 0x1 &&
instr->Bits(11, 9) == 0x5 && instr->Bits(19, 18) == 0x2 &&
instr->Bit(8) == 0x1) {
int vm = instr->VFPMRegValue(kDoublePrecision);
int vd = instr->VFPDRegValue(kDoublePrecision);
double dm_value = get_double_from_d_register(vm);
double dd_value = 0.0;
int rounding_mode = instr->Bits(17, 16);
switch (rounding_mode) {
case 0x0: // vrinta - round with ties to away from zero
dd_value = round(dm_value);
break;
case 0x1: { // vrintn - round with ties to even
dd_value = std::floor(dm_value);
double error = dm_value - dd_value;
// Take care of correctly handling the range [-0.5, -0.0], which
// must yield -0.0.
if ((-0.5 <= dm_value) && (dm_value < 0.0)) {
dd_value = -0.0;
// If the error is greater than 0.5, or is equal to 0.5 and the
// integer result is odd, round up.
} else if ((error > 0.5) ||
((error == 0.5) && (fmod(dd_value, 2) != 0))) {
dd_value++;
instr->Bits(11, 9) == 0x5 && instr->Bits(19, 18) == 0x2) {
if (instr->SzValue() == 0x1) {
int vm = instr->VFPMRegValue(kDoublePrecision);
int vd = instr->VFPDRegValue(kDoublePrecision);
double dm_value = get_double_from_d_register(vm);
double dd_value = 0.0;
int rounding_mode = instr->Bits(17, 16);
switch (rounding_mode) {
case 0x0: // vrinta - round with ties to away from zero
dd_value = round(dm_value);
break;
case 0x1: { // vrintn - round with ties to even
dd_value = nearbyint(dm_value);
break;
}
break;
case 0x2: // vrintp - ceil
dd_value = ceil(dm_value);
break;
case 0x3: // vrintm - floor
dd_value = floor(dm_value);
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
break;
}
case 0x2: // vrintp - ceil
dd_value = std::ceil(dm_value);
break;
case 0x3: // vrintm - floor
dd_value = std::floor(dm_value);
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
break;
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
int m = instr->VFPMRegValue(kSinglePrecision);
int d = instr->VFPDRegValue(kSinglePrecision);
float sm_value = get_float_from_s_register(m);
float sd_value = 0.0;
int rounding_mode = instr->Bits(17, 16);
switch (rounding_mode) {
case 0x0: // vrinta - round with ties to away from zero
sd_value = roundf(sm_value);
break;
case 0x1: { // vrintn - round with ties to even
sd_value = nearbyintf(sm_value);
break;
}
case 0x2: // vrintp - ceil
sd_value = ceilf(sm_value);
break;
case 0x3: // vrintm - floor
sd_value = floorf(sm_value);
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
break;
}
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
}
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
UNIMPLEMENTED();
}
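// A standalone sketch of the vrint rounding-mode dispatch used above.
// nearbyint stands in for "round to nearest, ties to even", which assumes
// the C environment's default FE_TONEAREST rounding mode.
#include <cassert>
#include <cmath>

double Round(int mode, double v) {
  switch (mode) {
    case 0: return std::round(v);      // vrinta: ties away from zero
    case 1: return std::nearbyint(v);  // vrintn: ties to even
    case 2: return std::ceil(v);       // vrintp
    case 3: return std::floor(v);      // vrintm
    default: return v;                 // unreachable in the simulator
  }
}

int main() {
  assert(Round(0, 0.5) == 1.0);   // away from zero
  assert(Round(1, 0.5) == 0.0);   // ties to even picks the even neighbour
  assert(Round(2, 0.1) == 1.0);
  assert(Round(3, -0.1) == -1.0);
}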

44
deps/v8/src/arm/simulator-arm.h

@@ -22,7 +22,7 @@ namespace v8 {
namespace internal {
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
@@ -33,9 +33,10 @@ typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
// should act as a function matching the type arm_regexp_matcher.
// The fifth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
(FUNCTION_CAST<arm_regexp_matcher>(entry)( \
p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
(FUNCTION_CAST<arm_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6, \
p7, p8))
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on arm uses the C stack, we
@@ -48,11 +49,15 @@ class SimulatorStack : public v8::internal::AllStatic {
return c_limit;
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
uintptr_t try_catch_address) {
USE(isolate);
return try_catch_address;
}
static inline void UnregisterCTryCatch() { }
static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
USE(isolate);
}
};
} // namespace internal
@@ -344,7 +349,7 @@ class Simulator {
// Runtime call support.
static void* RedirectExternalReference(
void* external_function,
Isolate* isolate, void* external_function,
v8::internal::ExternalReference::Type type);
// Handle arguments and return value for runtime FP functions.
@@ -426,17 +431,17 @@ class Simulator {
// When running with the simulator transition into simulated execution at this
// point.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
#define CALL_GENERATED_FP_INT(entry, p0, p1) \
Simulator::current(Isolate::Current())->CallFPReturnsInt( \
FUNCTION_ADDR(entry), p0, p1)
#define CALL_GENERATED_FP_INT(isolate, entry, p0, p1) \
Simulator::current(isolate)->CallFPReturnsInt(FUNCTION_ADDR(entry), p0, p1)
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
Simulator::current(Isolate::Current())->Call( \
entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
Simulator::current(isolate) \
->Call(entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
// The simulator has its own stack. Thus it has a different stack limit from
@@ -450,13 +455,14 @@ class SimulatorStack : public v8::internal::AllStatic {
return Simulator::current(isolate)->StackLimit(c_limit);
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
Simulator* sim = Simulator::current(Isolate::Current());
static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
uintptr_t try_catch_address) {
Simulator* sim = Simulator::current(isolate);
return sim->PushAddress(try_catch_address);
}
static inline void UnregisterCTryCatch() {
Simulator::current(Isolate::Current())->PopAddress();
static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
Simulator::current(isolate)->PopAddress();
}
};
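// A schematic (stand-in types) of the pattern both SimulatorStack variants
// above now share: the caller always supplies the Isolate, and the native
// build simply ignores it, so call sites compile identically with or without
// the simulator.
#include <cassert>
#include <cstdint>
#include <vector>

struct Isolate { std::vector<uintptr_t> sim_addresses; };

// Simulator build: try-catch addresses live on the simulator's own stack.
uintptr_t RegisterCTryCatchSim(Isolate* isolate, uintptr_t addr) {
  isolate->sim_addresses.push_back(addr);
  return addr;  // simplified; the real code returns a simulator stack slot
}
void UnregisterCTryCatchSim(Isolate* isolate) {
  isolate->sim_addresses.pop_back();
}

// Native build: the isolate parameter is accepted but unused (USE(isolate)).
uintptr_t RegisterCTryCatchNative(Isolate* /*isolate*/, uintptr_t addr) {
  return addr;
}

int main() {
  Isolate isolate;
  RegisterCTryCatchSim(&isolate, 0x1000);
  UnregisterCTryCatchSim(&isolate);
  assert(isolate.sim_addresses.empty());
  assert(RegisterCTryCatchNative(&isolate, 0x2000) == 0x2000);
}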

24
deps/v8/src/arm64/assembler-arm64-inl.h

@@ -31,7 +31,8 @@ void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
Assembler::set_target_address_at(isolate_, pc_, host_, target,
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -648,24 +649,24 @@ Address Assembler::return_address_from_call_start(Address pc) {
void Assembler::deserialization_set_special_target_at(
Address constant_pool_entry, Code* code, Address target) {
Isolate* isolate, Address constant_pool_entry, Code* code, Address target) {
Memory::Address_at(constant_pool_entry) = target;
}
void Assembler::deserialization_set_target_internal_reference_at(
Address pc, Address target, RelocInfo::Mode mode) {
Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
void Assembler::set_target_address_at(Isolate* isolate, Address pc,
Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode) {
Memory::Address_at(target_pointer_address_at(pc)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
// Assembler::FlushICacheWithoutIsolate(pc, sizeof(target));
// Assembler::FlushICache(isolate(), pc, sizeof(target));
// However, on ARM, an instruction is actually patched in the case of
// embedded constants of the form:
// ldr ip, [pc, #...]
@@ -674,12 +675,11 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
}
void Assembler::set_target_address_at(Address pc,
Code* code,
void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
Address target,
ICacheFlushMode icache_flush_mode) {
Address constant_pool = code ? code->constant_pool() : NULL;
set_target_address_at(pc, constant_pool, target, icache_flush_mode);
set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
}
@@ -725,7 +725,7 @@ void RelocInfo::set_target_object(Object* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, host_,
Assembler::set_target_address_at(isolate_, pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
@@ -832,7 +832,7 @@ Address RelocInfo::debug_call_address() {
void RelocInfo::set_debug_call_address(Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
STATIC_ASSERT(Assembler::kPatchDebugBreakSlotAddressOffset == 0);
Assembler::set_target_address_at(pc_, host_, target);
Assembler::set_target_address_at(isolate_, pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -848,7 +848,7 @@ void RelocInfo::WipeOut() {
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = NULL;
} else {
Assembler::set_target_address_at(pc_, host_, NULL);
Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
}
}

34
deps/v8/src/arm64/assembler-arm64.cc

@@ -511,7 +511,7 @@ void ConstPool::EmitEntries() {
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
instr->SetImmPCOffsetTarget(assm_->pc());
instr->SetImmPCOffsetTarget(assm_->isolate(), assm_->pc());
}
assm_->dc64(data);
}
@@ -527,7 +527,7 @@ void ConstPool::EmitEntries() {
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
instr->SetImmPCOffsetTarget(assm_->pc());
instr->SetImmPCOffsetTarget(assm_->isolate(), assm_->pc());
assm_->dc64(unique_it->first);
}
unique_entries_.clear();
@@ -589,6 +589,7 @@ void Assembler::GetCode(CodeDesc* desc) {
static_cast<int>((reinterpret_cast<byte*>(buffer_) + buffer_size_) -
reloc_info_writer.pos());
desc->origin = this;
desc->constant_pool_size = 0;
}
}
@@ -657,22 +658,22 @@ void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
} else if (branch == next_link) {
// The branch is the last (but not also the first) instruction in the chain.
prev_link->SetImmPCOffsetTarget(prev_link);
prev_link->SetImmPCOffsetTarget(isolate(), prev_link);
} else {
// The branch is in the middle of the chain.
if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
prev_link->SetImmPCOffsetTarget(next_link);
prev_link->SetImmPCOffsetTarget(isolate(), next_link);
} else if (label_veneer != NULL) {
// Use the veneer for all previous links in the chain.
prev_link->SetImmPCOffsetTarget(prev_link);
prev_link->SetImmPCOffsetTarget(isolate(), prev_link);
end_of_chain = false;
link = next_link;
while (!end_of_chain) {
next_link = link->ImmPCOffsetTarget();
end_of_chain = (link == next_link);
link->SetImmPCOffsetTarget(label_veneer);
link->SetImmPCOffsetTarget(isolate(), label_veneer);
link = next_link;
}
} else {
@@ -743,10 +744,11 @@ void Assembler::bind(Label* label) {
// Internal references do not get patched to an instruction but directly
// to an address.
internal_reference_positions_.push_back(linkoffset);
PatchingAssembler patcher(link, 2);
PatchingAssembler patcher(isolate(), link, 2);
patcher.dc64(reinterpret_cast<uintptr_t>(pc_));
} else {
link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
link->SetImmPCOffsetTarget(isolate(),
reinterpret_cast<Instruction*>(pc_));
}
// Link the label to the previous link in the chain.
@@ -2829,6 +2831,7 @@ void Assembler::GrowBuffer() {
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.origin = this;
desc.instr_size = pc_offset();
desc.reloc_size =
@@ -2866,9 +2869,9 @@
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
RelocInfo rinfo(isolate(), reinterpret_cast<byte*>(pc_), rmode, data, NULL);
if (((rmode >= RelocInfo::COMMENT) &&
(rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL)) ||
(rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CALL)) ||
(rmode == RelocInfo::INTERNAL_REFERENCE) ||
(rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
(rmode == RelocInfo::DEOPT_REASON) ||
@@ -2895,8 +2898,8 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
RelocInfo reloc_info_with_ast_id(
reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL);
RelocInfo reloc_info_with_ast_id(isolate(), reinterpret_cast<byte*>(pc_),
rmode, RecordedAstId().ToInt(), NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
@@ -2985,9 +2988,8 @@ bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
void Assembler::RecordVeneerPool(int location_offset, int size) {
RelocInfo rinfo(buffer_ + location_offset,
RelocInfo::VENEER_POOL, static_cast<intptr_t>(size),
NULL);
RelocInfo rinfo(isolate(), buffer_ + location_offset, RelocInfo::VENEER_POOL,
static_cast<intptr_t>(size), NULL);
reloc_info_writer.Write(&rinfo);
}
@@ -3029,7 +3031,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
// to the label.
Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
RemoveBranchFromLabelLinkChain(branch, label, veneer);
branch->SetImmPCOffsetTarget(veneer);
branch->SetImmPCOffsetTarget(isolate(), veneer);
b(label);
#ifdef DEBUG
DCHECK(SizeOfCodeGeneratedSince(&veneer_size_check) <=

30
deps/v8/src/arm64/assembler-arm64.h

@@ -799,14 +799,12 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
inline static Address target_address_at(Address pc, Address constant_pool);
inline static void set_target_address_at(
Address pc, Address constant_pool, Address target,
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
static inline Address target_address_at(Address pc, Code* code);
static inline void set_target_address_at(Address pc,
Code* code,
Address target,
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED);
static inline void set_target_address_at(
Isolate* isolate, Address pc, Code* code, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address of
// that call in the instruction stream.
@@ -819,11 +817,12 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
Address constant_pool_entry, Code* code, Address target);
Isolate* isolate, Address constant_pool_entry, Code* code,
Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
Address pc, Address target,
Isolate* isolate, Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// All addresses in the constant pool are the same size as pointers.
@@ -934,7 +933,7 @@ class Assembler : public AssemblerBase {
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record the emission of a constant pool.
//
@@ -2150,15 +2149,14 @@ class PatchingAssembler : public Assembler {
// If more or fewer instructions than expected are generated or if some
// relocation information takes space in the buffer, the PatchingAssembler
// will crash trying to grow the buffer.
PatchingAssembler(Instruction* start, unsigned count)
: Assembler(NULL,
reinterpret_cast<byte*>(start),
count * kInstructionSize + kGap) {
PatchingAssembler(Isolate* isolate, Instruction* start, unsigned count)
: Assembler(isolate, reinterpret_cast<byte*>(start),
count * kInstructionSize + kGap) {
StartBlockPools();
}
PatchingAssembler(byte* start, unsigned count)
: Assembler(NULL, start, count * kInstructionSize + kGap) {
PatchingAssembler(Isolate* isolate, byte* start, unsigned count)
: Assembler(isolate, start, count * kInstructionSize + kGap) {
// Block constant pool emission.
StartBlockPools();
}
@@ -2173,7 +2171,7 @@ class PatchingAssembler : public Assembler {
DCHECK(IsConstPoolEmpty());
// Flush the Instruction cache.
size_t length = buffer_size_ - kGap;
Assembler::FlushICacheWithoutIsolate(buffer_, length);
Assembler::FlushICache(isolate(), buffer_, length);
}
// See definition of PatchAdrFar() for details.

1817
deps/v8/src/arm64/builtins-arm64.cc

File diff suppressed because it is too large

435
deps/v8/src/arm64/code-stubs-arm64.cc

@@ -223,7 +223,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
Register right_type = scratch;
if ((cond == lt) || (cond == gt)) {
// Call runtime on identical JSObjects. Otherwise return equal.
__ JumpIfObjectType(right, right_type, right_type, FIRST_SPEC_OBJECT_TYPE,
__ JumpIfObjectType(right, right_type, right_type, FIRST_JS_RECEIVER_TYPE,
slow, ge);
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
@@ -245,7 +245,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
__ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
&heap_number);
// Comparing JS objects with <=, >= is complicated.
__ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
__ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
__ B(ge, slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
@@ -336,10 +336,10 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JS object or an oddball value, then they are not
// equal since their pointers are different.
// There is no test for undetectability in strict equality.
STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Label right_non_object;
__ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
__ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
__ B(lt, &right_non_object);
// Return non-zero - x0 already contains a non-zero pointer.
@@ -356,9 +356,9 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If right is not ODDBALL, test left. Otherwise, set eq condition.
__ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);
// If right or left is not ODDBALL, test left >= FIRST_SPEC_OBJECT_TYPE.
// If right or left is not ODDBALL, test left >= FIRST_JS_RECEIVER_TYPE.
// Otherwise, right or left is ODDBALL, so set a ge condition.
__ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NVFlag, ne);
__ Ccmp(left_type, FIRST_JS_RECEIVER_TYPE, NVFlag, ne);
__ B(ge, &return_not_equal);
@@ -471,11 +471,11 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ Bind(&object_test);
__ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
__ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
// If right >= FIRST_SPEC_OBJECT_TYPE, test left.
// Otherwise, right < FIRST_SPEC_OBJECT_TYPE, so set lt condition.
__ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NFlag, ge);
// If right >= FIRST_JS_RECEIVER_TYPE, test left.
// Otherwise, right < FIRST_JS_RECEIVER_TYPE, so set lt condition.
__ Ccmp(left_type, FIRST_JS_RECEIVER_TYPE, NFlag, ge);
__ B(lt, not_both_strings);
@@ -653,8 +653,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
if (cond == eq) {
__ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
1);
__ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
} else {
int ncr; // NaN compare result
if ((cond == lt) || (cond == le)) {
@@ -668,9 +667,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ TailCallRuntime(
is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
1);
__ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
: Runtime::kCompare);
}
__ Bind(&miss);
@@ -966,7 +964,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Bind(&call_runtime);
// Put the arguments back on the stack.
__ Push(base_tagged, exponent_tagged);
__ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
__ TailCallRuntime(Runtime::kMathPowRT);
// Return.
__ Bind(&done);
@@ -1550,17 +1548,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ Ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
__ Tbnz(scratch, Map::kHasNonInstancePrototype, &slow_case);
// Ensure that {function} is not bound.
Register const shared_info = scratch;
Register const scratch_w = scratch.W();
__ Ldr(shared_info,
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
// On 64-bit platforms, compiler hints field is not a smi. See definition of
// kCompilerHintsOffset in src/objects.h.
__ Ldr(scratch_w, FieldMemOperand(shared_info,
SharedFunctionInfo::kCompilerHintsOffset));
__ Tbnz(scratch_w, SharedFunctionInfo::kBoundFunction, &slow_case);
// Get the "prototype" (or initial map) of the {function}.
__ Ldr(function_prototype,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1585,27 +1572,47 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Loop through the prototype chain looking for the {function} prototype.
// Assume true, and change to false if not found.
Register const object_prototype = object_map;
Register const object_instance_type = function_map;
Register const map_bit_field = function_map;
Register const null = scratch;
Label done, loop;
__ LoadRoot(x0, Heap::kTrueValueRootIndex);
Register const result = x0;
Label done, loop, fast_runtime_fallback;
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ LoadRoot(null, Heap::kNullValueRootIndex);
__ Bind(&loop);
__ Ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
__ Cmp(object_prototype, function_prototype);
// Check if the object needs to be access checked.
__ Ldrb(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
__ TestAndBranchIfAnySet(map_bit_field, 1 << Map::kIsAccessCheckNeeded,
&fast_runtime_fallback);
// Check if the current object is a Proxy.
__ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
__ B(eq, &fast_runtime_fallback);
__ Ldr(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
__ Cmp(object, function_prototype);
__ B(eq, &done);
__ Cmp(object_prototype, null);
__ Ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
__ Cmp(object, null);
__ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
__ B(ne, &loop);
__ LoadRoot(x0, Heap::kFalseValueRootIndex);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ Bind(&done);
__ StoreRoot(x0, Heap::kInstanceofCacheAnswerRootIndex);
__ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret();
// Slow-case: Call the runtime function.
// Found a Proxy or an access check is needed: call the runtime.
__ Bind(&fast_runtime_fallback);
__ Push(object, function_prototype);
// Invalidate the instanceof cache.
__ Move(scratch, Smi::FromInt(0));
__ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
__ TailCallRuntime(Runtime::kHasInPrototypeChain);
// Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
__ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
__ TailCallRuntime(Runtime::kInstanceOf);
}
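// A standalone model (not V8 code) of the instanceof fast path rewritten
// above: walk the prototype chain, but bail out to the runtime as soon as a
// proxy or an access-checked object shows up, since either can intercept the
// lookup.
#include <cassert>

struct Obj {
  Obj* prototype = nullptr;
  bool is_proxy = false;
  bool needs_access_check = false;
};
enum class Result { kTrue, kFalse, kRuntime };

Result HasInPrototypeChain(Obj* object, Obj* function_prototype) {
  while (true) {
    if (object->needs_access_check || object->is_proxy) return Result::kRuntime;
    Obj* proto = object->prototype;
    if (proto == function_prototype) return Result::kTrue;
    if (proto == nullptr) return Result::kFalse;  // hit null: not an instance
    object = proto;
  }
}

int main() {
  Obj proto;
  Obj mid{&proto};
  Obj leaf{&mid};
  assert(HasInPrototypeChain(&leaf, &proto) == Result::kTrue);
  Obj other;
  assert(HasInPrototypeChain(&leaf, &other) == Result::kFalse);
  mid.is_proxy = true;
  assert(HasInPrototypeChain(&leaf, &proto) == Result::kRuntime);  // fallback
}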
@@ -1656,7 +1663,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// the runtime system.
__ Bind(&slow);
__ Push(key);
__ TailCallRuntime(Runtime::kArguments, 1, 1);
__ TailCallRuntime(Runtime::kArguments);
}
@@ -1687,7 +1694,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
__ Bind(&runtime);
__ Push(x1, x3, x2);
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
__ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1801,13 +1808,10 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// x11 sloppy_args_map offset to args (or aliased args) map (uninit)
// x14 arg_count number of function arguments
Register global_object = x10;
Register global_ctx = x10;
Register sloppy_args_map = x11;
Register aliased_args_map = x10;
__ Ldr(global_object, GlobalObjectMemOperand());
__ Ldr(global_ctx,
FieldMemOperand(global_object, JSGlobalObject::kNativeContextOffset));
__ Ldr(global_ctx, NativeContextMemOperand());
__ Ldr(sloppy_args_map,
ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
@@ -1965,7 +1969,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ Bind(&runtime);
__ Push(function, recv_arg, arg_count_smi);
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
__ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1981,7 +1985,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
// Everything is fine, call runtime.
__ Push(receiver, key);
__ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
__ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ Bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -2047,14 +2051,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current (native) context.
Register global_object = x10;
Register global_ctx = x10;
Register strict_args_map = x4;
__ Ldr(global_object, GlobalObjectMemOperand());
__ Ldr(global_ctx,
FieldMemOperand(global_object, JSGlobalObject::kNativeContextOffset));
__ Ldr(strict_args_map,
ContextMemOperand(global_ctx, Context::STRICT_ARGUMENTS_MAP_INDEX));
__ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX,
strict_args_map);
// x0 alloc_obj pointer to allocated objects: parameter array and
// arguments object
@@ -2118,13 +2117,61 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ Bind(&runtime);
__ Push(function, params, param_count_smi);
__ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
__ TailCallRuntime(Runtime::kNewStrictArguments);
}
void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// x2 : number of parameters (tagged)
// x3 : parameters pointer
// x4 : rest parameter index (tagged)
//
// Returns pointer to result object in x0.
DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
DCHECK(x3.is(RestParamAccessDescriptor::parameter_pointer()));
DCHECK(x4.is(RestParamAccessDescriptor::rest_parameter_index()));
// Get the stub arguments from the frame, and make an untagged copy of the
// parameter count.
Register rest_index_smi = x4;
Register param_count_smi = x2;
Register params = x3;
Register param_count = x13;
__ SmiUntag(param_count, param_count_smi);
// Test if arguments adaptor needed.
Register caller_fp = x11;
Register caller_ctx = x12;
Label runtime;
__ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ Ldr(caller_ctx,
MemOperand(caller_fp, StandardFrameConstants::kContextOffset));
__ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ B(ne, &runtime);
// x4 rest_index_smi index of rest parameter
// x2 param_count_smi number of parameters passed to function (smi)
// x3 params pointer to parameters
// x11 caller_fp caller's frame pointer
// x13 param_count number of parameters passed to function
// Patch the argument length and parameters pointer.
__ Ldr(param_count_smi,
MemOperand(caller_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(param_count, param_count_smi);
__ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
__ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
__ Bind(&runtime);
__ Push(param_count_smi, params, rest_index_smi);
__ TailCallRuntime(Runtime::kNewRestParam);
}
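// A C-level sketch (assumed frame layout) of the adaptor probe in
// RestParamAccessStub::GenerateNew above: when the caller sits behind an
// arguments adaptor frame, the real argument count comes from that frame,
// not from the formal parameter count handed to the stub.
#include <cassert>

struct CallerFrame {
  bool is_arguments_adaptor;
  int adaptor_arg_count;  // meaningful only when is_arguments_adaptor
};

int ActualParameterCount(const CallerFrame& caller, int formal_count) {
  // Mirrors the Cmp against Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR).
  return caller.is_arguments_adaptor ? caller.adaptor_arg_count : formal_count;
}

int main() {
  assert(ActualParameterCount({false, 0}, 2) == 2);  // ordinary caller
  assert(ActualParameterCount({true, 5}, 2) == 5);   // adaptor present
}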
void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
__ TailCallRuntime(Runtime::kRegExpExec);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2565,7 +2612,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ B(eq, &runtime);
// For exception, throw the exception again.
__ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
__ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ Bind(&failure);
__ Mov(x0, Operand(isolate()->factory()->null_value()));
@@ -2574,7 +2621,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Ret();
__ Bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
__ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -2622,25 +2669,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
Register argc, Register function,
Register feedback_vector, Register index,
Register orig_construct, bool is_super) {
Register new_target) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(argc);
if (is_super) {
__ Push(argc, function, feedback_vector, index, orig_construct);
} else {
__ Push(argc, function, feedback_vector, index);
}
__ Push(argc, function, feedback_vector, index);
DCHECK(feedback_vector.Is(x2) && index.Is(x3));
__ CallStub(stub);
if (is_super) {
__ Pop(orig_construct, index, feedback_vector, function, argc);
} else {
__ Pop(index, feedback_vector, function, argc);
}
__ Pop(index, feedback_vector, function, argc);
__ SmiUntag(argc);
}
@@ -2648,19 +2687,17 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
Register function,
Register feedback_vector, Register index,
Register orig_construct, Register scratch1,
Register scratch2, Register scratch3,
bool is_super) {
Register new_target, Register scratch1,
Register scratch2, Register scratch3) {
ASM_LOCATION("GenerateRecordCallTarget");
DCHECK(!AreAliased(scratch1, scratch2, scratch3, argc, function,
feedback_vector, index, orig_construct));
feedback_vector, index, new_target));
// Cache the called function in a feedback vector slot. Cache states are
// uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
// argc : number of arguments to the construct function
// function : the function to call
// feedback_vector : the feedback vector
// index : slot in feedback vector (smi)
// orig_construct : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@@ -2703,7 +2740,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
__ JumpIfNotRoot(feedback_map, Heap::kAllocationSiteMapRootIndex, &miss);
// Make sure the function is the Array() function
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
__ Cmp(function, scratch1);
__ B(ne, &megamorphic);
__ B(&done);
@@ -2727,7 +2764,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
__ Bind(&initialize);
// Make sure the function is the Array() function
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
__ Cmp(function, scratch1);
__ B(ne, &not_array_function);
@@ -2736,13 +2773,13 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub, argc, function,
feedback_vector, index, orig_construct, is_super);
feedback_vector, index, new_target);
__ B(&done);
__ Bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub, argc, function,
feedback_vector, index, orig_construct, is_super);
feedback_vector, index, new_target);
__ Bind(&done);
}
@@ -2753,7 +2790,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// x1 : the function to call
// x2 : feedback vector
// x3 : slot in feedback vector (Smi, for RecordCallTarget)
// x4 : original constructor (for IsSuperConstructorCall)
Register function = x1;
Label non_function;
@@ -2764,28 +2800,21 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
&non_function);
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11, x12,
IsSuperConstructorCall());
__ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into x2, or undefined.
__ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
__ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
__ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
&feedback_register_initialized);
__ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
__ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(x2, x5);
}
GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11, x12);
if (IsSuperConstructorCall()) {
__ Mov(x3, x4);
} else {
__ Mov(x3, function);
}
__ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into x2, or undefined.
__ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
__ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
__ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
&feedback_register_initialized);
__ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
__ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(x2, x5);
__ Mov(x3, function);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@@ -2811,7 +2840,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
Register allocation_site = x4;
Register scratch = x5;
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch);
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch);
__ Cmp(function, scratch);
__ B(ne, miss);
@@ -2828,9 +2857,9 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// Set up arguments for the array constructor stub.
Register allocation_site_arg = feedback_vector;
Register original_constructor_arg = index;
Register new_target_arg = index;
__ Mov(allocation_site_arg, allocation_site);
__ Mov(original_constructor_arg, function);
__ Mov(new_target_arg, function);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
}
@@ -2842,11 +2871,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// x1 - function
// x3 - slot id (Smi)
// x2 - vector
const int with_types_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
Label extra_checks_or_miss, call;
Label extra_checks_or_miss, call, call_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2890,9 +2915,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ Str(index, FieldMemOperand(feedback_vector, 0));
__ bind(&call);
__ Bind(&call_function);
__ Mov(x0, argc);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
@@ -2922,14 +2948,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
__ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
// We have to update statistics for runtime profiling.
__ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
__ Subs(x4, x4, Operand(Smi::FromInt(1)));
__ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
__ Ldr(x4, FieldMemOperand(feedback_vector, generic_offset));
__ Adds(x4, x4, Operand(Smi::FromInt(1)));
__ Str(x4, FieldMemOperand(feedback_vector, generic_offset));
__ B(&call);
__ Bind(&call);
__ Mov(x0, argc);
__ Jump(masm->isolate()->builtins()->Call(convert_mode()),
RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -2941,14 +2964,16 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Make sure the function is not the Array() function, which requires special
// behavior on MISS.
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, x5);
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, x5);
__ Cmp(function, x5);
__ B(eq, &miss);
// Update stats.
__ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
__ Adds(x4, x4, Operand(Smi::FromInt(1)));
__ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
// Make sure the function belongs to the same native context.
__ Ldr(x4, FieldMemOperand(function, JSFunction::kContextOffset));
__ Ldr(x4, ContextMemOperand(x4, Context::NATIVE_CONTEXT_INDEX));
__ Ldr(x5, NativeContextMemOperand());
__ Cmp(x4, x5);
__ B(ne, &miss);
// Initialize the call counter.
__ Mov(x5, Smi::FromInt(CallICNexus::kCallCountIncrement));
@@ -2968,7 +2993,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(function);
}
__ B(&call);
__ B(&call_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
@@ -2988,7 +3013,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(x1, x2, x3);
// Call the entry.
__ CallRuntime(Runtime::kCallIC_Miss, 3);
__ CallRuntime(Runtime::kCallIC_Miss);
// Move the result to x1 and exit the internal frame.
__ Mov(x1, x0);
@@ -3046,11 +3071,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Push(object_, index_);
}
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
} else {
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
__ CallRuntime(Runtime::kNumberToSmi, 1);
__ CallRuntime(Runtime::kNumberToSmi);
}
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
@@ -3078,7 +3103,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ SmiTag(index_);
__ Push(object_, index_);
__ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
__ CallRuntime(Runtime::kStringCharCodeAtRT);
__ Mov(result_, x0);
call_helper.AfterCall(masm);
__ B(&exit_);
@@ -3109,7 +3134,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ Bind(&slow_case_);
call_helper.BeforeCall(masm);
__ Push(code_);
__ CallRuntime(Runtime::kStringCharFromCode, 1);
__ CallRuntime(Runtime::kStringCharFromCode);
__ Mov(result_, x0);
call_helper.AfterCall(masm);
__ B(&exit_);
@@ -3127,7 +3152,7 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(x1, x2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(x0, x3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
if (op() != Token::EQ_STRICT && is_strong(strength())) {
__ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
__ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (!Token::IsEqualityOp(op())) {
__ Ldr(x1, FieldMemOperand(x1, Oddball::kToNumberOffset));
@@ -3381,9 +3406,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
__ Bind(&runtime);
__ Push(lhs, rhs);
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
__ TailCallRuntime(Runtime::kStringEquals);
} else {
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
__ TailCallRuntime(Runtime::kStringCompare);
}
__ Bind(&miss);
@@ -3391,9 +3416,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
}
void CompareICStub::GenerateObjects(MacroAssembler* masm) {
DCHECK(state() == CompareICState::OBJECT);
ASM_LOCATION("CompareICStub[Objects]");
void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
DCHECK_EQ(CompareICState::RECEIVER, state());
ASM_LOCATION("CompareICStub[Receivers]");
Label miss;
@@ -3403,10 +3428,11 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
__ JumpIfEitherSmi(rhs, lhs, &miss);
__ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss);
__ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
__ JumpIfObjectType(rhs, x10, x10, FIRST_JS_RECEIVER_TYPE, &miss, lt);
__ JumpIfObjectType(lhs, x10, x10, FIRST_JS_RECEIVER_TYPE, &miss, lt);
DCHECK(GetCondition() == eq);
DCHECK_EQ(eq, GetCondition());
__ Sub(result, rhs, lhs);
__ Ret();
@ -3415,8 +3441,8 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
}
void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
ASM_LOCATION("CompareICStub[KnownObjects]");
void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
ASM_LOCATION("CompareICStub[KnownReceivers]");
Label miss;
Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
@ -3442,7 +3468,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ Sub(result, rhs, lhs);
__ Ret();
} else if (is_strong(strength())) {
__ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
__ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
Register ncr = x2;
if (op() == Token::LT || op() == Token::LTE) {
@ -3451,7 +3477,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ Mov(ncr, Smi::FromInt(LESS));
}
__ Push(lhs, rhs, ncr);
__ TailCallRuntime(Runtime::kCompare, 3, 1);
__ TailCallRuntime(Runtime::kCompare);
}
__ Bind(&miss);
@ -3479,7 +3505,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(left, right, op);
// Call the miss handler. This also pops the arguments.
__ CallRuntime(Runtime::kCompareIC_Miss, 3);
__ CallRuntime(Runtime::kCompareIC_Miss);
// Compute the entry point of the rewritten stub.
__ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
@ -3725,7 +3751,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Ret();
__ Bind(&runtime);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
__ TailCallRuntime(Runtime::kSubString);
__ bind(&single_char);
// x1: result_length
@ -3771,7 +3797,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ Ret();
__ Bind(&slow_string);
__ Push(x0); // Push argument.
__ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
__ TailCallRuntime(Runtime::kStringToNumber);
__ Bind(&not_string);
Label not_oddball;
@ -3782,7 +3808,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ Bind(&not_oddball);
__ Push(x0); // Push argument.
__ TailCallRuntime(Runtime::kToNumber, 1, 1);
__ TailCallRuntime(Runtime::kToNumber);
}
@ -3797,7 +3823,7 @@ void ToLengthStub::Generate(MacroAssembler* masm) {
__ Bind(&not_smi);
__ Push(x0); // Push argument.
__ TailCallRuntime(Runtime::kToLength, 1, 1);
__ TailCallRuntime(Runtime::kToLength);
}
@ -3829,7 +3855,7 @@ void ToStringStub::Generate(MacroAssembler* masm) {
__ Bind(&not_oddball);
__ Push(x0); // Push argument.
__ TailCallRuntime(Runtime::kToString, 1, 1);
__ TailCallRuntime(Runtime::kToString);
}
@ -3973,7 +3999,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
__ Bind(&runtime);
__ Push(x1, x0);
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
__ TailCallRuntime(Runtime::kStringCompare);
}
@ -4115,12 +4141,12 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.address(), regs_.object());
__ EnsureNotWhite(val,
regs_.scratch1(), // Scratch.
regs_.object(), // Scratch.
regs_.address(), // Scratch.
regs_.scratch2(), // Scratch.
&need_incremental_pop_scratch);
__ JumpIfWhite(val,
regs_.scratch1(), // Scratch.
regs_.object(), // Scratch.
regs_.address(), // Scratch.
regs_.scratch2(), // Scratch.
&need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
regs_.Restore(masm); // Restore the extra scratch registers we used.
@ -4170,76 +4196,6 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
}
void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// x0 value element value to store
// x3 index_smi element index as smi
// sp[0] array_index_smi array literal index in function as smi
// sp[1] array array literal
Register value = x0;
Register index_smi = x3;
Register array = x1;
Register array_map = x2;
Register array_index_smi = x4;
__ PeekPair(array_index_smi, array, 0);
__ Ldr(array_map, FieldMemOperand(array, JSObject::kMapOffset));
Label double_elements, smi_element, fast_elements, slow_elements;
Register bitfield2 = x10;
__ Ldrb(bitfield2, FieldMemOperand(array_map, Map::kBitField2Offset));
// Jump if array's ElementsKind is not FAST*_SMI_ELEMENTS, FAST_ELEMENTS or
// FAST_HOLEY_ELEMENTS.
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
__ Cmp(bitfield2, Map::kMaximumBitField2FastHoleyElementValue);
__ B(hi, &double_elements);
__ JumpIfSmi(value, &smi_element);
// Jump if array's ElementsKind is not FAST_ELEMENTS or FAST_HOLEY_ELEMENTS.
__ Tbnz(bitfield2, MaskToBit(FAST_ELEMENTS << Map::ElementsKindBits::kShift),
&fast_elements);
// Storing into the array literal requires an elements transition. Call into
// the runtime.
__ Bind(&slow_elements);
__ Push(array, index_smi, value);
__ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Ldr(x11, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
__ Push(x11, array_index_smi);
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
// Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
__ Bind(&fast_elements);
__ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
__ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
__ Add(x11, x11, FixedArray::kHeaderSize - kHeapObjectTag);
__ Str(value, MemOperand(x11));
// Update the write barrier for the array store.
__ RecordWrite(x10, x11, value, kLRHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
// Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
// and value is Smi.
__ Bind(&smi_element);
__ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
__ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
__ Str(value, FieldMemOperand(x11, FixedArray::kHeaderSize));
__ Ret();
__ Bind(&double_elements);
__ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
__ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0,
&slow_elements);
__ Ret();
}
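Editor's note: the stub deleted above dispatched on the array's ElementsKind before storing. A standalone C++ sketch of that dispatch, with stand-in types rather than V8's (the kind constants mirror the STATIC_ASSERTs in the deleted body):

```cpp
#include <cstdint>

// Illustrative model only; not V8 code.
enum ElementsKind : uint8_t {
  FAST_SMI_ELEMENTS = 0,
  FAST_HOLEY_SMI_ELEMENTS = 1,
  FAST_ELEMENTS = 2,
  FAST_HOLEY_ELEMENTS = 3,  // anything above this range holds doubles
};

enum class StorePath { kSmi, kFast, kDouble, kSlow };

StorePath Classify(uint8_t kind, bool value_is_smi) {
  if (kind > FAST_HOLEY_ELEMENTS) return StorePath::kDouble;  // B(hi, &double_elements)
  if (value_is_smi) return StorePath::kSmi;                   // JumpIfSmi
  if (kind >= FAST_ELEMENTS) return StorePath::kFast;         // Tbnz on the FAST_ELEMENTS bit
  // Heap object into a FAST_*_SMI_ELEMENTS array: transition in the runtime.
  return StorePath::kSlow;
}
```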
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
@ -5230,12 +5186,12 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- x0 : argc (only if argument_count() is ANY or MORE_THAN_ONE)
// -- x1 : constructor
// -- x2 : AllocationSite or undefined
// -- x3 : original constructor
// -- x3 : new target
// -- sp[0] : last argument
// -----------------------------------
Register constructor = x1;
Register allocation_site = x2;
Register original_constructor = x3;
Register new_target = x3;
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
@ -5257,8 +5213,11 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(allocation_site, x10);
}
// Enter the context of the Array function.
__ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
Label subclassing;
__ Cmp(original_constructor, constructor);
__ Cmp(new_target, constructor);
__ B(ne, &subclassing);
Register kind = x3;
@ -5277,22 +5236,23 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing support.
__ Bind(&subclassing);
__ Push(constructor, original_constructor);
// Adjust argc.
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
__ add(x0, x0, Operand(2));
__ Poke(constructor, Operand(x0, LSL, kPointerSizeLog2));
__ Add(x0, x0, Operand(3));
break;
case NONE:
__ Mov(x0, Operand(2));
__ Poke(constructor, 0 * kPointerSize);
__ Mov(x0, Operand(3));
break;
case ONE:
__ Mov(x0, Operand(3));
__ Poke(constructor, 1 * kPointerSize);
__ Mov(x0, Operand(4));
break;
}
__ JumpToExternalReference(
ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
__ Push(new_target, allocation_site);
__ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
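Editor's note: the rewritten tail of ArrayConstructorStub splits on whether new.target is the Array function itself. A minimal sketch of that decision, with placeholder pointer types:

```cpp
// Minimal sketch, not V8 code: the fast path serves plain
// `new Array(...)`; subclass construction (new.target differs from the
// Array constructor) is handed to Runtime::kNewArray instead.
bool TakeFastArrayPath(const void* constructor, const void* new_target) {
  return new_target == constructor;  // Cmp(new_target, constructor); B(ne, &subclassing)
}
```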
@ -5408,7 +5368,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Bind(&slow_case);
__ SmiTag(slot);
__ Push(slot);
__ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
__ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}
@ -5528,8 +5488,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Push(slot, value);
__ TailCallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
: Runtime::kStoreGlobalViaContext_Sloppy,
2, 1);
: Runtime::kStoreGlobalViaContext_Sloppy);
}
@ -5682,7 +5641,7 @@ static void CallApiFunctionAndReturn(
// Re-throw by promoting a scheduled exception.
__ Bind(&promote_scheduled_exception);
__ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
__ TailCallRuntime(Runtime::kPromoteScheduledException);
// HandleScope limit has changed. Delete allocated extensions.
__ Bind(&delete_allocated_handles);

1
deps/v8/src/arm64/code-stubs-arm64.h

@ -131,6 +131,7 @@ class RecordWriteStub: public PlatformCodeStub {
static void Patch(Code* stub, Mode mode) {
// We are going to patch the two first instructions of the stub.
PatchingAssembler patcher(
stub->GetIsolate(),
reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
Instruction* instr1 = patcher.InstructionAt(0);
Instruction* instr2 = patcher.InstructionAt(kInstructionSize);

32
deps/v8/src/arm64/codegen-arm64.cc

@ -16,9 +16,9 @@ namespace internal {
#define __ ACCESS_MASM(masm)
#if defined(USE_SIMULATOR)
byte* fast_exp_arm64_machine_code = NULL;
double fast_exp_simulator(double x) {
Simulator * simulator = Simulator::current(Isolate::Current());
byte* fast_exp_arm64_machine_code = nullptr;
double fast_exp_simulator(double x, Isolate* isolate) {
Simulator * simulator = Simulator::current(isolate);
Simulator::CallArgument args[] = {
Simulator::CallArgument(x),
Simulator::CallArgument::End()
@ -28,19 +28,18 @@ double fast_exp_simulator(double x) {
#endif
UnaryMathFunction CreateExpFunction() {
if (!FLAG_fast_math) return &std::exp;
UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
// Use the Math.exp implementation in MathExpGenerator::EmitMathExp() to create
// an AAPCS64-compliant exp() function. This will be faster than the C
// library's exp() function, but probably less accurate.
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &std::exp;
if (buffer == nullptr) return nullptr;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
masm.SetStackPointer(csp);
// The argument will be in d0 on entry.
@ -64,11 +63,11 @@ UnaryMathFunction CreateExpFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunction>(buffer);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#else
fast_exp_arm64_machine_code = buffer;
return &fast_exp_simulator;
@ -76,8 +75,8 @@ UnaryMathFunction CreateExpFunction() {
}
UnaryMathFunction CreateSqrtFunction() {
return &std::sqrt;
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
}
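Editor's note: under the new contract both factories return nullptr instead of a libm fallback, which presumably moves the fallback to the call site. A hedged sketch of that caller-side pattern (the function-pointer type mirrors UnaryMathFunctionWithIsolate; the Isolate stand-in and caller are illustrative):

```cpp
#include <cmath>

struct Isolate;  // stand-in for v8::internal::Isolate
using UnaryMathFunctionWithIsolate = double (*)(double x, Isolate* isolate);

// Assumed caller-side pattern: use the generated code when the factory
// produced one, otherwise fall back to the C library.
double FastExp(UnaryMathFunctionWithIsolate fn, Isolate* isolate, double x) {
  return fn != nullptr ? fn(x, isolate) : std::exp(x);
}
```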
@ -368,12 +367,13 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
}
CodeAgingHelper::CodeAgingHelper() {
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// The sequence of instructions that is patched out for aging code is the
// following boilerplate stack-building prologue that is found both in
// FUNCTION and OPTIMIZED_FUNCTION code:
PatchingAssembler patcher(young_sequence_.start(),
PatchingAssembler patcher(isolate, young_sequence_.start(),
young_sequence_.length() / kInstructionSize);
// The young sequence is the frame setup code for FUNCTION code types. It is
// generated by FullCodeGenerator::Generate.
@ -382,7 +382,7 @@ CodeAgingHelper::CodeAgingHelper() {
#ifdef DEBUG
const int length = kCodeAgeStubEntryOffset / kInstructionSize;
DCHECK(old_sequence_.length() >= kCodeAgeStubEntryOffset);
PatchingAssembler patcher_old(old_sequence_.start(), length);
PatchingAssembler patcher_old(isolate, old_sequence_.start(), length);
MacroAssembler::EmitCodeAgeSequence(&patcher_old, NULL);
#endif
}
@ -417,7 +417,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
byte* sequence,
Code::Age age,
MarkingParity parity) {
PatchingAssembler patcher(sequence,
PatchingAssembler patcher(isolate, sequence,
kNoCodeAgeSequenceLength / kInstructionSize);
if (age == kNoAgeCodeAge) {
MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);

2
deps/v8/src/arm64/codegen-arm64.h

@ -5,7 +5,7 @@
#ifndef V8_ARM64_CODEGEN_ARM64_H_
#define V8_ARM64_CODEGEN_ARM64_H_
#include "src/ast.h"
#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {

8
deps/v8/src/arm64/constants-arm64.h

@ -9,11 +9,11 @@
#include "src/globals.h"
// Assert that this is an LP64 system.
STATIC_ASSERT(sizeof(int) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
STATIC_ASSERT(sizeof(int) == sizeof(int32_t));
STATIC_ASSERT(sizeof(long) == sizeof(int64_t)); // NOLINT(runtime/int)
STATIC_ASSERT(sizeof(void *) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
STATIC_ASSERT(sizeof(1) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
STATIC_ASSERT(sizeof(1L) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
STATIC_ASSERT(sizeof(void *) == sizeof(int64_t));
STATIC_ASSERT(sizeof(1) == sizeof(int32_t));
STATIC_ASSERT(sizeof(1L) == sizeof(int64_t));
// Get the standard printf format macros for C99 stdint types.

3
deps/v8/src/arm64/deoptimizer-arm64.cc

@ -49,7 +49,8 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address call_address = code_start_address + deopt_data->Pc(i)->value();
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
PatchingAssembler patcher(call_address, patch_size() / kInstructionSize);
PatchingAssembler patcher(isolate, call_address,
patch_size() / kInstructionSize);
patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
patcher.blr(ip0);
patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));

15
deps/v8/src/arm64/instructions-arm64.cc

@ -219,13 +219,13 @@ bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
}
void Instruction::SetImmPCOffsetTarget(Instruction* target) {
void Instruction::SetImmPCOffsetTarget(Isolate* isolate, Instruction* target) {
if (IsPCRelAddressing()) {
SetPCRelImmTarget(target);
SetPCRelImmTarget(isolate, target);
} else if (BranchType() != UnknownBranchType) {
SetBranchImmTarget(target);
} else if (IsUnresolvedInternalReference()) {
SetUnresolvedInternalReferenceImmTarget(target);
SetUnresolvedInternalReferenceImmTarget(isolate, target);
} else {
// Load literal (offset from PC).
SetImmLLiteral(target);
@ -233,7 +233,7 @@ void Instruction::SetImmPCOffsetTarget(Instruction* target) {
}
void Instruction::SetPCRelImmTarget(Instruction* target) {
void Instruction::SetPCRelImmTarget(Isolate* isolate, Instruction* target) {
// ADRP is not supported, so 'this' must point to an ADR instruction.
DCHECK(IsAdr());
@ -243,7 +243,7 @@ void Instruction::SetPCRelImmTarget(Instruction* target) {
imm = Assembler::ImmPCRelAddress(static_cast<int>(target_offset));
SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
} else {
PatchingAssembler patcher(this,
PatchingAssembler patcher(isolate, this,
PatchingAssembler::kAdrFarPatchableNInstrs);
patcher.PatchAdrFar(target_offset);
}
@ -284,7 +284,8 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
}
void Instruction::SetUnresolvedInternalReferenceImmTarget(Instruction* target) {
void Instruction::SetUnresolvedInternalReferenceImmTarget(Isolate* isolate,
Instruction* target) {
DCHECK(IsUnresolvedInternalReference());
DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
DCHECK(is_int32(DistanceTo(target) >> kInstructionSizeLog2));
@ -293,7 +294,7 @@ void Instruction::SetUnresolvedInternalReferenceImmTarget(Instruction* target) {
uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);
PatchingAssembler patcher(this, 2);
PatchingAssembler patcher(isolate, this, 2);
patcher.brk(high16);
patcher.brk(low16);
}

7
deps/v8/src/arm64/instructions-arm64.h

@ -373,8 +373,9 @@ class Instruction {
bool IsTargetInImmPCOffsetRange(Instruction* target);
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
void SetImmPCOffsetTarget(Instruction* target);
void SetUnresolvedInternalReferenceImmTarget(Instruction* target);
void SetImmPCOffsetTarget(Isolate* isolate, Instruction* target);
void SetUnresolvedInternalReferenceImmTarget(Isolate* isolate,
Instruction* target);
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(Instruction* source);
@ -410,7 +411,7 @@ class Instruction {
static const int ImmPCRelRangeBitwidth = 21;
static bool IsValidPCRelOffset(ptrdiff_t offset) { return is_int21(offset); }
void SetPCRelImmTarget(Instruction* target);
void SetPCRelImmTarget(Isolate* isolate, Instruction* target);
void SetBranchImmTarget(Instruction* target);
};

67
deps/v8/src/arm64/interface-descriptors-arm64.cc

@ -65,6 +65,11 @@ const Register ArgumentsAccessNewDescriptor::parameter_count() { return x2; }
const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return x3; }
const Register RestParamAccessDescriptor::parameter_count() { return x2; }
const Register RestParamAccessDescriptor::parameter_pointer() { return x3; }
const Register RestParamAccessDescriptor::rest_parameter_index() { return x4; }
const Register ApiGetterDescriptor::function_address() { return x2; }
@ -129,9 +134,20 @@ void TypeofDescriptor::InitializePlatformSpecific(
}
void FastCloneRegExpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x3: closure
// x2: object literal index
// x1: constant properties
// x0: object literal flags
Register registers[] = {x3, x2, x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x3: array literals array
// x3: closure
// x2: array literal index
// x1: constant elements
Register registers[] = {x3, x2, x1};
@ -141,7 +157,7 @@ void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x3: object literals array
// x3: closure
// x2: object literal index
// x1: constant properties
// x0: object literal flags
@ -204,7 +220,7 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// x1 : the function to call
// x2 : feedback vector
// x3 : slot in feedback vector (Smi, for RecordCallTarget)
// x4 : original constructor (for IsSuperConstructorCall)
// x4 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {x0, x1, x4, x2};
@ -221,6 +237,27 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
}
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x3: new target
// x1: target
// x0: number of arguments
// x2: allocation site or undefined
Register registers[] = {x1, x3, x0, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x3: new target
// x1: target
// x0: number of arguments
Register registers[] = {x1, x3, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: length
@ -387,6 +424,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
Register registers[] = {
x1, // JSFunction
x3, // the new target
x0, // actual number of arguments
x2, // expected number of arguments
};
@ -428,27 +466,6 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
x1, // math rounding function
x3, // vector slot id
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void MathRoundVariantCallFromOptimizedCodeDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
Register registers[] = {
x1, // math rounding function
x3, // vector slot id
x4, // type vector
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@ -464,7 +481,7 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
x0, // argument count (not including receiver)
x3, // original constructor
x3, // new target
x1, // constructor to call
x2 // address of the first argument
};

34
deps/v8/src/arm64/macro-assembler-arm64-inl.h

@ -1434,32 +1434,6 @@ void MacroAssembler::IsObjectNameType(Register object,
}
void MacroAssembler::IsObjectJSObjectType(Register heap_object,
Register map,
Register scratch,
Label* fail) {
Ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
IsInstanceJSObjectType(map, scratch, fail);
}
void MacroAssembler::IsInstanceJSObjectType(Register map,
Register scratch,
Label* fail) {
Ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
// If cmp result is lt, the following ccmp will clear all flags.
// Z == 0, N == V implies gt condition.
Cmp(scratch, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
Ccmp(scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE, NoFlag, ge);
// If we didn't get a valid label object just fall through and leave the
// flags updated.
if (fail != NULL) {
B(gt, fail);
}
}
void MacroAssembler::IsObjectJSStringType(Register object,
Register type,
Label* not_string,
@ -1488,7 +1462,8 @@ void MacroAssembler::Push(Handle<Object> handle) {
}
void MacroAssembler::Claim(uint64_t count, uint64_t unit_size) {
void MacroAssembler::Claim(int64_t count, uint64_t unit_size) {
DCHECK(count >= 0);
uint64_t size = count * unit_size;
if (size == 0) {
@ -1516,6 +1491,7 @@ void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
return;
}
AssertPositiveOrZero(count);
if (!csp.Is(StackPointer())) {
BumpSystemStackPointer(size);
}
@ -1543,7 +1519,8 @@ void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
}
void MacroAssembler::Drop(uint64_t count, uint64_t unit_size) {
void MacroAssembler::Drop(int64_t count, uint64_t unit_size) {
DCHECK(count >= 0);
uint64_t size = count * unit_size;
if (size == 0) {
@ -1574,6 +1551,7 @@ void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
return;
}
AssertPositiveOrZero(count);
Add(StackPointer(), StackPointer(), size);
if (!csp.Is(StackPointer()) && emit_debug_code()) {
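Editor's note: Claim and Drop now take a signed count guarded by a DCHECK rather than an unsigned count. A short sketch of why (names and unit size illustrative):

```cpp
#include <cassert>
#include <cstdint>

// Sketch only: with an unsigned parameter a negative count silently
// wraps to an enormous size; with int64_t the debug assertion catches
// the bug at the call site.
void Claim(int64_t count, uint64_t unit_size = 8 /* kXRegSize */) {
  assert(count >= 0);  // DCHECK(count >= 0)
  uint64_t size = static_cast<uint64_t>(count) * unit_size;
  (void)size;  // ...the real code then moves the stack pointer by `size` bytes...
}
```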

605
deps/v8/src/arm64/macro-assembler-arm64.cc

@ -22,9 +22,9 @@ namespace internal {
#define __
MacroAssembler::MacroAssembler(Isolate* arg_isolate,
byte * buffer,
unsigned buffer_size)
MacroAssembler::MacroAssembler(Isolate* arg_isolate, byte* buffer,
unsigned buffer_size,
CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, buffer_size),
generating_stub_(false),
#if DEBUG
@ -35,7 +35,7 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate,
sp_(jssp),
tmp_list_(DefaultTmpList()),
fptmp_list_(DefaultFPTmpList()) {
if (isolate() != NULL) {
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
@ -1343,6 +1343,8 @@ void MacroAssembler::AssertStackConsistency() {
// Avoid generating AssertStackConsistency checks for the Push in Abort.
{ DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
// Restore StackPointer().
sub(StackPointer(), csp, StackPointer());
Abort(kTheCurrentStackPointerIsBelowCsp);
}
@ -1626,6 +1628,19 @@ void MacroAssembler::AssertFunction(Register object) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
AssertNotSmi(object, kOperandIsASmiAndNotABoundFunction);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
CompareObjectType(object, temp, temp, JS_BOUND_FUNCTION_TYPE);
Check(eq, kOperandIsNotABoundFunction);
}
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@ -1654,6 +1669,17 @@ void MacroAssembler::AssertString(Register object) {
}
void MacroAssembler::AssertPositiveOrZero(Register value) {
if (emit_debug_code()) {
Label done;
int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
Tbz(value, sign_bit, &done);
Abort(kUnexpectedNegativeValue);
Bind(&done);
}
}
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
@ -1701,62 +1727,30 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
}
void MacroAssembler::GetBuiltinFunction(Register target,
int native_context_index) {
// Load the builtins object into target register.
Ldr(target, GlobalObjectMemOperand());
Ldr(target, FieldMemOperand(target, JSGlobalObject::kNativeContextOffset));
// Load the JavaScript builtin function from the builtins object.
Ldr(target, ContextMemOperand(target, native_context_index));
}
void MacroAssembler::GetBuiltinEntry(Register target, Register function,
int native_context_index) {
DCHECK(!AreAliased(target, function));
GetBuiltinFunction(function, native_context_index);
// Load the code entry point from the builtins object.
Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}
void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper) {
ASM_LOCATION("MacroAssembler::InvokeBuiltin");
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
// Get the builtin entry in x2 and setup the function object in x1.
GetBuiltinEntry(x2, x1, native_context_index);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(x2));
Call(x2);
call_wrapper.AfterCall();
} else {
DCHECK(flag == JUMP_FUNCTION);
Jump(x2);
}
}
void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
int num_arguments,
int result_size) {
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
Mov(x0, num_arguments);
JumpToExternalReference(ext);
// Fake a parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
LoadNativeContextSlot(native_context_index, x1);
InvokeFunctionCode(x1, no_reg, expected, expected, flag, call_wrapper);
}
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
TailCallExternalReference(ExternalReference(fid, isolate()),
num_arguments,
result_size);
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
if (function->nargs >= 0) {
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
Mov(x0, function->nargs);
}
JumpToExternalReference(ExternalReference(fid, isolate()));
}
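Editor's note: this hunk is why every CallRuntime/TailCallRuntime call site earlier in the diff dropped its argument count — arity now comes from the runtime function table. A standalone model of that lookup (table contents illustrative, not V8's):

```cpp
#include <cassert>

// Standalone model: each runtime function records its own arity, so
// the stub loads x0 from the table instead of trusting the caller.
struct RuntimeFunction {
  int nargs;        // -1 means variable arity: x0 is left untouched
  int result_size;  // the new code asserts this is 1
};

constexpr RuntimeFunction kRuntimeTable[] = {
    {1, 1},   // a one-argument entry, e.g. a ToNumber-style function
    {2, 1},   // a two-argument entry, e.g. a StringCompare-style function
    {-1, 1},  // a variable-arity entry
};

int ArgCountToMaterialize(int fid) {
  const RuntimeFunction& f = kRuntimeTable[fid];
  assert(f.result_size == 1);  // DCHECK_EQ(1, function->result_size)
  return f.nargs;              // >= 0: emit Mov(x0, nargs) before the jump
}
```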
@ -2153,152 +2147,6 @@ void MacroAssembler::ClampDoubleToUint8(Register output,
}
void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
Register src,
unsigned count,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Register scratch5) {
// Untag src and dst into scratch registers.
// Copy src->dst in a tight loop.
DCHECK(!AreAliased(dst, src,
scratch1, scratch2, scratch3, scratch4, scratch5));
DCHECK(count >= 2);
const Register& remaining = scratch3;
Mov(remaining, count / 2);
const Register& dst_untagged = scratch1;
const Register& src_untagged = scratch2;
Sub(dst_untagged, dst, kHeapObjectTag);
Sub(src_untagged, src, kHeapObjectTag);
// Copy fields in pairs.
Label loop;
Bind(&loop);
Ldp(scratch4, scratch5,
MemOperand(src_untagged, kXRegSize* 2, PostIndex));
Stp(scratch4, scratch5,
MemOperand(dst_untagged, kXRegSize* 2, PostIndex));
Sub(remaining, remaining, 1);
Cbnz(remaining, &loop);
// Handle the leftovers.
if (count & 1) {
Ldr(scratch4, MemOperand(src_untagged));
Str(scratch4, MemOperand(dst_untagged));
}
}
void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
Register src,
unsigned count,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4) {
// Untag src and dst into scratch registers.
// Copy src->dst in an unrolled loop.
DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
const Register& dst_untagged = scratch1;
const Register& src_untagged = scratch2;
sub(dst_untagged, dst, kHeapObjectTag);
sub(src_untagged, src, kHeapObjectTag);
// Copy fields in pairs.
for (unsigned i = 0; i < count / 2; i++) {
Ldp(scratch3, scratch4, MemOperand(src_untagged, kXRegSize * 2, PostIndex));
Stp(scratch3, scratch4, MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
}
// Handle the leftovers.
if (count & 1) {
Ldr(scratch3, MemOperand(src_untagged));
Str(scratch3, MemOperand(dst_untagged));
}
}
void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
Register src,
unsigned count,
Register scratch1,
Register scratch2,
Register scratch3) {
// Untag src and dst into scratch registers.
// Copy src->dst in an unrolled loop.
DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3));
const Register& dst_untagged = scratch1;
const Register& src_untagged = scratch2;
Sub(dst_untagged, dst, kHeapObjectTag);
Sub(src_untagged, src, kHeapObjectTag);
// Copy fields one by one.
for (unsigned i = 0; i < count; i++) {
Ldr(scratch3, MemOperand(src_untagged, kXRegSize, PostIndex));
Str(scratch3, MemOperand(dst_untagged, kXRegSize, PostIndex));
}
}
void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps,
unsigned count) {
// One of two methods is used:
//
// For high 'count' values where many scratch registers are available:
// Untag src and dst into scratch registers.
// Copy src->dst in a tight loop.
//
// For low 'count' values or where few scratch registers are available:
// Untag src and dst into scratch registers.
// Copy src->dst in an unrolled loop.
//
// In both cases, fields are copied in pairs if possible, and left-overs are
// handled separately.
DCHECK(!AreAliased(dst, src));
DCHECK(!temps.IncludesAliasOf(dst));
DCHECK(!temps.IncludesAliasOf(src));
DCHECK(!temps.IncludesAliasOf(xzr));
if (emit_debug_code()) {
Cmp(dst, src);
Check(ne, kTheSourceAndDestinationAreTheSame);
}
// The value of 'count' at which a loop will be generated (if there are
// enough scratch registers).
static const unsigned kLoopThreshold = 8;
UseScratchRegisterScope masm_temps(this);
if ((temps.Count() >= 3) && (count >= kLoopThreshold)) {
CopyFieldsLoopPairsHelper(dst, src, count,
Register(temps.PopLowestIndex()),
Register(temps.PopLowestIndex()),
Register(temps.PopLowestIndex()),
masm_temps.AcquireX(),
masm_temps.AcquireX());
} else if (temps.Count() >= 2) {
CopyFieldsUnrolledPairsHelper(dst, src, count,
Register(temps.PopLowestIndex()),
Register(temps.PopLowestIndex()),
masm_temps.AcquireX(),
masm_temps.AcquireX());
} else if (temps.Count() == 1) {
CopyFieldsUnrolledHelper(dst, src, count,
Register(temps.PopLowestIndex()),
masm_temps.AcquireX(),
masm_temps.AcquireX());
} else {
UNREACHABLE();
}
}
void MacroAssembler::CopyBytes(Register dst,
Register src,
Register length,
@ -2354,38 +2202,35 @@ void MacroAssembler::CopyBytes(Register dst,
}
void MacroAssembler::FillFields(Register dst,
Register field_count,
Register filler) {
DCHECK(!dst.Is(csp));
void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Register end_address,
Register filler) {
DCHECK(!current_address.Is(csp));
UseScratchRegisterScope temps(this);
Register field_ptr = temps.AcquireX();
Register counter = temps.AcquireX();
Register distance_in_words = temps.AcquireX();
Label done;
// Decrement count. If the result < zero, count was zero, and there's nothing
// to do. If count was one, flags are set to fail the gt condition at the end
// of the pairs loop.
Subs(counter, field_count, 1);
B(lt, &done);
// Calculate the distance. If it's <= zero then there's nothing to do.
Subs(distance_in_words, end_address, current_address);
B(le, &done);
// There's at least one field to fill, so do this unconditionally.
Str(filler, MemOperand(dst, kPointerSize, PostIndex));
Str(filler, MemOperand(current_address));
// If the bottom bit of counter is set, there are an even number of fields to
// fill, so pull the start pointer back by one field, allowing the pairs loop
// to overwrite the field that was stored above.
And(field_ptr, counter, 1);
Sub(field_ptr, dst, Operand(field_ptr, LSL, kPointerSizeLog2));
// If distance_in_words is an odd number of words, we advance
// current_address by one word; otherwise the pairs loop will overwrite
// the field that was stored above.
And(distance_in_words, distance_in_words, kPointerSize);
Add(current_address, current_address, distance_in_words);
// Store filler to memory in pairs.
Label entry, loop;
Label loop, entry;
B(&entry);
Bind(&loop);
Stp(filler, filler, MemOperand(field_ptr, 2 * kPointerSize, PostIndex));
Subs(counter, counter, 2);
Stp(filler, filler, MemOperand(current_address, 2 * kPointerSize, PostIndex));
Bind(&entry);
B(gt, &loop);
Cmp(current_address, end_address);
B(lo, &loop);
Bind(&done);
}
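Editor's note: a C-level model of the new fill loop — one unconditional store, an optional one-word advance when the span is odd, then paired stores until the end address is reached (standalone sketch, not V8 code):

```cpp
#include <cstdint>

// Mirrors the assembly above on a plain array: the pair loop either
// re-covers the first word (even span) or starts just past it (odd span).
void InitializeFieldsWithFiller(uint64_t* current, uint64_t* end,
                                uint64_t filler) {
  if (end <= current) return;          // Subs ... B(le, &done)
  *current = filler;                   // unconditional first store
  if ((end - current) & 1) ++current;  // odd word count: skip the stored word
  while (current < end) {              // Stp(filler, filler, ...) pairs
    current[0] = filler;
    current[1] = filler;
    current += 2;
  }
}
```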
@ -2481,8 +2326,6 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
Register code_reg,
Label* done,
InvokeFlag flag,
bool* definitely_mismatches,
@ -2502,7 +2345,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// passed in registers.
DCHECK(actual.is_immediate() || actual.reg().is(x0));
DCHECK(expected.is_immediate() || expected.reg().is(x2));
DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@ -2537,11 +2379,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// If the argument counts may mismatch, generate a call to the argument
// adaptor.
if (!definitely_matches) {
if (!code_constant.is_null()) {
Mov(x3, Operand(code_constant));
Add(x3, x3, Code::kHeaderSize - kHeapObjectTag);
}
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
@ -2550,7 +2387,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
call_wrapper.AfterCall();
if (!*definitely_mismatches) {
// If the arg counts don't match, no extra code is emitted by
// MAsm::InvokeCode and we can just fall through.
// MAsm::InvokeFunctionCode and we can just fall through.
B(done);
}
} else {
@ -2561,24 +2398,80 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_flooding;
ExternalReference step_in_enabled =
ExternalReference::debug_step_in_enabled_address(isolate());
Mov(x4, Operand(step_in_enabled));
ldrb(x4, MemOperand(x4));
CompareAndBranch(x4, Operand(0), eq, &skip_flooding);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
if (expected.is_reg()) {
SmiTag(expected.reg());
Push(expected.reg());
}
if (actual.is_reg()) {
SmiTag(actual.reg());
Push(actual.reg());
}
if (new_target.is_valid()) {
Push(new_target);
}
Push(fun);
Push(fun);
CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
}
if (actual.is_reg()) {
Pop(actual.reg());
SmiUntag(actual.reg());
}
if (expected.is_reg()) {
Pop(expected.reg());
SmiUntag(expected.reg());
}
}
bind(&skip_flooding);
}
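Editor's note: observe the double Push(fun) — the first copy survives the runtime call and the second is consumed as its argument. A toy model of the save/call/restore discipline (illustrative only, not V8's stack handling):

```cpp
#include <cstdint>
#include <vector>

// Toy model: values saved before the call are popped in reverse order
// afterwards; the argument copy of `fun` is consumed by the call.
uint64_t CallPreparedStepIn(std::vector<uint64_t>& stack, uint64_t fun,
                            uint64_t (*runtime)(uint64_t)) {
  stack.push_back(fun);  // Push(fun) -- preserved copy
  stack.push_back(fun);  // Push(fun) -- argument copy
  uint64_t arg = stack.back();
  stack.pop_back();      // consumed by CallRuntime(..., 1)
  runtime(arg);
  fun = stack.back();
  stack.pop_back();      // Pop(fun)  -- restore
  return fun;
}
```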
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
DCHECK(function.is(x1));
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3));
Label done;
FloodFunctionIfStepping(function, new_target, expected, actual);
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
LoadRoot(x3, Heap::kUndefinedValueRootIndex);
}
Label done;
bool definitely_mismatches = false;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
&definitely_mismatches, call_wrapper);
InvokePrologue(expected, actual, &done, flag, &definitely_mismatches,
call_wrapper);
// If we are certain that actual != expected, then we know InvokePrologue will
// have handled the call through the argument adaptor mechanism.
// The called function expects the call kind in x5.
if (!definitely_mismatches) {
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Register code = x4;
Ldr(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
Call(code);
@ -2596,6 +2489,7 @@ void MacroAssembler::InvokeCode(Register code,
void MacroAssembler::InvokeFunction(Register function,
Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@ -2607,7 +2501,6 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK(function.is(x1));
Register expected_reg = x2;
Register code_reg = x3;
Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
// The number of arguments is stored as an int32_t, and -1 is a marker
@ -2618,11 +2511,10 @@ void MacroAssembler::InvokeFunction(Register function,
Ldrsw(expected_reg,
FieldMemOperand(expected_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
Ldr(code_reg,
FieldMemOperand(function, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
InvokeCode(code_reg, expected, actual, flag, call_wrapper);
InvokeFunctionCode(function, new_target, expected, actual, flag,
call_wrapper);
}
@ -2638,16 +2530,10 @@ void MacroAssembler::InvokeFunction(Register function,
// (See FullCodeGenerator::Generate().)
DCHECK(function.Is(x1));
Register code_reg = x3;
// Set up the context.
Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
InvokeCode(code_reg, expected, actual, flag, call_wrapper);
InvokeFunctionCode(function, no_reg, expected, actual, flag, call_wrapper);
}
@ -2760,14 +2646,13 @@ void MacroAssembler::TruncateHeapNumberToI(Register result,
void MacroAssembler::StubPrologue() {
DCHECK(StackPointer().Is(jssp));
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
__ Mov(temp, Smi::FromInt(StackFrame::STUB));
// Compiled stubs don't age, and so they don't need the predictable code
// ageing sequence.
__ Push(lr, fp, cp, temp);
__ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
__ Add(fp, StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
}
@ -3000,12 +2885,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
void MacroAssembler::LoadGlobalProxy(Register dst) {
Ldr(dst, GlobalObjectMemOperand());
Ldr(dst, FieldMemOperand(dst, JSGlobalObject::kGlobalProxyOffset));
}
void MacroAssembler::DebugBreak() {
Mov(x0, 0);
Mov(x1, ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
@ -3084,23 +2963,24 @@ void MacroAssembler::Allocate(int object_size,
intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
// Set up allocation top address and object size registers.
// Set up allocation top address and allocation limit registers.
Register top_address = scratch1;
Register allocation_limit = scratch2;
Register alloc_limit = scratch2;
Register result_end = scratch3;
Mov(top_address, Operand(heap_allocation_top));
if ((flags & RESULT_CONTAINS_TOP) == 0) {
// Load allocation top into result and the allocation limit.
Ldp(result, allocation_limit, MemOperand(top_address));
// Load allocation top into result and allocation limit into alloc_limit.
Ldp(result, alloc_limit, MemOperand(top_address));
} else {
if (emit_debug_code()) {
// Assert that result actually contains top on entry.
Ldr(scratch3, MemOperand(top_address));
Cmp(result, scratch3);
Ldr(alloc_limit, MemOperand(top_address));
Cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
// Load the allocation limit. 'result' already contains the allocation top.
Ldr(allocation_limit, MemOperand(top_address, limit - top));
// Load allocation limit. Result already contains allocation top.
Ldr(alloc_limit, MemOperand(top_address, limit - top));
}
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
@ -3108,10 +2988,10 @@ void MacroAssembler::Allocate(int object_size,
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
// Calculate new top and bail out if new space is exhausted.
Adds(scratch3, result, object_size);
Ccmp(scratch3, allocation_limit, CFlag, cc);
Adds(result_end, result, object_size);
Ccmp(result_end, alloc_limit, CFlag, cc);
B(hi, gc_required);
Str(scratch3, MemOperand(top_address));
Str(result_end, MemOperand(top_address));
// Tag the object if requested.
if ((flags & TAG_OBJECT) != 0) {
@ -3120,30 +3000,29 @@ void MacroAssembler::Allocate(int object_size,
}
void MacroAssembler::Allocate(Register object_size,
Register result,
Register scratch1,
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
void MacroAssembler::Allocate(Register object_size, Register result,
Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags) {
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
// We apply salt to the original zap value to easily spot the values.
Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
Mov(scratch, (kDebugZapValue & ~0xffL) | 0x21L);
Mov(result_end, (kDebugZapValue & ~0xffL) | 0x21L);
}
B(gc_required);
return;
}
UseScratchRegisterScope temps(this);
Register scratch3 = temps.AcquireX();
Register scratch2 = temps.AcquireX();
DCHECK(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
DCHECK(object_size.Is64Bits() && result.Is64Bits() &&
scratch1.Is64Bits() && scratch2.Is64Bits());
// |object_size| and |result_end| may overlap; other registers must not.
DCHECK(!AreAliased(object_size, result, scratch, scratch2));
DCHECK(!AreAliased(result_end, result, scratch, scratch2));
DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch.Is64Bits() &&
result_end.Is64Bits());
// Check relative positions of allocation top and limit addresses.
// The values must be adjacent in memory to allow the use of LDP.
@ -3155,23 +3034,23 @@ void MacroAssembler::Allocate(Register object_size,
intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
// Set up allocation top address and object size registers.
Register top_address = scratch1;
Register allocation_limit = scratch2;
// Set up allocation top address and allocation limit registers.
Register top_address = scratch;
Register alloc_limit = scratch2;
Mov(top_address, heap_allocation_top);
if ((flags & RESULT_CONTAINS_TOP) == 0) {
// Load allocation top into result and the allocation limit.
Ldp(result, allocation_limit, MemOperand(top_address));
// Load allocation top into result and allocation limit into alloc_limit.
Ldp(result, alloc_limit, MemOperand(top_address));
} else {
if (emit_debug_code()) {
// Assert that result actually contains top on entry.
Ldr(scratch3, MemOperand(top_address));
Cmp(result, scratch3);
Ldr(alloc_limit, MemOperand(top_address));
Cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
// Load the allocation limit. 'result' already contains the allocation top.
Ldr(allocation_limit, MemOperand(top_address, limit - top));
// Load allocation limit. Result already contains allocation top.
Ldr(alloc_limit, MemOperand(top_address, limit - top));
}
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
@ -3180,19 +3059,19 @@ void MacroAssembler::Allocate(Register object_size,
// Calculate new top and bail out if new space is exhausted
if ((flags & SIZE_IN_WORDS) != 0) {
Adds(scratch3, result, Operand(object_size, LSL, kPointerSizeLog2));
Adds(result_end, result, Operand(object_size, LSL, kPointerSizeLog2));
} else {
Adds(scratch3, result, object_size);
Adds(result_end, result, object_size);
}
if (emit_debug_code()) {
Tst(scratch3, kObjectAlignmentMask);
Tst(result_end, kObjectAlignmentMask);
Check(eq, kUnalignedAllocationInNewSpace);
}
Ccmp(scratch3, allocation_limit, CFlag, cc);
Ccmp(result_end, alloc_limit, CFlag, cc);
B(hi, gc_required);
Str(scratch3, MemOperand(top_address));
Str(result_end, MemOperand(top_address));
// Tag the object if requested.
if ((flags & TAG_OBJECT) != 0) {
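Editor's note: both Allocate variants implement the same bump-pointer fast path. A standalone model (the adjacent top/limit words mirror the LDP requirement the code checks; tagging is left to the caller, as with the TAG_OBJECT flag):

```cpp
#include <cstdint>

// Standalone model, not V8 code.
struct AllocationSpace {
  uintptr_t top;    // next free address; adjacent to `limit` so a
  uintptr_t limit;  // single LDP can load both (per the DCHECK above)
};

// Returns the object start, or 0 when the caller must branch to
// gc_required.
uintptr_t BumpAllocate(AllocationSpace* space, uintptr_t object_size) {
  uintptr_t result = space->top;
  uintptr_t result_end = result + object_size;  // Adds(result_end, result, ...)
  if (result_end > space->limit) return 0;      // Ccmp ... B(hi, gc_required)
  space->top = result_end;                      // Str(result_end, MemOperand(top_address))
  return result;
}
```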
@ -3390,6 +3269,28 @@ void MacroAssembler::JumpIfObjectType(Register object,
}
void MacroAssembler::AllocateJSValue(Register result, Register constructor,
Register value, Register scratch1,
Register scratch2, Label* gc_required) {
DCHECK(!result.is(constructor));
DCHECK(!result.is(scratch1));
DCHECK(!result.is(scratch2));
DCHECK(!result.is(value));
// Allocate JSValue in new space.
Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
// Initialize the JSValue.
LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
Str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
Str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
Str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
Str(value, FieldMemOperand(result, JSValue::kValueOffset));
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
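Editor's note: the four stores above pin down the wrapper layout that the trailing STATIC_ASSERT checks. A compile-time restatement with stand-in types:

```cpp
#include <cstdint>

// Stand-in layout, not V8's class: four tagged words, matching
// STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize).
struct JSValueLayout {
  uintptr_t map;         // HeapObject::kMapOffset
  uintptr_t properties;  // JSObject::kPropertiesOffset (empty fixed array)
  uintptr_t elements;    // JSObject::kElementsOffset (empty fixed array)
  uintptr_t value;       // JSValue::kValueOffset (the wrapped primitive)
};
static_assert(sizeof(JSValueLayout) == 4 * sizeof(uintptr_t),
              "JSValue is four pointer-sized fields");
```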
void MacroAssembler::JumpIfNotObjectType(Register object,
Register map,
Register type_reg,
@ -3779,11 +3680,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
#endif
// Load the native context of the current context.
int offset =
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
Ldr(scratch1, FieldMemOperand(scratch1, offset));
Ldr(scratch1,
FieldMemOperand(scratch1, JSGlobalObject::kNativeContextOffset));
Ldr(scratch1, ContextMemOperand(scratch1, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@ -4314,8 +4211,8 @@ void MacroAssembler::HasColor(Register object,
// These bit sequences are backwards. The first character in the string
// represents the least significant bit.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
// Check for the color.
if (first_bit == 0) {
@ -4343,8 +4240,8 @@ void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black) {
DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
}
@ -4380,21 +4277,18 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
}
void MacroAssembler::EnsureNotWhite(
Register value,
Register bitmap_scratch,
Register shift_scratch,
Register load_scratch,
Register length_scratch,
Label* value_is_white_and_not_data) {
void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
Register shift_scratch, Register load_scratch,
Register length_scratch,
Label* value_is_white) {
DCHECK(!AreAliased(
value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
// These bit sequences are backwards. The first character in the string
// represents the least significant bit.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
GetMarkBits(value, bitmap_scratch, shift_scratch);
Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
@ -4405,71 +4299,7 @@ void MacroAssembler::EnsureNotWhite(
// If the value is black or grey we don't need to do anything.
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
Label done;
Tbnz(load_scratch, 0, &done);
// Value is white. We check whether it is data that doesn't need scanning.
Register map = load_scratch; // Holds map while checking type.
Label is_data_object;
// Check for heap-number.
Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
Mov(length_scratch, HeapNumber::kSize);
JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);
// Check for strings.
DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
Register instance_type = load_scratch;
Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
TestAndBranchIfAnySet(instance_type,
kIsIndirectStringMask | kIsNotStringMask,
value_is_white_and_not_data);
// It's a non-indirect (non-cons and non-slice) string.
// If it's external, the length is just ExternalString::kSize.
// Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
// External strings are the only ones with the kExternalStringTag bit
// set.
DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
Mov(length_scratch, ExternalString::kSize);
TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);
// Sequential string, either Latin1 or UC16.
// For Latin1 (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
String::kLengthOffset));
Tst(instance_type, kStringEncodingMask);
Cset(load_scratch, eq);
Lsl(length_scratch, length_scratch, load_scratch);
Add(length_scratch,
length_scratch,
SeqString::kHeaderSize + kObjectAlignmentMask);
Bic(length_scratch, length_scratch, kObjectAlignmentMask);
Bind(&is_data_object);
// Value is a data object, and it is white. Mark it black. Since we know
// that the object is white we can make it black by flipping one bit.
Register mask = shift_scratch;
Mov(load_scratch, 1);
Lsl(mask, load_scratch, shift_scratch);
Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
Orr(load_scratch, load_scratch, mask);
Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
Bic(bitmap_scratch, bitmap_scratch, Page::kPageAlignmentMask);
Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
Add(load_scratch, load_scratch, length_scratch);
Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
Bind(&done);
Tbz(load_scratch, 0, value_is_white);
}
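Editor's note: the new bit patterns (written LSB-first in the comments) are what make the single-bit Tbz sufficient. A compile-time check of that claim, with the patterns as plain integers:

```cpp
#include <cstdint>

// LSB-first "00"/"10"/"11" from the comments, as integers.
constexpr uint32_t kWhiteBits = 0b00;
constexpr uint32_t kGreyBits  = 0b01;  // "10" read LSB-first
constexpr uint32_t kBlackBits = 0b11;  // "11"

// Grey and black both set bit 0, so white is the only pattern with it
// clear -- exactly what Tbz(load_scratch, 0, value_is_white) tests.
constexpr bool IsWhite(uint32_t bits) { return (bits & 1) == 0; }

static_assert(IsWhite(kWhiteBits) && !IsWhite(kGreyBits) &&
              !IsWhite(kBlackBits), "one bit distinguishes white");
```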
@ -4615,32 +4445,25 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Register scratch1,
Register scratch2,
Label* no_map_match) {
// Load the global or builtins object from the current context.
Ldr(scratch1, GlobalObjectMemOperand());
Ldr(scratch1,
FieldMemOperand(scratch1, JSGlobalObject::kNativeContextOffset));
DCHECK(IsFastElementsKind(expected_kind));
DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
int offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
Ldr(scratch2, FieldMemOperand(scratch1, offset));
Ldr(scratch1, NativeContextMemOperand());
Ldr(scratch2,
ContextMemOperand(scratch1, Context::ArrayMapIndex(expected_kind)));
Cmp(map_in_out, scratch2);
B(ne, no_map_match);
// Use the transitioned cached map.
offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
Ldr(map_in_out, FieldMemOperand(scratch1, offset));
Ldr(map_in_out,
ContextMemOperand(scratch1, Context::ArrayMapIndex(transitioned_kind)));
}
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
Ldr(function, GlobalObjectMemOperand());
// Load the native context from the global or builtins object.
Ldr(function,
FieldMemOperand(function, JSGlobalObject::kNativeContextOffset));
// Load the function from the native context.
Ldr(function, ContextMemOperand(function, index));
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
Ldr(dst, NativeContextMemOperand());
Ldr(dst, ContextMemOperand(dst, index));
}
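Editor's note: the replacement trims the old three-load walk (global object, then its native-context field, then the slot) down to two loads. A pointer-level sketch with stand-in types:

```cpp
#include <cstddef>

// Stand-in types, not V8's. NativeContextMemOperand reaches the native
// context directly, so a slot read is two dependent loads, not three.
struct NativeContext { const void* slots[256]; };
struct Context { const NativeContext* native_context; };

const void* LoadNativeContextSlot(const Context* cp, size_t index) {
  const NativeContext* native = cp->native_context;  // Ldr(dst, NativeContextMemOperand())
  return native->slots[index];                       // Ldr(dst, ContextMemOperand(dst, index))
}
```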

167
deps/v8/src/arm64/macro-assembler-arm64.h

@ -45,6 +45,7 @@ namespace internal {
#define kInterpreterBytecodeArrayRegister x20
#define kInterpreterDispatchTableRegister x21
#define kJavaScriptCallArgCountRegister x0
#define kJavaScriptCallNewTargetRegister x3
#define kRuntimeCallFunctionRegister x1
#define kRuntimeCallArgCountRegister x0
@ -145,7 +146,8 @@ enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };
class MacroAssembler : public Assembler {
public:
MacroAssembler(Isolate* isolate, byte * buffer, unsigned buffer_size);
MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size,
CodeObjectRequired create_code_object);
inline Handle<Object> CodeObject();
@ -722,10 +724,10 @@ class MacroAssembler : public Assembler {
//
// Note that unit_size must be specified in bytes. For variants which take a
// Register count, the unit size must be a power of two.
inline void Claim(uint64_t count, uint64_t unit_size = kXRegSize);
inline void Claim(int64_t count, uint64_t unit_size = kXRegSize);
inline void Claim(const Register& count,
uint64_t unit_size = kXRegSize);
inline void Drop(uint64_t count, uint64_t unit_size = kXRegSize);
inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
inline void Drop(const Register& count,
uint64_t unit_size = kXRegSize);
@ -893,6 +895,7 @@ class MacroAssembler : public Assembler {
// This is required for compatibility with architecture-independent code.
// Remove if not needed.
inline void Move(Register dst, Register src) { Mov(dst, src); }
inline void Move(Register dst, Smi* src) { Mov(dst, src); }
void LoadInstanceDescriptors(Register map,
Register descriptors);
@ -963,6 +966,10 @@ class MacroAssembler : public Assembler {
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
void AssertBoundFunction(Register object);
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@ -970,6 +977,10 @@ class MacroAssembler : public Assembler {
// Abort execution if argument is not a string, enabled via --debug-code.
void AssertString(Register object);
// Abort execution if argument is not a positive or zero integer, enabled via
// --debug-code.
void AssertPositiveOrZero(Register value);
void JumpIfHeapNumber(Register object, Label* on_heap_number,
SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
@@ -1027,22 +1038,11 @@ class MacroAssembler : public Assembler {
// ---- Object Utilities ----
// Copy fields from 'src' to 'dst', where both are tagged objects.
// The 'temps' list is a list of X registers which can be used for scratch
// values. The temps list must include at least one register.
//
// Currently, CopyFields cannot make use of more than three registers from
// the 'temps' list.
//
// CopyFields expects to be able to take at least two registers from
// MacroAssembler::TmpList().
void CopyFields(Register dst, Register src, CPURegList temps, unsigned count);
// Starting at address in dst, initialize field_count 64-bit fields with
// 64-bit value in register filler. Register dst is corrupted.
void FillFields(Register dst,
Register field_count,
Register filler);
// Initialize fields with filler values. Fields starting at |current_address|
// up to but not including |end_address| are overwritten with the value in
// |filler|. At the end of the loop, |current_address| takes the value of
// |end_address|.
void InitializeFieldsWithFiller(Register current_address,
Register end_address, Register filler);
// Copies a number of bytes from src to dst. All passed registers are
// clobbered. On exit src and dst will point to the place just after where the
@@ -1094,20 +1094,25 @@ class MacroAssembler : public Assembler {
int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size);
void TailCallRuntime(Runtime::FunctionId fid);
int ActivationFrameAlignment();
@@ -1127,12 +1132,8 @@ class MacroAssembler : public Assembler {
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
// Tail call of a runtime routine (jump).
// Like JumpToExternalReference, but also takes care of passing the number
// of parameters.
void TailCallExternalReference(const ExternalReference& ext,
int num_arguments,
int result_size);
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
int num_arguments);
@@ -1141,14 +1142,6 @@ class MacroAssembler : public Assembler {
void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
// Store the code object for the given builtin in the target register and
// set up the function in the function register.
void GetBuiltinEntry(Register target, Register function,
int native_context_index);
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, int native_context_index);
void Jump(Register target);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
@@ -1179,20 +1172,21 @@ class MacroAssembler : public Assembler {
// 'call_kind' must be x5.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
Register code_reg,
Label* done,
InvokeFlag flag,
bool* definitely_mismatches,
const CallWrapper& call_wrapper);
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper);
void FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual);
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
// Invoke the JavaScript function in the given register.
// Changes the current context to the context in the function before invoking.
void InvokeFunction(Register function,
Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper);
@@ -1297,12 +1291,8 @@ class MacroAssembler : public Assembler {
// If the new space is exhausted, control continues at the gc_required label.
// In this case, the result and scratch registers may still be clobbered.
// If flags includes TAG_OBJECT, the result is tagged as a heap object.
void Allocate(Register object_size,
Register result,
Register scratch1,
Register scratch2,
Label* gc_required,
AllocationFlags flags);
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
void Allocate(int object_size,
Register result,
@@ -1350,6 +1340,12 @@ class MacroAssembler : public Assembler {
CPURegister heap_number_map = NoReg,
MutableMode mode = IMMUTABLE);
// Allocate and initialize a JSValue wrapper with the specified {constructor}
// and {value}.
void AllocateJSValue(Register result, Register constructor, Register value,
Register scratch1, Register scratch2,
Label* gc_required);
// ---------------------------------------------------------------------------
// Support functions.
@@ -1485,20 +1481,6 @@ class MacroAssembler : public Assembler {
// Fall-through if the object was a string and jump on fail otherwise.
inline void IsObjectNameType(Register object, Register type, Label* fail);
inline void IsObjectJSObjectType(Register heap_object,
Register map,
Register scratch,
Label* fail);
// Check the instance type in the given map to see if it corresponds to a
// JS object type. Jump to the fail label if this is not the case and fall
// through otherwise. However, if the fail label is NULL, no branch will be
// performed and the flag will be updated. You can then test the flag for the
// "le" condition to check for a valid JS object type.
inline void IsInstanceJSObjectType(Register map,
Register scratch,
Label* fail);
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
// The object and type arguments can be the same register; in that case it
@@ -1688,8 +1670,15 @@ class MacroAssembler : public Assembler {
void LoadContext(Register dst, int context_chain_length);
// Load the global object from the current context.
void LoadGlobalObject(Register dst) {
LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
}
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst);
void LoadGlobalProxy(Register dst) {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
}
// Emit code for a truncating division by a constant. The dividend register is
// unchanged. Dividend and result must be different.
@@ -1825,23 +1814,10 @@ class MacroAssembler : public Assembler {
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
// Checks the color of an object. If the object is already grey or black
// then we just fall through, since it is already live. If it is white and
// we can determine that it doesn't need to be scanned, then we just mark it
// black and fall through. For the rest we jump to the label so the
// incremental marker can fix its assumptions.
void EnsureNotWhite(Register object,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Label* object_is_white_and_not_data);
// Detects conservatively whether an object is data-only, i.e. it does not
// need to be scanned by the garbage collector.
void JumpIfDataObject(Register value,
Register scratch,
Label* not_data_object);
// Checks the color of an object. If the object is white we jump to the
// incremental marker.
void JumpIfWhite(Register value, Register scratch1, Register scratch2,
Register scratch3, Register scratch4, Label* value_is_white);
// Helper for finding the mark bits for an address.
// Note that the behaviour slightly differs from other architectures.
@@ -1911,7 +1887,7 @@ class MacroAssembler : public Assembler {
Register scratch2,
Label* no_map_match);
void LoadGlobalFunction(int index, Register function);
void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers function and
// map can be the same, function is then overwritten.
@@ -2013,19 +1989,6 @@ class MacroAssembler : public Assembler {
void PopPostamble(int count, int size) { PopPostamble(count * size); }
private:
// Helpers for CopyFields.
// These each implement CopyFields in a different way.
void CopyFieldsLoopPairsHelper(Register dst, Register src, unsigned count,
Register scratch1, Register scratch2,
Register scratch3, Register scratch4,
Register scratch5);
void CopyFieldsUnrolledPairsHelper(Register dst, Register src, unsigned count,
Register scratch1, Register scratch2,
Register scratch3, Register scratch4);
void CopyFieldsUnrolledHelper(Register dst, Register src, unsigned count,
Register scratch1, Register scratch2,
Register scratch3);
// The actual Push and Pop implementations. These don't generate any code
// other than that required for the push or pop. This allows
// (Push|Pop)CPURegList to bundle together run-time assertions for a large
@@ -2229,8 +2192,8 @@ inline MemOperand ContextMemOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}
inline MemOperand GlobalObjectMemOperand() {
return ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX);
}
inline MemOperand NativeContextMemOperand() {
return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
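
Reviewer note: the CallRuntime hunks in this header add an overload that reads the argument count out of the Runtime::Function descriptor instead of requiring every caller to repeat it. A sketch of that convenience-overload pattern follows (illustrative C++ only; the table and names are hypothetical stand-ins):

#include <cstdio>

// Hypothetical descriptor, mirroring the role of Runtime::Function.
struct RuntimeFunction {
  const char* name;
  int nargs;
};

// Hypothetical id-indexed table of runtime functions.
static const RuntimeFunction kFunctions[] = {{"Add", 2}, {"TypeOf", 1}};

// Base form: caller supplies the argument count explicitly.
void CallRuntime(const RuntimeFunction* f, int num_arguments) {
  std::printf("call %s with %d args\n", f->name, num_arguments);
}

// Convenience overload: the count comes from the descriptor, so call
// sites can no longer pass a stale or mistyped number.
void CallRuntime(int fid) {
  const RuntimeFunction* f = &kFunctions[fid];
  CallRuntime(f, f->nargs);
}

int main() {
  CallRuntime(0);                  // count taken from the table
  CallRuntime(&kFunctions[1], 1);  // explicit count still available
}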

25
deps/v8/src/arm64/simulator-arm64.cc

@@ -462,13 +462,11 @@ void Simulator::RunFrom(Instruction* start) {
// offset from the svc instruction so the simulator knows what to call.
class Redirection {
public:
Redirection(void* external_function, ExternalReference::Type type)
: external_function_(external_function),
type_(type),
next_(NULL) {
Redirection(Isolate* isolate, void* external_function,
ExternalReference::Type type)
: external_function_(external_function), type_(type), next_(NULL) {
redirect_call_.SetInstructionBits(
HLT | Assembler::ImmException(kImmExceptionIsRedirectedCall));
Isolate* isolate = Isolate::Current();
next_ = isolate->simulator_redirection();
// TODO(all): Simulator flush I cache
isolate->set_simulator_redirection(this);
@@ -483,9 +481,8 @@ class Redirection {
ExternalReference::Type type() { return type_; }
static Redirection* Get(void* external_function,
static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
Isolate* isolate = Isolate::Current();
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) {
@@ -493,7 +490,7 @@ class Redirection {
return current;
}
}
return new Redirection(external_function, type);
return new Redirection(isolate, external_function, type);
}
static Redirection* FromHltInstruction(Instruction* redirect_call) {
@@ -748,9 +745,10 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
}
void* Simulator::RedirectExternalReference(void* external_function,
void* Simulator::RedirectExternalReference(Isolate* isolate,
void* external_function,
ExternalReference::Type type) {
Redirection* redirection = Redirection::Get(external_function, type);
Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address_of_redirect_call();
}
@@ -2761,7 +2759,7 @@ double Simulator::FPRoundInt(double value, FPRounding round_mode) {
// If the error is greater than 0.5, or is equal to 0.5 and the integer
// result is odd, round up.
} else if ((error > 0.5) ||
((error == 0.5) && (fmod(int_result, 2) != 0))) {
((error == 0.5) && (modulo(int_result, 2) != 0))) {
int_result++;
}
break;
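
Reviewer note: the FPRoundInt fragment above implements round-to-nearest with ties-to-even: on an exact 0.5 error it rounds up only when the truncated result is odd, and the change merely swaps libc fmod for V8's modulo helper. A self-contained sketch of the rule for non-negative inputs follows (illustrative C++, using std::fmod rather than that helper):

#include <cmath>
#include <cstdio>

// Round-to-nearest, ties-to-even, for non-negative inputs only: on an
// exact .5 error, round up just when the truncated result is odd.
static double RoundTiesEven(double value) {
  double int_result = std::trunc(value);
  double error = value - int_result;
  if ((error > 0.5) || ((error == 0.5) && (std::fmod(int_result, 2) != 0))) {
    int_result++;
  }
  return int_result;
}

int main() {
  std::printf("%.1f %.1f %.1f\n",
              RoundTiesEven(2.5),   // 2.0: tie lands on the even neighbour
              RoundTiesEven(3.5),   // 4.0: truncated result 3 is odd
              RoundTiesEven(2.6));  // 3.0: error above one half
}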
@@ -3107,7 +3105,8 @@ T Simulator::FPSqrt(T op) {
} else if (op < 0.0) {
return FPDefaultNaN<T>();
} else {
return fast_sqrt(op);
lazily_initialize_fast_sqrt(isolate_);
return fast_sqrt(op, isolate_);
}
}
@@ -3510,7 +3509,7 @@ void Simulator::Debug() {
reinterpret_cast<uint64_t>(cur), *cur, *cur);
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int64_t value = *cur;
Heap* current_heap = v8::internal::Isolate::Current()->heap();
Heap* current_heap = isolate_->heap();
if (((value & 1) == 0) || current_heap->Contains(obj)) {
PrintF(" (");
if ((value & kSmiTagMask) == 0) {
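Reviewer note: throughout this file the commit threads Isolate* explicitly into Redirection instead of fetching it with Isolate::Current(), so each redirection list stays per-isolate and no hidden global is consulted. A minimal sketch of that dependency-passing pattern follows (illustrative C++ only, not V8 code; all names are hypothetical):

#include <cstdio>

struct Isolate;  // hypothetical stand-in for v8::internal::Isolate

// Each Redirection links itself into a list owned by the isolate it is
// given; no Isolate::Current()-style global lookup is involved.
struct Redirection {
  Redirection(Isolate* isolate, void* fn) : fn_(fn) {
    next_ = head(isolate);
    head(isolate) = this;
  }
  static Redirection*& head(Isolate* isolate);
  void* fn_;
  Redirection* next_;
};

struct Isolate {
  Redirection* redirections = nullptr;
};

Redirection*& Redirection::head(Isolate* isolate) {
  return isolate->redirections;
}

int main() {
  Isolate a, b;  // two isolates no longer share one implicit list
  Redirection ra(&a, nullptr);
  Redirection rb(&b, nullptr);
  std::printf("a: %p  b: %p\n", (void*)a.redirections, (void*)b.redirections);
}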

46
deps/v8/src/arm64/simulator-arm64.h

@@ -24,7 +24,7 @@ namespace internal {
// Running without a simulator on a native ARM64 platform.
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
typedef int (*arm64_regexp_matcher)(String* input,
@@ -42,24 +42,29 @@ typedef int (*arm64_regexp_matcher)(String* input,
// should act as a function matching the type arm64_regexp_matcher.
// The ninth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
(FUNCTION_CAST<arm64_regexp_matcher>(entry)( \
p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
(FUNCTION_CAST<arm64_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
NULL, p8))
// Running without a simulator there is nothing to do.
class SimulatorStack : public v8::internal::AllStatic {
public:
static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
uintptr_t c_limit) {
uintptr_t c_limit) {
USE(isolate);
return c_limit;
}
static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
static uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
uintptr_t try_catch_address) {
USE(isolate);
return try_catch_address;
}
static void UnregisterCTryCatch() { }
static void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
USE(isolate);
}
};
#else // !defined(USE_SIMULATOR)
@@ -272,7 +277,8 @@ class Simulator : public DecoderVisitor {
void ResetState();
// Runtime call support.
static void* RedirectExternalReference(void* external_function,
static void* RedirectExternalReference(Isolate* isolate,
void* external_function,
ExternalReference::Type type);
void DoRuntimeCall(Instruction* instr);
@@ -871,15 +877,14 @@ class Simulator : public DecoderVisitor {
// When running with the simulator transition into simulated execution at this
// point.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->CallJS( \
FUNCTION_ADDR(entry), \
p0, p1, p2, p3, p4))
#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
reinterpret_cast<Object*>(Simulator::current(isolate)->CallJS( \
FUNCTION_ADDR(entry), p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
static_cast<int>( \
Simulator::current(Isolate::Current()) \
->CallRegExp(entry, p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
static_cast<int>(Simulator::current(isolate)->CallRegExp( \
entry, p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
// The simulator has its own stack. Thus it has a different stack limit from
@@ -893,13 +898,14 @@ class SimulatorStack : public v8::internal::AllStatic {
return Simulator::current(isolate)->StackLimit(c_limit);
}
static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
Simulator* sim = Simulator::current(Isolate::Current());
static uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
uintptr_t try_catch_address) {
Simulator* sim = Simulator::current(isolate);
return sim->PushAddress(try_catch_address);
}
static void UnregisterCTryCatch() {
Simulator::current(Isolate::Current())->PopAddress();
static void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
Simulator::current(isolate)->PopAddress();
}
};
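
Reviewer note: CALL_GENERATED_CODE and CALL_GENERATED_REGEXP_CODE now take the isolate as their first argument in both the native and the simulator expansions, so call sites look identical in either build. A sketch of the one-macro, two-expansions idea follows (illustrative C++ only; the simulator path is elided to a stub that is compiled only under USE_SIMULATOR):

#include <cstdio>

struct Isolate {};  // hypothetical context handle

#ifdef USE_SIMULATOR
// Simulator expansion would route through a per-isolate simulator object;
// SimulatedCall is a hypothetical stub, compiled only in simulator builds.
#define CALL_GENERATED_CODE(isolate, entry, p0) \
  SimulatedCall(isolate, entry, p0)
#else
// Native expansion: the isolate is accepted (and ignored) so that call
// sites have the same shape in both builds.
#define CALL_GENERATED_CODE(isolate, entry, p0) ((void)(isolate), entry(p0))
#endif

static int Square(int x) { return x * x; }

int main() {
  Isolate isolate;
  std::printf("%d\n", CALL_GENERATED_CODE(&isolate, Square, 7));  // 49
}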

68
deps/v8/src/assembler.cc

@@ -51,6 +51,7 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/ostreams.h"
#include "src/parsing/token.h"
#include "src/profiler/cpu-profiler.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
@@ -59,7 +60,6 @@
#include "src/runtime/runtime.h"
#include "src/simulator.h" // For flushing instruction cache.
#include "src/snapshot/serialize.h"
#include "src/token.h"
#if V8_TARGET_ARCH_IA32
#include "src/ia32/assembler-ia32-inl.h" // NOLINT
@@ -173,7 +173,8 @@ AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
// We may use the assembler without an isolate.
serializer_enabled_(isolate && isolate->serializer_enabled()),
constant_pool_available_(false) {
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
DCHECK_NOT_NULL(isolate);
if (FLAG_mask_constants_with_cookie) {
jit_cookie_ = isolate->random_number_generator()->NextInt();
}
own_buffer_ = buffer == NULL;
@@ -204,19 +205,6 @@ void AssemblerBase::FlushICache(Isolate* isolate, void* start, size_t size) {
}
void AssemblerBase::FlushICacheWithoutIsolate(void* start, size_t size) {
// Ideally we would just call Isolate::Current() here. However, this flushes
// out issues because we usually only need the isolate when in the simulator.
Isolate* isolate;
#if defined(USE_SIMULATOR)
isolate = Isolate::Current();
#else
isolate = nullptr;
#endif // USE_SIMULATOR
FlushICache(isolate, start, size);
}
void AssemblerBase::Print() {
OFStream os(stdout);
v8::internal::Disassembler::Decode(isolate(), &os, buffer_, pc_, nullptr);
@@ -520,8 +508,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
if (RelocInfo::IsComment(rmode)) {
WriteData(rinfo->data());
} else if (RelocInfo::IsConstPool(rmode) ||
RelocInfo::IsVeneerPool(rmode) ||
RelocInfo::IsDebugBreakSlotAtCall(rmode)) {
RelocInfo::IsVeneerPool(rmode)) {
WriteIntData(static_cast<int>(rinfo->data()));
}
}
@@ -712,8 +699,7 @@ void RelocIterator::next() {
Advance(kIntSize);
}
} else if (RelocInfo::IsConstPool(rmode) ||
RelocInfo::IsVeneerPool(rmode) ||
RelocInfo::IsDebugBreakSlotAtCall(rmode)) {
RelocInfo::IsVeneerPool(rmode)) {
if (SetMode(rmode)) {
AdvanceReadInt();
return;
@@ -738,7 +724,8 @@ void RelocIterator::next() {
}
RelocIterator::RelocIterator(Code* code, int mode_mask) {
RelocIterator::RelocIterator(Code* code, int mode_mask)
: rinfo_(code->map()->GetIsolate()) {
rinfo_.host_ = code;
rinfo_.pc_ = code->instruction_start();
rinfo_.data_ = 0;
@ -763,7 +750,8 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
}
RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
: rinfo_(desc.origin->isolate()) {
rinfo_.pc_ = desc.buffer;
rinfo_.data_ = 0;
// Relocation info is read backwards.
@@ -807,8 +795,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "no reloc 64";
case EMBEDDED_OBJECT:
return "embedded object";
case CONSTRUCT_CALL:
return "code target (js construct call)";
case DEBUGGER_STATEMENT:
return "debugger statement";
case CODE_TARGET:
@@ -843,8 +829,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "debug break slot at return";
case DEBUG_BREAK_SLOT_AT_CALL:
return "debug break slot at call";
case DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL:
return "debug break slot at construct call";
case CODE_AGE_SEQUENCE:
return "code age sequence";
case GENERATOR_CONTINUATION:
@@ -909,7 +893,6 @@ void RelocInfo::Verify(Isolate* isolate) {
Object::VerifyPointer(target_cell());
break;
case DEBUGGER_STATEMENT:
case CONSTRUCT_CALL:
case CODE_TARGET_WITH_ID:
case CODE_TARGET: {
// convert inline target address to code object
@@ -942,7 +925,6 @@ void RelocInfo::Verify(Isolate* isolate) {
case DEBUG_BREAK_SLOT_AT_POSITION:
case DEBUG_BREAK_SLOT_AT_RETURN:
case DEBUG_BREAK_SLOT_AT_CALL:
case DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL:
case GENERATOR_CONTINUATION:
case NONE32:
case NONE64:
@@ -959,12 +941,6 @@
#endif // VERIFY_HEAP
int RelocInfo::DebugBreakCallArgumentsCount(intptr_t data) {
return static_cast<int>(data);
}
// -----------------------------------------------------------------------------
// Implementation of ExternalReference
void ExternalReference::SetUp() {
@@ -1441,14 +1417,6 @@ ExternalReference ExternalReference::debug_after_break_target_address(
}
ExternalReference
ExternalReference::debug_restarter_frame_function_pointer_address(
Isolate* isolate) {
return ExternalReference(
isolate->debug()->restarter_frame_function_pointer_address());
}
ExternalReference ExternalReference::virtual_handler_register(
Isolate* isolate) {
return ExternalReference(isolate->virtual_handler_register_address());
@@ -1467,17 +1435,20 @@ ExternalReference ExternalReference::runtime_function_table_address(
}
double power_helper(double x, double y) {
double power_helper(Isolate* isolate, double x, double y) {
int y_int = static_cast<int>(y);
if (y == y_int) {
return power_double_int(x, y_int); // Returns 1 if exponent is 0.
}
if (y == 0.5) {
lazily_initialize_fast_sqrt(isolate);
return (std::isinf(x)) ? V8_INFINITY
: fast_sqrt(x + 0.0); // Convert -0 to +0.
: fast_sqrt(x + 0.0, isolate); // Convert -0 to +0.
}
if (y == -0.5) {
return (std::isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0); // Convert -0 to +0.
lazily_initialize_fast_sqrt(isolate);
return (std::isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0,
isolate); // Convert -0 to +0.
}
return power_double_double(x, y);
}
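
Reviewer note: power_helper now takes the isolate and calls lazily_initialize_fast_sqrt(isolate) before the first fast_sqrt(x, isolate) use. A sketch of lazily initializing per-context state follows (illustrative C++ only; the flag and the fallback are hypothetical):

#include <cmath>
#include <cstdio>

struct Isolate {
  bool fast_sqrt_ready = false;  // hypothetical per-isolate flag
};

// One-time, per-isolate setup; idempotent, so callers may invoke it
// unconditionally before each use, as power_helper does above.
static void lazily_initialize_fast_sqrt(Isolate* isolate) {
  if (!isolate->fast_sqrt_ready) {
    // Real code would generate or install the fast routine here.
    isolate->fast_sqrt_ready = true;
  }
}

static double fast_sqrt(double x, Isolate* isolate) {
  // This sketch just falls back to the portable routine once initialized.
  return isolate->fast_sqrt_ready ? std::sqrt(x) : -1.0;
}

int main() {
  Isolate isolate;
  lazily_initialize_fast_sqrt(&isolate);
  std::printf("%f\n", fast_sqrt(2.0 + 0.0, &isolate));  // x + 0.0 turns -0 into +0
}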
@@ -1575,9 +1546,9 @@ ExternalReference ExternalReference::mod_two_doubles_operation(
}
ExternalReference ExternalReference::debug_step_in_fp_address(
ExternalReference ExternalReference::debug_step_in_enabled_address(
Isolate* isolate) {
return ExternalReference(isolate->debug()->step_in_fp_addr());
return ExternalReference(isolate->debug()->step_in_enabled_address());
}
@@ -1891,11 +1862,10 @@ void Assembler::RecordGeneratorContinuation() {
}
void Assembler::RecordDebugBreakSlot(RelocInfo::Mode mode, int call_argc) {
void Assembler::RecordDebugBreakSlot(RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
DCHECK(RelocInfo::IsDebugBreakSlot(mode));
intptr_t data = static_cast<intptr_t>(call_argc);
RecordRelocInfo(mode, data);
RecordRelocInfo(mode);
}

74
deps/v8/src/assembler.h

@@ -38,8 +38,8 @@
#include "src/allocation.h"
#include "src/builtins.h"
#include "src/isolate.h"
#include "src/parsing/token.h"
#include "src/runtime/runtime.h"
#include "src/token.h"
namespace v8 {
@@ -55,6 +55,9 @@ class StatsCounter;
// -----------------------------------------------------------------------------
// Platform independent assembler base class.
enum class CodeObjectRequired { kNo, kYes };
class AssemblerBase: public Malloced {
public:
AssemblerBase(Isolate* isolate, void* buffer, int buffer_size);
@@ -107,9 +110,6 @@ class AssemblerBase: public Malloced {
static void FlushICache(Isolate* isolate, void* start, size_t size);
// TODO(all): Help get rid of this one.
static void FlushICacheWithoutIsolate(void* start, size_t size);
protected:
// The buffer into which code and relocation info are generated. It could
// either be owned by the assembler or be provided externally.
@@ -233,17 +233,18 @@ class CpuFeatures : public AllStatic {
static void PrintTarget();
static void PrintFeatures();
private:
friend class ExternalReference;
friend class AssemblerBase;
// Flush instruction cache.
static void FlushICache(void* start, size_t size);
private:
// Platform-dependent implementation.
static void ProbeImpl(bool cross_compile);
static unsigned supported_;
static unsigned cache_line_size_;
static bool initialized_;
friend class ExternalReference;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
@@ -376,7 +377,6 @@ class RelocInfo {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CODE_TARGET, // Code target which is not any of the above.
CODE_TARGET_WITH_ID,
CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
DEBUGGER_STATEMENT, // Code target for the debugger statement.
EMBEDDED_OBJECT,
CELL,
@@ -391,7 +391,6 @@ class RelocInfo {
DEBUG_BREAK_SLOT_AT_POSITION,
DEBUG_BREAK_SLOT_AT_RETURN,
DEBUG_BREAK_SLOT_AT_CALL,
DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL,
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
@@ -428,19 +427,19 @@ class RelocInfo {
STATIC_ASSERT(NUMBER_OF_MODES <= kBitsPerInt);
RelocInfo() {}
explicit RelocInfo(Isolate* isolate) : isolate_(isolate) {
DCHECK_NOT_NULL(isolate);
}
RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
: pc_(pc), rmode_(rmode), data_(data), host_(host) {
RelocInfo(Isolate* isolate, byte* pc, Mode rmode, intptr_t data, Code* host)
: isolate_(isolate), pc_(pc), rmode_(rmode), data_(data), host_(host) {
DCHECK_NOT_NULL(isolate);
}
static inline bool IsRealRelocMode(Mode mode) {
return mode >= FIRST_REAL_RELOC_MODE &&
mode <= LAST_REAL_RELOC_MODE;
}
static inline bool IsConstructCall(Mode mode) {
return mode == CONSTRUCT_CALL;
}
static inline bool IsCodeTarget(Mode mode) {
return mode <= LAST_CODE_ENUM;
}
@@ -484,8 +483,7 @@ class RelocInfo {
}
static inline bool IsDebugBreakSlot(Mode mode) {
return IsDebugBreakSlotAtPosition(mode) || IsDebugBreakSlotAtReturn(mode) ||
IsDebugBreakSlotAtCall(mode) ||
IsDebugBreakSlotAtConstructCall(mode);
IsDebugBreakSlotAtCall(mode);
}
static inline bool IsDebugBreakSlotAtPosition(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_POSITION;
@@ -496,9 +494,6 @@ class RelocInfo {
static inline bool IsDebugBreakSlotAtCall(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_CALL;
}
static inline bool IsDebugBreakSlotAtConstructCall(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL;
}
static inline bool IsDebuggerStatement(Mode mode) {
return mode == DEBUGGER_STATEMENT;
}
@@ -514,6 +509,7 @@ class RelocInfo {
static inline int ModeMask(Mode mode) { return 1 << mode; }
// Accessors
Isolate* isolate() const { return isolate_; }
byte* pc() const { return pc_; }
void set_pc(byte* pc) { pc_ = pc; }
Mode rmode() const { return rmode_; }
@@ -536,9 +532,6 @@ class RelocInfo {
// constant pool, otherwise the pointer is embedded in the instruction stream.
bool IsInConstantPool();
static int DebugBreakCallArgumentsCount(intptr_t data);
// Read/modify the code target in the branch/call instruction
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
INLINE(Address target_address());
@@ -621,9 +614,6 @@ class RelocInfo {
template<typename StaticVisitor> inline void Visit(Heap* heap);
inline void Visit(Isolate* isolate, ObjectVisitor* v);
// Patch the code with a call.
void PatchCodeWithCall(Address target, int guard_bytes);
// Check whether this return sequence has been patched
// with a call to the debugger.
INLINE(bool IsPatchedReturnSequence());
@@ -651,12 +641,13 @@ class RelocInfo {
static const int kPositionMask = 1 << POSITION | 1 << STATEMENT_POSITION;
static const int kDataMask =
(1 << CODE_TARGET_WITH_ID) | kPositionMask | (1 << COMMENT);
static const int kDebugBreakSlotMask =
1 << DEBUG_BREAK_SLOT_AT_POSITION | 1 << DEBUG_BREAK_SLOT_AT_RETURN |
1 << DEBUG_BREAK_SLOT_AT_CALL | 1 << DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL;
static const int kDebugBreakSlotMask = 1 << DEBUG_BREAK_SLOT_AT_POSITION |
1 << DEBUG_BREAK_SLOT_AT_RETURN |
1 << DEBUG_BREAK_SLOT_AT_CALL;
static const int kApplyMask; // Modes affected by apply. Depends on arch.
private:
Isolate* isolate_;
// On ARM, note that pc_ is the address of the constant pool entry
// to be relocated and not the address of the instruction
// referencing the constant pool entry (except when rmode_ ==
@@ -866,7 +857,8 @@ class ExternalReference BASE_EMBEDDED {
static void InitializeMathExpData();
static void TearDownMathExpData();
typedef void* ExternalReferenceRedirector(void* original, Type type);
typedef void* ExternalReferenceRedirector(Isolate* isolate, void* original,
Type type);
ExternalReference() : address_(NULL) {}
@@ -984,8 +976,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference debug_is_active_address(Isolate* isolate);
static ExternalReference debug_after_break_target_address(Isolate* isolate);
static ExternalReference debug_restarter_frame_function_pointer_address(
Isolate* isolate);
static ExternalReference is_profiling_address(Isolate* isolate);
static ExternalReference invoke_function_callback(Isolate* isolate);
@@ -999,7 +989,7 @@ class ExternalReference BASE_EMBEDDED {
Address address() const { return reinterpret_cast<Address>(address_); }
// Used to check if single stepping is enabled in generated code.
static ExternalReference debug_step_in_fp_address(Isolate* isolate);
static ExternalReference debug_step_in_enabled_address(Isolate* isolate);
#ifndef V8_INTERPRETED_REGEXP
// C functions called from RegExp generated code.
@@ -1043,9 +1033,8 @@ class ExternalReference BASE_EMBEDDED {
reinterpret_cast<ExternalReferenceRedirector*>(
isolate->external_reference_redirector());
void* address = reinterpret_cast<void*>(address_arg);
void* answer = (redirector == NULL) ?
address :
(*redirector)(address, type);
void* answer =
(redirector == NULL) ? address : (*redirector)(isolate, address, type);
return answer;
}
@@ -1134,7 +1123,7 @@ inline int NumberOfBitsSet(uint32_t x) {
bool EvalComparison(Token::Value op, double op1, double op2);
// Computes pow(x, y) with the special cases in the spec for Math.pow.
double power_helper(double x, double y);
double power_helper(Isolate* isolate, double x, double y);
double power_double_int(double x, int y);
double power_double_double(double x, double y);
@@ -1150,8 +1139,11 @@ class CallWrapper {
virtual void BeforeCall(int call_size) const = 0;
// Called just after emitting a call, i.e., at the return site for the call.
virtual void AfterCall() const = 0;
// Return whether call needs to check for debug stepping.
virtual bool NeedsDebugStepCheck() const { return false; }
};
class NullCallWrapper : public CallWrapper {
public:
NullCallWrapper() { }
@@ -1161,6 +1153,16 @@ class NullCallWrapper : public CallWrapper {
};
class CheckDebugStepCallWrapper : public CallWrapper {
public:
CheckDebugStepCallWrapper() {}
virtual ~CheckDebugStepCallWrapper() {}
virtual void BeforeCall(int call_size) const {}
virtual void AfterCall() const {}
virtual bool NeedsDebugStepCheck() const { return true; }
};
// -----------------------------------------------------------------------------
// Constant pool support
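
Reviewer note: the CallWrapper hunk above grows a NeedsDebugStepCheck() query with a false default, so only the new CheckDebugStepCallWrapper opts in and existing wrappers compile unchanged. A compilable sketch of that default-query pattern, modeled on the declarations above but simplified (illustrative C++, not the real classes):

#include <cstdio>

class CallWrapper {
 public:
  virtual ~CallWrapper() = default;
  virtual void BeforeCall(int call_size) const = 0;
  virtual void AfterCall() const = 0;
  // New query with a safe default: existing subclasses need no changes.
  virtual bool NeedsDebugStepCheck() const { return false; }
};

class NullCallWrapper : public CallWrapper {
 public:
  void BeforeCall(int) const override {}
  void AfterCall() const override {}
};

class CheckDebugStepCallWrapper : public CallWrapper {
 public:
  void BeforeCall(int) const override {}
  void AfterCall() const override {}
  bool NeedsDebugStepCheck() const override { return true; }
};

// A caller can now ask the wrapper whether to emit the extra check.
static void EmitCall(const CallWrapper& wrapper) {
  if (wrapper.NeedsDebugStepCheck()) std::printf("emit debug step check\n");
  wrapper.BeforeCall(4);
  std::printf("emit call\n");
  wrapper.AfterCall();
}

int main() {
  EmitCall(NullCallWrapper());
  EmitCall(CheckDebugStepCallWrapper());
}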

7
deps/v8/src/ast/OWNERS

@@ -0,0 +1,7 @@
set noparent
adamk@chromium.org
bmeurer@chromium.org
littledan@chromium.org
mstarzinger@chromium.org
rossberg@chromium.org

409
deps/v8/src/ast/ast-expression-rewriter.cc

@@ -0,0 +1,409 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/ast/ast.h"
#include "src/ast/ast-expression-rewriter.h"
namespace v8 {
namespace internal {
// ----------------------------------------------------------------------------
// Implementation of AstExpressionRewriter
// The AST is traversed but no actual rewriting takes place, unless the
// Visit methods are overridden in subclasses.
#define REWRITE_THIS(node) \
do { \
if (!RewriteExpression(node)) return; \
} while (false)
#define NOTHING() DCHECK_NULL(replacement_)
void AstExpressionRewriter::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
for (int i = 0; i < declarations->length(); i++) {
AST_REWRITE_LIST_ELEMENT(Declaration, declarations, i);
}
}
void AstExpressionRewriter::VisitStatements(ZoneList<Statement*>* statements) {
for (int i = 0; i < statements->length(); i++) {
AST_REWRITE_LIST_ELEMENT(Statement, statements, i);
// Not stopping when a jump statement is found.
}
}
void AstExpressionRewriter::VisitExpressions(
ZoneList<Expression*>* expressions) {
for (int i = 0; i < expressions->length(); i++) {
// The variable statement visiting code may pass NULL expressions
// to this code. Maybe this should be handled by introducing an
// undefined expression or literal? Revisit this code if this
// changes.
if (expressions->at(i) != nullptr) {
AST_REWRITE_LIST_ELEMENT(Expression, expressions, i);
}
}
}
void AstExpressionRewriter::VisitVariableDeclaration(
VariableDeclaration* node) {
// Not visiting `proxy_`.
NOTHING();
}
void AstExpressionRewriter::VisitFunctionDeclaration(
FunctionDeclaration* node) {
// Not visiting `proxy_`.
AST_REWRITE_PROPERTY(FunctionLiteral, node, fun);
}
void AstExpressionRewriter::VisitImportDeclaration(ImportDeclaration* node) {
// Not visiting `proxy_`.
NOTHING();
}
void AstExpressionRewriter::VisitExportDeclaration(ExportDeclaration* node) {
// Not visiting `proxy_`.
NOTHING();
}
void AstExpressionRewriter::VisitBlock(Block* node) {
VisitStatements(node->statements());
}
void AstExpressionRewriter::VisitExpressionStatement(
ExpressionStatement* node) {
AST_REWRITE_PROPERTY(Expression, node, expression);
}
void AstExpressionRewriter::VisitEmptyStatement(EmptyStatement* node) {
NOTHING();
}
void AstExpressionRewriter::VisitSloppyBlockFunctionStatement(
SloppyBlockFunctionStatement* node) {
AST_REWRITE_PROPERTY(Statement, node, statement);
}
void AstExpressionRewriter::VisitIfStatement(IfStatement* node) {
AST_REWRITE_PROPERTY(Expression, node, condition);
AST_REWRITE_PROPERTY(Statement, node, then_statement);
AST_REWRITE_PROPERTY(Statement, node, else_statement);
}
void AstExpressionRewriter::VisitContinueStatement(ContinueStatement* node) {
NOTHING();
}
void AstExpressionRewriter::VisitBreakStatement(BreakStatement* node) {
NOTHING();
}
void AstExpressionRewriter::VisitReturnStatement(ReturnStatement* node) {
AST_REWRITE_PROPERTY(Expression, node, expression);
}
void AstExpressionRewriter::VisitWithStatement(WithStatement* node) {
AST_REWRITE_PROPERTY(Expression, node, expression);
AST_REWRITE_PROPERTY(Statement, node, statement);
}
void AstExpressionRewriter::VisitSwitchStatement(SwitchStatement* node) {
AST_REWRITE_PROPERTY(Expression, node, tag);
ZoneList<CaseClause*>* clauses = node->cases();
for (int i = 0; i < clauses->length(); i++) {
AST_REWRITE_LIST_ELEMENT(CaseClause, clauses, i);
}
}
void AstExpressionRewriter::VisitDoWhileStatement(DoWhileStatement* node) {
AST_REWRITE_PROPERTY(Expression, node, cond);
AST_REWRITE_PROPERTY(Statement, node, body);
}
void AstExpressionRewriter::VisitWhileStatement(WhileStatement* node) {
AST_REWRITE_PROPERTY(Expression, node, cond);
AST_REWRITE_PROPERTY(Statement, node, body);
}
void AstExpressionRewriter::VisitForStatement(ForStatement* node) {
if (node->init() != nullptr) {
AST_REWRITE_PROPERTY(Statement, node, init);
}
if (node->cond() != nullptr) {
AST_REWRITE_PROPERTY(Expression, node, cond);
}
if (node->next() != nullptr) {
AST_REWRITE_PROPERTY(Statement, node, next);
}
AST_REWRITE_PROPERTY(Statement, node, body);
}
void AstExpressionRewriter::VisitForInStatement(ForInStatement* node) {
AST_REWRITE_PROPERTY(Expression, node, each);
AST_REWRITE_PROPERTY(Expression, node, subject);
AST_REWRITE_PROPERTY(Statement, node, body);
}
void AstExpressionRewriter::VisitForOfStatement(ForOfStatement* node) {
AST_REWRITE_PROPERTY(Expression, node, each);
AST_REWRITE_PROPERTY(Expression, node, assign_iterator);
AST_REWRITE_PROPERTY(Expression, node, next_result);
AST_REWRITE_PROPERTY(Expression, node, result_done);
AST_REWRITE_PROPERTY(Expression, node, assign_each);
AST_REWRITE_PROPERTY(Expression, node, subject);
AST_REWRITE_PROPERTY(Statement, node, body);
}
void AstExpressionRewriter::VisitTryCatchStatement(TryCatchStatement* node) {
AST_REWRITE_PROPERTY(Block, node, try_block);
// Not visiting the variable.
AST_REWRITE_PROPERTY(Block, node, catch_block);
}
void AstExpressionRewriter::VisitTryFinallyStatement(
TryFinallyStatement* node) {
AST_REWRITE_PROPERTY(Block, node, try_block);
AST_REWRITE_PROPERTY(Block, node, finally_block);
}
void AstExpressionRewriter::VisitDebuggerStatement(DebuggerStatement* node) {
NOTHING();
}
void AstExpressionRewriter::VisitFunctionLiteral(FunctionLiteral* node) {
REWRITE_THIS(node);
VisitDeclarations(node->scope()->declarations());
ZoneList<Statement*>* body = node->body();
if (body != nullptr) VisitStatements(body);
}
void AstExpressionRewriter::VisitClassLiteral(ClassLiteral* node) {
REWRITE_THIS(node);
// Not visiting `class_variable_proxy_`.
if (node->extends() != nullptr) {
AST_REWRITE_PROPERTY(Expression, node, extends);
}
AST_REWRITE_PROPERTY(FunctionLiteral, node, constructor);
ZoneList<typename ClassLiteral::Property*>* properties = node->properties();
for (int i = 0; i < properties->length(); i++) {
VisitObjectLiteralProperty(properties->at(i));
}
}
void AstExpressionRewriter::VisitNativeFunctionLiteral(
NativeFunctionLiteral* node) {
REWRITE_THIS(node);
NOTHING();
}
void AstExpressionRewriter::VisitConditional(Conditional* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, condition);
AST_REWRITE_PROPERTY(Expression, node, then_expression);
AST_REWRITE_PROPERTY(Expression, node, else_expression);
}
void AstExpressionRewriter::VisitVariableProxy(VariableProxy* node) {
REWRITE_THIS(node);
NOTHING();
}
void AstExpressionRewriter::VisitLiteral(Literal* node) {
REWRITE_THIS(node);
NOTHING();
}
void AstExpressionRewriter::VisitRegExpLiteral(RegExpLiteral* node) {
REWRITE_THIS(node);
NOTHING();
}
void AstExpressionRewriter::VisitObjectLiteral(ObjectLiteral* node) {
REWRITE_THIS(node);
ZoneList<typename ObjectLiteral::Property*>* properties = node->properties();
for (int i = 0; i < properties->length(); i++) {
VisitObjectLiteralProperty(properties->at(i));
}
}
void AstExpressionRewriter::VisitObjectLiteralProperty(
ObjectLiteralProperty* property) {
if (property == nullptr) return;
AST_REWRITE_PROPERTY(Expression, property, key);
AST_REWRITE_PROPERTY(Expression, property, value);
}
void AstExpressionRewriter::VisitArrayLiteral(ArrayLiteral* node) {
REWRITE_THIS(node);
VisitExpressions(node->values());
}
void AstExpressionRewriter::VisitAssignment(Assignment* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, target);
AST_REWRITE_PROPERTY(Expression, node, value);
}
void AstExpressionRewriter::VisitYield(Yield* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, generator_object);
AST_REWRITE_PROPERTY(Expression, node, expression);
}
void AstExpressionRewriter::VisitThrow(Throw* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, exception);
}
void AstExpressionRewriter::VisitProperty(Property* node) {
REWRITE_THIS(node);
if (node == nullptr) return;
AST_REWRITE_PROPERTY(Expression, node, obj);
AST_REWRITE_PROPERTY(Expression, node, key);
}
void AstExpressionRewriter::VisitCall(Call* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, expression);
VisitExpressions(node->arguments());
}
void AstExpressionRewriter::VisitCallNew(CallNew* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, expression);
VisitExpressions(node->arguments());
}
void AstExpressionRewriter::VisitCallRuntime(CallRuntime* node) {
REWRITE_THIS(node);
VisitExpressions(node->arguments());
}
void AstExpressionRewriter::VisitUnaryOperation(UnaryOperation* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, expression);
}
void AstExpressionRewriter::VisitCountOperation(CountOperation* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, expression);
}
void AstExpressionRewriter::VisitBinaryOperation(BinaryOperation* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, left);
AST_REWRITE_PROPERTY(Expression, node, right);
}
void AstExpressionRewriter::VisitCompareOperation(CompareOperation* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, left);
AST_REWRITE_PROPERTY(Expression, node, right);
}
void AstExpressionRewriter::VisitSpread(Spread* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, expression);
}
void AstExpressionRewriter::VisitThisFunction(ThisFunction* node) {
REWRITE_THIS(node);
NOTHING();
}
void AstExpressionRewriter::VisitSuperPropertyReference(
SuperPropertyReference* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(VariableProxy, node, this_var);
AST_REWRITE_PROPERTY(Expression, node, home_object);
}
void AstExpressionRewriter::VisitSuperCallReference(SuperCallReference* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(VariableProxy, node, this_var);
AST_REWRITE_PROPERTY(VariableProxy, node, new_target_var);
AST_REWRITE_PROPERTY(VariableProxy, node, this_function_var);
}
void AstExpressionRewriter::VisitCaseClause(CaseClause* node) {
if (!node->is_default()) {
AST_REWRITE_PROPERTY(Expression, node, label);
}
VisitStatements(node->statements());
}
void AstExpressionRewriter::VisitEmptyParentheses(EmptyParentheses* node) {
NOTHING();
}
void AstExpressionRewriter::VisitDoExpression(DoExpression* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Block, node, block);
AST_REWRITE_PROPERTY(VariableProxy, node, result);
}
void AstExpressionRewriter::VisitRewritableAssignmentExpression(
RewritableAssignmentExpression* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, expression);
}
} // namespace internal
} // namespace v8
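
Reviewer note: the new rewriter's REWRITE_THIS macro wraps an early return in do { ... } while (false) so the macro expands to a single statement and stays safe after an unbraced if. A minimal sketch of the idiom follows (illustrative C++; RewriteExpression here is a stand-in, not the real hook):

#include <cstdio>

// Stand-in for the real hook: returns false to stop visiting this node.
static bool RewriteExpression(int* node) {
  *node += 1;
  return *node < 3;
}

// do { ... } while (false) makes the macro one statement, so the early
// return stays well-formed even after an unbraced if.
#define REWRITE_THIS(node)                \
  do {                                    \
    if (!RewriteExpression(node)) return; \
  } while (false)

static void VisitLiteral(int* node) {
  if (node != nullptr) REWRITE_THIS(node);  // no braces needed
  std::printf("visited, node=%d\n", *node);
}

int main() {
  int keep = 0;
  VisitLiteral(&keep);  // prints: visited, node=1
  int stop = 5;
  VisitLiteral(&stop);  // early return, prints nothing
}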

54
deps/v8/src/ast/ast-expression-rewriter.h

@@ -0,0 +1,54 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_AST_AST_EXPRESSION_REWRITER_H_
#define V8_AST_AST_EXPRESSION_REWRITER_H_
#include "src/allocation.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/effects.h"
#include "src/type-info.h"
#include "src/types.h"
#include "src/zone.h"
namespace v8 {
namespace internal {
// A rewriting Visitor over a CompilationInfo's AST that invokes
// VisitExpression on each expression node.
class AstExpressionRewriter : public AstVisitor {
public:
explicit AstExpressionRewriter(Isolate* isolate) : AstVisitor() {
InitializeAstRewriter(isolate);
}
explicit AstExpressionRewriter(uintptr_t stack_limit) : AstVisitor() {
InitializeAstRewriter(stack_limit);
}
~AstExpressionRewriter() override {}
void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
void VisitStatements(ZoneList<Statement*>* statements) override;
void VisitExpressions(ZoneList<Expression*>* expressions) override;
virtual void VisitObjectLiteralProperty(ObjectLiteralProperty* property);
protected:
virtual bool RewriteExpression(Expression* expr) = 0;
private:
DEFINE_AST_REWRITER_SUBCLASS_MEMBERS();
#define DECLARE_VISIT(type) void Visit##type(type* node) override;
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
DISALLOW_COPY_AND_ASSIGN(AstExpressionRewriter);
};
} // namespace internal
} // namespace v8
#endif // V8_AST_AST_EXPRESSION_REWRITER_H_
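
Reviewer note: the header above stamps out its Visit declarations with AST_NODE_LIST(DECLARE_VISIT), the X-macro idiom. A self-contained sketch with a three-entry list follows (illustrative C++; the real AST_NODE_LIST is far larger):

#include <cstdio>

// Hypothetical three-entry node list standing in for the real one.
#define AST_NODE_LIST(V) \
  V(Literal)             \
  V(Assignment)          \
  V(Call)

struct Node {
  const char* kind;
};

class Visitor {
 public:
// Expands to: void VisitLiteral(Node*); void VisitAssignment(Node*); ...
#define DECLARE_VISIT(type) void Visit##type(Node* node);
  AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
};

// The same list stamps out the definitions, so the node list and the
// visitor methods can never drift apart.
#define DEFINE_VISIT(type) \
  void Visitor::Visit##type(Node* node) { std::printf("%s\n", node->kind); }
AST_NODE_LIST(DEFINE_VISIT)
#undef DEFINE_VISIT

int main() {
  Node n{"Literal"};
  Visitor().VisitLiteral(&n);
}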

25
deps/v8/src/ast-expression-visitor.cc → deps/v8/src/ast/ast-expression-visitor.cc

@@ -4,11 +4,11 @@
#include "src/v8.h"
#include "src/ast-expression-visitor.h"
#include "src/ast/ast-expression-visitor.h"
#include "src/ast.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/codegen.h"
#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -171,6 +171,11 @@ void AstExpressionVisitor::VisitForInStatement(ForInStatement* stmt) {
void AstExpressionVisitor::VisitForOfStatement(ForOfStatement* stmt) {
RECURSE(Visit(stmt->iterable()));
RECURSE(Visit(stmt->each()));
RECURSE(Visit(stmt->assign_iterator()));
RECURSE(Visit(stmt->next_result()));
RECURSE(Visit(stmt->result_done()));
RECURSE(Visit(stmt->assign_each()));
RECURSE(Visit(stmt->body()));
}
@@ -209,9 +214,10 @@ void AstExpressionVisitor::VisitDoExpression(DoExpression* expr) {
void AstExpressionVisitor::VisitConditional(Conditional* expr) {
RECURSE(Visit(expr->condition()));
RECURSE(Visit(expr->then_expression()));
RECURSE(Visit(expr->else_expression()));
VisitExpression(expr);
RECURSE_EXPRESSION(Visit(expr->condition()));
RECURSE_EXPRESSION(Visit(expr->then_expression()));
RECURSE_EXPRESSION(Visit(expr->else_expression()));
}
@@ -393,5 +399,12 @@ void AstExpressionVisitor::VisitSuperCallReference(SuperCallReference* expr) {
}
void AstExpressionVisitor::VisitRewritableAssignmentExpression(
RewritableAssignmentExpression* expr) {
VisitExpression(expr);
RECURSE(Visit(expr->expression()));
}
} // namespace internal
} // namespace v8

10
deps/v8/src/ast-expression-visitor.h → deps/v8/src/ast/ast-expression-visitor.h

@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_AST_EXPRESSION_VISITOR_H_
#define V8_AST_EXPRESSION_VISITOR_H_
#ifndef V8_AST_AST_EXPRESSION_VISITOR_H_
#define V8_AST_AST_EXPRESSION_VISITOR_H_
#include "src/allocation.h"
#include "src/ast.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/effects.h"
#include "src/scopes.h"
#include "src/type-info.h"
#include "src/types.h"
#include "src/zone.h"
@@ -47,4 +47,4 @@ class AstExpressionVisitor : public AstVisitor {
} // namespace internal
} // namespace v8
#endif // V8_AST_EXPRESSION_VISITOR_H_
#endif // V8_AST_AST_EXPRESSION_VISITOR_H_

12
deps/v8/src/ast-literal-reindexer.cc → deps/v8/src/ast/ast-literal-reindexer.cc

@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/ast-literal-reindexer.h"
#include "src/ast/ast-literal-reindexer.h"
#include "src/ast.h"
#include "src/scopes.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
namespace v8 {
namespace internal {
@@ -76,6 +76,12 @@ void AstLiteralReindexer::VisitSuperCallReference(SuperCallReference* node) {
}
void AstLiteralReindexer::VisitRewritableAssignmentExpression(
RewritableAssignmentExpression* node) {
Visit(node->expression());
}
void AstLiteralReindexer::VisitImportDeclaration(ImportDeclaration* node) {
VisitVariableProxy(node->proxy());
}

11
deps/v8/src/ast-literal-reindexer.h → deps/v8/src/ast/ast-literal-reindexer.h

@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_AST_LITERAL_REINDEXER
#define V8_AST_LITERAL_REINDEXER
#ifndef V8_AST_AST_LITERAL_REINDEXER
#define V8_AST_AST_LITERAL_REINDEXER
#include "src/ast.h"
#include "src/scopes.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
namespace v8 {
namespace internal {
@@ -17,7 +17,6 @@ class AstLiteralReindexer final : public AstVisitor {
int count() const { return next_index_; }
void Reindex(Expression* pattern);
int NextIndex() { return next_index_++; }
private:
#define DEFINE_VISIT(type) void Visit##type(type* node) override;
@@ -42,4 +41,4 @@ class AstLiteralReindexer final : public AstVisitor {
} // namespace internal
} // namespace v8
#endif // V8_AST_LITERAL_REINDEXER
#endif // V8_AST_AST_LITERAL_REINDEXER

27
deps/v8/src/ast-numbering.cc → deps/v8/src/ast/ast-numbering.cc

@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/ast-numbering.h"
#include "src/ast/ast-numbering.h"
#include "src/ast.h"
#include "src/scopes.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
namespace v8 {
namespace internal {
@@ -181,7 +181,7 @@ void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
void AstNumberingVisitor::VisitSuperPropertyReference(
SuperPropertyReference* node) {
IncrementNodeCount();
DisableOptimization(kSuperReference);
DisableCrankshaft(kSuperReference);
node->set_base_id(ReserveIdRange(SuperPropertyReference::num_ids()));
Visit(node->this_var());
Visit(node->home_object());
@@ -190,7 +190,7 @@ void AstNumberingVisitor::VisitSuperCallReference(SuperCallReference* node) {
void AstNumberingVisitor::VisitSuperCallReference(SuperCallReference* node) {
IncrementNodeCount();
DisableOptimization(kSuperReference);
DisableCrankshaft(kSuperReference);
node->set_base_id(ReserveIdRange(SuperCallReference::num_ids()));
Visit(node->this_var());
Visit(node->new_target_var());
@@ -348,6 +348,7 @@ void AstNumberingVisitor::VisitProperty(Property* node) {
void AstNumberingVisitor::VisitAssignment(Assignment* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(Assignment::num_ids()));
if (node->is_compound()) VisitBinaryOperation(node->binary_operation());
VisitReference(node->target());
Visit(node->value());
@@ -373,7 +374,7 @@ void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
void AstNumberingVisitor::VisitSpread(Spread* node) {
IncrementNodeCount();
DisableOptimization(kSpread);
DisableCrankshaft(kSpread);
Visit(node->expression());
}
@@ -556,6 +557,14 @@ void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
}
void AstNumberingVisitor::VisitRewritableAssignmentExpression(
RewritableAssignmentExpression* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(RewritableAssignmentExpression::num_ids()));
Visit(node->expression());
}
bool AstNumberingVisitor::Finish(FunctionLiteral* node) {
node->set_ast_properties(&properties_);
node->set_dont_optimize_reason(dont_optimize_reason());
@@ -571,11 +580,17 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
DisableOptimization(kFunctionWithIllegalRedeclaration);
return Finish(node);
}
if (scope->new_target_var()) DisableCrankshaft(kSuperReference);
if (scope->calls_eval()) DisableOptimization(kFunctionCallsEval);
if (scope->arguments() != NULL && !scope->arguments()->IsStackAllocated()) {
DisableCrankshaft(kContextAllocatedArguments);
}
int rest_index;
if (scope->rest_parameter(&rest_index)) {
DisableCrankshaft(kRestParameter);
}
VisitDeclarations(scope->declarations());
VisitStatements(node->body());
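
Reviewer note: the renumbering changes above split the old blanket DisableOptimization into a milder DisableCrankshaft for constructs (super references, spread, rest parameters, context-allocated arguments) that only one optimizing tier cannot handle, while eval callers still disable optimization entirely. A sketch of that two-flag scheme follows (illustrative C++ only; the tier model is hypothetical):

#include <cstdio>

enum class Tier { kBaseline, kCrankshaft, kTurbofan };

struct FunctionInfo {
  bool crankshaft_disabled = false;
  bool optimization_disabled = false;

  // Milder flag: one optimizing tier bows out, the other may still run.
  void DisableCrankshaft() { crankshaft_disabled = true; }
  // Stronger flag: only the baseline tier remains.
  void DisableOptimization() { optimization_disabled = true; }

  bool CanUse(Tier tier) const {
    if (optimization_disabled) return tier == Tier::kBaseline;
    if (tier == Tier::kCrankshaft) return !crankshaft_disabled;
    return true;
  }
};

int main() {
  FunctionInfo f;
  f.DisableCrankshaft();  // e.g. the function uses a rest parameter
  std::printf("crankshaft: %d  turbofan: %d\n",
              f.CanUse(Tier::kCrankshaft), f.CanUse(Tier::kTurbofan));
}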
