
deps: upgrade V8 to 4.5.103.24

Upgrade to the latest branch-head for V8 4.5. For the full commit log see
https://github.com/v8/v8-git-mirror/commits/4.5.103.24

PR-URL: https://github.com/nodejs/node/pull/2509
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Branch: v4.x
Authored by Ali Ijaz Sheikh 10 years ago; committed by Rod Vagg
Commit: c43172578e
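Since the diff bumps the version macros in deps/v8/include/v8-version.h (see the changed-files list below), a quick sanity check after building against the upgraded checkout is to print them. A minimal C++ sketch; the file name version_check.cc and the include path are assumptions for illustration, not part of this commit:

    // version_check.cc -- minimal sketch; assumes the deps/v8 directory is on
    // the compiler's include path.
    #include <cstdio>
    #include "include/v8-version.h"  // defines the four macros used below

    int main() {
      std::printf("V8 %d.%d.%d.%d\n", V8_MAJOR_VERSION, V8_MINOR_VERSION,
                  V8_BUILD_NUMBER, V8_PATCH_LEVEL);
      return 0;  // expected output for this checkout: "V8 4.5.103.24"
    }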
Changed files, with per-file change counts:

       2  deps/v8/.gitignore
      76  deps/v8/BUILD.gn
     675  deps/v8/ChangeLog
      12  deps/v8/DEPS
       7  deps/v8/LICENSE
      21  deps/v8/Makefile
      65  deps/v8/Makefile.android
     266  deps/v8/build/android.gypi
      12  deps/v8/build/features.gypi
       2  deps/v8/build/get_landmines.py
      11  deps/v8/build/gyp_v8
     520  deps/v8/build/standalone.gypi
      44  deps/v8/build/toolchain.gypi
      41  deps/v8/include/v8-debug.h
      11  deps/v8/include/v8-platform.h
      28  deps/v8/include/v8-profiler.h
       6  deps/v8/include/v8-version.h
    1089  deps/v8/include/v8.h
      53  deps/v8/include/v8config.h
       3  deps/v8/infra/OWNERS
       1  deps/v8/infra/README.md
       0  deps/v8/infra/config/OWNERS
      52  deps/v8/infra/config/cq.cfg
       1  deps/v8/infra/project-config/README.md
      23  deps/v8/infra/project-config/cr-buildbucket.cfg
      71  deps/v8/samples/hello-world.cc
     202  deps/v8/samples/process.cc
      10  deps/v8/samples/samples.gyp
     151  deps/v8/samples/shell.cc
     173  deps/v8/src/accessors.cc
       5  deps/v8/src/accessors.h
       3  deps/v8/src/allocation-site-scopes.cc
       3  deps/v8/src/allocation-tracker.cc
       3  deps/v8/src/allocation.cc
     100  deps/v8/src/api-natives.cc
    1231  deps/v8/src/api.cc
      15  deps/v8/src/api.h
      28  deps/v8/src/arguments.cc
      12  deps/v8/src/arguments.h
      27  deps/v8/src/arm/assembler-arm-inl.h
     634  deps/v8/src/arm/assembler-arm.cc
     139  deps/v8/src/arm/assembler-arm.h
     143  deps/v8/src/arm/builtins-arm.cc
     227  deps/v8/src/arm/code-stubs-arm.cc
       3  deps/v8/src/arm/codegen-arm.cc
       3  deps/v8/src/arm/constants-arm.cc
       5  deps/v8/src/arm/constants-arm.h
       3  deps/v8/src/arm/cpu-arm.cc
      52  deps/v8/src/arm/debug-arm.cc
       5  deps/v8/src/arm/deoptimizer-arm.cc
       4  deps/v8/src/arm/disasm-arm.cc
      14  deps/v8/src/arm/frames-arm.cc
      54  deps/v8/src/arm/frames-arm.h
     853  deps/v8/src/arm/full-codegen-arm.cc
     296  deps/v8/src/arm/interface-descriptors-arm.cc
      79  deps/v8/src/arm/lithium-arm.cc
      83  deps/v8/src/arm/lithium-arm.h
     290  deps/v8/src/arm/lithium-codegen-arm.cc
       6  deps/v8/src/arm/lithium-codegen-arm.h
       3  deps/v8/src/arm/lithium-gap-resolver-arm.cc
      76  deps/v8/src/arm/macro-assembler-arm.cc
      10  deps/v8/src/arm/macro-assembler-arm.h
       3  deps/v8/src/arm/regexp-macro-assembler-arm.cc
      29  deps/v8/src/arm/simulator-arm.cc
       2  deps/v8/src/arm/simulator-arm.h
      25  deps/v8/src/arm64/assembler-arm64-inl.h
     101  deps/v8/src/arm64/assembler-arm64.cc
      40  deps/v8/src/arm64/assembler-arm64.h
     117  deps/v8/src/arm64/builtins-arm64.cc
     234  deps/v8/src/arm64/code-stubs-arm64.cc
       6  deps/v8/src/arm64/code-stubs-arm64.h
       3  deps/v8/src/arm64/codegen-arm64.cc
       2  deps/v8/src/arm64/constants-arm64.h
       3  deps/v8/src/arm64/cpu-arm64.cc
      50  deps/v8/src/arm64/debug-arm64.cc
       3  deps/v8/src/arm64/decoder-arm64.cc
       3  deps/v8/src/arm64/delayed-masm-arm64.cc
       5  deps/v8/src/arm64/deoptimizer-arm64.cc
      24  deps/v8/src/arm64/disasm-arm64.cc
       9  deps/v8/src/arm64/frames-arm64.cc
      30  deps/v8/src/arm64/frames-arm64.h
     855  deps/v8/src/arm64/full-codegen-arm64.cc
      30  deps/v8/src/arm64/instructions-arm64.cc
      14  deps/v8/src/arm64/instructions-arm64.h
       3  deps/v8/src/arm64/instrument-arm64.cc
     311  deps/v8/src/arm64/interface-descriptors-arm64.cc
      80  deps/v8/src/arm64/lithium-arm64.cc
      83  deps/v8/src/arm64/lithium-arm64.h
     329  deps/v8/src/arm64/lithium-codegen-arm64.cc
      22  deps/v8/src/arm64/lithium-codegen-arm64.h
       3  deps/v8/src/arm64/lithium-gap-resolver-arm64.cc
      24  deps/v8/src/arm64/macro-assembler-arm64.cc
       4  deps/v8/src/arm64/macro-assembler-arm64.h
       3  deps/v8/src/arm64/regexp-macro-assembler-arm64.cc
       1  deps/v8/src/arm64/regexp-macro-assembler-arm64.h
     169  deps/v8/src/arm64/simulator-arm64.cc
      17  deps/v8/src/arm64/simulator-arm64.h
       5  deps/v8/src/arm64/utils-arm64.cc
      43  deps/v8/src/arm64/utils-arm64.h
      20  deps/v8/src/array-iterator.js

2  deps/v8/.gitignore

@@ -60,6 +60,8 @@ shell_g
/test/promises-aplus/promises-tests
/test/promises-aplus/promises-tests.tar.gz
/test/promises-aplus/sinon
/test/simdjs/ecmascript_simd*
/test/simdjs/data*
/test/test262/data
/test/test262/data.old
/test/test262/tc39-test262-*

76  deps/v8/BUILD.gn

@@ -52,7 +52,7 @@ config("internal_config") {
include_dirs = [ "." ]
if (component_mode == "shared_library") {
if (is_component_build) {
defines = [
"V8_SHARED",
"BUILDING_V8_SHARED",
@@ -204,6 +204,7 @@ action("js2c") {
"src/macros.py",
"src/messages.h",
"src/runtime.js",
"src/prologue.js",
"src/v8natives.js",
"src/symbol.js",
"src/array.js",
@@ -215,6 +216,7 @@ action("js2c") {
"src/regexp.js",
"src/arraybuffer.js",
"src/typedarray.js",
"src/iterator-prototype.js",
"src/generator.js",
"src/object-observe.js",
"src/collection.js",
@@ -229,6 +231,8 @@ action("js2c") {
"src/mirror-debugger.js",
"src/liveedit-debugger.js",
"src/templates.js",
"src/harmony-array.js",
"src/harmony-typedarray.js",
]
outputs = [
@@ -267,14 +271,15 @@ action("js2c_experimental") {
"src/messages.h",
"src/proxy.js",
"src/generator.js",
"src/harmony-array.js",
"src/harmony-atomics.js",
"src/harmony-array-includes.js",
"src/harmony-typedarray.js",
"src/harmony-concat-spreadable.js",
"src/harmony-tostring.js",
"src/harmony-regexp.js",
"src/harmony-reflect.js",
"src/harmony-spread.js",
"src/harmony-object.js"
"src/harmony-object.js",
"src/harmony-sharedarraybuffer.js"
]
outputs = [
@@ -474,9 +479,13 @@ source_set("v8_snapshot") {
":js2c",
":js2c_experimental",
":js2c_extras",
":run_mksnapshot",
":v8_base",
]
public_deps = [
# This should be public so downstream targets can declare the snapshot
# output file as their inputs.
":run_mksnapshot",
]
sources = [
"$target_gen_dir/libraries.cc",
@@ -502,9 +511,11 @@ if (v8_use_external_startup_data) {
":js2c",
":js2c_experimental",
":js2c_extras",
":run_mksnapshot",
":v8_base",
]
public_deps = [
":natives_blob",
":run_mksnapshot",
]
sources = [
@@ -526,6 +537,14 @@ source_set("v8_base") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
"include/v8-debug.h",
"include/v8-platform.h",
"include/v8-profiler.h",
"include/v8-testing.h",
"include/v8-util.h",
"include/v8-version.h",
"include/v8.h",
"include/v8config.h",
"src/accessors.cc",
"src/accessors.h",
"src/allocation.cc",
@@ -544,6 +563,8 @@ source_set("v8_base") {
"src/assembler.h",
"src/assert-scope.h",
"src/assert-scope.cc",
"src/ast-literal-reindexer.cc",
"src/ast-literal-reindexer.h",
"src/ast-numbering.cc",
"src/ast-numbering.h",
"src/ast-value-factory.cc",
@@ -602,6 +623,8 @@ source_set("v8_base") {
"src/compiler/basic-block-instrumentor.h",
"src/compiler/change-lowering.cc",
"src/compiler/change-lowering.h",
"src/compiler/coalesced-live-ranges.cc",
"src/compiler/coalesced-live-ranges.h",
"src/compiler/code-generator-impl.h",
"src/compiler/code-generator.cc",
"src/compiler/code-generator.h",
@@ -617,8 +640,8 @@ source_set("v8_base") {
"src/compiler/control-equivalence.h",
"src/compiler/control-flow-optimizer.cc",
"src/compiler/control-flow-optimizer.h",
"src/compiler/control-reducer.cc",
"src/compiler/control-reducer.h",
"src/compiler/dead-code-elimination.cc",
"src/compiler/dead-code-elimination.h",
"src/compiler/diamond.h",
"src/compiler/frame.h",
"src/compiler/frame-elider.cc",
@@ -632,10 +655,14 @@ source_set("v8_base") {
"src/compiler/graph-reducer.h",
"src/compiler/graph-replay.cc",
"src/compiler/graph-replay.h",
"src/compiler/graph-trimmer.cc",
"src/compiler/graph-trimmer.h",
"src/compiler/graph-visualizer.cc",
"src/compiler/graph-visualizer.h",
"src/compiler/graph.cc",
"src/compiler/graph.h",
"src/compiler/greedy-allocator.cc",
"src/compiler/greedy-allocator.h",
"src/compiler/instruction-codes.h",
"src/compiler/instruction-selector-impl.h",
"src/compiler/instruction-selector.cc",
@@ -646,6 +673,8 @@ source_set("v8_base") {
"src/compiler/js-builtin-reducer.h",
"src/compiler/js-context-specialization.cc",
"src/compiler/js-context-specialization.h",
"src/compiler/js-frame-specialization.cc",
"src/compiler/js-frame-specialization.h",
"src/compiler/js-generic-lowering.cc",
"src/compiler/js-generic-lowering.h",
"src/compiler/js-graph.cc",
@@ -774,6 +803,7 @@ source_set("v8_base") {
"src/elements.h",
"src/execution.cc",
"src/execution.h",
"src/expression-classifier.h",
"src/extensions/externalize-string-extension.cc",
"src/extensions/externalize-string-extension.h",
"src/extensions/free-buffer-extension.cc",
@@ -830,6 +860,8 @@ source_set("v8_base") {
"src/heap/mark-compact-inl.h",
"src/heap/mark-compact.cc",
"src/heap/mark-compact.h",
"src/heap/memory-reducer.cc",
"src/heap/memory-reducer.h",
"src/heap/objects-visiting-inl.h",
"src/heap/objects-visiting.cc",
"src/heap/objects-visiting.h",
@@ -958,12 +990,11 @@ source_set("v8_base") {
"src/optimizing-compile-dispatcher.h",
"src/ostreams.cc",
"src/ostreams.h",
"src/pattern-rewriter.cc",
"src/parser.cc",
"src/parser.h",
"src/pending-compilation-error-handler.cc",
"src/pending-compilation-error-handler.h",
"src/perf-jit.cc",
"src/perf-jit.h",
"src/preparse-data-format.h",
"src/preparse-data.cc",
"src/preparse-data.h",
@@ -992,11 +1023,13 @@ source_set("v8_base") {
"src/runtime-profiler.cc",
"src/runtime-profiler.h",
"src/runtime/runtime-array.cc",
"src/runtime/runtime-atomics.cc",
"src/runtime/runtime-classes.cc",
"src/runtime/runtime-collections.cc",
"src/runtime/runtime-compiler.cc",
"src/runtime/runtime-date.cc",
"src/runtime/runtime-debug.cc",
"src/runtime/runtime-forin.cc",
"src/runtime/runtime-function.cc",
"src/runtime/runtime-generator.cc",
"src/runtime/runtime-i18n.cc",
@@ -1032,6 +1065,7 @@ source_set("v8_base") {
"src/scopes.cc",
"src/scopes.h",
"src/signature.h",
"src/simulator.h",
"src/small-pointer-list.h",
"src/smart-pointers.h",
"src/snapshot/natives.h",
@@ -1040,6 +1074,8 @@ source_set("v8_base") {
"src/snapshot/snapshot-common.cc",
"src/snapshot/snapshot-source-sink.cc",
"src/snapshot/snapshot-source-sink.h",
"src/splay-tree.h",
"src/splay-tree-inl.h",
"src/snapshot/snapshot.h",
"src/string-builder.cc",
"src/string-builder.h",
@@ -1089,6 +1125,8 @@ source_set("v8_base") {
"src/vm-state.h",
"src/zone.cc",
"src/zone.h",
"src/zone-allocator.h",
"src/zone-containers.h",
"src/third_party/fdlibm/fdlibm.cc",
"src/third_party/fdlibm/fdlibm.h",
]
@@ -1201,6 +1239,7 @@ source_set("v8_base") {
"src/arm/regexp-macro-assembler-arm.cc",
"src/arm/regexp-macro-assembler-arm.h",
"src/arm/simulator-arm.cc",
"src/arm/simulator-arm.h",
"src/compiler/arm/code-generator-arm.cc",
"src/compiler/arm/instruction-codes-arm.h",
"src/compiler/arm/instruction-selector-arm.cc",
@@ -1295,6 +1334,7 @@ source_set("v8_base") {
"src/mips/regexp-macro-assembler-mips.cc",
"src/mips/regexp-macro-assembler-mips.h",
"src/mips/simulator-mips.cc",
"src/mips/simulator-mips.h",
"src/compiler/mips/code-generator-mips.cc",
"src/compiler/mips/instruction-codes-mips.h",
"src/compiler/mips/instruction-selector-mips.cc",
@@ -1336,6 +1376,7 @@ source_set("v8_base") {
"src/mips64/regexp-macro-assembler-mips64.cc",
"src/mips64/regexp-macro-assembler-mips64.h",
"src/mips64/simulator-mips64.cc",
"src/mips64/simulator-mips64.h",
"src/ic/mips64/access-compiler-mips64.cc",
"src/ic/mips64/handler-compiler-mips64.cc",
"src/ic/mips64/ic-mips64.cc",
@@ -1399,6 +1440,8 @@ source_set("v8_libbase") {
"src/base/atomicops_internals_atomicword_compat.h",
"src/base/atomicops_internals_mac.h",
"src/base/atomicops_internals_mips_gcc.h",
"src/base/atomicops_internals_mips64_gcc.h",
"src/base/atomicops_internals_portable.h",
"src/base/atomicops_internals_tsan.h",
"src/base/atomicops_internals_x86_gcc.cc",
"src/base/atomicops_internals_x86_gcc.h",
@@ -1558,7 +1601,7 @@ if (current_toolchain == snapshot_toolchain) {
# Public targets
#
if (component_mode == "shared_library") {
if (is_component_build) {
component("v8") {
sources = [
"src/v8dll-main.cc",
@@ -1567,11 +1610,17 @@ if (component_mode == "shared_library") {
if (v8_use_snapshot && v8_use_external_startup_data) {
deps = [
":v8_base",
]
public_deps = [
":v8_external_snapshot",
]
} else if (v8_use_snapshot) {
deps = [
":v8_base",
]
# v8_snapshot should be public so downstream targets can declare the
# snapshot file as their input.
public_deps = [
":v8_snapshot",
]
} else {
@@ -1607,6 +1656,8 @@ if (component_mode == "shared_library") {
} else if (v8_use_snapshot) {
deps = [
":v8_base",
]
public_deps = [
":v8_snapshot",
]
} else {
@@ -1657,9 +1708,10 @@ if ((current_toolchain == host_toolchain && v8_toolset_for_d8 == "host") ||
sources += [ "src/d8-windows.cc" ]
}
if (component_mode != "shared_library") {
if (!is_component_build) {
sources += [
"src/d8-debug.cc",
"src/d8-debug.h",
"$target_gen_dir/d8-js.cc",
]
}

675  deps/v8/ChangeLog

@@ -1,3 +1,678 @@
2015-07-08: Version 4.5.103
Performance and stability improvements on all platforms.
2015-07-08: Version 4.5.102
Performance and stability improvements on all platforms.
2015-07-07: Version 4.5.101
Move compatible receiver check from CompileHandler to UpdateCaches
(Chromium issue 505374).
Performance and stability improvements on all platforms.
2015-07-07: Version 4.5.100
Performance and stability improvements on all platforms.
2015-07-07: Version 4.5.99
unicode-decoder: fix out-of-band write in utf16 (issue 4274).
Performance and stability improvements on all platforms.
2015-07-06: Version 4.5.98
Performance and stability improvements on all platforms.
2015-07-04: Version 4.5.97
Performance and stability improvements on all platforms.
2015-07-03: Version 4.5.96
Performance and stability improvements on all platforms.
2015-07-03: Version 4.5.95
Performance and stability improvements on all platforms.
2015-07-02: Version 4.5.94
Performance and stability improvements on all platforms.
2015-07-02: Version 4.5.93
Include Harmony Array/TypedArray methods unconditionally (Chromium issue
504629).
Performance and stability improvements on all platforms.
2015-07-02: Version 4.5.92
Performance and stability improvements on all platforms.
2015-07-01: Version 4.5.91
Performance and stability improvements on all platforms.
2015-07-01: Version 4.5.90
Performance and stability improvements on all platforms.
2015-07-01: Version 4.5.89
Performance and stability improvements on all platforms.
2015-06-30: Version 4.5.88
Performance and stability improvements on all platforms.
2015-06-30: Version 4.5.87
Performance and stability improvements on all platforms.
2015-06-30: Version 4.5.86
Ensure mjsunit tests use dashes not underscores in flags directives
(Chromium issue 505228).
Performance and stability improvements on all platforms.
2015-06-29: Version 4.5.85
Fix flag convention in handle count tests and comment (Chromium issue
505228).
Performance and stability improvements on all platforms.
2015-06-29: Version 4.5.84
Performance and stability improvements on all platforms.
2015-06-27: Version 4.5.83
Performance and stability improvements on all platforms.
2015-06-26: Version 4.5.82
Performance and stability improvements on all platforms.
2015-06-26: Version 4.5.81
Remove obsolete options in ScriptCompiler::CompileOptions (Chromium
issue 399580).
Performance and stability improvements on all platforms.
2015-06-25: Version 4.5.80
Performance and stability improvements on all platforms.
2015-06-25: Version 4.5.79
Performance and stability improvements on all platforms.
2015-06-25: Version 4.5.78
Serializer: clear next link in weak cells (Chromium issue 503552).
Performance and stability improvements on all platforms.
2015-06-24: Version 4.5.77
Performance and stability improvements on all platforms.
2015-06-24: Version 4.5.76
Performance and stability improvements on all platforms.
2015-06-24: Version 4.5.75
Date() should not depend on Date.prototype.toString (issue 4225).
Performance and stability improvements on all platforms.
2015-06-23: Version 4.5.74
Expose Map/Set methods through the API (issue 3340).
[turbofan] NaN is never truish (issue 4207).
Performance and stability improvements on all platforms.
2015-06-23: Version 4.5.73
Re-ship Harmony Array/TypedArray methods (issue 3578).
Performance and stability improvements on all platforms.
2015-06-23: Version 4.5.72
Performance and stability improvements on all platforms.
2015-06-23: Version 4.5.71
Performance and stability improvements on all platforms.
2015-06-20: Version 4.5.70
Ship Harmony Array/TypedArray methods (issue 3578).
Performance and stability improvements on all platforms.
2015-06-20: Version 4.5.69
Ship arrow functions (issue 2700).
Performance and stability improvements on all platforms.
2015-06-19: Version 4.5.68
Performance and stability improvements on all platforms.
2015-06-19: Version 4.5.67
Performance and stability improvements on all platforms.
2015-06-19: Version 4.5.66
Ship arrow functions (issue 2700).
Performance and stability improvements on all platforms.
2015-06-18: Version 4.5.65
Performance and stability improvements on all platforms.
2015-06-18: Version 4.5.64
Performance and stability improvements on all platforms.
2015-06-18: Version 4.5.63
Performance and stability improvements on all platforms.
2015-06-17: Version 4.5.62
Hydrogen object literals: always initialize in-object properties
(Chromium issue 500497).
Performance and stability improvements on all platforms.
2015-06-17: Version 4.5.61
Add %TypedArray% to proto chain (issue 4085).
Performance and stability improvements on all platforms.
2015-06-17: Version 4.5.60
Performance and stability improvements on all platforms.
2015-06-17: Version 4.5.59
[crankshaft] Fix wrong bailout points in for-in loop body (Chromium
issue 500435).
Performance and stability improvements on all platforms.
2015-06-16: Version 4.5.58
Performance and stability improvements on all platforms.
2015-06-16: Version 4.5.57
Inline code generation for %_IsTypedArray (issue 4085).
Allow TypedArrays to be initialized with iterables (issue 4090).
Performance and stability improvements on all platforms.
2015-06-15: Version 4.5.56
Performance and stability improvements on all platforms.
2015-06-15: Version 4.5.55
Performance and stability improvements on all platforms.
2015-06-14: Version 4.5.54
Performance and stability improvements on all platforms.
2015-06-13: Version 4.5.53
Performance and stability improvements on all platforms.
2015-06-12: Version 4.5.52
Map::TryUpdate() must be in sync with Map::Update() (issue 4173).
Add ToObject call in Array.prototype.sort (issue 4125).
In Array.of and Array.from, fall back to DefineOwnProperty (issue 4168).
Performance and stability improvements on all platforms.
2015-06-12: Version 4.5.51
Performance and stability improvements on all platforms.
2015-06-11: Version 4.5.50
Performance and stability improvements on all platforms.
2015-06-11: Version 4.5.49
Performance and stability improvements on all platforms.
2015-06-11: Version 4.5.48
Support rest parameters in arrow functions (issue 2700).
Performance and stability improvements on all platforms.
2015-06-10: Version 4.5.47
Implement %TypedArray%.prototype.slice (issue 3578).
Performance and stability improvements on all platforms.
2015-06-09: Version 4.5.46
Stage ES6 arrow functions (issue 2700).
Performance and stability improvements on all platforms.
2015-06-09: Version 4.5.45
Performance and stability improvements on all platforms.
2015-06-09: Version 4.5.44
Performance and stability improvements on all platforms.
2015-06-08: Version 4.5.43
[for-in] Make ForInNext and ForInFilter deal properly with exceptions
(Chromium issue 496331).
Performance and stability improvements on all platforms.
2015-06-08: Version 4.5.42
Performance and stability improvements on all platforms.
2015-06-06: Version 4.5.41
Performance and stability improvements on all platforms.
2015-06-05: Version 4.5.40
Performance and stability improvements on all platforms.
2015-06-05: Version 4.5.39
Stage ES6 Array and TypedArray methods (issue 3578).
Performance and stability improvements on all platforms.
2015-06-05: Version 4.5.38
Implement %TypedArray%.prototype.{reduce,reduceRight} (issue 3578).
Add support for Embedded Constant Pools for PPC and Arm (Chromium issue
478811).
Performance and stability improvements on all platforms.
2015-06-04: Version 4.5.37
Performance and stability improvements on all platforms.
2015-06-04: Version 4.5.36
Performance and stability improvements on all platforms.
2015-06-04: Version 4.5.35
Flatten the Arrays returned and consumed by the v8::Map API (Chromium
issue 478263).
Performance and stability improvements on all platforms.
2015-06-03: Version 4.5.34
Also allocate small typed arrays on heap when initialized from an array-
like (issue 3996).
Implement %TypedArray%.prototype.{reduce,reduceRight} (issue 3578).
Performance and stability improvements on all platforms.
2015-06-03: Version 4.5.33
Add support for Embedded Constant Pools for PPC and Arm (Chromium issue
478811).
Implement %TypedArray%.prototype.{toString,toLocaleString,join} (issue
3578).
Performance and stability improvements on all platforms.
2015-06-03: Version 4.5.32
Performance and stability improvements on all platforms.
2015-06-02: Version 4.5.31
Performance and stability improvements on all platforms.
2015-06-02: Version 4.5.30
Performance and stability improvements on all platforms.
2015-06-01: Version 4.5.29
Reland "Re-enable on-heap typed array allocation" (issue 3996).
Performance and stability improvements on all platforms.
2015-06-01: Version 4.5.28
Re-enable on-heap typed array allocation (issue 3996).
Also expose DefineOwnProperty (Chromium issue 475206).
Performance and stability improvements on all platforms.
2015-06-01: Version 4.5.27
Performance and stability improvements on all platforms.
2015-05-31: Version 4.5.26
Performance and stability improvements on all platforms.
2015-05-30: Version 4.5.25
Performance and stability improvements on all platforms.
2015-05-29: Version 4.5.24
Debugger: consider try-finally scopes not catching wrt debug events
(Chromium issue 492522).
Performance and stability improvements on all platforms.
2015-05-29: Version 4.5.23
Performance and stability improvements on all platforms.
2015-05-29: Version 4.5.22
Do not eagerly convert exception to string when creating a message
object (Chromium issue 490680).
Performance and stability improvements on all platforms.
2015-05-28: Version 4.5.21
Performance and stability improvements on all platforms.
2015-05-28: Version 4.5.20
Introduce v8::Object::CreateDataProperty (Chromium issue 475206).
Performance and stability improvements on all platforms.
2015-05-27: Version 4.5.19
Performance and stability improvements on all platforms.
2015-05-27: Version 4.5.18
Add {Map,Set}::FromArray to the API (issue 3340).
Add {Map,Set}::AsArray to the API (issue 3340).
Add basic API support for Map & Set (issue 3340).
Performance and stability improvements on all platforms.
2015-05-26: Version 4.5.17
Correctly hook up materialized receiver into the evaluation context
chain (Chromium issue 491943).
Implement bookmarks for ExternalStreamingStream (Chromium issue 470930).
Performance and stability improvements on all platforms.
2015-05-26: Version 4.5.16
Performance and stability improvements on all platforms.
2015-05-26: Version 4.5.15
Performance and stability improvements on all platforms.
2015-05-23: Version 4.5.14
Performance and stability improvements on all platforms.
2015-05-22: Version 4.5.13
Remove v8::Private.
Performance and stability improvements on all platforms.
2015-05-22: Version 4.5.12
Performance and stability improvements on all platforms.
2015-05-22: Version 4.5.11
Performance and stability improvements on all platforms.
2015-05-21: Version 4.5.10
Re-land %TypedArray%.prototype.{map,filter,some} (issue 3578).
Performance and stability improvements on all platforms.
2015-05-21: Version 4.5.9
Performance and stability improvements on all platforms.
2015-05-20: Version 4.5.8
Performance and stability improvements on all platforms.
2015-05-20: Version 4.5.7
Implement %TypedArray%.{lastI,i}ndexOf (issue 3578).
Implement %TypedArray%.prototype.sort (issue 3578).
Implement %TypedArray%.reverse (issue 3578).
Implement %TypedArray%.prototype.{map,filter,some,reduce,reduceRight}
(issue 3578).
Fix has_pending_exception logic in API's Array::CloneElementAt (issue
4103).
Adding api to get last gc object statistics for chrome://tracing
(Chromium issue 476013).
Fix harmless HGraph verification failure after hoisting inlined bounds
checks (Chromium issue 487608).
Performance and stability improvements on all platforms.
2015-05-20: Version 4.5.6
Add TypedArray.from method (issue 3578).
Performance and stability improvements on all platforms.
2015-05-19: Version 4.5.5
ARM64: Propagate notification about aborted compilation from
RegExpEngine to MacroAssembler (Chromium issue 489290).
Performance and stability improvements on all platforms.
2015-05-18: Version 4.5.4
Performance and stability improvements on all platforms.
2015-05-18: Version 4.5.3
Performance and stability improvements on all platforms.
2015-05-17: Version 4.5.2
Performance and stability improvements on all platforms.
2015-05-16: Version 4.5.1
Test that TypedArray methods don't read length (issue 3578).
Implement %TypedArray%.{fill,find,findIndex} (issue 3578).
TypedArray.prototype.copyWithin method (issue 3578).
Provide accessor for object internal properties that doesn't require
debugger to be active (Chromium issue 481845).
Don't create debug context if debug listener is not set (Chromium issue
482290).
Performance and stability improvements on all platforms.
2015-05-13: Version 4.4.65
Deprecate Isolate::New.
Factor out core of Array.forEach and .every, for use in TypedArrays
(issue 3578).
Performance and stability improvements on all platforms.
2015-05-12: Version 4.4.64
Performance and stability improvements on all platforms.
2015-05-11: Version 4.4.63
Let Runtime_GrowArrayElements accept non-Smi numbers as |key| (Chromium

12  deps/v8/DEPS

@@ -8,23 +8,23 @@ vars = {
deps = {
"v8/build/gyp":
Var("git_url") + "/external/gyp.git" + "@" + "0bb67471bca068996e15b56738fa4824dfa19de0",
Var("git_url") + "/external/gyp.git" + "@" + "5122240c5e5c4d8da12c543d82b03d6089eb77c5",
"v8/third_party/icu":
Var("git_url") + "/chromium/deps/icu.git" + "@" + "f8c0e585b0a046d83d72b5d37356cb50d5b2031a",
Var("git_url") + "/chromium/deps/icu.git" + "@" + "c81a1a3989c3b66fa323e9a6ee7418d7c08297af",
"v8/buildtools":
Var("git_url") + "/chromium/buildtools.git" + "@" + "b0ede9c89f9d5fbe5387d961ad4c0ec665b6c821",
Var("git_url") + "/chromium/buildtools.git" + "@" + "ecc8e253abac3b6186a97573871a084f4c0ca3ae",
"v8/testing/gtest":
Var("git_url") + "/external/googletest.git" + "@" + "be1868139ffe0ccd0e8e3b37292b84c821d9c8ad",
Var("git_url") + "/external/googletest.git" + "@" + "23574bf2333f834ff665f894c97bef8a5b33a0a9",
"v8/testing/gmock":
Var("git_url") + "/external/googlemock.git" + "@" + "29763965ab52f24565299976b936d1265cb6a271", # from svn revision 501
"v8/tools/clang":
Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "5bab78c6ced45a71a8e095a09697ca80492e57e1",
Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "73ec8804ed395b0886d6edf82a9f33583f4a7902",
}
deps_os = {
"android": {
"v8/third_party/android_tools":
Var("git_url") + "/android_tools.git" + "@" + "4f723e2a5fa5b7b8a198072ac19b92344be2b271",
Var("git_url") + "/android_tools.git" + "@" + "21f4bcbd6cd927e4b4227cfde7d5f13486be1236",
},
"win": {
"v8/third_party/cygwin":

7  deps/v8/LICENSE

@@ -3,12 +3,12 @@ maintained libraries. The externally maintained libraries used by V8
are:
- PCRE test suite, located in
test/mjsunit/third_party/regexp-pcre.js. This is based on the
test/mjsunit/third_party/regexp-pcre/regexp-pcre.js. This is based on the
test suite from PCRE-7.3, which is copyrighted by the University
of Cambridge and Google, Inc. The copyright notice and license
are embedded in regexp-pcre.js.
- Layout tests, located in test/mjsunit/third_party. These are
- Layout tests, located in test/mjsunit/third_party/object-keys. These are
based on layout tests from webkit.org which are copyrighted by
Apple Computer, Inc. and released under a 3-clause BSD license.
@@ -26,6 +26,9 @@ are:
These libraries have their own licenses; we recommend you read them,
as their terms may differ from the terms below.
Further license information can be found in LICENSE files located in
sub-directories.
Copyright 2014, the V8 project authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are

21  deps/v8/Makefile

@@ -31,9 +31,7 @@ OUTDIR ?= out
TESTJOBS ?=
GYPFLAGS ?=
TESTFLAGS ?=
ANDROID_NDK_ROOT ?=
ANDROID_NDK_HOST_ARCH ?=
ANDROID_TOOLCHAIN ?=
ANDROID_V8 ?= /data/local/tmp/v8
NACL_SDK_ROOT ?=
@@ -145,10 +143,14 @@ ifeq ($(i18nsupport), off)
GYPFLAGS += -Dv8_enable_i18n_support=0
TESTFLAGS += --noi18n
endif
# deprecation_warnings=on
# deprecationwarnings=on
ifeq ($(deprecationwarnings), on)
GYPFLAGS += -Dv8_deprecation_warnings=1
endif
# imminentdeprecationwarnings=on
ifeq ($(imminentdeprecationwarnings), on)
GYPFLAGS += -Dv8_imminent_deprecation_warnings=1
endif
# asan=on
ifeq ($(asan), on)
GYPFLAGS += -Dasan=1 -Dclang=1
@@ -246,7 +248,7 @@ NACL_ARCHES = nacl_ia32 nacl_x64
GYPFILES = third_party/icu/icu.gypi third_party/icu/icu.gyp \
build/shim_headers.gypi build/features.gypi build/standalone.gypi \
build/toolchain.gypi build/all.gyp build/mac/asan.gyp \
build/android.gypi test/cctest/cctest.gyp \
test/cctest/cctest.gyp \
test/unittests/unittests.gyp tools/gyp/v8.gyp \
tools/parser-shell.gyp testing/gmock.gyp testing/gtest.gyp \
buildtools/third_party/libc++abi/libc++abi.gyp \
@@ -277,7 +279,6 @@ ENVFILE = $(OUTDIR)/environment
$(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
$(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) \
$(ANDROID_ARCHES) $(ANDROID_BUILDS) $(ANDROID_CHECKS) \
must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN \
$(NACL_ARCHES) $(NACL_BUILDS) $(NACL_CHECKS) \
must-set-NACL_SDK_ROOT
@@ -311,8 +312,7 @@ native: $(OUTDIR)/Makefile.native
$(ANDROID_ARCHES): $(addprefix $$@.,$(MODES))
$(ANDROID_BUILDS): $(GYPFILES) $(ENVFILE) build/android.gypi \
must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN Makefile.android
$(ANDROID_BUILDS): $(GYPFILES) $(ENVFILE) Makefile.android
@$(MAKE) -f Makefile.android $@ \
ARCH="$(basename $@)" \
MODE="$(subst .,,$(suffix $@))" \
@@ -448,13 +448,6 @@ $(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS)
must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN:
ifndef ANDROID_NDK_ROOT
ifndef ANDROID_TOOLCHAIN
$(error ANDROID_NDK_ROOT or ANDROID_TOOLCHAIN must be set))
endif
endif
# Note that NACL_SDK_ROOT must be set to point to an appropriate
# Native Client SDK before using this makefile. You can download
# an SDK here:

65  deps/v8/Makefile.android

@@ -35,75 +35,28 @@ MODES = release debug
ANDROID_BUILDS = $(foreach mode,$(MODES), \
$(addsuffix .$(mode),$(ANDROID_ARCHES)))
HOST_OS = $(shell uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/')
ANDROID_NDK_HOST_ARCH ?= $(shell uname -m | sed -e 's/i[3456]86/x86/')
ifeq ($(HOST_OS), linux)
TOOLCHAIN_DIR = linux-$(ANDROID_NDK_HOST_ARCH)
else ifeq ($(HOST_OS), mac)
TOOLCHAIN_DIR = darwin-$(ANDROID_NDK_HOST_ARCH)
else
$(error Host platform "${HOST_OS}" is not supported)
endif
ifeq ($(ARCH), android_arm)
DEFINES = target_arch=arm v8_target_arch=arm android_target_arch=arm android_target_platform=14
DEFINES += arm_neon=0 arm_version=7
TOOLCHAIN_ARCH = arm-linux-androideabi
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.8
DEFINES = target_arch=arm v8_target_arch=arm
else ifeq ($(ARCH), android_arm64)
DEFINES = target_arch=arm64 v8_target_arch=arm64 android_target_arch=arm64 android_target_platform=21
TOOLCHAIN_ARCH = aarch64-linux-android
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.9
DEFINES = target_arch=arm64 v8_target_arch=arm64
else ifeq ($(ARCH), android_mipsel)
DEFINES = target_arch=mipsel v8_target_arch=mipsel android_target_platform=14
DEFINES += android_target_arch=mips mips_arch_variant=mips32r2
TOOLCHAIN_ARCH = mipsel-linux-android
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.8
DEFINES = target_arch=mipsel v8_target_arch=mipsel
else ifeq ($(ARCH), android_ia32)
DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86 android_target_platform=14
TOOLCHAIN_ARCH = x86
TOOLCHAIN_PREFIX = i686-linux-android
TOOLCHAIN_VER = 4.8
DEFINES = target_arch=ia32 v8_target_arch=ia32
else ifeq ($(ARCH), android_x64)
DEFINES = target_arch=x64 v8_target_arch=x64 android_target_arch=x86_64 android_target_platform=21
TOOLCHAIN_ARCH = x86_64
TOOLCHAIN_PREFIX = x86_64-linux-android
TOOLCHAIN_VER = 4.9
DEFINES = target_arch=x64 v8_target_arch=x64
else ifeq ($(ARCH), android_x87)
DEFINES = target_arch=x87 v8_target_arch=x87 android_target_arch=x86 android_target_platform=14
TOOLCHAIN_ARCH = x86
TOOLCHAIN_PREFIX = i686-linux-android
TOOLCHAIN_VER = 4.8
DEFINES = target_arch=ia32 v8_target_arch=x87
else
$(error Target architecture "${ARCH}" is not supported)
endif
TOOLCHAIN_PATH = \
${ANDROID_NDK_ROOT}/toolchains/${TOOLCHAIN_ARCH}-${TOOLCHAIN_VER}/prebuilt
ANDROID_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/${TOOLCHAIN_DIR}
ifeq ($(wildcard $(ANDROID_TOOLCHAIN)),)
$(error Cannot find Android toolchain in "${ANDROID_TOOLCHAIN}". Please \
check that ANDROID_NDK_ROOT and ANDROID_NDK_HOST_ARCH are set \
correctly)
endif
# For mksnapshot host generation.
DEFINES += host_os=${HOST_OS}
# Common flags.
DEFINES += OS=android
.SECONDEXPANSION:
$(ANDROID_BUILDS): $(OUTDIR)/Makefile.$$@
@$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \
CXX="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-g++" \
AR="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ar" \
RANLIB="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ranlib" \
CC="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-gcc" \
LD="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ld" \
LINK="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-g++" \
BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
python -c "print raw_input().capitalize()") \
builddir="$(shell pwd)/$(OUTDIR)/$@"
@@ -113,9 +66,7 @@ ANDROID_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ANDROID_BUILDS))
$(ANDROID_MAKEFILES):
GYP_GENERATORS=make-android \
GYP_DEFINES="${DEFINES}" \
CC="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-gcc" \
CXX="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-g++" \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH)" \
build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
-Ibuild/standalone.gypi --depth=. \
-S$(suffix $(basename $@))$(suffix $@) ${GYPFLAGS}

266  deps/v8/build/android.gypi

@@ -1,266 +0,0 @@
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Definitions for building standalone V8 binaries to run on Android.
# This is mostly excerpted from:
# http://src.chromium.org/viewvc/chrome/trunk/src/build/common.gypi
{
'variables': {
# Location of Android NDK.
'variables': {
'android_ndk_root%': '<!(/bin/echo -n $ANDROID_NDK_ROOT)',
'android_toolchain%': '<!(/bin/echo -n $ANDROID_TOOLCHAIN)',
},
'conditions': [
['android_ndk_root==""', {
'variables': {
'android_sysroot': '<(android_toolchain)/sysroot/',
'android_stlport': '<(android_toolchain)/sources/cxx-stl/stlport/',
},
'android_include': '<(android_sysroot)/usr/include',
'conditions': [
['target_arch=="x64"', {
'android_lib': '<(android_sysroot)/usr/lib64',
}, {
'android_lib': '<(android_sysroot)/usr/lib',
}],
],
'android_stlport_include': '<(android_stlport)/stlport',
'android_stlport_libs': '<(android_stlport)/libs',
}, {
'variables': {
'android_sysroot': '<(android_ndk_root)/platforms/android-<(android_target_platform)/arch-<(android_target_arch)',
'android_stlport': '<(android_ndk_root)/sources/cxx-stl/stlport/',
},
'android_include': '<(android_sysroot)/usr/include',
'conditions': [
['target_arch=="x64"', {
'android_lib': '<(android_sysroot)/usr/lib64',
}, {
'android_lib': '<(android_sysroot)/usr/lib',
}],
],
'android_stlport_include': '<(android_stlport)/stlport',
'android_stlport_libs': '<(android_stlport)/libs',
}],
],
'android_stlport_library': 'stlport_static',
}, # variables
'target_defaults': {
'defines': [
'ANDROID',
'V8_ANDROID_LOG_STDOUT',
],
'configurations': {
'Release': {
'cflags': [
'-fomit-frame-pointer',
],
}, # Release
}, # configurations
'cflags': [ '-Wno-abi', '-Wall', '-W', '-Wno-unused-parameter'],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-fno-exceptions',
# Note: Using -std=c++0x will define __STRICT_ANSI__, which
# in turn will leave out some template stuff for 'long
# long'. What we want is -std=c++11, but this is not
# supported by GCC 4.6 or Xcode 4.2
'-std=gnu++0x' ],
'target_conditions': [
['_toolset=="target"', {
'cflags!': [
'-pthread', # Not supported by Android toolchain.
],
'cflags': [
'-ffunction-sections',
'-funwind-tables',
'-fstack-protector',
'-fno-short-enums',
'-finline-limit=64',
'-Wa,--noexecstack',
# Note: This include is in cflags to ensure that it comes after
# all of the includes.
'-I<(android_include)',
'-I<(android_stlport_include)',
],
'cflags_cc': [
'-Wno-error=non-virtual-dtor', # TODO(michaelbai): Fix warnings.
],
'defines': [
'ANDROID',
#'__GNU_SOURCE=1', # Necessary for clone()
'USE_STLPORT=1',
'_STLP_USE_PTR_SPECIALIZATIONS=1',
'HAVE_OFF64_T',
'HAVE_SYS_UIO_H',
'ANDROID_BINSIZE_HACK', # Enable temporary hacks to reduce binsize.
],
'ldflags!': [
'-pthread', # Not supported by Android toolchain.
],
'ldflags': [
'-nostdlib',
'-Wl,--no-undefined',
'-Wl,-rpath-link=<(android_lib)',
'-L<(android_lib)',
],
'libraries!': [
'-lrt', # librt is built into Bionic.
# Not supported by Android toolchain.
# Where do these come from? Can't find references in
# any Chromium gyp or gypi file. Maybe they come from
# gyp itself?
'-lpthread', '-lnss3', '-lnssutil3', '-lsmime3', '-lplds4', '-lplc4', '-lnspr4',
],
'libraries': [
'-l<(android_stlport_library)',
# Manually link the libgcc.a that the cross compiler uses.
'<!($CC -print-libgcc-file-name)',
'-lc',
'-ldl',
'-lstdc++',
'-lm',
],
'conditions': [
['target_arch == "arm"', {
'ldflags': [
# Enable identical code folding to reduce size.
'-Wl,--icf=safe',
],
}],
['target_arch=="arm" and arm_version==7', {
'cflags': [
'-march=armv7-a',
'-mtune=cortex-a8',
'-mfpu=vfp3',
],
'ldflags': [
'-L<(android_stlport_libs)/armeabi-v7a',
],
}],
['target_arch=="arm" and arm_version < 7', {
'ldflags': [
'-L<(android_stlport_libs)/armeabi',
],
}],
['target_arch=="x64"', {
'ldflags': [
'-L<(android_stlport_libs)/x86_64',
],
}],
['target_arch=="arm64"', {
'ldflags': [
'-L<(android_stlport_libs)/arm64-v8a',
],
}],
['target_arch=="ia32" or target_arch=="x87"', {
# The x86 toolchain currently has problems with stack-protector.
'cflags!': [
'-fstack-protector',
],
'cflags': [
'-fno-stack-protector',
],
'ldflags': [
'-L<(android_stlport_libs)/x86',
],
}],
['target_arch=="mipsel"', {
# The mips toolchain currently has problems with stack-protector.
'cflags!': [
'-fstack-protector',
'-U__linux__'
],
'cflags': [
'-fno-stack-protector',
],
'ldflags': [
'-L<(android_stlport_libs)/mips',
],
}],
['(target_arch=="arm" or target_arch=="arm64" or target_arch=="x64") and component!="shared_library"', {
'cflags': [
'-fPIE',
],
'ldflags': [
'-pie',
],
}],
],
'target_conditions': [
['_type=="executable"', {
'conditions': [
['target_arch=="arm64" or target_arch=="x64"', {
'ldflags': [
'-Wl,-dynamic-linker,/system/bin/linker64',
],
}, {
'ldflags': [
'-Wl,-dynamic-linker,/system/bin/linker',
],
}]
],
'ldflags': [
'-Bdynamic',
'-Wl,-z,nocopyreloc',
# crtbegin_dynamic.o should be the last item in ldflags.
'<(android_lib)/crtbegin_dynamic.o',
],
'libraries': [
# crtend_android.o needs to be the last item in libraries.
# Do not add any libraries after this!
'<(android_lib)/crtend_android.o',
],
}],
['_type=="shared_library"', {
'ldflags': [
'-Wl,-shared,-Bsymbolic',
'<(android_lib)/crtbegin_so.o',
],
}],
['_type=="static_library"', {
'ldflags': [
# Don't export symbols from statically linked libraries.
'-Wl,--exclude-libs=ALL',
],
}],
],
}], # _toolset=="target"
# Settings for building host targets using the system toolchain.
['_toolset=="host"', {
'cflags': [ '-pthread' ],
'ldflags': [ '-pthread' ],
'ldflags!': [
'-Wl,-z,noexecstack',
'-Wl,--gc-sections',
'-Wl,-O1',
'-Wl,--as-needed',
],
}],
], # target_conditions
}, # target_defaults
}

12  deps/v8/build/features.gypi

@@ -59,6 +59,9 @@
# Enable compiler warnings when using V8_DEPRECATED apis.
'v8_deprecation_warnings%': 0,
# Enable compiler warnings when using V8_DEPRECATE_SOON apis.
'v8_imminent_deprecation_warnings%': 0,
# Set to 1 to enable DCHECKs in release builds.
'dcheck_always_on%': 0,
},
@@ -88,10 +91,13 @@
['v8_deprecation_warnings==1', {
'defines': ['V8_DEPRECATION_WARNINGS',],
}],
['v8_imminent_deprecation_warnings==1', {
'defines': ['V8_IMMINENT_DEPRECATION_WARNINGS',],
}],
['v8_enable_i18n_support==1', {
'defines': ['V8_I18N_SUPPORT',],
}],
['v8_use_external_startup_data==1', {
['v8_use_snapshot=="true" and v8_use_external_startup_data==1', {
'defines': ['V8_USE_EXTERNAL_STARTUP_DATA',],
}],
['dcheck_always_on!=0', {
@@ -102,7 +108,7 @@
'DebugBaseCommon': {
'abstract': 1,
'variables': {
'v8_enable_handle_zapping%': 1,
'v8_enable_handle_zapping%': 0,
},
'conditions': [
['v8_enable_handle_zapping==1', {
@@ -112,7 +118,7 @@
}, # Debug
'Release': {
'variables': {
'v8_enable_handle_zapping%': 0,
'v8_enable_handle_zapping%': 1,
},
'conditions': [
['v8_enable_handle_zapping==1', {
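Aside: the v8_imminent_deprecation_warnings option wired up above defines V8_IMMINENT_DEPRECATION_WARNINGS, which include/v8config.h (also touched by this commit) uses to turn V8_DEPRECATE_SOON-annotated APIs into compile-time deprecation warnings. A simplified C++ sketch of that gating; the real macros additionally probe compiler support for the deprecated attribute, and the Example struct is purely illustrative:

    // Simplified sketch of the v8config.h gating (not the verbatim macros).
    #if defined(V8_IMMINENT_DEPRECATION_WARNINGS)
    # define V8_DEPRECATE_SOON(message, declarator) \
        declarator __attribute__((deprecated(message)))
    #else
    # define V8_DEPRECATE_SOON(message, declarator) declarator
    #endif

    struct Example {
      // With the flag defined, every caller of OldApi() gets a
      // -Wdeprecated-declarations warning carrying this message.
      V8_DEPRECATE_SOON("use NewApi() instead", static void OldApi());
      static void NewApi();
    };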

2  deps/v8/build/get_landmines.py

@@ -21,6 +21,8 @@ def main():
print 'Revert activation of MSVS 2013.'
print 'Activating MSVS 2013 again.'
print 'Clobber after ICU roll.'
print 'Moar clobbering...'
print 'Remove build/android.gypi'
return 0

11  deps/v8/build/gyp_v8

@@ -130,7 +130,7 @@ if __name__ == '__main__':
# Generate for the architectures supported on the given platform.
gyp_args = list(args)
gyp_generators = os.environ.get('GYP_GENERATORS')
gyp_generators = os.environ.get('GYP_GENERATORS', '')
if platform.system() == 'Linux' and gyp_generators != 'ninja':
# Work around for crbug.com/331475.
for f in glob.glob(os.path.join(v8_root, 'out', 'Makefile.*')):
@@ -140,4 +140,13 @@ if __name__ == '__main__':
# -Goutput_dir defines where the build output goes, relative to the
# Makefile. Set it to . so that the build output doesn't end up in out/out.
gyp_args.append('-Goutput_dir=.')
gyp_defines = os.environ.get('GYP_DEFINES', '')
# Automatically turn on crosscompile support for platforms that need it.
if all(('ninja' in gyp_generators,
'OS=android' in gyp_defines,
'GYP_CROSSCOMPILE' not in os.environ)):
os.environ['GYP_CROSSCOMPILE'] = '1'
run_gyp(gyp_args)

520  deps/v8/build/standalone.gypi

@@ -33,16 +33,17 @@
'includes': ['toolchain.gypi'],
'variables': {
'component%': 'static_library',
'clang_dir%': 'third_party/llvm-build/Release+Asserts',
'clang_xcode%': 0,
# Track where uninitialized memory originates from. From fastest to
# slowest: 0 - no tracking, 1 - track only the initial allocation site, 2
# - track the chain of stores leading from allocation site to use site.
'msan_track_origins%': 1,
'msan_track_origins%': 2,
'visibility%': 'hidden',
'v8_enable_backtrace%': 0,
'v8_enable_i18n_support%': 1,
'v8_deprecation_warnings': 1,
# TODO(jochen): Turn this on.
'v8_imminent_deprecation_warnings%': 0,
'msvs_multi_core_compile%': '1',
'mac_deployment_target%': '10.5',
'release_extra_cflags%': '',
@@ -66,7 +67,9 @@
},
'host_arch%': '<(host_arch)',
'target_arch%': '<(host_arch)',
'base_dir%': '<!(cd <(DEPTH) && python -c "import os; print os.getcwd()")',
},
'base_dir%': '<(base_dir)',
'host_arch%': '<(host_arch)',
'target_arch%': '<(target_arch)',
'v8_target_arch%': '<(target_arch)',
@@ -74,6 +77,16 @@
'lsan%': 0,
'msan%': 0,
'tsan%': 0,
# Enable coverage gathering instrumentation in sanitizer tools. This flag
# also controls coverage granularity (1 for function-level, 2 for
# block-level, 3 for edge-level).
'sanitizer_coverage%': 0,
# Use libc++ (buildtools/third_party/libc++ and
# buildtools/third_party/libc++abi) instead of stdlibc++ as standard
# library. This is intended to be used for instrumented builds.
'use_custom_libcxx%': 0,
'clang_dir%': '<(base_dir)/third_party/llvm-build/Release+Asserts',
# goma settings.
# 1 to use goma.
@@ -87,9 +100,17 @@
}, {
'gomadir': '<!(/bin/echo -n ${HOME}/goma)',
}],
['host_arch!="ppc" and host_arch!="ppc64" and host_arch!="ppc64le"', {
'host_clang%': '1',
}, {
'host_clang%': '0',
}],
],
},
'base_dir%': '<(base_dir)',
'clang_dir%': '<(clang_dir)',
'host_arch%': '<(host_arch)',
'host_clang%': '<(host_clang)',
'target_arch%': '<(target_arch)',
'v8_target_arch%': '<(v8_target_arch)',
'werror%': '-Werror',
@@ -99,6 +120,11 @@
'lsan%': '<(lsan)',
'msan%': '<(msan)',
'tsan%': '<(tsan)',
'sanitizer_coverage%': '<(sanitizer_coverage)',
'use_custom_libcxx%': '<(use_custom_libcxx)',
# Add a simple extra solely for the purpose of the cctests
'v8_extra_library_files': ['../test/cctest/test-extra.js'],
# .gyp files or targets should set v8_code to 1 if they build V8 specific
# code, as opposed to external code. This variable is used to control such
@@ -160,20 +186,132 @@
'v8_enable_gdbjit%': 0,
}],
['(OS=="linux" or OS=="mac") and (target_arch=="ia32" or target_arch=="x64") and \
(v8_target_arch!="x87")', {
(v8_target_arch!="x87" and v8_target_arch!="x32")', {
'clang%': 1,
}, {
'clang%': 0,
}],
['host_arch!="ppc" and host_arch!="ppc64" and host_arch!="ppc64le"', {
'host_clang%': '1',
}, {
'host_clang%': '0',
}],
['asan==1 or lsan==1 or msan==1 or tsan==1', {
'clang%': 1,
'use_allocator%': 'none',
}],
['asan==1 and OS=="linux"', {
'use_custom_libcxx%': 1,
}],
['tsan==1', {
'use_custom_libcxx%': 1,
}],
['msan==1', {
# Use a just-built, MSan-instrumented libc++ instead of the system-wide
# libstdc++. This is required to avoid false positive reports whenever
# the C++ standard library is used.
'use_custom_libcxx%': 1,
}],
['OS=="linux"', {
# Gradually roll out v8_use_external_startup_data.
# Should eventually be default enabled on all platforms.
'v8_use_external_startup_data%': 1,
}],
['OS=="android"', {
# Location of Android NDK.
'variables': {
'variables': {
# The Android toolchain needs to use the absolute path to the NDK
# because it is used at different levels in the GYP files.
'android_ndk_root%': '<(base_dir)/third_party/android_tools/ndk/',
'android_host_arch%': "<!(uname -m | sed -e 's/i[3456]86/x86/')",
'host_os%': "<!(uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/')",
},
# Copy conditionally-set variables out one scope.
'android_ndk_root%': '<(android_ndk_root)',
'host_os%': '<(host_os)',
'conditions': [
['target_arch == "ia32"', {
'android_toolchain%': '<(android_ndk_root)/toolchains/x86-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
'android_target_arch%': 'x86',
'android_target_platform%': '16',
}],
['target_arch == "x64"', {
'android_toolchain%': '<(android_ndk_root)/toolchains/x86_64-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
'android_target_arch%': 'x86_64',
'android_target_platform%': '21',
}],
['target_arch=="arm"', {
'android_toolchain%': '<(android_ndk_root)/toolchains/arm-linux-androideabi-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
'android_target_arch%': 'arm',
'android_target_platform%': '16',
'arm_version%': 7,
}],
['target_arch == "arm64"', {
'android_toolchain%': '<(android_ndk_root)/toolchains/aarch64-linux-android-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
'android_target_arch%': 'arm64',
'android_target_platform%': '21',
'arm_version%': 'default',
}],
['target_arch == "mipsel"', {
'android_toolchain%': '<(android_ndk_root)/toolchains/mipsel-linux-android-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
'android_target_arch%': 'mips',
'android_target_platform%': '16',
}],
['target_arch == "mips64el"', {
'android_toolchain%': '<(android_ndk_root)/toolchains/mips64el-linux-android-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
'android_target_arch%': 'mips64',
'android_target_platform%': '21',
}],
],
},
# Copy conditionally-set variables out one scope.
'android_target_arch%': '<(android_target_arch)',
'android_target_platform%': '<(android_target_platform)',
'android_toolchain%': '<(android_toolchain)',
'arm_version%': '<(arm_version)',
'host_os%': '<(host_os)',
'conditions': [
['android_ndk_root==""', {
'variables': {
'android_sysroot': '<(android_toolchain)/sysroot/',
'android_stlport': '<(android_toolchain)/sources/cxx-stl/stlport/',
},
'android_include': '<(android_sysroot)/usr/include',
'conditions': [
['target_arch=="x64"', {
'android_lib': '<(android_sysroot)/usr/lib64',
}, {
'android_lib': '<(android_sysroot)/usr/lib',
}],
],
'android_stlport_include': '<(android_stlport)/stlport',
'android_stlport_libs': '<(android_stlport)/libs',
}, {
'variables': {
'android_sysroot': '<(android_ndk_root)/platforms/android-<(android_target_platform)/arch-<(android_target_arch)',
'android_stlport': '<(android_ndk_root)/sources/cxx-stl/stlport/',
},
'android_include': '<(android_sysroot)/usr/include',
'conditions': [
['target_arch=="x64"', {
'android_lib': '<(android_sysroot)/usr/lib64',
}, {
'android_lib': '<(android_sysroot)/usr/lib',
}],
],
'android_stlport_include': '<(android_stlport)/stlport',
'android_stlport_libs': '<(android_stlport)/libs',
}],
],
'android_stlport_library': 'stlport_static',
}], # OS=="android"
['host_clang==1', {
'host_cc': '<(clang_dir)/bin/clang',
'host_cxx': '<(clang_dir)/bin/clang++',
}, {
'host_cc': '<!(which gcc)',
'host_cxx': '<!(which g++)',
}],
],
# Default ARM variable settings.
'arm_version%': 'default',
@@ -194,6 +332,11 @@
'target_defaults': {
'variables': {
'v8_code%': '<(v8_code)',
'conditions':[
['OS=="android"', {
'host_os%': '<(host_os)',
}],
],
},
'default_configuration': 'Debug',
'configurations': {
@@ -283,58 +426,75 @@
],
},
'conditions': [
['asan==1 and OS!="mac"', {
['os_posix==1 and OS!="mac"', {
'target_defaults': {
'cflags_cc+': [
'conditions': [
# Common options for AddressSanitizer, LeakSanitizer,
# ThreadSanitizer and MemorySanitizer.
['asan==1 or lsan==1 or tsan==1 or msan==1', {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-fno-omit-frame-pointer',
'-gline-tables-only',
'-fsanitize=address',
'-w', # http://crbug.com/162783
],
'cflags!': [
'-fomit-frame-pointer',
],
}],
],
}],
['asan==1', {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-fsanitize=address',
],
'ldflags': [
'-fsanitize=address',
],
},
'defines': [
'ADDRESS_SANITIZER',
],
}],
['tsan==1 and OS!="mac"', {
'target_defaults': {
'cflags+': [
'-fno-omit-frame-pointer',
'-gline-tables-only',
'-fsanitize=thread',
'-fPIC',
'-Wno-c++11-extensions',
],
'cflags!': [
'-fomit-frame-pointer',
}],
['lsan==1', {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-fsanitize=leak',
],
'ldflags': [
'-fsanitize=leak',
],
'defines': [
'LEAK_SANITIZER',
],
}],
],
}],
['tsan==1', {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-fsanitize=thread',
],
'ldflags': [
'-fsanitize=thread',
'-pie',
],
'defines': [
'THREAD_SANITIZER',
],
},
}],
['msan==1 and OS!="mac"', {
'target_defaults': {
'cflags_cc+': [
'-fno-omit-frame-pointer',
'-gline-tables-only',
],
}],
['msan==1', {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-fsanitize=memory',
'-fsanitize-memory-track-origins=<(msan_track_origins)',
'-fPIC',
],
'cflags+': [
'-fPIC',
],
'cflags!': [
'-fno-exceptions',
'-fomit-frame-pointer',
],
'ldflags': [
'-fsanitize=memory',
@@ -342,17 +502,35 @@
'defines': [
'MEMORY_SANITIZER',
],
}],
],
}],
['use_custom_libcxx==1', {
'dependencies': [
# Use libc++ (third_party/libc++ and third_party/libc++abi) instead of
# stdlibc++ as standard library. This is intended to use for instrumented
# builds.
'<(DEPTH)/buildtools/third_party/libc++/libc++.gyp:libcxx_proxy',
],
}],
['sanitizer_coverage!=0', {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-fsanitize-coverage=<(sanitizer_coverage)',
],
'defines': [
'SANITIZER_COVERAGE',
],
}],
],
}],
],
},
}],
['asan==1 and OS=="mac"', {
['OS=="mac"', {
'target_defaults': {
'conditions': [
['asan==1', {
'xcode_settings': {
# FIXME(machenbach): This is outdated compared to common.gypi.
'OTHER_CFLAGS+': [
'-fno-omit-frame-pointer',
'-gline-tables-only',
@@ -362,17 +540,34 @@
'OTHER_CFLAGS!': [
'-fomit-frame-pointer',
],
'defines': [
'ADDRESS_SANITIZER',
],
},
'dependencies': [
'<(DEPTH)/build/mac/asan.gyp:asan_dynamic_runtime',
],
'target_conditions': [
['_type!="static_library"', {
'xcode_settings': {'OTHER_LDFLAGS': ['-fsanitize=address']},
}],
],
'dependencies': [
'<(DEPTH)/build/mac/asan.gyp:asan_dynamic_runtime',
}],
['sanitizer_coverage!=0', {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-fsanitize-coverage=<(sanitizer_coverage)',
],
'defines': [
'SANITIZER_COVERAGE',
],
}],
],
},
}],
],
}, # target_defaults
}], # OS=="mac"
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="aix"', {
'target_defaults': {
@@ -382,17 +577,20 @@
'-Wno-unused-parameter',
'-Wno-long-long',
'-pthread',
'-fno-exceptions',
'-pedantic',
# Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers',
],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ],
'cflags_cc': [
'-Wnon-virtual-dtor',
'-fno-exceptions',
'-fno-rtti',
'-std=gnu++0x',
],
'ldflags': [ '-pthread', ],
'conditions': [
# TODO(arm64): It'd be nice to enable this for arm64 as well,
# but the Assembler requires some serious fixing first.
[ 'clang==1 and v8_target_arch=="x64"', {
[ 'clang==1 and (v8_target_arch=="x64" or v8_target_arch=="arm64" \
or v8_target_arch=="mips64el")', {
'cflags': [ '-Wshorten-64-to-32' ],
}],
[ 'host_arch=="ppc64" and OS!="aix"', {
@@ -415,11 +613,15 @@
'-Wall',
'<(werror)',
'-Wno-unused-parameter',
'-fno-exceptions',
# Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers',
],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ],
'cflags_cc': [
'-Wnon-virtual-dtor',
'-fno-exceptions',
'-fno-rtti',
'-std=gnu++0x',
],
'conditions': [
[ 'visibility=="hidden"', {
'cflags': [ '-fvisibility=hidden' ],
@@ -581,10 +783,214 @@
], # target_conditions
}, # target_defaults
}], # OS=="mac"
['OS=="android"', {
'target_defaults': {
'defines': [
'ANDROID',
'V8_ANDROID_LOG_STDOUT',
],
'configurations': {
'Release': {
'cflags': [
'-fomit-frame-pointer',
],
}, # Release
}, # configurations
'cflags': [ '-Wno-abi', '-Wall', '-W', '-Wno-unused-parameter'],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-fno-exceptions',
# Note: Using -std=c++0x will define __STRICT_ANSI__, which
# in turn will leave out some template stuff for 'long
# long'. What we want is -std=c++11, but this is not
# supported by GCC 4.6 or Xcode 4.2
'-std=gnu++0x' ],
'target_conditions': [
['_toolset=="target"', {
'cflags!': [
'-pthread', # Not supported by Android toolchain.
],
'cflags': [
'-ffunction-sections',
'-funwind-tables',
'-fstack-protector',
'-fno-short-enums',
'-finline-limit=64',
'-Wa,--noexecstack',
# Note: This include is in cflags to ensure that it comes after
# all of the includes.
'-I<(android_include)',
'-I<(android_stlport_include)',
],
'cflags_cc': [
'-Wno-error=non-virtual-dtor', # TODO(michaelbai): Fix warnings.
],
'defines': [
'ANDROID',
#'__GNU_SOURCE=1', # Necessary for clone()
'USE_STLPORT=1',
'_STLP_USE_PTR_SPECIALIZATIONS=1',
'HAVE_OFF64_T',
'HAVE_SYS_UIO_H',
'ANDROID_BINSIZE_HACK', # Enable temporary hacks to reduce binsize.
],
'ldflags!': [
'-pthread', # Not supported by Android toolchain.
],
'ldflags': [
'-nostdlib',
'-Wl,--no-undefined',
'-Wl,-rpath-link=<(android_lib)',
'-L<(android_lib)',
],
'libraries!': [
'-lrt', # librt is built into Bionic.
# Not supported by Android toolchain.
# Where do these come from? Can't find references in
# any Chromium gyp or gypi file. Maybe they come from
# gyp itself?
'-lpthread', '-lnss3', '-lnssutil3', '-lsmime3', '-lplds4', '-lplc4', '-lnspr4',
],
'libraries': [
'-l<(android_stlport_library)',
# Manually link the libgcc.a that the cross compiler uses.
'<!(<(android_toolchain)/*-gcc -print-libgcc-file-name)',
'-lc',
'-ldl',
'-lstdc++',
'-lm',
],
'conditions': [
['target_arch == "arm"', {
'ldflags': [
# Enable identical code folding to reduce size.
'-Wl,--icf=safe',
],
}],
['target_arch=="arm" and arm_version==7', {
'cflags': [
'-march=armv7-a',
'-mtune=cortex-a8',
'-mfpu=vfp3',
],
'ldflags': [
'-L<(android_stlport_libs)/armeabi-v7a',
],
}],
['target_arch=="arm" and arm_version < 7', {
'ldflags': [
'-L<(android_stlport_libs)/armeabi',
],
}],
['target_arch=="x64"', {
'ldflags': [
'-L<(android_stlport_libs)/x86_64',
],
}],
['target_arch=="arm64"', {
'ldflags': [
'-L<(android_stlport_libs)/arm64-v8a',
],
}],
['target_arch=="ia32" or target_arch=="x87"', {
# The x86 toolchain currently has problems with stack-protector.
'cflags!': [
'-fstack-protector',
],
'cflags': [
'-fno-stack-protector',
],
'ldflags': [
'-L<(android_stlport_libs)/x86',
],
}],
['target_arch=="mipsel"', {
# The mips toolchain currently has problems with stack-protector.
'cflags!': [
'-fstack-protector',
'-U__linux__'
],
'cflags': [
'-fno-stack-protector',
],
'ldflags': [
'-L<(android_stlport_libs)/mips',
],
}],
['(target_arch=="arm" or target_arch=="arm64" or target_arch=="x64" or target_arch=="ia32") and component!="shared_library"', {
'cflags': [
'-fPIE',
],
'ldflags': [
'-pie',
],
}],
],
'target_conditions': [
['_type=="executable"', {
'conditions': [
['target_arch=="arm64" or target_arch=="x64"', {
'ldflags': [
'-Wl,-dynamic-linker,/system/bin/linker64',
],
}, {
'ldflags': [
'-Wl,-dynamic-linker,/system/bin/linker',
],
}]
],
'ldflags': [
'-Bdynamic',
'-Wl,-z,nocopyreloc',
# crtbegin_dynamic.o should be the last item in ldflags.
'<(android_lib)/crtbegin_dynamic.o',
],
'libraries': [
# crtend_android.o needs to be the last item in libraries.
# Do not add any libraries after this!
'<(android_lib)/crtend_android.o',
],
}],
['_type=="shared_library"', {
'ldflags': [
'-Wl,-shared,-Bsymbolic',
'<(android_lib)/crtbegin_so.o',
],
}],
['_type=="static_library"', {
'ldflags': [
# Don't export symbols from statically linked libraries.
'-Wl,--exclude-libs=ALL',
],
}],
],
}], # _toolset=="target"
# Settings for building host targets using the system toolchain.
['_toolset=="host"', {
'cflags': [ '-pthread' ],
'ldflags': [ '-pthread' ],
'ldflags!': [
'-Wl,-z,noexecstack',
'-Wl,--gc-sections',
'-Wl,-O1',
'-Wl,--as-needed',
],
}],
], # target_conditions
}, # target_defaults
}], # OS=="android"
['OS=="android" and clang==0', {
# Hardcode the compiler names in the Makefile so that
# it won't depend on the environment at make time.
'make_global_settings': [
['CC', '<!(/bin/echo -n <(android_toolchain)/*-gcc)'],
['CXX', '<!(/bin/echo -n <(android_toolchain)/*-g++)'],
['CC.host', '<(host_cc)'],
['CXX.host', '<(host_cxx)'],
],
}],
['clang!=1 and host_clang==1 and target_arch!="ia32" and target_arch!="x64"', {
'make_global_settings': [
['CC.host', '../<(clang_dir)/bin/clang'],
['CXX.host', '../<(clang_dir)/bin/clang++'],
['CC.host', '<(clang_dir)/bin/clang'],
['CXX.host', '<(clang_dir)/bin/clang++'],
],
}],
['clang==0 and host_clang==1 and target_arch!="ia32" and target_arch!="x64"', {
@@ -609,8 +1015,8 @@
['clang==1 and ((OS!="mac" and OS!="ios") or clang_xcode==0) '
'and OS!="win" and "<(GENERATOR)"=="make"', {
'make_global_settings': [
['CC', '../<(clang_dir)/bin/clang'],
['CXX', '../<(clang_dir)/bin/clang++'],
['CC', '<(clang_dir)/bin/clang'],
['CXX', '<(clang_dir)/bin/clang++'],
['CC.host', '$(CC)'],
['CXX.host', '$(CXX)'],
],
@@ -627,7 +1033,7 @@
['clang==1 and OS=="win"', {
'make_global_settings': [
# On Windows, gyp's ninja generator only looks at CC.
['CC', '../<(clang_dir)/bin/clang-cl'],
['CC', '<(clang_dir)/bin/clang-cl'],
],
}],
# TODO(yyanagisawa): supports GENERATOR==make

44
deps/v8/build/toolchain.gypi

@@ -338,6 +338,26 @@
],
'cflags': ['-march=i586'],
}], # v8_target_arch=="x87"
['(v8_target_arch=="mips" or v8_target_arch=="mipsel" \
or v8_target_arch=="mips64el") and v8_target_arch==target_arch', {
'target_conditions': [
['_toolset=="target"', {
# Target built with a Mips CXX compiler.
'variables': {
'ldso_path%': '<!(/bin/echo -n $LDSO_PATH)',
'ld_r_path%': '<!(/bin/echo -n $LD_R_PATH)',
},
'conditions': [
['ldso_path!=""', {
'ldflags': ['-Wl,--dynamic-linker=<(ldso_path)'],
}],
['ld_r_path!=""', {
'ldflags': ['-Wl,--rpath=<(ld_r_path)'],
}],
],
}],
],
}],
['v8_target_arch=="mips"', {
'defines': [
'V8_TARGET_ARCH_MIPS',
@@ -384,11 +404,7 @@
],
'cflags!': ['-mfp32', '-mfpxx'],
'cflags': ['-mips32r6', '-Wa,-mips32r6'],
'ldflags': [
'-mips32r6',
'-Wl,--dynamic-linker=$(LDSO_PATH)',
'-Wl,--rpath=$(LD_R_PATH)',
],
'ldflags': ['-mips32r6'],
}],
['mips_arch_variant=="r2"', {
'conditions': [
@@ -571,11 +587,7 @@
],
'cflags!': ['-mfp32', '-mfpxx'],
'cflags': ['-mips32r6', '-Wa,-mips32r6'],
'ldflags': [
'-mips32r6',
'-Wl,--dynamic-linker=$(LDSO_PATH)',
'-Wl,--rpath=$(LD_R_PATH)',
],
'ldflags': ['-mips32r6'],
}],
['mips_arch_variant=="r2"', {
'conditions': [
@@ -770,20 +782,12 @@
['mips_arch_variant=="r6"', {
'defines': ['_MIPS_ARCH_MIPS64R6',],
'cflags': ['-mips64r6', '-mabi=64', '-Wa,-mips64r6'],
'ldflags': [
'-mips64r6', '-mabi=64',
'-Wl,--dynamic-linker=$(LDSO_PATH)',
'-Wl,--rpath=$(LD_R_PATH)',
],
'ldflags': ['-mips64r6', '-mabi=64'],
}],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS64R2',],
'cflags': ['-mips64r2', '-mabi=64', '-Wa,-mips64r2'],
'ldflags': [
'-mips64r2', '-mabi=64',
'-Wl,--dynamic-linker=$(LDSO_PATH)',
'-Wl,--rpath=$(LD_R_PATH)',
],
'ldflags': ['-mips64r2', '-mabi=64'],
}],
],
}, {

41
deps/v8/include/v8-debug.h

@@ -60,20 +60,20 @@ class V8_EXPORT Debug {
* callbacks as their content becomes invalid. These objects are from the
* debugger event that started the debug message loop.
*/
virtual Handle<Object> GetExecutionState() const = 0;
virtual Handle<Object> GetEventData() const = 0;
virtual Local<Object> GetExecutionState() const = 0;
virtual Local<Object> GetEventData() const = 0;
/**
* Get the debugger protocol JSON.
*/
virtual Handle<String> GetJSON() const = 0;
virtual Local<String> GetJSON() const = 0;
/**
* Get the context active when the debug event happened. Note this is not
* the current active context as the JavaScript part of the debugger is
* running in its own context which is entered at this point.
*/
virtual Handle<Context> GetEventContext() const = 0;
virtual Local<Context> GetEventContext() const = 0;
/**
* Client data passed with the corresponding request if any. This is the
@@ -104,21 +104,21 @@ class V8_EXPORT Debug {
* Access to execution state and event data of the debug event. Don't store
* these across callbacks as their content becomes invalid.
*/
virtual Handle<Object> GetExecutionState() const = 0;
virtual Handle<Object> GetEventData() const = 0;
virtual Local<Object> GetExecutionState() const = 0;
virtual Local<Object> GetEventData() const = 0;
/**
* Get the context active when the debug event happened. Note this is not
* the current active context as the JavaScript part of the debugger is
* running in its own context which is entered at this point.
*/
virtual Handle<Context> GetEventContext() const = 0;
virtual Local<Context> GetEventContext() const = 0;
/**
* Client data passed with the corresponding callback when it was
* registered.
*/
virtual Handle<Value> GetCallbackData() const = 0;
virtual Local<Value> GetCallbackData() const = 0;
/**
* Client data passed to DebugBreakForCommand function. The
@@ -156,7 +156,7 @@ class V8_EXPORT Debug {
typedef void (*DebugMessageDispatchHandler)();
static bool SetDebugEventListener(EventCallback that,
Handle<Value> data = Handle<Value>());
Local<Value> data = Local<Value>());
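A minimal usage sketch for this Local-based registration; OnDebugEvent and InstallListener are hypothetical embedder functions, and the event check assumes the DebugEvent enum declared earlier in this header:
// Hypothetical embedder callback. Per the comments above, handles obtained
// from |details| must not be stored past this call.
void OnDebugEvent(const v8::Debug::EventDetails& details) {
  if (details.GetEvent() == v8::Break) {
    v8::Local<v8::Object> state = details.GetExecutionState();
    // Inspect |state| here, inside the callback only.
  }
}
void InstallListener() {
  v8::Debug::SetDebugEventListener(OnDebugEvent);
}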
// Schedule a debugger break to happen when JavaScript code is run
// in the given isolate.
@@ -196,20 +196,20 @@ class V8_EXPORT Debug {
*/
static V8_DEPRECATE_SOON(
"Use maybe version",
Local<Value> Call(v8::Handle<v8::Function> fun,
Handle<Value> data = Handle<Value>()));
Local<Value> Call(v8::Local<v8::Function> fun,
Local<Value> data = Local<Value>()));
// TODO(dcarney): data arg should be a MaybeLocal
static MaybeLocal<Value> Call(Local<Context> context,
v8::Handle<v8::Function> fun,
Handle<Value> data = Handle<Value>());
v8::Local<v8::Function> fun,
Local<Value> data = Local<Value>());
/**
* Returns a mirror object for the given object.
*/
static V8_DEPRECATE_SOON("Use maybe version",
Local<Value> GetMirror(v8::Handle<v8::Value> obj));
Local<Value> GetMirror(v8::Local<v8::Value> obj));
static MaybeLocal<Value> GetMirror(Local<Context> context,
v8::Handle<v8::Value> obj);
v8::Local<v8::Value> obj);
/**
* Makes V8 process all pending debug messages.
@@ -248,7 +248,8 @@ class V8_EXPORT Debug {
* Debugger is running in its own context which is entered while debugger
* messages are being dispatched. This is an explicit getter for this
* debugger context. Note that the content of the debugger context is subject
* to change.
* to change. The Context exists only when the debugger is active, i.e. at
* least one DebugEventListener or MessageHandler is set.
*/
static Local<Context> GetDebugContext();
@@ -259,6 +260,14 @@ class V8_EXPORT Debug {
* unexpectedly used. LiveEdit is enabled by default.
*/
static void SetLiveEditEnabled(Isolate* isolate, bool enable);
/**
* Returns an array of internal properties specific to the value type. The
* result has the following format: [<name>, <value>, ..., <name>, <value>].
* The result array will be allocated in the current context.
*/
static MaybeLocal<Array> GetInternalProperties(Isolate* isolate,
Local<Value> value);
};
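As a hedged sketch of consuming this new entry point, a hypothetical helper that prints the name/value pairs; it assumes an entered context and <stdio.h>:
// The result array is laid out as [name0, value0, name1, value1, ...].
void DumpInternalProperties(v8::Isolate* isolate, v8::Local<v8::Value> value) {
  v8::HandleScope scope(isolate);
  v8::Local<v8::Array> props;
  if (!v8::Debug::GetInternalProperties(isolate, value).ToLocal(&props))
    return;
  v8::Local<v8::Context> context = isolate->GetCurrentContext();
  for (uint32_t i = 0; i + 1 < props->Length(); i += 2) {
    v8::String::Utf8Value name(props->Get(context, i).ToLocalChecked());
    v8::String::Utf8Value val(props->Get(context, i + 1).ToLocalChecked());
    printf("%s: %s\n", *name, *val);
  }
}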

11
deps/v8/include/v8-platform.h

@@ -56,6 +56,17 @@ class Platform {
*/
virtual void CallOnForegroundThread(Isolate* isolate, Task* task) = 0;
/**
* Schedules a task to be invoked on a foreground thread wrt a specific
* |isolate| after the given number of seconds |delay_in_seconds|.
* Tasks posted for the same isolate should be executed in order of
* scheduling. The definition of "foreground" is opaque to V8.
*/
virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
double delay_in_seconds) {
// TODO(ulan): Make this function abstract after V8 roll in Chromium.
}
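A short sketch of posting a delayed task; DelayedTask and PostDelayed are hypothetical, and ownership of the task is assumed to pass to the platform as with the non-delayed variant above:
class DelayedTask : public v8::Task {
 public:
  virtual void Run() {
    // Runs on the isolate's foreground thread roughly two seconds
    // after being posted.
  }
};
void PostDelayed(v8::Platform* platform, v8::Isolate* isolate) {
  platform->CallDelayedOnForegroundThread(isolate, new DelayedTask(), 2.0);
}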
/**
* Monotonically increasing time in seconds from an arbitrary fixed point in
* the past. This function is expected to return at least

28
deps/v8/include/v8-profiler.h

@@ -60,13 +60,13 @@ class V8_EXPORT CpuProfileNode {
};
/** Returns the function name (empty string for anonymous functions). */
Handle<String> GetFunctionName() const;
Local<String> GetFunctionName() const;
/** Returns id of the script where function is located. */
int GetScriptId() const;
/** Returns resource name for script from where the function originates. */
Handle<String> GetScriptResourceName() const;
Local<String> GetScriptResourceName() const;
/**
* Returns the number, 1-based, of the line where the function originates.
@@ -129,7 +129,7 @@ class V8_EXPORT CpuProfileNode {
class V8_EXPORT CpuProfile {
public:
/** Returns CPU profile title. */
Handle<String> GetTitle() const;
Local<String> GetTitle() const;
/** Returns the root node of the top down call tree. */
const CpuProfileNode* GetTopDownRoot() const;
@@ -198,13 +198,13 @@ class V8_EXPORT CpuProfiler {
* |record_samples| parameter controls whether individual samples should
* be recorded in addition to the aggregated tree.
*/
void StartProfiling(Handle<String> title, bool record_samples = false);
void StartProfiling(Local<String> title, bool record_samples = false);
/**
* Stops collecting CPU profile with a given title and returns it.
* If the title given is empty, finishes the last profile started.
*/
CpuProfile* StopProfiling(Handle<String> title);
CpuProfile* StopProfiling(Local<String> title);
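A minimal profiling session under these Local-based signatures, assuming an entered isolate; ProfileOnce is a hypothetical helper:
void ProfileOnce(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);
  v8::Local<v8::String> title =
      v8::String::NewFromUtf8(isolate, "session",
                              v8::NewStringType::kNormal).ToLocalChecked();
  v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
  profiler->StartProfiling(title, true);  // also record individual samples
  // ... run the JavaScript workload to be measured ...
  v8::CpuProfile* profile = profiler->StopProfiling(title);
  if (profile != NULL) profile->Delete();
}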
/**
* Tells the profiler whether the embedder is idle.
@@ -246,7 +246,7 @@ class V8_EXPORT HeapGraphEdge {
* Returns edge name. This can be a variable name, an element index, or
* a property name.
*/
Handle<Value> GetName() const;
Local<Value> GetName() const;
/** Returns origin node. */
const HeapGraphNode* GetFromNode() const;
@@ -275,7 +275,8 @@ class V8_EXPORT HeapGraphNode {
// snapshot items together.
kConsString = 10, // Concatenated string. A pair of pointers to strings.
kSlicedString = 11, // Sliced string. A fragment of another string.
kSymbol = 12 // A Symbol (ES6).
kSymbol = 12, // A Symbol (ES6).
kSimdValue = 13 // A SIMD value stored in the heap (Proposed ES7).
};
/** Returns node type (see HeapGraphNode::Type). */
@@ -286,7 +287,7 @@
* of the constructor (for objects), the name of the function (for
* closures), string value, or an empty string (for compiled code).
*/
Handle<String> GetName() const;
Local<String> GetName() const;
/**
* Returns node id. For the same heap object, the id remains the same
@@ -429,8 +430,8 @@ class V8_EXPORT HeapProfiler {
* while the callback is running: only getters on the handle and
* GetPointerFromInternalField on the objects are allowed.
*/
typedef RetainedObjectInfo* (*WrapperInfoCallback)
(uint16_t class_id, Handle<Value> wrapper);
typedef RetainedObjectInfo* (*WrapperInfoCallback)(uint16_t class_id,
Local<Value> wrapper);
/** Returns the number of snapshots taken. */
int GetSnapshotCount();
@@ -442,13 +443,13 @@ class V8_EXPORT HeapProfiler {
* Returns SnapshotObjectId for a heap object referenced by |value| if
* it has been seen by the heap profiler, kUnknownObjectId otherwise.
*/
SnapshotObjectId GetObjectId(Handle<Value> value);
SnapshotObjectId GetObjectId(Local<Value> value);
/**
* Returns the heap object with the given SnapshotObjectId if the object
* is alive; otherwise an empty handle is returned.
*/
Handle<Value> FindObjectById(SnapshotObjectId id);
Local<Value> FindObjectById(SnapshotObjectId id);
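A hedged round-trip sketch of these two calls; TrackObject is hypothetical and assumes the profiler has already seen the object (for example via a snapshot):
void TrackObject(v8::Isolate* isolate, v8::Local<v8::Value> value) {
  v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
  v8::SnapshotObjectId id = profiler->GetObjectId(value);
  if (id == v8::HeapProfiler::kUnknownObjectId) return;
  v8::Local<v8::Value> same_object = profiler->FindObjectById(id);
  if (same_object.IsEmpty()) {
    // The object has been collected since it was last seen.
  }
}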
/**
* Clears internal map from SnapshotObjectId to heap object. The new objects
@@ -473,7 +474,8 @@ class V8_EXPORT HeapProfiler {
* Returns name to be used in the heap snapshot for given node. Returned
* string must stay alive until snapshot collection is completed.
*/
virtual const char* GetName(Handle<Object> object) = 0;
virtual const char* GetName(Local<Object> object) = 0;
protected:
virtual ~ObjectNameResolver() {}
};
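An illustrative resolver satisfying the lifetime rule above (the string literal has static storage); FixedNameResolver is hypothetical, and the commented call assumes the 4.5-era TakeHeapSnapshot overload that accepts a resolver:
class FixedNameResolver : public v8::HeapProfiler::ObjectNameResolver {
 public:
  virtual const char* GetName(v8::Local<v8::Object> object) {
    return "embedder-object";  // static storage, outlives the snapshot
  }
};
// FixedNameResolver resolver;
// const v8::HeapSnapshot* snapshot =
//     isolate->GetHeapProfiler()->TakeHeapSnapshot(NULL, &resolver);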

6
deps/v8/include/v8-version.h

@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 4
#define V8_MINOR_VERSION 4
#define V8_BUILD_NUMBER 63
#define V8_PATCH_LEVEL 30
#define V8_MINOR_VERSION 5
#define V8_BUILD_NUMBER 103
#define V8_PATCH_LEVEL 24
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
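Since these macros exist precisely so that tools and embedders can test the version, a small hedged sketch of a compile-time gate:
#include "v8-version.h"  // also reachable through include/v8.h
#if V8_MAJOR_VERSION > 4 || (V8_MAJOR_VERSION == 4 && V8_MINOR_VERSION >= 5)
// Safe to rely on 4.5-era APIs, e.g. Debug::GetInternalProperties.
#endif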

1089
deps/v8/include/v8.h

File diff suppressed because it is too large

53
deps/v8/include/v8config.h

@@ -5,6 +5,8 @@
#ifndef V8CONFIG_H_
#define V8CONFIG_H_
// clang-format off
// Platform headers for feature detection below.
#if defined(__ANDROID__)
# include <sys/cdefs.h>
@@ -183,6 +185,7 @@
// V8_HAS_BUILTIN_POPCOUNT - __builtin_popcount() supported
// V8_HAS_BUILTIN_SADD_OVERFLOW - __builtin_sadd_overflow() supported
// V8_HAS_BUILTIN_SSUB_OVERFLOW - __builtin_ssub_overflow() supported
// V8_HAS_BUILTIN_UADD_OVERFLOW - __builtin_uadd_overflow() supported
// V8_HAS_DECLSPEC_ALIGN - __declspec(align(n)) supported
// V8_HAS_DECLSPEC_DEPRECATED - __declspec(deprecated) supported
// V8_HAS_DECLSPEC_NOINLINE - __declspec(noinline) supported
@@ -199,8 +202,6 @@
#if defined(__GNUC__) // Clang in gcc mode.
# define V8_CC_GNU 1
#elif defined(_MSC_VER) // Clang in cl mode.
# define V8_CC_MSVC 1
#endif
// Clang defines __alignof__ as alias for __alignof
@@ -223,6 +224,7 @@
# define V8_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount))
# define V8_HAS_BUILTIN_SADD_OVERFLOW (__has_builtin(__builtin_sadd_overflow))
# define V8_HAS_BUILTIN_SSUB_OVERFLOW (__has_builtin(__builtin_ssub_overflow))
# define V8_HAS_BUILTIN_UADD_OVERFLOW (__has_builtin(__builtin_uadd_overflow))
# define V8_HAS_CXX11_ALIGNAS (__has_feature(cxx_alignas))
# define V8_HAS_CXX11_STATIC_ASSERT (__has_feature(cxx_static_assert))
@@ -230,10 +232,15 @@
#elif defined(__GNUC__)
# define V8_CC_GNU 1
// Intel C++ also masquerades as GCC 3.2.0
# define V8_CC_INTEL (defined(__INTEL_COMPILER))
# define V8_CC_MINGW32 (defined(__MINGW32__))
# define V8_CC_MINGW64 (defined(__MINGW64__))
# if defined(__INTEL_COMPILER) // Intel C++ also masquerades as GCC 3.2.0
# define V8_CC_INTEL 1
# endif
# if defined(__MINGW32__)
# define V8_CC_MINGW32 1
# endif
# if defined(__MINGW64__)
# define V8_CC_MINGW64 1
# endif
# define V8_CC_MINGW (V8_CC_MINGW32 || V8_CC_MINGW64)
# define V8_HAS___ALIGNOF__ (V8_GNUC_PREREQ(4, 3, 0))
@@ -268,11 +275,10 @@
# define V8_HAS_CXX11_ALIGNOF (V8_GNUC_PREREQ(4, 8, 0))
# define V8_HAS_CXX11_STATIC_ASSERT (V8_GNUC_PREREQ(4, 3, 0))
# endif
#endif
#elif defined(_MSC_VER)
#if defined(_MSC_VER)
# define V8_CC_MSVC 1
# define V8_HAS___ALIGNOF 1
# define V8_HAS_DECLSPEC_ALIGN 1
@@ -313,22 +319,33 @@
#endif
// A macro to mark classes or functions as deprecated.
// A macro (V8_DEPRECATED) to mark classes or functions as deprecated.
#if defined(V8_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE
# define V8_DEPRECATED(message, declarator) \
declarator __attribute__((deprecated(message)))
#define V8_DEPRECATED(message, declarator) \
declarator __attribute__((deprecated(message)))
#elif defined(V8_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED
# define V8_DEPRECATED(message, declarator) \
declarator __attribute__((deprecated))
#define V8_DEPRECATED(message, declarator) \
declarator __attribute__((deprecated))
#elif defined(V8_DEPRECATION_WARNINGS) && V8_HAS_DECLSPEC_DEPRECATED
# define V8_DEPRECATED(message, declarator) __declspec(deprecated) declarator
#define V8_DEPRECATED(message, declarator) __declspec(deprecated) declarator
#else
# define V8_DEPRECATED(message, declarator) declarator
#define V8_DEPRECATED(message, declarator) declarator
#endif
// a macro to make it easier to see what will be deprecated.
// A macro (V8_DEPRECATE_SOON) to make it easier to see what will be deprecated.
#if defined(V8_IMMINENT_DEPRECATION_WARNINGS) && \
V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE
#define V8_DEPRECATE_SOON(message, declarator) \
declarator __attribute__((deprecated(message)))
#elif defined(V8_IMMINENT_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED
#define V8_DEPRECATE_SOON(message, declarator) \
declarator __attribute__((deprecated))
#elif defined(V8_IMMINENT_DEPRECATION_WARNINGS) && V8_HAS_DECLSPEC_DEPRECATED
#define V8_DEPRECATE_SOON(message, declarator) __declspec(deprecated) declarator
#else
#define V8_DEPRECATE_SOON(message, declarator) declarator
#endif
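Both macros wrap a complete declarator, so a typical use looks like the following sketch (Widget and its methods are hypothetical):
class Widget {
 public:
  V8_DEPRECATED("Use NewApi instead", void OldApi());
  V8_DEPRECATE_SOON("Will move to the maybe version", int CountThings());
  void NewApi();
};
With V8_DEPRECATION_WARNINGS (or V8_IMMINENT_DEPRECATION_WARNINGS) defined and a supporting compiler, call sites of the marked methods produce warnings; otherwise the macros expand to the bare declarations.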
// A macro to provide the compiler with branch prediction information.
@@ -402,4 +419,6 @@ namespace v8 { template <typename T> class AlignOfHelper { char c; T t; }; }
#define V8_WARN_UNUSED_RESULT /* NOT SUPPORTED */
#endif
// clang-format on
#endif // V8CONFIG_H_

3
deps/v8/infra/OWNERS

@@ -0,0 +1,3 @@
machenbach@chromium.org
sergiyb@chromium.org
tandrii@chromium.org

1
deps/v8/infra/README.md

@@ -0,0 +1 @@
This directory contains infra-specific files.

0
deps/v8/testing/commit_queue/OWNERS → deps/v8/infra/config/OWNERS

52
deps/v8/infra/config/cq.cfg

@@ -0,0 +1,52 @@
# See http://luci-config.appspot.com/schemas/projects/refs:cq.cfg for the
# documentation of this file format.
version: 1
cq_name: "v8"
cq_status_url: "https://chromium-cq-status.appspot.com"
hide_ref_in_committed_msg: true
commit_burst_delay: 60
max_commit_burst: 1
target_ref: "refs/pending/heads/master"
rietveld {
url: "https://codereview.chromium.org"
}
verifiers {
reviewer_lgtm {
committer_list: "v8"
}
tree_status {
tree_status_url: "https://v8-status.appspot.com"
}
try_job {
buckets {
name: "tryserver.v8"
builders { name: "v8_android_arm_compile_rel" }
builders { name: "v8_linux64_asan_rel" }
builders { name: "v8_linux64_avx2_rel" }
builders { name: "v8_linux64_rel" }
builders { name: "v8_linux_arm64_rel" }
builders { name: "v8_linux_arm_rel" }
builders { name: "v8_linux_chromium_gn_rel" }
builders { name: "v8_linux_dbg" }
builders { name: "v8_linux_gcc_compile_rel" }
builders { name: "v8_linux_mipsel_compile_rel" }
builders { name: "v8_linux_mips64el_compile_rel" }
builders { name: "v8_linux_nodcheck_rel" }
builders { name: "v8_linux_rel" }
builders { name: "v8_mac_rel" }
builders { name: "v8_presubmit" }
builders { name: "v8_win64_rel" }
builders { name: "v8_win_compile_dbg" }
builders { name: "v8_win_nosnap_shared_compile_rel" }
builders { name: "v8_win_rel" }
}
}
sign_cla {}
}

1
deps/v8/infra/project-config/README.md

@@ -0,0 +1 @@
This directory contains v8 project-wide configurations for infra services.

23
deps/v8/infra/project-config/cr-buildbucket.cfg

@@ -0,0 +1,23 @@
# Defines buckets on cr-buildbucket.appspot.com, used to schedule builds
# on buildbot. In particular, CQ uses some of these buckets to schedule tryjobs.
#
# See http://luci-config.appspot.com/schemas/projects:buildbucket.cfg for
# schema of this file and documentation.
#
# Please keep this list sorted by bucket name.
buckets {
name: "master.tryserver.v8"
acls {
role: READER
group: "all"
}
acls {
role: SCHEDULER
group: "service-account-cq"
}
acls {
role: WRITER
group: "service-account-v8-master"
}
}

71
deps/v8/samples/hello-world.cc

@@ -0,0 +1,71 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "include/libplatform/libplatform.h"
#include "include/v8.h"
using namespace v8;
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
virtual void* Allocate(size_t length) {
void* data = AllocateUninitialized(length);
return data == NULL ? data : memset(data, 0, length);
}
virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
virtual void Free(void* data, size_t) { free(data); }
};
int main(int argc, char* argv[]) {
// Initialize V8.
V8::InitializeICU();
Platform* platform = platform::CreateDefaultPlatform();
V8::InitializePlatform(platform);
V8::Initialize();
// Create a new Isolate and make it the current one.
ArrayBufferAllocator allocator;
Isolate::CreateParams create_params;
create_params.array_buffer_allocator = &allocator;
Isolate* isolate = Isolate::New(create_params);
{
Isolate::Scope isolate_scope(isolate);
// Create a stack-allocated handle scope.
HandleScope handle_scope(isolate);
// Create a new context.
Local<Context> context = Context::New(isolate);
// Enter the context for compiling and running the hello world script.
Context::Scope context_scope(context);
// Create a string containing the JavaScript source code.
Local<String> source =
String::NewFromUtf8(isolate, "'Hello' + ', World!'",
NewStringType::kNormal).ToLocalChecked();
// Compile the source code.
Local<Script> script = Script::Compile(context, source).ToLocalChecked();
// Run the script to get the result.
Local<Value> result = script->Run(context).ToLocalChecked();
// Convert the result to a UTF-8 string and print it.
String::Utf8Value utf8(result);
printf("%s\n", *utf8);
}
// Dispose the isolate and tear down V8.
isolate->Dispose();
V8::Dispose();
V8::ShutdownPlatform();
delete platform;
return 0;
}

202
deps/v8/samples/process.cc

@@ -93,8 +93,8 @@ class JsHttpRequestProcessor : public HttpRequestProcessor {
public:
// Creates a new processor that processes requests by invoking the
// Process function of the JavaScript script given as an argument.
JsHttpRequestProcessor(Isolate* isolate, Handle<String> script)
: isolate_(isolate), script_(script) { }
JsHttpRequestProcessor(Isolate* isolate, Local<String> script)
: isolate_(isolate), script_(script) {}
virtual ~JsHttpRequestProcessor();
virtual bool Initialize(map<string, string>* opts,
@@ -104,7 +104,7 @@ class JsHttpRequestProcessor : public HttpRequestProcessor {
private:
// Execute the script associated with this processor and extract the
// Process function. Returns true if this succeeded, otherwise false.
bool ExecuteScript(Handle<String> script);
bool ExecuteScript(Local<String> script);
// Wrap the options and output maps in JavaScript objects and
// install them in the global namespace as 'options' and 'output'.
@@ -112,8 +112,8 @@ class JsHttpRequestProcessor : public HttpRequestProcessor {
// Constructs the template that describes the JavaScript wrapper
// type for requests.
static Handle<ObjectTemplate> MakeRequestTemplate(Isolate* isolate);
static Handle<ObjectTemplate> MakeMapTemplate(Isolate* isolate);
static Local<ObjectTemplate> MakeRequestTemplate(Isolate* isolate);
static Local<ObjectTemplate> MakeMapTemplate(Isolate* isolate);
// Callbacks that access the individual fields of request objects.
static void GetPath(Local<String> name,
@@ -132,19 +132,19 @@ class JsHttpRequestProcessor : public HttpRequestProcessor {
// Utility methods for wrapping C++ objects as JavaScript objects,
// and going back again.
Handle<Object> WrapMap(map<string, string>* obj);
static map<string, string>* UnwrapMap(Handle<Object> obj);
Handle<Object> WrapRequest(HttpRequest* obj);
static HttpRequest* UnwrapRequest(Handle<Object> obj);
Local<Object> WrapMap(map<string, string>* obj);
static map<string, string>* UnwrapMap(Local<Object> obj);
Local<Object> WrapRequest(HttpRequest* obj);
static HttpRequest* UnwrapRequest(Local<Object> obj);
Isolate* GetIsolate() { return isolate_; }
Isolate* isolate_;
Handle<String> script_;
Persistent<Context> context_;
Persistent<Function> process_;
static Persistent<ObjectTemplate> request_template_;
static Persistent<ObjectTemplate> map_template_;
Local<String> script_;
Global<Context> context_;
Global<Function> process_;
static Global<ObjectTemplate> request_template_;
static Global<ObjectTemplate> map_template_;
};
@@ -156,7 +156,7 @@ class JsHttpRequestProcessor : public HttpRequestProcessor {
static void LogCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() < 1) return;
HandleScope scope(args.GetIsolate());
Handle<Value> arg = args[0];
Local<Value> arg = args[0];
String::Utf8Value value(arg);
HttpRequestProcessor::Log(*value);
}
@@ -170,8 +170,9 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
// Create a template for the global object where we set the
// built-in global functions.
Handle<ObjectTemplate> global = ObjectTemplate::New(GetIsolate());
global->Set(String::NewFromUtf8(GetIsolate(), "log"),
Local<ObjectTemplate> global = ObjectTemplate::New(GetIsolate());
global->Set(String::NewFromUtf8(GetIsolate(), "log", NewStringType::kNormal)
.ToLocalChecked(),
FunctionTemplate::New(GetIsolate(), LogCallback));
// Each processor gets its own context so different processors don't
@@ -179,7 +180,7 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
// is what we need for the reference to remain after we return from
// this method. That persistent handle has to be disposed in the
// destructor.
v8::Handle<v8::Context> context = Context::New(GetIsolate(), NULL, global);
v8::Local<v8::Context> context = Context::New(GetIsolate(), NULL, global);
context_.Reset(GetIsolate(), context);
// Enter the new context so all the following operations take place
@@ -196,17 +197,21 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
// The script compiled and ran correctly. Now we fetch out the
// Process function from the global object.
Handle<String> process_name = String::NewFromUtf8(GetIsolate(), "Process");
Handle<Value> process_val = context->Global()->Get(process_name);
Local<String> process_name =
String::NewFromUtf8(GetIsolate(), "Process", NewStringType::kNormal)
.ToLocalChecked();
Local<Value> process_val;
// If there is no Process function, or if it is not a function,
// bail out
if (!process_val->IsFunction()) return false;
if (!context->Global()->Get(context, process_name).ToLocal(&process_val) ||
!process_val->IsFunction()) {
return false;
}
// It is a function; cast it to a Function
Handle<Function> process_fun = Handle<Function>::Cast(process_val);
Local<Function> process_fun = Local<Function>::Cast(process_val);
// Store the function in a Persistent handle, since we also want
// Store the function in a Global handle, since we also want
// that to remain after this call returns
process_.Reset(GetIsolate(), process_fun);
@@ -215,16 +220,18 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
}
bool JsHttpRequestProcessor::ExecuteScript(Handle<String> script) {
bool JsHttpRequestProcessor::ExecuteScript(Local<String> script) {
HandleScope handle_scope(GetIsolate());
// We're just about to compile the script; set up an error handler to
// catch any exceptions the script might throw.
TryCatch try_catch;
TryCatch try_catch(GetIsolate());
Local<Context> context(GetIsolate()->GetCurrentContext());
// Compile the script and check for errors.
Handle<Script> compiled_script = Script::Compile(script);
if (compiled_script.IsEmpty()) {
Local<Script> compiled_script;
if (!Script::Compile(context, script).ToLocal(&compiled_script)) {
String::Utf8Value error(try_catch.Exception());
Log(*error);
// The script failed to compile; bail out.
@@ -232,8 +239,8 @@ bool JsHttpRequestProcessor::ExecuteScript(Handle<String> script) {
}
// Run the script!
Handle<Value> result = compiled_script->Run();
if (result.IsEmpty()) {
Local<Value> result;
if (!compiled_script->Run(context).ToLocal(&result)) {
// The TryCatch above is still in effect and will have caught the error.
String::Utf8Value error(try_catch.Exception());
Log(*error);
@@ -249,18 +256,26 @@ bool JsHttpRequestProcessor::InstallMaps(map<string, string>* opts,
HandleScope handle_scope(GetIsolate());
// Wrap the map object in a JavaScript wrapper
Handle<Object> opts_obj = WrapMap(opts);
Local<Object> opts_obj = WrapMap(opts);
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(GetIsolate(), context_);
// Set the options object as a property on the global object.
context->Global()->Set(String::NewFromUtf8(GetIsolate(), "options"),
opts_obj);
Handle<Object> output_obj = WrapMap(output);
context->Global()->Set(String::NewFromUtf8(GetIsolate(), "output"),
output_obj);
context->Global()
->Set(context,
String::NewFromUtf8(GetIsolate(), "options", NewStringType::kNormal)
.ToLocalChecked(),
opts_obj)
.FromJust();
Local<Object> output_obj = WrapMap(output);
context->Global()
->Set(context,
String::NewFromUtf8(GetIsolate(), "output", NewStringType::kNormal)
.ToLocalChecked(),
output_obj)
.FromJust();
return true;
}
@@ -278,19 +293,19 @@ bool JsHttpRequestProcessor::Process(HttpRequest* request) {
Context::Scope context_scope(context);
// Wrap the C++ request object in a JavaScript wrapper
Handle<Object> request_obj = WrapRequest(request);
Local<Object> request_obj = WrapRequest(request);
// Set up an exception handler before calling the Process function
TryCatch try_catch;
TryCatch try_catch(GetIsolate());
// Invoke the process function, giving the global object as 'this'
// and one argument, the request.
const int argc = 1;
Handle<Value> argv[argc] = { request_obj };
Local<Value> argv[argc] = {request_obj};
v8::Local<v8::Function> process =
v8::Local<v8::Function>::New(GetIsolate(), process_);
Handle<Value> result = process->Call(context->Global(), argc, argv);
if (result.IsEmpty()) {
Local<Value> result;
if (!process->Call(context, context->Global(), argc, argv).ToLocal(&result)) {
String::Utf8Value error(try_catch.Exception());
Log(*error);
return false;
@@ -309,8 +324,8 @@ JsHttpRequestProcessor::~JsHttpRequestProcessor() {
}
Persistent<ObjectTemplate> JsHttpRequestProcessor::request_template_;
Persistent<ObjectTemplate> JsHttpRequestProcessor::map_template_;
Global<ObjectTemplate> JsHttpRequestProcessor::request_template_;
Global<ObjectTemplate> JsHttpRequestProcessor::map_template_;
// -----------------------------------
@@ -319,25 +334,26 @@ Persistent<ObjectTemplate> JsHttpRequestProcessor::map_template_;
// Utility function that wraps a C++ map<string, string> in a
// JavaScript object.
Handle<Object> JsHttpRequestProcessor::WrapMap(map<string, string>* obj) {
// Handle scope for temporary handles.
Local<Object> JsHttpRequestProcessor::WrapMap(map<string, string>* obj) {
// Local scope for temporary handles.
EscapableHandleScope handle_scope(GetIsolate());
// Fetch the template for creating JavaScript map wrappers.
// It only has to be created once, which we do on demand.
if (map_template_.IsEmpty()) {
Handle<ObjectTemplate> raw_template = MakeMapTemplate(GetIsolate());
Local<ObjectTemplate> raw_template = MakeMapTemplate(GetIsolate());
map_template_.Reset(GetIsolate(), raw_template);
}
Handle<ObjectTemplate> templ =
Local<ObjectTemplate> templ =
Local<ObjectTemplate>::New(GetIsolate(), map_template_);
// Create an empty map wrapper.
Local<Object> result = templ->NewInstance();
Local<Object> result =
templ->NewInstance(GetIsolate()->GetCurrentContext()).ToLocalChecked();
// Wrap the raw C++ pointer in an External so it can be referenced
// from within JavaScript.
Handle<External> map_ptr = External::New(GetIsolate(), obj);
Local<External> map_ptr = External::New(GetIsolate(), obj);
// Store the map pointer in the JavaScript wrapper.
result->SetInternalField(0, map_ptr);
@@ -352,8 +368,8 @@ Handle<Object> JsHttpRequestProcessor::WrapMap(map<string, string>* obj) {
// Utility function that extracts the C++ map pointer from a wrapper
// object.
map<string, string>* JsHttpRequestProcessor::UnwrapMap(Handle<Object> obj) {
Handle<External> field = Handle<External>::Cast(obj->GetInternalField(0));
map<string, string>* JsHttpRequestProcessor::UnwrapMap(Local<Object> obj) {
Local<External> field = Local<External>::Cast(obj->GetInternalField(0));
void* ptr = field->Value();
return static_cast<map<string, string>*>(ptr);
}
@@ -385,9 +401,10 @@ void JsHttpRequestProcessor::MapGet(Local<Name> name,
// Otherwise fetch the value and wrap it in a JavaScript string
const string& value = (*iter).second;
info.GetReturnValue().Set(String::NewFromUtf8(
info.GetIsolate(), value.c_str(), String::kNormalString,
static_cast<int>(value.length())));
info.GetReturnValue().Set(
String::NewFromUtf8(info.GetIsolate(), value.c_str(),
NewStringType::kNormal,
static_cast<int>(value.length())).ToLocalChecked());
}
@@ -410,7 +427,7 @@ void JsHttpRequestProcessor::MapSet(Local<Name> name, Local<Value> value_obj,
}
Handle<ObjectTemplate> JsHttpRequestProcessor::MakeMapTemplate(
Local<ObjectTemplate> JsHttpRequestProcessor::MakeMapTemplate(
Isolate* isolate) {
EscapableHandleScope handle_scope(isolate);
@@ -431,25 +448,26 @@ Handle<ObjectTemplate> JsHttpRequestProcessor::MakeMapTemplate(
* Utility function that wraps a C++ http request object in a
* JavaScript object.
*/
Handle<Object> JsHttpRequestProcessor::WrapRequest(HttpRequest* request) {
// Handle scope for temporary handles.
Local<Object> JsHttpRequestProcessor::WrapRequest(HttpRequest* request) {
// Local scope for temporary handles.
EscapableHandleScope handle_scope(GetIsolate());
// Fetch the template for creating JavaScript http request wrappers.
// It only has to be created once, which we do on demand.
if (request_template_.IsEmpty()) {
Handle<ObjectTemplate> raw_template = MakeRequestTemplate(GetIsolate());
Local<ObjectTemplate> raw_template = MakeRequestTemplate(GetIsolate());
request_template_.Reset(GetIsolate(), raw_template);
}
Handle<ObjectTemplate> templ =
Local<ObjectTemplate> templ =
Local<ObjectTemplate>::New(GetIsolate(), request_template_);
// Create an empty http request wrapper.
Local<Object> result = templ->NewInstance();
Local<Object> result =
templ->NewInstance(GetIsolate()->GetCurrentContext()).ToLocalChecked();
// Wrap the raw C++ pointer in an External so it can be referenced
// from within JavaScript.
Handle<External> request_ptr = External::New(GetIsolate(), request);
Local<External> request_ptr = External::New(GetIsolate(), request);
// Store the request pointer in the JavaScript wrapper.
result->SetInternalField(0, request_ptr);
@@ -466,8 +484,8 @@ Handle<Object> JsHttpRequestProcessor::WrapRequest(HttpRequest* request) {
* Utility function that extracts the C++ http request object from a
* wrapper object.
*/
HttpRequest* JsHttpRequestProcessor::UnwrapRequest(Handle<Object> obj) {
Handle<External> field = Handle<External>::Cast(obj->GetInternalField(0));
HttpRequest* JsHttpRequestProcessor::UnwrapRequest(Local<Object> obj) {
Local<External> field = Local<External>::Cast(obj->GetInternalField(0));
void* ptr = field->Value();
return static_cast<HttpRequest*>(ptr);
}
@@ -482,9 +500,10 @@ void JsHttpRequestProcessor::GetPath(Local<String> name,
const string& path = request->Path();
// Wrap the result in a JavaScript string and return it.
info.GetReturnValue().Set(String::NewFromUtf8(
info.GetIsolate(), path.c_str(), String::kNormalString,
static_cast<int>(path.length())));
info.GetReturnValue().Set(
String::NewFromUtf8(info.GetIsolate(), path.c_str(),
NewStringType::kNormal,
static_cast<int>(path.length())).ToLocalChecked());
}
@@ -493,9 +512,10 @@ void JsHttpRequestProcessor::GetReferrer(
const PropertyCallbackInfo<Value>& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->Referrer();
info.GetReturnValue().Set(String::NewFromUtf8(
info.GetIsolate(), path.c_str(), String::kNormalString,
static_cast<int>(path.length())));
info.GetReturnValue().Set(
String::NewFromUtf8(info.GetIsolate(), path.c_str(),
NewStringType::kNormal,
static_cast<int>(path.length())).ToLocalChecked());
}
@@ -503,9 +523,10 @@ void JsHttpRequestProcessor::GetHost(Local<String> name,
const PropertyCallbackInfo<Value>& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->Host();
info.GetReturnValue().Set(String::NewFromUtf8(
info.GetIsolate(), path.c_str(), String::kNormalString,
static_cast<int>(path.length())));
info.GetReturnValue().Set(
String::NewFromUtf8(info.GetIsolate(), path.c_str(),
NewStringType::kNormal,
static_cast<int>(path.length())).ToLocalChecked());
}
@@ -514,13 +535,14 @@ void JsHttpRequestProcessor::GetUserAgent(
const PropertyCallbackInfo<Value>& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->UserAgent();
info.GetReturnValue().Set(String::NewFromUtf8(
info.GetIsolate(), path.c_str(), String::kNormalString,
static_cast<int>(path.length())));
info.GetReturnValue().Set(
String::NewFromUtf8(info.GetIsolate(), path.c_str(),
NewStringType::kNormal,
static_cast<int>(path.length())).ToLocalChecked());
}
Handle<ObjectTemplate> JsHttpRequestProcessor::MakeRequestTemplate(
Local<ObjectTemplate> JsHttpRequestProcessor::MakeRequestTemplate(
Isolate* isolate) {
EscapableHandleScope handle_scope(isolate);
@@ -529,16 +551,20 @@ Handle<ObjectTemplate> JsHttpRequestProcessor::MakeRequestTemplate(
// Add accessors for each of the fields of the request.
result->SetAccessor(
String::NewFromUtf8(isolate, "path", String::kInternalizedString),
String::NewFromUtf8(isolate, "path", NewStringType::kInternalized)
.ToLocalChecked(),
GetPath);
result->SetAccessor(
String::NewFromUtf8(isolate, "referrer", String::kInternalizedString),
String::NewFromUtf8(isolate, "referrer", NewStringType::kInternalized)
.ToLocalChecked(),
GetReferrer);
result->SetAccessor(
String::NewFromUtf8(isolate, "host", String::kInternalizedString),
String::NewFromUtf8(isolate, "host", NewStringType::kInternalized)
.ToLocalChecked(),
GetHost);
result->SetAccessor(
String::NewFromUtf8(isolate, "userAgent", String::kInternalizedString),
String::NewFromUtf8(isolate, "userAgent", NewStringType::kInternalized)
.ToLocalChecked(),
GetUserAgent);
// Again, return the result through the current handle scope.
@@ -604,9 +630,9 @@ void ParseOptions(int argc,
// Reads a file into a v8 string.
Handle<String> ReadFile(Isolate* isolate, const string& name) {
MaybeLocal<String> ReadFile(Isolate* isolate, const string& name) {
FILE* file = fopen(name.c_str(), "rb");
if (file == NULL) return Handle<String>();
if (file == NULL) return MaybeLocal<String>();
fseek(file, 0, SEEK_END);
size_t size = ftell(file);
@ -618,12 +644,12 @@ Handle<String> ReadFile(Isolate* isolate, const string& name) {
i += fread(&chars[i], 1, size - i, file);
if (ferror(file)) {
fclose(file);
return Handle<String>();
return MaybeLocal<String>();
}
}
fclose(file);
Handle<String> result = String::NewFromUtf8(
isolate, chars, String::kNormalString, static_cast<int>(size));
MaybeLocal<String> result = String::NewFromUtf8(
isolate, chars, NewStringType::kNormal, static_cast<int>(size));
delete[] chars;
return result;
}
@@ -676,8 +702,8 @@ int main(int argc, char* argv[]) {
Isolate* isolate = Isolate::New(create_params);
Isolate::Scope isolate_scope(isolate);
HandleScope scope(isolate);
Handle<String> source = ReadFile(isolate, file);
if (source.IsEmpty()) {
Local<String> source;
if (!ReadFile(isolate, file).ToLocal(&source)) {
fprintf(stderr, "Error reading '%s'.\n", file.c_str());
return 1;
}

10
deps/v8/samples/samples.gyp

@@ -40,6 +40,10 @@
'include_dirs': [
'..',
],
'defines': [
# TODO(jochen): Remove again after this is globally turned on.
'V8_IMMINENT_DEPRECATION_WARNINGS',
],
'conditions': [
['v8_enable_i18n_support==1', {
'dependencies': [
@@ -61,6 +65,12 @@
'shell.cc',
],
},
{
'target_name': 'hello-world',
'sources': [
'hello-world.cc',
],
},
{
'target_name': 'process',
'sources': [

151
deps/v8/samples/shell.cc

@@ -44,20 +44,18 @@
*/
v8::Handle<v8::Context> CreateShellContext(v8::Isolate* isolate);
void RunShell(v8::Handle<v8::Context> context);
v8::Local<v8::Context> CreateShellContext(v8::Isolate* isolate);
void RunShell(v8::Local<v8::Context> context);
int RunMain(v8::Isolate* isolate, int argc, char* argv[]);
bool ExecuteString(v8::Isolate* isolate,
v8::Handle<v8::String> source,
v8::Handle<v8::Value> name,
bool print_result,
bool ExecuteString(v8::Isolate* isolate, v8::Local<v8::String> source,
v8::Local<v8::Value> name, bool print_result,
bool report_exceptions);
void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
void Read(const v8::FunctionCallbackInfo<v8::Value>& args);
void Load(const v8::FunctionCallbackInfo<v8::Value>& args);
void Quit(const v8::FunctionCallbackInfo<v8::Value>& args);
void Version(const v8::FunctionCallbackInfo<v8::Value>& args);
v8::Handle<v8::String> ReadFile(v8::Isolate* isolate, const char* name);
v8::MaybeLocal<v8::String> ReadFile(v8::Isolate* isolate, const char* name);
void ReportException(v8::Isolate* isolate, v8::TryCatch* handler);
@@ -90,7 +88,7 @@ int main(int argc, char* argv[]) {
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::Context> context = CreateShellContext(isolate);
v8::Local<v8::Context> context = CreateShellContext(isolate);
if (context.IsEmpty()) {
fprintf(stderr, "Error creating context\n");
return 1;
@@ -115,23 +113,30 @@ const char* ToCString(const v8::String::Utf8Value& value) {
// Creates a new execution environment containing the built-in
// functions.
v8::Handle<v8::Context> CreateShellContext(v8::Isolate* isolate) {
v8::Local<v8::Context> CreateShellContext(v8::Isolate* isolate) {
// Create a template for the global object.
v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
v8::Local<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
// Bind the global 'print' function to the C++ Print callback.
global->Set(v8::String::NewFromUtf8(isolate, "print"),
global->Set(
v8::String::NewFromUtf8(isolate, "print", v8::NewStringType::kNormal)
.ToLocalChecked(),
v8::FunctionTemplate::New(isolate, Print));
// Bind the global 'read' function to the C++ Read callback.
global->Set(v8::String::NewFromUtf8(isolate, "read"),
global->Set(v8::String::NewFromUtf8(
isolate, "read", v8::NewStringType::kNormal).ToLocalChecked(),
v8::FunctionTemplate::New(isolate, Read));
// Bind the global 'load' function to the C++ Load callback.
global->Set(v8::String::NewFromUtf8(isolate, "load"),
global->Set(v8::String::NewFromUtf8(
isolate, "load", v8::NewStringType::kNormal).ToLocalChecked(),
v8::FunctionTemplate::New(isolate, Load));
// Bind the 'quit' function
global->Set(v8::String::NewFromUtf8(isolate, "quit"),
global->Set(v8::String::NewFromUtf8(
isolate, "quit", v8::NewStringType::kNormal).ToLocalChecked(),
v8::FunctionTemplate::New(isolate, Quit));
// Bind the 'version' function
global->Set(v8::String::NewFromUtf8(isolate, "version"),
global->Set(
v8::String::NewFromUtf8(isolate, "version", v8::NewStringType::kNormal)
.ToLocalChecked(),
v8::FunctionTemplate::New(isolate, Version));
return v8::Context::New(isolate, NULL, global);
@@ -165,19 +170,22 @@ void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
args.GetIsolate()->ThrowException(
v8::String::NewFromUtf8(args.GetIsolate(), "Bad parameters"));
v8::String::NewFromUtf8(args.GetIsolate(), "Bad parameters",
v8::NewStringType::kNormal).ToLocalChecked());
return;
}
v8::String::Utf8Value file(args[0]);
if (*file == NULL) {
args.GetIsolate()->ThrowException(
v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file"));
v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file",
v8::NewStringType::kNormal).ToLocalChecked());
return;
}
v8::Handle<v8::String> source = ReadFile(args.GetIsolate(), *file);
if (source.IsEmpty()) {
v8::Local<v8::String> source;
if (!ReadFile(args.GetIsolate(), *file).ToLocal(&source)) {
args.GetIsolate()->ThrowException(
v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file"));
v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file",
v8::NewStringType::kNormal).ToLocalChecked());
return;
}
args.GetReturnValue().Set(source);
@@ -193,22 +201,21 @@ void Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::String::Utf8Value file(args[i]);
if (*file == NULL) {
args.GetIsolate()->ThrowException(
v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file"));
v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file",
v8::NewStringType::kNormal).ToLocalChecked());
return;
}
v8::Handle<v8::String> source = ReadFile(args.GetIsolate(), *file);
if (source.IsEmpty()) {
v8::Local<v8::String> source;
if (!ReadFile(args.GetIsolate(), *file).ToLocal(&source)) {
args.GetIsolate()->ThrowException(
v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file"));
v8::String::NewFromUtf8(args.GetIsolate(), "Error loading file",
v8::NewStringType::kNormal).ToLocalChecked());
return;
}
if (!ExecuteString(args.GetIsolate(),
source,
v8::String::NewFromUtf8(args.GetIsolate(), *file),
false,
false)) {
if (!ExecuteString(args.GetIsolate(), source, args[i], false, false)) {
args.GetIsolate()->ThrowException(
v8::String::NewFromUtf8(args.GetIsolate(), "Error executing file"));
v8::String::NewFromUtf8(args.GetIsolate(), "Error executing file",
v8::NewStringType::kNormal).ToLocalChecked());
return;
}
}
@@ -220,7 +227,8 @@ void Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {
// If no arguments are given, args[0] will yield undefined, which
// converts to the integer value 0.
int exit_code = args[0]->Int32Value();
int exit_code =
args[0]->Int32Value(args.GetIsolate()->GetCurrentContext()).FromMaybe(0);
fflush(stdout);
fflush(stderr);
exit(exit_code);
@@ -229,14 +237,15 @@ void Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Version(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(
v8::String::NewFromUtf8(args.GetIsolate(), v8::V8::GetVersion()));
v8::String::NewFromUtf8(args.GetIsolate(), v8::V8::GetVersion(),
v8::NewStringType::kNormal).ToLocalChecked());
}
// Reads a file into a v8 string.
v8::Handle<v8::String> ReadFile(v8::Isolate* isolate, const char* name) {
v8::MaybeLocal<v8::String> ReadFile(v8::Isolate* isolate, const char* name) {
FILE* file = fopen(name, "rb");
if (file == NULL) return v8::Handle<v8::String>();
if (file == NULL) return v8::MaybeLocal<v8::String>();
fseek(file, 0, SEEK_END);
size_t size = ftell(file);
@@ -248,12 +257,12 @@ v8::Handle<v8::String> ReadFile(v8::Isolate* isolate, const char* name) {
i += fread(&chars[i], 1, size - i, file);
if (ferror(file)) {
fclose(file);
return v8::Handle<v8::String>();
return v8::MaybeLocal<v8::String>();
}
}
fclose(file);
v8::Handle<v8::String> result = v8::String::NewFromUtf8(
isolate, chars, v8::String::kNormalString, static_cast<int>(size));
v8::MaybeLocal<v8::String> result = v8::String::NewFromUtf8(
isolate, chars, v8::NewStringType::kNormal, static_cast<int>(size));
delete[] chars;
return result;
}
@@ -274,16 +283,23 @@ int RunMain(v8::Isolate* isolate, int argc, char* argv[]) {
"Warning: unknown flag %s.\nTry --help for options\n", str);
} else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
// Execute argument given to -e option directly.
v8::Handle<v8::String> file_name =
v8::String::NewFromUtf8(isolate, "unnamed");
v8::Handle<v8::String> source =
v8::String::NewFromUtf8(isolate, argv[++i]);
v8::Local<v8::String> file_name =
v8::String::NewFromUtf8(isolate, "unnamed",
v8::NewStringType::kNormal).ToLocalChecked();
v8::Local<v8::String> source;
if (!v8::String::NewFromUtf8(isolate, argv[++i],
v8::NewStringType::kNormal)
.ToLocal(&source)) {
return 1;
}
if (!ExecuteString(isolate, source, file_name, false, true)) return 1;
} else {
// Use all other arguments as names of files to load and run.
v8::Handle<v8::String> file_name = v8::String::NewFromUtf8(isolate, str);
v8::Handle<v8::String> source = ReadFile(isolate, str);
if (source.IsEmpty()) {
v8::Local<v8::String> file_name =
v8::String::NewFromUtf8(isolate, str, v8::NewStringType::kNormal)
.ToLocalChecked();
v8::Local<v8::String> source;
if (!ReadFile(isolate, str).ToLocal(&source)) {
fprintf(stderr, "Error reading '%s'\n", str);
continue;
}
@@ -295,47 +311,47 @@ int RunMain(v8::Isolate* isolate, int argc, char* argv[]) {
// The read-eval-execute loop of the shell.
void RunShell(v8::Handle<v8::Context> context) {
void RunShell(v8::Local<v8::Context> context) {
fprintf(stderr, "V8 version %s [sample shell]\n", v8::V8::GetVersion());
static const int kBufferSize = 256;
// Enter the execution environment before evaluating any code.
v8::Context::Scope context_scope(context);
v8::Local<v8::String> name(
v8::String::NewFromUtf8(context->GetIsolate(), "(shell)"));
v8::String::NewFromUtf8(context->GetIsolate(), "(shell)",
v8::NewStringType::kNormal).ToLocalChecked());
while (true) {
char buffer[kBufferSize];
fprintf(stderr, "> ");
char* str = fgets(buffer, kBufferSize, stdin);
if (str == NULL) break;
v8::HandleScope handle_scope(context->GetIsolate());
ExecuteString(context->GetIsolate(),
v8::String::NewFromUtf8(context->GetIsolate(), str),
name,
true,
true);
ExecuteString(
context->GetIsolate(),
v8::String::NewFromUtf8(context->GetIsolate(), str,
v8::NewStringType::kNormal).ToLocalChecked(),
name, true, true);
}
fprintf(stderr, "\n");
}
// Executes a string within the current v8 context.
bool ExecuteString(v8::Isolate* isolate,
v8::Handle<v8::String> source,
v8::Handle<v8::Value> name,
bool print_result,
bool ExecuteString(v8::Isolate* isolate, v8::Local<v8::String> source,
v8::Local<v8::Value> name, bool print_result,
bool report_exceptions) {
v8::HandleScope handle_scope(isolate);
v8::TryCatch try_catch;
v8::TryCatch try_catch(isolate);
v8::ScriptOrigin origin(name);
v8::Handle<v8::Script> script = v8::Script::Compile(source, &origin);
if (script.IsEmpty()) {
v8::Local<v8::Context> context(isolate->GetCurrentContext());
v8::Local<v8::Script> script;
if (!v8::Script::Compile(context, source, &origin).ToLocal(&script)) {
// Print errors that happened during compilation.
if (report_exceptions)
ReportException(isolate, &try_catch);
return false;
} else {
v8::Handle<v8::Value> result = script->Run();
if (result.IsEmpty()) {
v8::Local<v8::Value> result;
if (!script->Run(context).ToLocal(&result)) {
assert(try_catch.HasCaught());
// Print errors that happened during execution.
if (report_exceptions)
@@ -360,7 +376,7 @@ void ReportException(v8::Isolate* isolate, v8::TryCatch* try_catch) {
v8::HandleScope handle_scope(isolate);
v8::String::Utf8Value exception(try_catch->Exception());
const char* exception_string = ToCString(exception);
v8::Handle<v8::Message> message = try_catch->Message();
v8::Local<v8::Message> message = try_catch->Message();
if (message.IsEmpty()) {
// V8 didn't provide any extra information about this error; just
// print the exception.
@@ -368,24 +384,27 @@ void ReportException(v8::Isolate* isolate, v8::TryCatch* try_catch) {
} else {
// Print (filename):(line number): (message).
v8::String::Utf8Value filename(message->GetScriptOrigin().ResourceName());
v8::Local<v8::Context> context(isolate->GetCurrentContext());
const char* filename_string = ToCString(filename);
int linenum = message->GetLineNumber();
int linenum = message->GetLineNumber(context).FromJust();
fprintf(stderr, "%s:%i: %s\n", filename_string, linenum, exception_string);
// Print line of source code.
v8::String::Utf8Value sourceline(message->GetSourceLine());
v8::String::Utf8Value sourceline(
message->GetSourceLine(context).ToLocalChecked());
const char* sourceline_string = ToCString(sourceline);
fprintf(stderr, "%s\n", sourceline_string);
// Print wavy underline (GetUnderline is deprecated).
int start = message->GetStartColumn();
int start = message->GetStartColumn(context).FromJust();
for (int i = 0; i < start; i++) {
fprintf(stderr, " ");
}
int end = message->GetEndColumn();
int end = message->GetEndColumn(context).FromJust();
for (int i = start; i < end; i++) {
fprintf(stderr, "^");
}
fprintf(stderr, "\n");
v8::String::Utf8Value stack_trace(try_catch->StackTrace());
v8::String::Utf8Value stack_trace(
try_catch->StackTrace(context).ToLocalChecked());
if (stack_trace.length() > 0) {
const char* stack_trace_string = ToCString(stack_trace);
fprintf(stderr, "%s\n", stack_trace_string);

173
deps/v8/src/accessors.cc

@@ -32,6 +32,7 @@ Handle<AccessorInfo> Accessors::MakeAccessor(
info->set_property_attributes(attributes);
info->set_all_can_read(false);
info->set_all_can_write(false);
info->set_is_special_data_property(true);
info->set_name(*name);
Handle<Object> get = v8::FromCData(isolate, getter);
Handle<Object> set = v8::FromCData(isolate, setter);
@@ -126,31 +127,6 @@
}
bool SetPropertyOnInstanceIfInherited(
Isolate* isolate, const v8::PropertyCallbackInfo<void>& info,
v8::Local<v8::Name> name, Handle<Object> value) {
Handle<Object> holder = Utils::OpenHandle(*info.Holder());
Handle<Object> receiver = Utils::OpenHandle(*info.This());
if (*holder == *receiver) return false;
if (receiver->IsJSObject()) {
Handle<JSObject> object = Handle<JSObject>::cast(receiver);
// This behaves sloppy since we lost the actual strict-mode.
// TODO(verwaest): Fix by making ExecutableAccessorInfo behave like data
// properties.
if (object->IsJSGlobalProxy()) {
PrototypeIterator iter(isolate, object);
if (iter.IsAtEnd()) return true;
DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
object = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
}
if (!object->map()->is_extensible()) return true;
JSObject::SetOwnPropertyIgnoreAttributes(object, Utils::OpenHandle(*name),
value, NONE).Check();
}
return true;
}
//
// Accessors::ArgumentsIterator
//
@@ -174,8 +150,6 @@ void Accessors::ArgumentsIteratorSetter(
Handle<JSObject> object = Utils::OpenHandle(*info.This());
Handle<Object> value = Utils::OpenHandle(*val);
if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) return;
LookupIterator it(object, Utils::OpenHandle(*name));
CHECK_EQ(LookupIterator::ACCESSOR, it.state());
DCHECK(it.HolderIsReceiverOrHiddenPrototype());
@@ -199,21 +173,6 @@ Handle<AccessorInfo> Accessors::ArgumentsIteratorInfo(
//
// The helper function will 'flatten' Number objects.
Handle<Object> Accessors::FlattenNumber(Isolate* isolate,
Handle<Object> value) {
if (value->IsNumber() || !value->IsJSValue()) return value;
Handle<JSValue> wrapper = Handle<JSValue>::cast(value);
DCHECK(wrapper->GetIsolate()->native_context()->number_function()->
has_initial_map());
if (wrapper->map() == isolate->number_function()->initial_map()) {
return handle(wrapper->value(), isolate);
}
return value;
}
void Accessors::ArrayLengthGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
@@ -226,44 +185,55 @@ void Accessors::ArrayLengthGetter(
}
// Tries to non-observably convert |value| to a valid array length.
// Returns false if it fails.
static bool FastAsArrayLength(Isolate* isolate, Handle<Object> value,
uint32_t* length) {
if (value->ToArrayLength(length)) return true;
// We don't support AsArrayLength, so use AsArrayIndex for now. This just
// misses out on kMaxUInt32.
if (value->IsString()) return String::cast(*value)->AsArrayIndex(length);
return false;
}
void Accessors::ArrayLengthSetter(
v8::Local<v8::Name> name,
v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<JSObject> object = Utils::OpenHandle(*info.This());
Handle<Object> value = Utils::OpenHandle(*val);
if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) {
return;
}
value = FlattenNumber(isolate, value);
Handle<JSObject> object = Utils::OpenHandle(*info.This());
Handle<JSArray> array = Handle<JSArray>::cast(object);
Handle<Object> length_obj = Utils::OpenHandle(*val);
Handle<JSArray> array_handle = Handle<JSArray>::cast(object);
MaybeHandle<Object> maybe;
uint32_t length = 0;
if (!FastAsArrayLength(isolate, length_obj, &length)) {
Handle<Object> uint32_v;
maybe = Execution::ToUint32(isolate, value);
if (!maybe.ToHandle(&uint32_v)) {
if (!Execution::ToUint32(isolate, length_obj).ToHandle(&uint32_v)) {
isolate->OptionalRescheduleException(false);
return;
}
Handle<Object> number_v;
maybe = Execution::ToNumber(isolate, value);
if (!maybe.ToHandle(&number_v)) {
if (!Execution::ToNumber(isolate, length_obj).ToHandle(&number_v)) {
isolate->OptionalRescheduleException(false);
return;
}
if (uint32_v->Number() == number_v->Number()) {
maybe = JSArray::SetElementsLength(array_handle, uint32_v);
if (maybe.is_null()) isolate->OptionalRescheduleException(false);
return;
if (uint32_v->Number() != number_v->Number()) {
Handle<Object> exception = isolate->factory()->NewRangeError(
MessageTemplate::kInvalidArrayLength);
return isolate->ScheduleThrow(*exception);
}
Handle<Object> exception =
isolate->factory()->NewRangeError(MessageTemplate::kInvalidArrayLength);
isolate->ScheduleThrow(*exception);
CHECK(uint32_v->ToArrayLength(&length));
}
if (JSArray::ObservableSetLength(array, length).is_null()) {
isolate->OptionalRescheduleException(false);
}
}
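The rewritten setter above implements the ES5 ArraySetLength check directly: a candidate length is valid only if ToUint32(v) and ToNumber(v) agree, otherwise a RangeError is scheduled. A standalone sketch of that numeric check (illustrative, not V8 code; ToUint32's modular wrap and NaN handling are elided, so e.g. -1 is simply rejected here):

#include <cstdint>

static bool IsValidArrayLength(double number_value, uint32_t* out) {
  // Reject values ToUint32 would change, e.g. -1, 4.5, or 2^32.
  if (number_value < 0 || number_value > 0xFFFFFFFFu) return false;
  uint32_t uint_value = static_cast<uint32_t>(number_value);
  if (static_cast<double>(uint_value) != number_value) return false;
  *out = uint_value;
  return true;
}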
@@ -706,8 +676,9 @@ void Accessors::ScriptIsEmbedderDebugScriptGetter(
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.This());
bool is_embedder_debug_script =
Script::cast(JSValue::cast(object)->value())->is_embedder_debug_script();
bool is_embedder_debug_script = Script::cast(JSValue::cast(object)->value())
->origin_options()
.IsEmbedderDebugScript();
Object* res = *isolate->factory()->ToBoolean(is_embedder_debug_script);
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
@@ -970,9 +941,6 @@ void Accessors::FunctionPrototypeSetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> value = Utils::OpenHandle(*val);
if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) {
return;
}
Handle<JSFunction> object =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
if (SetFunctionPrototype(isolate, object, value).is_null()) {
@@ -1061,8 +1029,6 @@ void Accessors::FunctionLengthSetter(
HandleScope scope(isolate);
Handle<Object> value = Utils::OpenHandle(*val);
if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) return;
Handle<JSFunction> object =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
if (SetFunctionLength(isolate, object, value).is_null()) {
@@ -1120,8 +1086,6 @@ void Accessors::FunctionNameSetter(
HandleScope scope(isolate);
Handle<Object> value = Utils::OpenHandle(*val);
if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) return;
Handle<JSFunction> object =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
if (SetFunctionName(isolate, object, value).is_null()) {
@@ -1151,22 +1115,41 @@ static Handle<Object> ArgumentsForInlinedFunction(
int inlined_frame_index) {
Isolate* isolate = inlined_function->GetIsolate();
Factory* factory = isolate->factory();
SlotRefValueBuilder slot_refs(
frame, inlined_frame_index,
inlined_function->shared()->internal_formal_parameter_count());
int args_count = slot_refs.args_length();
TranslatedState translated_values(frame);
translated_values.Prepare(false, frame->fp());
int argument_count = 0;
TranslatedFrame* translated_frame =
translated_values.GetArgumentsInfoFromJSFrameIndex(inlined_frame_index,
&argument_count);
TranslatedFrame::iterator iter = translated_frame->begin();
// Skip the function.
iter++;
// Skip the receiver.
iter++;
argument_count--;
Handle<JSObject> arguments =
factory->NewArgumentsObject(inlined_function, args_count);
Handle<FixedArray> array = factory->NewFixedArray(args_count);
slot_refs.Prepare(isolate);
for (int i = 0; i < args_count; ++i) {
Handle<Object> value = slot_refs.GetNext(isolate, 0);
factory->NewArgumentsObject(inlined_function, argument_count);
Handle<FixedArray> array = factory->NewFixedArray(argument_count);
bool should_deoptimize = false;
for (int i = 0; i < argument_count; ++i) {
// If we materialize any object, we should deopt because we might alias
// an object that was eliminated by escape analysis.
should_deoptimize = should_deoptimize || iter->IsMaterializedObject();
Handle<Object> value = iter->GetValue();
array->set(i, *value);
iter++;
}
slot_refs.Finish(isolate);
arguments->set_elements(*array);
if (should_deoptimize) {
translated_values.StoreMaterializedValuesAndDeopt();
}
// Return the freshly allocated arguments object.
return arguments;
}
@@ -1437,9 +1420,19 @@ static void ModuleGetExport(
JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
Context* context = Context::cast(instance->context());
DCHECK(context->IsModuleContext());
int slot = info.Data()->Int32Value();
Object* value = context->get(slot);
Isolate* isolate = instance->GetIsolate();
int slot = info.Data()
->Int32Value(info.GetIsolate()->GetCurrentContext())
.FromMaybe(-1);
if (slot < 0 || slot >= context->length()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
Handle<Object> exception = isolate->factory()->NewReferenceError(
MessageTemplate::kNotDefined, name);
isolate->ScheduleThrow(*exception);
return;
}
Object* value = context->get(slot);
if (value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
@@ -1459,9 +1452,18 @@ static void ModuleSetExport(
JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
Context* context = Context::cast(instance->context());
DCHECK(context->IsModuleContext());
int slot = info.Data()->Int32Value();
Isolate* isolate = instance->GetIsolate();
int slot = info.Data()
->Int32Value(info.GetIsolate()->GetCurrentContext())
.FromMaybe(-1);
if (slot < 0 || slot >= context->length()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
Handle<Object> exception = isolate->factory()->NewReferenceError(
MessageTemplate::kNotDefined, name);
isolate->ScheduleThrow(*exception);
return;
}
Object* old_value = context->get(slot);
Isolate* isolate = context->GetIsolate();
if (old_value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
Handle<Object> exception = isolate->factory()->NewReferenceError(
@@ -1493,4 +1495,5 @@ Handle<AccessorInfo> Accessors::MakeModuleExport(
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
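Both module-export accessors above now fetch the slot index through the context-aware Int32Value(context), substitute -1 on failure via FromMaybe, and bounds-check against context->length() before indexing, scheduling a ReferenceError rather than reading out of range. The shape of that defensive unwrap, as a hypothetical standalone helper (not V8 code):

// A failed conversion yields the -1 sentinel, which then fails the range
// check; the caller throws in either case.
int SafeSlot(v8::Local<v8::Value> data, v8::Local<v8::Context> context,
             int context_length) {
  int slot = data->Int32Value(context).FromMaybe(-1);
  return (slot < 0 || slot >= context_length) ? -1 : slot;
}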

5
deps/v8/src/accessors.h

@@ -98,11 +98,6 @@ class Accessors : public AllStatic {
static Handle<ExecutableAccessorInfo> CloneAccessor(
Isolate* isolate,
Handle<ExecutableAccessorInfo> accessor);
private:
// Helper functions.
static Handle<Object> FlattenNumber(Isolate* isolate, Handle<Object> value);
};
} } // namespace v8::internal

3
deps/v8/src/allocation-site-scopes.cc

@@ -76,4 +76,5 @@ bool AllocationSiteUsageContext::ShouldCreateMemento(Handle<JSObject> object) {
return false;
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8

3
deps/v8/src/allocation-tracker.cc

@@ -337,4 +337,5 @@ void AllocationTracker::UnresolvedLocation::HandleWeakScript(
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8

3
deps/v8/src/allocation.cc

@@ -108,4 +108,5 @@ void AlignedFree(void *ptr) {
#endif
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8

100
deps/v8/src/api-natives.cc

@@ -7,6 +7,7 @@
#include "src/api.h"
#include "src/isolate.h"
#include "src/lookup.h"
#include "src/messages.h"
namespace v8 {
namespace internal {
@@ -36,11 +37,12 @@ MaybeHandle<Object> Instantiate(Isolate* isolate, Handle<Object> data,
}
MaybeHandle<Object> DefineAccessorProperty(
Isolate* isolate, Handle<JSObject> object, Handle<Name> name,
Handle<Object> getter, Handle<Object> setter, Smi* attributes) {
DCHECK(PropertyDetails::AttributesField::is_valid(
static_cast<PropertyAttributes>(attributes->value())));
MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
Handle<JSObject> object,
Handle<Name> name,
Handle<Object> getter,
Handle<Object> setter,
PropertyAttributes attributes) {
if (!getter->IsUndefined()) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, getter,
@@ -55,10 +57,8 @@ MaybeHandle<Object> DefineAccessorProperty(
Handle<FunctionTemplateInfo>::cast(setter)),
Object);
}
RETURN_ON_EXCEPTION(isolate,
JSObject::DefineAccessor(
object, name, getter, setter,
static_cast<PropertyAttributes>(attributes->value())),
RETURN_ON_EXCEPTION(isolate, JSObject::DefineAccessor(object, name, getter,
setter, attributes),
Object);
return object;
}
@@ -66,46 +66,29 @@ MaybeHandle<Object> DefineAccessorProperty(
MaybeHandle<Object> DefineDataProperty(Isolate* isolate,
Handle<JSObject> object,
Handle<Name> key,
Handle<Name> name,
Handle<Object> prop_data,
Smi* unchecked_attributes) {
DCHECK((unchecked_attributes->value() &
~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
// Compute attributes.
PropertyAttributes attributes =
static_cast<PropertyAttributes>(unchecked_attributes->value());
PropertyAttributes attributes) {
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
Instantiate(isolate, prop_data, key), Object);
Instantiate(isolate, prop_data, name), Object);
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
#ifdef DEBUG
bool duplicate;
if (key->IsName()) {
LookupIterator it(object, Handle<Name>::cast(key),
LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
DCHECK(maybe.IsJust());
duplicate = it.IsFound();
} else {
uint32_t index = 0;
key->ToArrayIndex(&index);
Maybe<bool> maybe = JSReceiver::HasOwnElement(object, index);
if (!maybe.IsJust()) return MaybeHandle<Object>();
duplicate = maybe.FromJust();
}
if (duplicate) {
Handle<Object> args[1] = {key};
THROW_NEW_ERROR(isolate, NewTypeError("duplicate_template_property",
HandleVector(args, 1)),
if (it.IsFound()) {
THROW_NEW_ERROR(
isolate,
NewTypeError(MessageTemplate::kDuplicateTemplateProperty, name),
Object);
}
#endif
RETURN_ON_EXCEPTION(
isolate, Runtime::DefineObjectProperty(object, key, value, attributes),
Object);
return object;
return Object::AddDataProperty(&it, value, attributes, STRICT,
Object::CERTAINLY_NOT_STORE_FROM_KEYED);
}
@@ -160,27 +143,28 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
HandleScope scope(isolate);
// Disable access checks while instantiating the object.
AccessCheckDisableScope access_check_scope(isolate, obj);
for (int i = 0; i < properties.length();) {
int length = Smi::cast(properties.get(i))->value();
if (length == 3) {
auto name = handle(Name::cast(properties.get(i + 1)), isolate);
auto prop_data = handle(properties.get(i + 2), isolate);
auto attributes = Smi::cast(properties.get(i + 3));
int i = 0;
for (int c = 0; c < data->number_of_properties(); c++) {
auto name = handle(Name::cast(properties.get(i++)), isolate);
PropertyDetails details(Smi::cast(properties.get(i++)));
PropertyAttributes attributes = details.attributes();
PropertyKind kind = details.kind();
if (kind == kData) {
auto prop_data = handle(properties.get(i++), isolate);
RETURN_ON_EXCEPTION(isolate, DefineDataProperty(isolate, obj, name,
prop_data, attributes),
JSObject);
} else {
DCHECK(length == 4);
auto name = handle(Name::cast(properties.get(i + 1)), isolate);
auto getter = handle(properties.get(i + 2), isolate);
auto setter = handle(properties.get(i + 3), isolate);
auto attributes = Smi::cast(properties.get(i + 4));
auto getter = handle(properties.get(i++), isolate);
auto setter = handle(properties.get(i++), isolate);
RETURN_ON_EXCEPTION(isolate,
DefineAccessorProperty(isolate, obj, name, getter,
setter, attributes),
JSObject);
}
i += length + 1;
}
return obj;
}
@@ -321,8 +305,8 @@ void AddPropertyToPropertyList(Isolate* isolate, Handle<TemplateInfo> templ,
list = NeanderArray(isolate).value();
templ->set_property_list(*list);
}
templ->set_number_of_properties(templ->number_of_properties() + 1);
NeanderArray array(list);
array.add(isolate, isolate->factory()->NewNumberFromInt(length));
for (int i = 0; i < length; i++) {
Handle<Object> value =
data[i].is_null()
@@ -371,10 +355,9 @@ void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
Handle<Name> name, Handle<Object> value,
PropertyAttributes attributes) {
const int kSize = 3;
DCHECK(Smi::IsValid(static_cast<int>(attributes)));
auto attribute_handle =
handle(Smi::FromInt(static_cast<int>(attributes)), isolate);
Handle<Object> data[kSize] = {name, value, attribute_handle};
PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
auto details_handle = handle(details.AsSmi(), isolate);
Handle<Object> data[kSize] = {name, details_handle, value};
AddPropertyToPropertyList(isolate, info, kSize, data);
}
@@ -386,10 +369,9 @@ void ApiNatives::AddAccessorProperty(Isolate* isolate,
Handle<FunctionTemplateInfo> setter,
PropertyAttributes attributes) {
const int kSize = 4;
DCHECK(Smi::IsValid(static_cast<int>(attributes)));
auto attribute_handle =
handle(Smi::FromInt(static_cast<int>(attributes)), isolate);
Handle<Object> data[kSize] = {name, getter, setter, attribute_handle};
PropertyDetails details(attributes, ACCESSOR, 0, PropertyCellType::kNoCell);
auto details_handle = handle(details.AsSmi(), isolate);
Handle<Object> data[kSize] = {name, details_handle, getter, setter};
AddPropertyToPropertyList(isolate, info, kSize, data);
}
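The template property list changes shape in this file: instead of a per-entry length prefix followed by {name, payload..., attributes}, each entry is now {name, PropertyDetails-as-Smi, payload...}, with one payload slot for data properties and two (getter, setter) for accessors, and the walk is driven by number_of_properties() kept on the template. A self-contained sketch of the stride logic (plain C++ stand-ins, not V8 types):

#include <cstdio>

enum Kind { kData, kAccessor };

// Walks the flat list the way the new ConfigureInstance loop does: a count
// stored on the template, not per-entry length prefixes, drives the walk.
int WalkProperties(const Kind* kinds, int number_of_properties) {
  int slot = 0;
  for (int c = 0; c < number_of_properties; ++c) {
    slot++;                           // name
    slot++;                           // PropertyDetails packed in a Smi
    slot += (kinds[c] == kData) ? 1   // value
                                : 2;  // getter, setter
  }
  return slot;  // total slots consumed
}

int main() {
  Kind kinds[] = {kData, kAccessor, kData};
  printf("%d slots\n", WalkProperties(kinds, 3));  // prints "10 slots"
  return 0;
}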

1231
deps/v8/src/api.cc

File diff suppressed because it is too large

15
deps/v8/src/api.h

@@ -95,6 +95,7 @@ void NeanderObject::set(int offset, v8::internal::Object* value) {
template <typename T> inline T ToCData(v8::internal::Object* obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
if (obj == v8::internal::Smi::FromInt(0)) return nullptr;
return reinterpret_cast<T>(
reinterpret_cast<intptr_t>(
v8::internal::Foreign::cast(obj)->foreign_address()));
@@ -105,6 +106,7 @@ template <typename T>
inline v8::internal::Handle<v8::internal::Object> FromCData(
v8::internal::Isolate* isolate, T obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
if (obj == nullptr) return handle(v8::internal::Smi::FromInt(0), isolate);
return isolate->factory()->NewForeign(
reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
}
@@ -146,6 +148,8 @@ class RegisteredExtension {
V(RegExp, JSRegExp) \
V(Object, JSObject) \
V(Array, JSArray) \
V(Map, JSMap) \
V(Set, JSSet) \
V(ArrayBuffer, JSArrayBuffer) \
V(ArrayBufferView, JSArrayBufferView) \
V(TypedArray, JSTypedArray) \
@@ -159,6 +163,7 @@ class RegisteredExtension {
V(Float32Array, JSTypedArray) \
V(Float64Array, JSTypedArray) \
V(DataView, JSDataView) \
V(SharedArrayBuffer, JSArrayBuffer) \
V(Name, Name) \
V(String, String) \
V(Symbol, Symbol) \
@@ -202,6 +207,10 @@ class Utils {
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<Array> ToLocal(
v8::internal::Handle<v8::internal::JSArray> obj);
static inline Local<Map> ToLocal(
v8::internal::Handle<v8::internal::JSMap> obj);
static inline Local<Set> ToLocal(
v8::internal::Handle<v8::internal::JSSet> obj);
static inline Local<ArrayBuffer> ToLocal(
v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
static inline Local<ArrayBufferView> ToLocal(
@@ -230,6 +239,9 @@ class Utils {
static inline Local<Float64Array> ToLocalFloat64Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<SharedArrayBuffer> ToLocalShared(
v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
static inline Local<Message> MessageToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<Promise> PromiseToLocal(
@@ -356,10 +368,13 @@ MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)
MAKE_TO_LOCAL(ToLocal, JSMap, Map)
MAKE_TO_LOCAL(ToLocal, JSSet, Set)
MAKE_TO_LOCAL(ToLocal, JSArrayBuffer, ArrayBuffer)
MAKE_TO_LOCAL(ToLocal, JSArrayBufferView, ArrayBufferView)
MAKE_TO_LOCAL(ToLocal, JSDataView, DataView)
MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray)
MAKE_TO_LOCAL(ToLocalShared, JSArrayBuffer, SharedArrayBuffer)
TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY)
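The new MAKE_TO_LOCAL lines add Utils::ToLocal overloads for JSMap and JSSet plus a ToLocalShared that surfaces a JSArrayBuffer as v8::SharedArrayBuffer. Judging by the macro's usage pattern, each invocation presumably expands to a one-line Convert wrapper along these lines (reconstruction, not quoted from the commit; the real macro definition lives earlier in api.h and may differ in detail):

// Plausible expansion of MAKE_TO_LOCAL(ToLocal, JSMap, Map):
Local<v8::Map> Utils::ToLocal(v8::internal::Handle<v8::internal::JSMap> obj) {
  return Convert<v8::internal::JSMap, v8::Map>(obj);
}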

28
deps/v8/src/arguments.cc

@@ -11,18 +11,18 @@ namespace v8 {
namespace internal {
template<typename T>
template<typename V>
v8::Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
template <typename T>
template <typename V>
v8::Local<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
// Check the ReturnValue.
Object** handle = &this->begin()[kReturnValueOffset];
// Nothing was set, return empty handle as per previous behaviour.
if ((*handle)->IsTheHole()) return v8::Handle<V>();
if ((*handle)->IsTheHole()) return v8::Local<V>();
return Utils::Convert<Object, V>(Handle<Object>(handle));
}
v8::Handle<v8::Value> FunctionCallbackArguments::Call(FunctionCallback f) {
v8::Local<v8::Value> FunctionCallbackArguments::Call(FunctionCallback f) {
Isolate* isolate = this->isolate();
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
@@ -36,18 +36,18 @@ v8::Handle<v8::Value> FunctionCallbackArguments::Call(FunctionCallback f) {
#define WRITE_CALL_0(Function, ReturnValue) \
v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f) { \
v8::Local<ReturnValue> PropertyCallbackArguments::Call(Function f) { \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
PropertyCallbackInfo<ReturnValue> info(begin()); \
f(info); \
return GetReturnValue<ReturnValue>(isolate); \
}
}
#define WRITE_CALL_1(Function, ReturnValue, Arg1) \
v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f, \
v8::Local<ReturnValue> PropertyCallbackArguments::Call(Function f, \
Arg1 arg1) { \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
@@ -55,20 +55,19 @@ v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f, \
PropertyCallbackInfo<ReturnValue> info(begin()); \
f(arg1, info); \
return GetReturnValue<ReturnValue>(isolate); \
}
}
#define WRITE_CALL_2(Function, ReturnValue, Arg1, Arg2) \
v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f, \
Arg1 arg1, \
Arg2 arg2) { \
v8::Local<ReturnValue> PropertyCallbackArguments::Call( \
Function f, Arg1 arg1, Arg2 arg2) { \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
PropertyCallbackInfo<ReturnValue> info(begin()); \
f(arg1, arg2, info); \
return GetReturnValue<ReturnValue>(isolate); \
}
}
#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2) \
@@ -102,4 +101,5 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
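The churn in this file is mechanical: every v8::Handle return type becomes v8::Local, matching 4.5's move away from Handle in the public API. Embedder code faces the same rename; a small sketch of a property getter in the new spelling (illustrative only, assumes v8.h is included):

// Before 4.5 the local below would typically have been declared as
// v8::Handle<v8::Integer>; the callback signature itself is unchanged.
void AnswerGetter(v8::Local<v8::Name> name,
                  const v8::PropertyCallbackInfo<v8::Value>& info) {
  v8::Local<v8::Integer> value = v8::Integer::New(info.GetIsolate(), 42);
  info.GetReturnValue().Set(value);
}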

12
deps/v8/src/arguments.h

@@ -128,8 +128,8 @@ class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
protected:
explicit inline CustomArguments(Isolate* isolate) : Super(isolate) {}
template<typename V>
v8::Handle<V> GetReturnValue(Isolate* isolate);
template <typename V>
v8::Local<V> GetReturnValue(Isolate* isolate);
inline Isolate* isolate() {
return reinterpret_cast<Isolate*>(this->begin()[T::kIsolateIndex]);
@@ -178,13 +178,13 @@ class PropertyCallbackArguments
* New style callbacks always use the return value.
*/
#define WRITE_CALL_0(Function, ReturnValue) \
v8::Handle<ReturnValue> Call(Function f); \
v8::Local<ReturnValue> Call(Function f);
#define WRITE_CALL_1(Function, ReturnValue, Arg1) \
v8::Handle<ReturnValue> Call(Function f, Arg1 arg1); \
v8::Local<ReturnValue> Call(Function f, Arg1 arg1);
#define WRITE_CALL_2(Function, ReturnValue, Arg1, Arg2) \
v8::Handle<ReturnValue> Call(Function f, Arg1 arg1, Arg2 arg2); \
v8::Local<ReturnValue> Call(Function f, Arg1 arg1, Arg2 arg2);
#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2) \
void Call(Function f, Arg1 arg1, Arg2 arg2); \
@@ -250,7 +250,7 @@ class FunctionCallbackArguments
* and used if it's been set to anything inside the callback.
* New style callbacks always use the return value.
*/
v8::Handle<v8::Value> Call(FunctionCallback f);
v8::Local<v8::Value> Call(FunctionCallback f);
private:
internal::Object** argv_;

27
deps/v8/src/arm/assembler-arm-inl.h

@@ -118,10 +118,11 @@ Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
if (FLAG_enable_ool_constant_pool ||
if (FLAG_enable_embedded_constant_pool ||
Assembler::IsMovW(Memory::int32_at(pc_))) {
// We return the PC for ool constant pool since this function is used by the
// serializer and expects the address to reside within the code object.
// We return the PC for embedded constant pool since this function is used
// by the serializer and expects the address to reside within the code
// object.
return reinterpret_cast<Address>(pc_);
} else {
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
@@ -543,7 +544,7 @@ Address Assembler::return_address_from_call_start(Address pc) {
void Assembler::deserialization_set_special_target_at(
Address constant_pool_entry, Code* code, Address target) {
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
set_target_address_at(constant_pool_entry, code, target);
} else {
Memory::Address_at(constant_pool_entry) = target;
@@ -560,21 +561,21 @@ void Assembler::deserialization_set_target_internal_reference_at(
bool Assembler::is_constant_pool_load(Address pc) {
if (CpuFeatures::IsSupported(ARMv7)) {
return !Assembler::IsMovW(Memory::int32_at(pc)) ||
(FLAG_enable_ool_constant_pool &&
(FLAG_enable_embedded_constant_pool &&
Assembler::IsLdrPpRegOffset(
Memory::int32_at(pc + 2 * Assembler::kInstrSize)));
} else {
return !Assembler::IsMovImmed(Memory::int32_at(pc)) ||
(FLAG_enable_ool_constant_pool &&
(FLAG_enable_embedded_constant_pool &&
Assembler::IsLdrPpRegOffset(
Memory::int32_at(pc + 4 * Assembler::kInstrSize)));
}
}
Address Assembler::constant_pool_entry_address(
Address pc, ConstantPoolArray* constant_pool) {
if (FLAG_enable_ool_constant_pool) {
Address Assembler::constant_pool_entry_address(Address pc,
Address constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
DCHECK(constant_pool != NULL);
int cp_offset;
if (!CpuFeatures::IsSupported(ARMv7) && IsMovImmed(Memory::int32_at(pc))) {
@@ -602,7 +603,7 @@ Address Assembler::constant_pool_entry_address(
DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc)));
cp_offset = GetLdrRegisterImmediateOffset(Memory::int32_at(pc));
}
return reinterpret_cast<Address>(constant_pool) + cp_offset;
return constant_pool + cp_offset;
} else {
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc)));
Instr instr = Memory::int32_at(pc);
@@ -611,8 +612,7 @@
}
Address Assembler::target_address_at(Address pc,
ConstantPoolArray* constant_pool) {
Address Assembler::target_address_at(Address pc, Address constant_pool) {
if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Return the value in the constant pool.
return Memory::Address_at(constant_pool_entry_address(pc, constant_pool));
@@ -643,8 +643,7 @@ Address Assembler::target_address_at(Address pc,
}
void Assembler::set_target_address_at(Address pc,
ConstantPoolArray* constant_pool,
void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
ICacheFlushMode icache_flush_mode) {
if (is_constant_pool_load(pc)) {

634
deps/v8/src/arm/assembler-arm.cc

@@ -234,9 +234,9 @@ const int RelocInfo::kApplyMask = 0;
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded.  Being
// specially coded on ARM means that it is a movw/movt instruction, or is an
// out of line constant pool entry.  These only occur if
// FLAG_enable_ool_constant_pool is true.
return FLAG_enable_ool_constant_pool;
// embedded constant pool entry.  These only occur if
// FLAG_enable_embedded_constant_pool is true.
return FLAG_enable_embedded_constant_pool;
}
@@ -449,11 +449,11 @@ const Instr kLdrStrInstrTypeMask = 0xffff0000;
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
constant_pool_builder_(),
constant_pool_builder_(kLdrMaxReachBits, kVldrMaxReachBits),
positions_recorder_(this) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
num_pending_32_bit_reloc_info_ = 0;
num_pending_64_bit_reloc_info_ = 0;
num_pending_32_bit_constants_ = 0;
num_pending_64_bit_constants_ = 0;
next_buffer_check_ = 0;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
@@ -471,23 +471,30 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
reloc_info_writer.Finish();
if (!FLAG_enable_ool_constant_pool) {
// Emit constant pool if necessary.
int constant_pool_offset = 0;
if (FLAG_enable_embedded_constant_pool) {
constant_pool_offset = EmitEmbeddedConstantPool();
} else {
CheckConstPool(true, false);
DCHECK(num_pending_32_bit_reloc_info_ == 0);
DCHECK(num_pending_64_bit_reloc_info_ == 0);
DCHECK(num_pending_32_bit_constants_ == 0);
DCHECK(num_pending_64_bit_constants_ == 0);
}
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
desc->constant_pool_size =
(constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
desc->origin = this;
}
void Assembler::Align(int m) {
DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
DCHECK((pc_offset() & (kInstrSize - 1)) == 0);
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@@ -623,7 +630,7 @@ Register Assembler::GetRm(Instr instr) {
Instr Assembler::GetConsantPoolLoadPattern() {
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
return kLdrPpImmedPattern;
} else {
return kLdrPCImmedPattern;
@@ -632,7 +639,7 @@ Instr Assembler::GetConsantPoolLoadPattern() {
Instr Assembler::GetConsantPoolLoadMask() {
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
return kLdrPpImmedMask;
} else {
return kLdrPCImmedMask;
@@ -1044,8 +1051,8 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
static bool use_mov_immediate_load(const Operand& x,
const Assembler* assembler) {
if (FLAG_enable_ool_constant_pool && assembler != NULL &&
!assembler->is_ool_constant_pool_available()) {
if (FLAG_enable_embedded_constant_pool && assembler != NULL &&
!assembler->is_constant_pool_available()) {
return true;
} else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
(assembler == NULL || !assembler->predictable_code_size())) {
@@ -1074,8 +1081,9 @@ int Operand::instructions_required(const Assembler* assembler,
if (use_mov_immediate_load(*this, assembler)) {
// A movw / movt or mov / orr immediate load.
instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4;
} else if (assembler != NULL && assembler->use_extended_constant_pool()) {
// An extended constant pool load.
} else if (assembler != NULL &&
assembler->ConstantPoolAccessIsInOverflow()) {
// An overflowed constant pool load.
instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5;
} else {
// A small constant pool load.
@@ -1100,23 +1108,23 @@ int Operand::instructions_required(const Assembler* assembler,
void Assembler::move_32_bit_immediate(Register rd,
const Operand& x,
Condition cond) {
RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
uint32_t imm32 = static_cast<uint32_t>(x.imm32_);
if (x.must_output_reloc_info(this)) {
RecordRelocInfo(rinfo);
RecordRelocInfo(x.rmode_);
}
if (use_mov_immediate_load(x, this)) {
Register target = rd.code() == pc.code() ? ip : rd;
if (CpuFeatures::IsSupported(ARMv7)) {
if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
if (!FLAG_enable_embedded_constant_pool &&
x.must_output_reloc_info(this)) {
// Make sure the movw/movt doesn't get separated.
BlockConstPoolFor(2);
}
movw(target, imm32 & 0xffff, cond);
movt(target, imm32 >> 16, cond);
} else {
DCHECK(FLAG_enable_ool_constant_pool);
DCHECK(FLAG_enable_embedded_constant_pool);
mov(target, Operand(imm32 & kImm8Mask), LeaveCC, cond);
orr(target, target, Operand(imm32 & (kImm8Mask << 8)), LeaveCC, cond);
orr(target, target, Operand(imm32 & (kImm8Mask << 16)), LeaveCC, cond);
@@ -1126,10 +1134,11 @@ void Assembler::move_32_bit_immediate(Register rd,
mov(rd, target, LeaveCC, cond);
}
} else {
DCHECK(!FLAG_enable_ool_constant_pool || is_ool_constant_pool_available());
ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
if (section == ConstantPoolArray::EXTENDED_SECTION) {
DCHECK(FLAG_enable_ool_constant_pool);
DCHECK(!FLAG_enable_embedded_constant_pool || is_constant_pool_available());
ConstantPoolEntry::Access access =
ConstantPoolAddEntry(pc_offset(), x.rmode_, x.imm32_);
if (access == ConstantPoolEntry::OVERFLOWED) {
DCHECK(FLAG_enable_embedded_constant_pool);
Register target = rd.code() == pc.code() ? ip : rd;
// Emit instructions to load constant pool offset.
if (CpuFeatures::IsSupported(ARMv7)) {
@@ -1144,8 +1153,9 @@ void Assembler::move_32_bit_immediate(Register rd,
// Load from constant pool at offset.
ldr(rd, MemOperand(pp, target), cond);
} else {
DCHECK(section == ConstantPoolArray::SMALL_SECTION);
ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
DCHECK(access == ConstantPoolEntry::REGULAR);
ldr(rd, MemOperand(FLAG_enable_embedded_constant_pool ? pp : pc, 0),
cond);
}
}
}
@@ -1315,8 +1325,7 @@ int Assembler::branch_offset(Label* L) {
// Block the emission of the constant pool, since the branch instruction must
// be emitted at the pc offset recorded by the label.
if (!is_const_pool_blocked()) BlockConstPoolFor(1);
BlockConstPoolFor(1);
return target_pos - (pc_offset() + kPcLoadDelta);
}
@@ -2573,7 +2582,7 @@ void Assembler::vmov(const DwVfpRegister dst,
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
} else if (FLAG_enable_vldr_imm && is_ool_constant_pool_available()) {
} else if (FLAG_enable_vldr_imm && is_constant_pool_available()) {
// TODO(jfb) Temporarily turned off until we have constant blinding or
// some equivalent mitigation: an attacker can otherwise control
// generated data which also happens to be executable, a Very Bad
@@ -2589,18 +2598,17 @@ void Assembler::vmov(const DwVfpRegister dst,
// The code could also randomize the order of values, though
// that's tricky because vldr has a limited reach. Furthermore
// it breaks load locality.
RelocInfo rinfo(pc_, imm);
ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
if (section == ConstantPoolArray::EXTENDED_SECTION) {
DCHECK(FLAG_enable_ool_constant_pool);
ConstantPoolEntry::Access access = ConstantPoolAddEntry(pc_offset(), imm);
if (access == ConstantPoolEntry::OVERFLOWED) {
DCHECK(FLAG_enable_embedded_constant_pool);
// Emit instructions to load constant pool offset.
movw(ip, 0);
movt(ip, 0);
// Load from constant pool at offset.
vldr(dst, MemOperand(pp, ip));
} else {
DCHECK(section == ConstantPoolArray::SMALL_SECTION);
vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
DCHECK(access == ConstantPoolEntry::REGULAR);
vldr(dst, MemOperand(FLAG_enable_embedded_constant_pool ? pp : pc, 0));
}
} else {
// Synthesise the double from ARM immediates.
@@ -2615,7 +2623,8 @@ void Assembler::vmov(const DwVfpRegister dst,
} else if (scratch.is(no_reg)) {
mov(ip, Operand(lo));
vmov(dst, VmovIndexLo, ip);
if ((lo & 0xffff) == (hi & 0xffff)) {
if (((lo & 0xffff) == (hi & 0xffff)) &&
CpuFeatures::IsSupported(ARMv7)) {
movt(ip, hi >> 16);
} else {
mov(ip, Operand(hi));
@@ -3574,22 +3583,6 @@ void Assembler::GrowBuffer() {
// None of our relocation types are pc relative pointing outside the code
// buffer nor pc absolute pointing inside the code buffer, so there is no need
// to relocate any emitted relocation entries.
// Relocate pending relocation entries.
for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
DCHECK(rinfo.rmode() != RelocInfo::COMMENT &&
rinfo.rmode() != RelocInfo::POSITION);
if (rinfo.rmode() != RelocInfo::JS_RETURN) {
rinfo.set_pc(rinfo.pc() + pc_delta);
}
}
for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
DCHECK(rinfo.rmode() == RelocInfo::NONE64);
rinfo.set_pc(rinfo.pc() + pc_delta);
}
constant_pool_builder_.Relocate(pc_delta);
}
@@ -3597,8 +3590,8 @@ void Assembler::db(uint8_t data) {
// No relocation info should be pending while using db. db is used
// to write pure data with no pointers and the constant pool should
// be emitted before using db.
DCHECK(num_pending_32_bit_reloc_info_ == 0);
DCHECK(num_pending_64_bit_reloc_info_ == 0);
DCHECK(num_pending_32_bit_constants_ == 0);
DCHECK(num_pending_64_bit_constants_ == 0);
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
pc_ += sizeof(uint8_t);
@@ -3609,14 +3602,26 @@ void Assembler::dd(uint32_t data) {
// No relocation info should be pending while using dd. dd is used
// to write pure data with no pointers and the constant pool should
// be emitted before using dd.
DCHECK(num_pending_32_bit_reloc_info_ == 0);
DCHECK(num_pending_64_bit_reloc_info_ == 0);
DCHECK(num_pending_32_bit_constants_ == 0);
DCHECK(num_pending_64_bit_constants_ == 0);
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
pc_ += sizeof(uint32_t);
}
void Assembler::dq(uint64_t value) {
// No relocation info should be pending while using dq. dq is used
// to write pure data with no pointers and the constant pool should
// be emitted before using dq.
DCHECK(num_pending_32_bit_constants_ == 0);
DCHECK(num_pending_64_bit_constants_ == 0);
CheckBuffer();
*reinterpret_cast<uint64_t*>(pc_) = value;
pc_ += sizeof(uint64_t);
}
void Assembler::emit_code_stub_address(Code* stub) {
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) =
@@ -3626,64 +3631,73 @@ void Assembler::emit_code_stub_address(Code* stub) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
RelocInfo rinfo(pc_, rmode, data, NULL);
RecordRelocInfo(rinfo);
}
void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
if (!RelocInfo::IsNone(rinfo.rmode())) {
if (RelocInfo::IsNone(rmode) ||
// Don't record external references unless the heap will be serialized.
if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE &&
!serializer_enabled() && !emit_debug_code()) {
(rmode == RelocInfo::EXTERNAL_REFERENCE && !serializer_enabled() &&
!emit_debug_code())) {
return;
}
DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
RelocInfo reloc_info_with_ast_id(rinfo.pc(),
rinfo.rmode(),
RecordedAstId().ToInt(),
NULL);
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
data = RecordedAstId().ToInt();
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
reloc_info_writer.Write(&rinfo);
}
}
RelocInfo rinfo(pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
}
ConstantPoolArray::LayoutSection Assembler::ConstantPoolAddEntry(
const RelocInfo& rinfo) {
if (FLAG_enable_ool_constant_pool) {
return constant_pool_builder_.AddEntry(this, rinfo);
ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
RelocInfo::Mode rmode,
intptr_t value) {
DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::POSITION &&
rmode != RelocInfo::STATEMENT_POSITION &&
rmode != RelocInfo::CONST_POOL && rmode != RelocInfo::NONE64);
bool sharing_ok = RelocInfo::IsNone(rmode) ||
!(serializer_enabled() || rmode < RelocInfo::CELL);
if (FLAG_enable_embedded_constant_pool) {
return constant_pool_builder_.AddEntry(position, value, sharing_ok);
} else {
if (rinfo.rmode() == RelocInfo::NONE64) {
DCHECK(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
if (num_pending_64_bit_reloc_info_ == 0) {
first_const_pool_64_use_ = pc_offset();
DCHECK(num_pending_32_bit_constants_ < kMaxNumPending32Constants);
if (num_pending_32_bit_constants_ == 0) {
first_const_pool_32_use_ = position;
}
pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
} else {
DCHECK(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
if (num_pending_32_bit_reloc_info_ == 0) {
first_const_pool_32_use_ = pc_offset();
ConstantPoolEntry entry(position, value, sharing_ok);
pending_32_bit_constants_[num_pending_32_bit_constants_++] = entry;
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
BlockConstPoolFor(1);
return ConstantPoolEntry::REGULAR;
}
pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
}
ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
double value) {
if (FLAG_enable_embedded_constant_pool) {
return constant_pool_builder_.AddEntry(position, value);
} else {
DCHECK(num_pending_64_bit_constants_ < kMaxNumPending64Constants);
if (num_pending_64_bit_constants_ == 0) {
first_const_pool_64_use_ = position;
}
ConstantPoolEntry entry(position, value);
pending_64_bit_constants_[num_pending_64_bit_constants_++] = entry;
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
BlockConstPoolFor(1);
return ConstantPoolArray::SMALL_SECTION;
return ConstantPoolEntry::REGULAR;
}
}
void Assembler::BlockConstPoolFor(int instructions) {
if (FLAG_enable_ool_constant_pool) {
// Should be a no-op if using an out-of-line constant pool.
DCHECK(num_pending_32_bit_reloc_info_ == 0);
DCHECK(num_pending_64_bit_reloc_info_ == 0);
if (FLAG_enable_embedded_constant_pool) {
// Should be a no-op if using an embedded constant pool.
DCHECK(num_pending_32_bit_constants_ == 0);
DCHECK(num_pending_64_bit_constants_ == 0);
return;
}
@@ -3692,10 +3706,11 @@ void Assembler::BlockConstPoolFor(int instructions) {
// Max pool start (if we need a jump and an alignment).
#ifdef DEBUG
int start = pc_limit + kInstrSize + 2 * kPointerSize;
DCHECK((num_pending_32_bit_reloc_info_ == 0) ||
DCHECK((num_pending_32_bit_constants_ == 0) ||
(start - first_const_pool_32_use_ +
num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool));
DCHECK((num_pending_64_bit_reloc_info_ == 0) ||
num_pending_64_bit_constants_ * kDoubleSize <
kMaxDistToIntPool));
DCHECK((num_pending_64_bit_constants_ == 0) ||
(start - first_const_pool_64_use_ < kMaxDistToFPPool));
#endif
no_const_pool_before_ = pc_limit;
@@ -3708,10 +3723,10 @@ void Assembler::BlockConstPoolFor(int instructions) {
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
if (FLAG_enable_ool_constant_pool) {
// Should be a no-op if using an out-of-line constant pool.
DCHECK(num_pending_32_bit_reloc_info_ == 0);
DCHECK(num_pending_64_bit_reloc_info_ == 0);
if (FLAG_enable_embedded_constant_pool) {
// Should be a no-op if using an embedded constant pool.
DCHECK(num_pending_32_bit_constants_ == 0);
DCHECK(num_pending_64_bit_constants_ == 0);
return;
}
@@ -3725,8 +3740,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
// There is nothing to do if there are no pending constant pool entries.
if ((num_pending_32_bit_reloc_info_ == 0) &&
(num_pending_64_bit_reloc_info_ == 0)) {
if ((num_pending_32_bit_constants_ == 0) &&
(num_pending_64_bit_constants_ == 0)) {
// Calculate the offset of the next check.
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
return;
@@ -3737,18 +3752,19 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// the gap to the relocation information).
int jump_instr = require_jump ? kInstrSize : 0;
int size_up_to_marker = jump_instr + kInstrSize;
int size_after_marker = num_pending_32_bit_reloc_info_ * kPointerSize;
bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
int estimated_size_after_marker =
num_pending_32_bit_constants_ * kPointerSize;
bool has_fp_values = (num_pending_64_bit_constants_ > 0);
bool require_64_bit_align = false;
if (has_fp_values) {
require_64_bit_align = (((uintptr_t)pc_ + size_up_to_marker) & 0x7);
require_64_bit_align = !IsAligned(
reinterpret_cast<intptr_t>(pc_ + size_up_to_marker), kDoubleAlignment);
if (require_64_bit_align) {
size_after_marker += kInstrSize;
estimated_size_after_marker += kInstrSize;
}
size_after_marker += num_pending_64_bit_reloc_info_ * kDoubleSize;
estimated_size_after_marker += num_pending_64_bit_constants_ * kDoubleSize;
}
int size = size_up_to_marker + size_after_marker;
int estimated_size = size_up_to_marker + estimated_size_after_marker;
// We emit a constant pool when:
// * requested to do so by parameter force_emit (e.g. after each function).
@@ -3762,17 +3778,15 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
DCHECK((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
bool need_emit = false;
if (has_fp_values) {
int dist64 = pc_offset() +
size -
num_pending_32_bit_reloc_info_ * kPointerSize -
int dist64 = pc_offset() + estimated_size -
num_pending_32_bit_constants_ * kPointerSize -
first_const_pool_64_use_;
if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
(!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
need_emit = true;
}
}
int dist32 =
pc_offset() + size - first_const_pool_32_use_;
int dist32 = pc_offset() + estimated_size - first_const_pool_32_use_;
if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
(!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
need_emit = true;
@@ -3780,6 +3794,37 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
if (!need_emit) return;
}
// Deduplicate constants.
int size_after_marker = estimated_size_after_marker;
for (int i = 0; i < num_pending_64_bit_constants_; i++) {
ConstantPoolEntry& entry = pending_64_bit_constants_[i];
DCHECK(!entry.is_merged());
for (int j = 0; j < i; j++) {
if (entry.value64() == pending_64_bit_constants_[j].value64()) {
DCHECK(!pending_64_bit_constants_[j].is_merged());
entry.set_merged_index(j);
size_after_marker -= kDoubleSize;
break;
}
}
}
for (int i = 0; i < num_pending_32_bit_constants_; i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
DCHECK(!entry.is_merged());
if (!entry.sharing_ok()) continue;
for (int j = 0; j < i; j++) {
if (entry.value() == pending_32_bit_constants_[j].value()) {
DCHECK(!pending_32_bit_constants_[j].is_merged());
entry.set_merged_index(j);
size_after_marker -= kPointerSize;
break;
}
}
}
int size = size_up_to_marker + size_after_marker;
int needed_space = size + kGap;
while (buffer_space() <= needed_space) GrowBuffer();
@@ -3789,11 +3834,11 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
RecordComment("[ Constant Pool");
RecordConstPool(size);
Label size_check;
bind(&size_check);
// Emit jump over constant pool if necessary.
Label after_pool;
if (require_jump) {
b(&after_pool);
}
if (require_jump) b(size - kPcLoadDelta);
// Put down constant pool marker "Undefined instruction".
// The data size helps disassembly know what to print.
@@ -3806,104 +3851,77 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Emit 64-bit constant pool entries first: their range is smaller than
// 32-bit entries.
for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
for (int i = 0; i < num_pending_64_bit_constants_; i++) {
ConstantPoolEntry& entry = pending_64_bit_constants_[i];
DCHECK(!((uintptr_t)pc_ & 0x7)); // Check 64-bit alignment.
Instr instr = instr_at(rinfo.pc());
Instr instr = instr_at(entry.position());
// Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
DCHECK((IsVldrDPcImmediateOffset(instr) &&
GetVldrDRegisterImmediateOffset(instr) == 0));
int delta = pc_ - rinfo.pc() - kPcLoadDelta;
int delta = pc_offset() - entry.position() - kPcLoadDelta;
DCHECK(is_uint10(delta));
bool found = false;
uint64_t value = rinfo.raw_data64();
for (int j = 0; j < i; j++) {
RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j];
if (value == rinfo2.raw_data64()) {
found = true;
DCHECK(rinfo2.rmode() == RelocInfo::NONE64);
Instr instr2 = instr_at(rinfo2.pc());
DCHECK(IsVldrDPcImmediateOffset(instr2));
delta = GetVldrDRegisterImmediateOffset(instr2);
delta += rinfo2.pc() - rinfo.pc();
break;
if (entry.is_merged()) {
ConstantPoolEntry& merged =
pending_64_bit_constants_[entry.merged_index()];
DCHECK(entry.value64() == merged.value64());
Instr merged_instr = instr_at(merged.position());
DCHECK(IsVldrDPcImmediateOffset(merged_instr));
delta = GetVldrDRegisterImmediateOffset(merged_instr);
delta += merged.position() - entry.position();
}
}
instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));
if (!found) {
uint64_t uint_data = rinfo.raw_data64();
emit(uint_data & 0xFFFFFFFF);
emit(uint_data >> 32);
instr_at_put(entry.position(),
SetVldrDRegisterImmediateOffset(instr, delta));
if (!entry.is_merged()) {
DCHECK(IsAligned(reinterpret_cast<intptr_t>(pc_), kDoubleAlignment));
dq(entry.value64());
}
}
// Emit 32-bit constant pool entries.
for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
DCHECK(rinfo.rmode() != RelocInfo::COMMENT &&
rinfo.rmode() != RelocInfo::POSITION &&
rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
rinfo.rmode() != RelocInfo::CONST_POOL &&
rinfo.rmode() != RelocInfo::NONE64);
Instr instr = instr_at(rinfo.pc());
for (int i = 0; i < num_pending_32_bit_constants_; i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
Instr instr = instr_at(entry.position());
// 64-bit loads shouldn't get here.
DCHECK(!IsVldrDPcImmediateOffset(instr));
DCHECK(!IsMovW(instr));
DCHECK(IsLdrPcImmediateOffset(instr) &&
GetLdrRegisterImmediateOffset(instr) == 0);
if (IsLdrPcImmediateOffset(instr) &&
GetLdrRegisterImmediateOffset(instr) == 0) {
int delta = pc_ - rinfo.pc() - kPcLoadDelta;
int delta = pc_offset() - entry.position() - kPcLoadDelta;
DCHECK(is_uint12(delta));
// 0 is the smallest delta:
// ldr rd, [pc, #0]
// constant pool marker
// data
bool found = false;
if (!serializer_enabled() && rinfo.rmode() >= RelocInfo::CELL) {
for (int j = 0; j < i; j++) {
RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];
if ((rinfo2.data() == rinfo.data()) &&
(rinfo2.rmode() == rinfo.rmode())) {
Instr instr2 = instr_at(rinfo2.pc());
if (IsLdrPcImmediateOffset(instr2)) {
delta = GetLdrRegisterImmediateOffset(instr2);
delta += rinfo2.pc() - rinfo.pc();
found = true;
break;
}
}
}
}
instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
if (!found) {
emit(rinfo.data());
if (entry.is_merged()) {
DCHECK(entry.sharing_ok());
ConstantPoolEntry& merged =
pending_32_bit_constants_[entry.merged_index()];
DCHECK(entry.value() == merged.value());
Instr merged_instr = instr_at(merged.position());
DCHECK(IsLdrPcImmediateOffset(merged_instr));
delta = GetLdrRegisterImmediateOffset(merged_instr);
delta += merged.position() - entry.position();
}
} else {
DCHECK(IsMovW(instr));
instr_at_put(entry.position(),
SetLdrRegisterImmediateOffset(instr, delta));
if (!entry.is_merged()) {
emit(entry.value());
}
}
num_pending_32_bit_reloc_info_ = 0;
num_pending_64_bit_reloc_info_ = 0;
num_pending_32_bit_constants_ = 0;
num_pending_64_bit_constants_ = 0;
first_const_pool_32_use_ = -1;
first_const_pool_64_use_ = -1;
RecordComment("]");
if (after_pool.is_linked()) {
bind(&after_pool);
}
DCHECK_EQ(size, SizeOfCodeGeneratedSince(&size_check));
}
// Since a constant pool was just emitted, move the check offset forward by
@@ -3912,229 +3930,61 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
if (!FLAG_enable_ool_constant_pool) {
return isolate->factory()->empty_constant_pool_array();
}
return constant_pool_builder_.New(isolate);
}
void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
constant_pool_builder_.Populate(this, constant_pool);
}
ConstantPoolBuilder::ConstantPoolBuilder()
: entries_(), current_section_(ConstantPoolArray::SMALL_SECTION) {}
bool ConstantPoolBuilder::IsEmpty() {
return entries_.size() == 0;
}
ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
RelocInfo::Mode rmode) {
if (rmode == RelocInfo::NONE64) {
return ConstantPoolArray::INT64;
} else if (!RelocInfo::IsGCRelocMode(rmode)) {
return ConstantPoolArray::INT32;
} else if (RelocInfo::IsCodeTarget(rmode)) {
return ConstantPoolArray::CODE_PTR;
} else {
DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
return ConstantPoolArray::HEAP_PTR;
}
}
ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
Assembler* assm, const RelocInfo& rinfo) {
RelocInfo::Mode rmode = rinfo.rmode();
DCHECK(rmode != RelocInfo::COMMENT &&
rmode != RelocInfo::POSITION &&
rmode != RelocInfo::STATEMENT_POSITION &&
rmode != RelocInfo::CONST_POOL);
// Try to merge entries which won't be patched.
int merged_index = -1;
ConstantPoolArray::LayoutSection entry_section = current_section_;
if (RelocInfo::IsNone(rmode) ||
(!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
size_t i;
std::vector<ConstantPoolEntry>::const_iterator it;
for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
// Merge with found entry.
merged_index = i;
entry_section = entries_[i].section_;
break;
}
}
}
DCHECK(entry_section <= current_section_);
entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));
if (merged_index == -1) {
// Not merged, so update the appropriate count.
number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
}
// Check if we still have room for another entry in the small section
// given Arm's ldr and vldr immediate offset range.
if (current_section_ == ConstantPoolArray::SMALL_SECTION &&
!(is_uint12(ConstantPoolArray::SizeFor(*small_entries())) &&
is_uint10(ConstantPoolArray::MaxInt64Offset(
small_entries()->count_of(ConstantPoolArray::INT64))))) {
current_section_ = ConstantPoolArray::EXTENDED_SECTION;
}
return entry_section;
}
void ConstantPoolBuilder::Relocate(int pc_delta) {
for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
entry != entries_.end(); entry++) {
DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN);
entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta);
}
}
Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
if (IsEmpty()) {
return isolate->factory()->empty_constant_pool_array();
} else if (extended_entries()->is_empty()) {
return isolate->factory()->NewConstantPoolArray(*small_entries());
} else {
DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION);
return isolate->factory()->NewExtendedConstantPoolArray(
*small_entries(), *extended_entries());
}
}
void ConstantPoolBuilder::Populate(Assembler* assm,
ConstantPoolArray* constant_pool) {
DCHECK_EQ(extended_entries()->is_empty(),
!constant_pool->is_extended_layout());
DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
constant_pool, ConstantPoolArray::SMALL_SECTION)));
if (constant_pool->is_extended_layout()) {
DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
}
// Set up initial offsets.
int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS]
[ConstantPoolArray::NUMBER_OF_TYPES];
for (int section = 0; section <= constant_pool->final_section(); section++) {
int section_start = (section == ConstantPoolArray::EXTENDED_SECTION)
? small_entries()->total_count()
: 0;
for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) {
ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i);
if (number_of_entries_[section].count_of(type) != 0) {
offsets[section][type] = constant_pool->OffsetOfElementAt(
number_of_entries_[section].base_of(type) + section_start);
}
}
}
for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
entry != entries_.end(); entry++) {
RelocInfo rinfo = entry->rinfo_;
RelocInfo::Mode rmode = entry->rinfo_.rmode();
ConstantPoolArray::Type type = GetConstantPoolType(rmode);
// Update constant pool if necessary and get the entry's offset.
int offset;
if (entry->merged_index_ == -1) {
offset = offsets[entry->section_][type];
offsets[entry->section_][type] += ConstantPoolArray::entry_size(type);
if (type == ConstantPoolArray::INT64) {
constant_pool->set_at_offset(offset, rinfo.data64());
} else if (type == ConstantPoolArray::INT32) {
constant_pool->set_at_offset(offset,
static_cast<int32_t>(rinfo.data()));
} else if (type == ConstantPoolArray::CODE_PTR) {
constant_pool->set_at_offset(offset,
reinterpret_cast<Address>(rinfo.data()));
} else {
DCHECK(type == ConstantPoolArray::HEAP_PTR);
constant_pool->set_at_offset(offset,
reinterpret_cast<Object*>(rinfo.data()));
}
offset -= kHeapObjectTag;
entry->merged_index_ = offset; // Stash offset for merged entries.
} else {
DCHECK(entry->merged_index_ < (entry - entries_.begin()));
offset = entries_[entry->merged_index_].merged_index_;
}
// Patch vldr/ldr instruction with correct offset.
Instr instr = assm->instr_at(rinfo.pc());
if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0]'.
Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
DCHECK((Assembler::IsMovW(instr) &&
Instruction::ImmedMovwMovtValue(instr) == 0));
DCHECK((Assembler::IsMovT(next_instr) &&
Instruction::ImmedMovwMovtValue(next_instr) == 0));
assm->instr_at_put(
rinfo.pc(), Assembler::PatchMovwImmediate(instr, offset & 0xffff));
assm->instr_at_put(
rinfo.pc() + Assembler::kInstrSize,
Assembler::PatchMovwImmediate(next_instr, offset >> 16));
} else {
// Instructions to patch must be 'mov rd, [#0]' and 'orr rd, rd, [#0]'.
Instr instr_2 = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
Instr instr_3 = assm->instr_at(rinfo.pc() + 2 * Assembler::kInstrSize);
Instr instr_4 = assm->instr_at(rinfo.pc() + 3 * Assembler::kInstrSize);
DCHECK((Assembler::IsMovImmed(instr) &&
Instruction::Immed8Value(instr) == 0));
DCHECK((Assembler::IsOrrImmed(instr_2) &&
Instruction::Immed8Value(instr_2) == 0) &&
Assembler::GetRn(instr_2).is(Assembler::GetRd(instr_2)));
DCHECK((Assembler::IsOrrImmed(instr_3) &&
Instruction::Immed8Value(instr_3) == 0) &&
Assembler::GetRn(instr_3).is(Assembler::GetRd(instr_3)));
DCHECK((Assembler::IsOrrImmed(instr_4) &&
Instruction::Immed8Value(instr_4) == 0) &&
Assembler::GetRn(instr_4).is(Assembler::GetRd(instr_4)));
assm->instr_at_put(
rinfo.pc(), Assembler::PatchShiftImm(instr, (offset & kImm8Mask)));
assm->instr_at_put(
rinfo.pc() + Assembler::kInstrSize,
Assembler::PatchShiftImm(instr_2, (offset & (kImm8Mask << 8))));
assm->instr_at_put(
rinfo.pc() + 2 * Assembler::kInstrSize,
Assembler::PatchShiftImm(instr_3, (offset & (kImm8Mask << 16))));
assm->instr_at_put(
rinfo.pc() + 3 * Assembler::kInstrSize,
Assembler::PatchShiftImm(instr_4, (offset & (kImm8Mask << 24))));
}
} else if (type == ConstantPoolArray::INT64) {
// Instruction to patch must be 'vldr rd, [pp, #0]'.
DCHECK((Assembler::IsVldrDPpImmediateOffset(instr) &&
Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
DCHECK(is_uint10(offset));
assm->instr_at_put(rinfo.pc(), Assembler::SetVldrDRegisterImmediateOffset(
instr, offset));
} else {
// Instruction to patch must be 'ldr rd, [pp, #0]'.
DCHECK((Assembler::IsLdrPpImmediateOffset(instr) &&
Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
DCHECK(is_uint12(offset));
assm->instr_at_put(
rinfo.pc(), Assembler::SetLdrRegisterImmediateOffset(instr, offset));
}
}
}
void Assembler::PatchConstantPoolAccessInstruction(
int pc_offset, int offset, ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type) {
DCHECK(FLAG_enable_embedded_constant_pool);
Address pc = buffer_ + pc_offset;
// Patch vldr/ldr instruction with correct offset.
Instr instr = instr_at(pc);
if (access == ConstantPoolEntry::OVERFLOWED) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0]'.
Instr next_instr = instr_at(pc + kInstrSize);
DCHECK((IsMovW(instr) && Instruction::ImmedMovwMovtValue(instr) == 0));
DCHECK((IsMovT(next_instr) &&
Instruction::ImmedMovwMovtValue(next_instr) == 0));
instr_at_put(pc, PatchMovwImmediate(instr, offset & 0xffff));
instr_at_put(pc + kInstrSize,
PatchMovwImmediate(next_instr, offset >> 16));
} else {
// Instructions to patch must be 'mov rd, [#0]' and 'orr rd, rd, [#0]'.
Instr instr_2 = instr_at(pc + kInstrSize);
Instr instr_3 = instr_at(pc + 2 * kInstrSize);
Instr instr_4 = instr_at(pc + 3 * kInstrSize);
DCHECK((IsMovImmed(instr) && Instruction::Immed8Value(instr) == 0));
DCHECK((IsOrrImmed(instr_2) && Instruction::Immed8Value(instr_2) == 0) &&
GetRn(instr_2).is(GetRd(instr_2)));
DCHECK((IsOrrImmed(instr_3) && Instruction::Immed8Value(instr_3) == 0) &&
GetRn(instr_3).is(GetRd(instr_3)));
DCHECK((IsOrrImmed(instr_4) && Instruction::Immed8Value(instr_4) == 0) &&
GetRn(instr_4).is(GetRd(instr_4)));
instr_at_put(pc, PatchShiftImm(instr, (offset & kImm8Mask)));
instr_at_put(pc + kInstrSize,
PatchShiftImm(instr_2, (offset & (kImm8Mask << 8))));
instr_at_put(pc + 2 * kInstrSize,
PatchShiftImm(instr_3, (offset & (kImm8Mask << 16))));
instr_at_put(pc + 3 * kInstrSize,
PatchShiftImm(instr_4, (offset & (kImm8Mask << 24))));
}
} else if (type == ConstantPoolEntry::DOUBLE) {
// Instruction to patch must be 'vldr rd, [pp, #0]'.
DCHECK((IsVldrDPpImmediateOffset(instr) &&
GetVldrDRegisterImmediateOffset(instr) == 0));
DCHECK(is_uint10(offset));
instr_at_put(pc, SetVldrDRegisterImmediateOffset(instr, offset));
} else {
// Instruction to patch must be 'ldr rd, [pp, #0]'.
DCHECK((IsLdrPpImmediateOffset(instr) &&
GetLdrRegisterImmediateOffset(instr) == 0));
DCHECK(is_uint12(offset));
instr_at_put(pc, SetLdrRegisterImmediateOffset(instr, offset));
}
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM
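The overflowed path above splits a 32-bit pool offset across a movw/movt pair. A minimal standalone sketch of that split, using a hypothetical offset value (not taken from the diff):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t offset = 0x12345;            // hypothetical out-of-range offset
  uint16_t movw_imm = offset & 0xffff;  // low half, patched into movw
  uint16_t movt_imm = offset >> 16;     // high half, patched into movt
  std::printf("movw rd, #0x%04x ; movt rd, #0x%04x\n", movw_imm, movt_imm);
  return 0;
}

The non-ARMv7 fallback does the same job with a mov plus three orr instructions, depositing the offset one 8-bit chunk at a time via PatchShiftImm.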

139
deps/v8/src/arm/assembler-arm.h

@ -94,7 +94,7 @@ const int kRegister_pc_Code = 15;
struct Register {
static const int kNumRegisters = 16;
static const int kMaxNumAllocatableRegisters =
FLAG_enable_ool_constant_pool ? 8 : 9;
FLAG_enable_embedded_constant_pool ? 8 : 9;
static const int kSizeInBytes = 4;
inline static int NumAllocatableRegisters();
@ -122,7 +122,7 @@ struct Register {
"r7",
"r8",
};
if (FLAG_enable_ool_constant_pool && (index >= 7)) {
if (FLAG_enable_embedded_constant_pool && (index >= 7)) {
return names[index + 1];
}
return names[index];
@ -164,7 +164,7 @@ const Register r5 = { kRegister_r5_Code };
const Register r6 = { kRegister_r6_Code };
// Used as context register.
const Register r7 = {kRegister_r7_Code};
// Used as constant pool pointer register if FLAG_enable_ool_constant_pool.
// Used as constant pool pointer register if FLAG_enable_embedded_constant_pool.
const Register r8 = { kRegister_r8_Code };
// Used as lithium codegen scratch register.
const Register r9 = { kRegister_r9_Code };
@ -651,52 +651,6 @@ class NeonListOperand BASE_EMBEDDED {
};
// Class used to build a constant pool.
class ConstantPoolBuilder BASE_EMBEDDED {
public:
ConstantPoolBuilder();
ConstantPoolArray::LayoutSection AddEntry(Assembler* assm,
const RelocInfo& rinfo);
void Relocate(int pc_delta);
bool IsEmpty();
Handle<ConstantPoolArray> New(Isolate* isolate);
void Populate(Assembler* assm, ConstantPoolArray* constant_pool);
inline ConstantPoolArray::LayoutSection current_section() const {
return current_section_;
}
inline ConstantPoolArray::NumberOfEntries* number_of_entries(
ConstantPoolArray::LayoutSection section) {
return &number_of_entries_[section];
}
inline ConstantPoolArray::NumberOfEntries* small_entries() {
return number_of_entries(ConstantPoolArray::SMALL_SECTION);
}
inline ConstantPoolArray::NumberOfEntries* extended_entries() {
return number_of_entries(ConstantPoolArray::EXTENDED_SECTION);
}
private:
struct ConstantPoolEntry {
ConstantPoolEntry(RelocInfo rinfo, ConstantPoolArray::LayoutSection section,
int merged_index)
: rinfo_(rinfo), section_(section), merged_index_(merged_index) {}
RelocInfo rinfo_;
ConstantPoolArray::LayoutSection section_;
int merged_index_;
};
ConstantPoolArray::Type GetConstantPoolType(RelocInfo::Mode rmode);
std::vector<ConstantPoolEntry> entries_;
ConstantPoolArray::LayoutSection current_section_;
ConstantPoolArray::NumberOfEntries number_of_entries_[2];
};
struct VmovIndex {
unsigned char index;
};
@ -754,19 +708,16 @@ class Assembler : public AssemblerBase {
// Return the address in the constant pool of the code target address used by
// the branch/call instruction at pc, or the object in a mov.
INLINE(static Address constant_pool_entry_address(
Address pc, ConstantPoolArray* constant_pool));
INLINE(static Address constant_pool_entry_address(Address pc,
Address constant_pool));
// Read/Modify the code target address in the branch/call instruction at pc.
INLINE(static Address target_address_at(Address pc,
ConstantPoolArray* constant_pool));
INLINE(static void set_target_address_at(Address pc,
ConstantPoolArray* constant_pool,
Address target,
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED));
INLINE(static Address target_address_at(Address pc, Address constant_pool));
INLINE(static void set_target_address_at(
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(static Address target_address_at(Address pc, Code* code)) {
ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
INLINE(static void set_target_address_at(Address pc,
@ -774,7 +725,7 @@ class Assembler : public AssemblerBase {
Address target,
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED)) {
ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
Address constant_pool = code ? code->constant_pool() : NULL;
set_target_address_at(pc, constant_pool, target, icache_flush_mode);
}
@ -841,6 +792,9 @@ class Assembler : public AssemblerBase {
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
void Align(int m);
// Insert the smallest number of zero bytes possible to align the pc offset
// to a multiple of m. m must be a power of 2 (>= 2).
void DataAlign(int m);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
@ -1448,11 +1402,13 @@ class Assembler : public AssemblerBase {
void RecordConstPool(int size);
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables. The constant pool should be
// emitted before any use of db and dd to ensure that constant pools
// for inline tables, e.g., jump-tables. CheckConstantPool() should be
// called before any use of db/dd/dq/dp to ensure that constant pools
// are not emitted as part of the tables generated.
void db(uint8_t data);
void dd(uint32_t data);
void dq(uint64_t data);
void dp(uintptr_t data) { dd(data); }
// Emits the address of the code stub's first instruction.
void emit_code_stub_address(Code* stub);
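A rough usage sketch for these data directives. EmitTable and its arguments are hypothetical; CheckConstPool(force_emit, require_jump) is the existing ARM assembler entry point for flushing pending constants:

// Emit a small inline jump table; flush the constant pool first so its
// data cannot be interleaved with the table entries.
void EmitTable(Assembler* assm, const uint32_t* targets, int count) {
  assm->CheckConstPool(true /* force_emit */, false /* require_jump */);
  for (int i = 0; i < count; i++) {
    assm->dd(targets[i]);  // one 32-bit entry per target; dq() for 64-bit
  }
}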
@ -1524,8 +1480,8 @@ class Assembler : public AssemblerBase {
static const int kMaxDistToIntPool = 4*KB;
static const int kMaxDistToFPPool = 1*KB;
// All relocations could be integer, it therefore acts as the limit.
static const int kMaxNumPending32RelocInfo = kMaxDistToIntPool/kInstrSize;
static const int kMaxNumPending64RelocInfo = kMaxDistToFPPool/kInstrSize;
static const int kMaxNumPending32Constants = kMaxDistToIntPool / kInstrSize;
static const int kMaxNumPending64Constants = kMaxDistToFPPool / kInstrSize;
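As a quick check on the sizing (a sketch; kInstrSize is 4 bytes on ARM), the limits match the worst case of one pool access per instruction stated below:

static_assert(4 * 1024 / 4 == 1024, "at most 1024 pending 32-bit constants");
static_assert(1 * 1024 / 4 == 256, "at most 256 pending 64-bit constants");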
// Postpone the generation of the constant pool for the specified number of
// instructions.
@ -1540,17 +1496,19 @@ class Assembler : public AssemblerBase {
}
}
// Allocate a constant pool of the correct size for the generated code.
Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
// Generate the constant pool for the generated code.
void PopulateConstantPool(ConstantPoolArray* constant_pool);
int EmitEmbeddedConstantPool() {
DCHECK(FLAG_enable_embedded_constant_pool);
return constant_pool_builder_.Emit(this);
}
bool use_extended_constant_pool() const {
return constant_pool_builder_.current_section() ==
ConstantPoolArray::EXTENDED_SECTION;
bool ConstantPoolAccessIsInOverflow() const {
return constant_pool_builder_.NextAccess(ConstantPoolEntry::INTPTR) ==
ConstantPoolEntry::OVERFLOWED;
}
void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type);
protected:
// Relocation for a type-recording IC has the AST id added to it. This
@ -1585,10 +1543,10 @@ class Assembler : public AssemblerBase {
// Max pool start (if we need a jump and an alignment).
int start = pc_offset() + kInstrSize + 2 * kPointerSize;
// Check the constant pool hasn't been blocked for too long.
DCHECK((num_pending_32_bit_reloc_info_ == 0) ||
(start + num_pending_64_bit_reloc_info_ * kDoubleSize <
DCHECK((num_pending_32_bit_constants_ == 0) ||
(start + num_pending_64_bit_constants_ * kDoubleSize <
(first_const_pool_32_use_ + kMaxDistToIntPool)));
DCHECK((num_pending_64_bit_reloc_info_ == 0) ||
DCHECK((num_pending_64_bit_constants_ == 0) ||
(start < (first_const_pool_64_use_ + kMaxDistToFPPool)));
#endif
// Two cases:
@ -1647,20 +1605,20 @@ class Assembler : public AssemblerBase {
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer;
// Relocation info records are also used during code generation as temporary
// ConstantPoolEntry records are used during code generation as temporary
// containers for constants and code target addresses until they are emitted
// to the constant pool. These pending relocation info records are temporarily
// stored in a separate buffer until a constant pool is emitted.
// to the constant pool. These records are temporarily stored in a separate
// buffer until a constant pool is emitted.
// If every instruction in a long sequence is accessing the pool, we need one
// pending relocation entry per instruction.
// The buffers of pending relocation info.
RelocInfo pending_32_bit_reloc_info_[kMaxNumPending32RelocInfo];
RelocInfo pending_64_bit_reloc_info_[kMaxNumPending64RelocInfo];
// Number of pending reloc info entries in the 32 bits buffer.
int num_pending_32_bit_reloc_info_;
// Number of pending reloc info entries in the 64 bits buffer.
int num_pending_64_bit_reloc_info_;
// The buffers of pending constant pool entries.
ConstantPoolEntry pending_32_bit_constants_[kMaxNumPending32Constants];
ConstantPoolEntry pending_64_bit_constants_[kMaxNumPending64Constants];
// Number of pending constant pool entries in the 32 bits buffer.
int num_pending_32_bit_constants_;
// Number of pending constant pool entries in the 64 bits buffer.
int num_pending_64_bit_constants_;
ConstantPoolBuilder constant_pool_builder_;
@ -1689,15 +1647,12 @@ class Assembler : public AssemblerBase {
void bind_to(Label* L, int pos);
void next(Label* L);
enum UseConstantPoolMode {
USE_CONSTANT_POOL,
DONT_USE_CONSTANT_POOL
};
// Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
void RecordRelocInfo(const RelocInfo& rinfo);
ConstantPoolArray::LayoutSection ConstantPoolAddEntry(const RelocInfo& rinfo);
ConstantPoolEntry::Access ConstantPoolAddEntry(int position,
RelocInfo::Mode rmode,
intptr_t value);
ConstantPoolEntry::Access ConstantPoolAddEntry(int position, double value);
friend class RelocInfo;
friend class CodePatcher;

143
deps/v8/src/arm/builtins-arm.cc

@ -343,6 +343,7 @@ static void Generate_Runtime_NewObject(MacroAssembler* masm,
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@ -367,10 +368,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(r2);
}
// Preserve the two incoming parameters on the stack.
// Preserve the incoming parameters on the stack.
__ SmiTag(r0);
__ push(r0); // Smi-tagged arguments count.
__ push(r1); // Constructor function.
__ push(r0);
__ push(r1);
if (use_new_target) {
__ push(r3);
}
Label rt_call, allocated, normal_new, count_incremented;
__ cmp(r1, r3);
@ -446,7 +450,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// initial map and properties and elements are set to empty fixed array.
// r1: constructor function
// r2: initial map
// r3: object size (not including memento if create_memento)
// r3: object size (including memento if create_memento)
// r4: JSObject (not tagged)
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
__ mov(r5, r4);
@ -520,7 +524,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ add(r4, r4, Operand(kHeapObjectTag));
// Check if a non-empty properties array is needed. Continue with
// allocated object if not fall through to runtime call if it is.
// allocated object if not; allocate and initialize a FixedArray if yes.
// r1: constructor function
// r4: JSObject
// r5: start of next object (not tagged)
@ -575,15 +579,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r5: FixedArray (not tagged)
__ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
{ Label loop, entry;
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(&entry);
__ bind(&loop);
__ str(r0, MemOperand(r2, kPointerSize, PostIndex));
__ bind(&entry);
__ cmp(r2, r6);
__ b(lt, &loop);
}
__ InitializeFieldsWithFiller(r2, r6, r0);
// Store the initialized FixedArray into the properties field of
// the JSObject
@ -617,7 +614,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&allocated);
if (create_memento) {
__ ldr(r2, MemOperand(sp, kPointerSize * 2));
int offset = (use_new_target ? 3 : 2) * kPointerSize;
__ ldr(r2, MemOperand(sp, offset));
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
__ cmp(r2, r5);
__ b(eq, &count_incremented);
@ -631,23 +629,27 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&count_incremented);
}
// Restore the parameters.
if (use_new_target) {
__ pop(r3);
}
__ pop(r1);
// Retrieve smi-tagged arguments count from the stack.
__ ldr(r0, MemOperand(sp));
__ SmiUntag(r0);
// Push new.target onto the construct frame. This is stored just below the
// receiver on the stack.
if (use_new_target) {
__ push(r3);
}
__ push(r4);
__ push(r4);
// Reload the number of arguments and the constructor from the stack.
// sp[0]: receiver
// sp[1]: receiver
// sp[2]: constructor function
// sp[3]: number of arguments (smi-tagged)
__ ldr(r1, MemOperand(sp, 2 * kPointerSize));
__ ldr(r3, MemOperand(sp, 3 * kPointerSize));
// Set up pointer to last argument.
__ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
// Set up number of arguments for function call below
__ SmiUntag(r0, r3);
// Copy arguments and receiver to the expression stack.
// r0: number of arguments
// r1: constructor function
@ -655,9 +657,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of arguments (smi-tagged)
// sp[0]: receiver
// sp[1]: receiver
// sp[2]: constructor function
// sp[3]: number of arguments (smi-tagged)
// sp[2]: new.target (if used)
// sp[2/3]: number of arguments (smi-tagged)
Label loop, entry;
__ SmiTag(r3, r0);
__ b(&entry);
__ bind(&loop);
__ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
@ -680,15 +683,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
if (!is_api_function) {
// TODO(arv): Remove the "!use_new_target" before supporting optimization
// of functions that reference new.target
if (!is_api_function && !use_new_target) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
// r0: result
// sp[0]: receiver
// sp[1]: constructor function
// sp[2]: number of arguments (smi-tagged)
// sp[1]: new.target (if used)
// sp[1/2]: number of arguments (smi-tagged)
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid
@ -699,8 +704,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the result is a smi, it is *not* an object in the ECMA sense.
// r0: result
// sp[0]: receiver (newly allocated object)
// sp[1]: constructor function
// sp[2]: number of arguments (smi-tagged)
// sp[1]: new.target (if used)
// sp[1/2]: number of arguments (smi-tagged)
__ JumpIfSmi(r0, &use_receiver);
// If the type of the result (stored in its map) is less than
@ -718,9 +723,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&exit);
// r0: result
// sp[0]: receiver (newly allocated object)
// sp[1]: constructor function
// sp[2]: number of arguments (smi-tagged)
__ ldr(r1, MemOperand(sp, 2 * kPointerSize));
// sp[1]: new.target (if used)
// sp[1/2]: number of arguments (smi-tagged)
int offset = (use_new_target ? 2 : 1) * kPointerSize;
__ ldr(r1, MemOperand(sp, offset));
// Leave construct frame.
}
@ -733,12 +739,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true, false);
Generate_JSConstructStubHelper(masm, true, false, false);
}
void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
}
@ -789,8 +800,6 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
__ sub(r4, r4, Operand(2), SetCC);
__ b(ge, &loop);
__ add(r0, r0, Operand(1));
// Handle step in.
Label skip_step_in;
ExternalReference debug_step_in_fp =
@ -819,7 +828,8 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// r0: result
// sp[0]: number of arguments (smi-tagged)
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ ldr(r1, MemOperand(sp, 0));
// Get arguments count, skipping over new.target.
__ ldr(r1, MemOperand(sp, kPointerSize));
// Leave construct frame.
}
@ -874,7 +884,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r2: receiver
// r3: argc
// r4: argv
// r5-r6, r8 (if not FLAG_enable_ool_constant_pool) and cp may be clobbered
// r5-r6, r8 (if !FLAG_enable_embedded_constant_pool) and cp may be clobbered
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Clear the context before we push it when entering the internal frame.
@ -922,7 +932,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ mov(r5, Operand(r4));
__ mov(r6, Operand(r4));
if (!FLAG_enable_ool_constant_pool) {
if (!FLAG_enable_embedded_constant_pool) {
__ mov(r8, Operand(r4));
}
if (kR9Available == 1) {
@ -1166,8 +1176,10 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ ldr(r1, FieldMemOperand(r0, Code::kDeoptimizationDataOffset));
{ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
if (FLAG_enable_ool_constant_pool) {
__ ldr(pp, FieldMemOperand(r0, Code::kConstantPoolOffset));
__ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
if (FLAG_enable_embedded_constant_pool) {
__ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r0);
}
// Load the OSR entrypoint offset from the deoptimization data.
@ -1175,10 +1187,8 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(
DeoptimizationInputData::kOsrPcOffsetIndex)));
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
__ add(r0, r0, Operand::SmiUntag(r1));
__ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Compute the target address = code start + osr_offset
__ add(lr, r0, Operand::SmiUntag(r1));
// And "return" to the OSR entry point of the function.
__ Ret();
@ -1392,6 +1402,8 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
Label entry, loop;
Register receiver = LoadDescriptor::ReceiverRegister();
Register key = LoadDescriptor::NameRegister();
Register slot = LoadDescriptor::SlotRegister();
Register vector = LoadWithVectorDescriptor::VectorRegister();
__ ldr(key, MemOperand(fp, indexOffset));
__ b(&entry);
@ -1401,7 +1413,14 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ ldr(receiver, MemOperand(fp, argumentsOffset));
// Use inline caching to speed up access to arguments.
Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Megamorphic();
FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
Handle<TypeFeedbackVector> feedback_vector =
masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
__ mov(slot, Operand(Smi::FromInt(index)));
__ Move(vector, feedback_vector);
Handle<Code> ic =
KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ Call(ic, RelocInfo::CODE_TARGET);
// Push the nth argument.
@ -1649,7 +1668,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(r0);
__ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() |
(FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
(FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
fp.bit() | lr.bit());
__ add(fp, sp,
Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
@ -1722,6 +1741,27 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected
__ bind(&too_few);
// If the function is strong we need to throw an error.
Label no_strong_error;
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
__ tst(r5, Operand(1 << (SharedFunctionInfo::kStrongModeFunction +
kSmiTagSize)));
__ b(eq, &no_strong_error);
// What we really care about is the required number of arguments.
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kLengthOffset));
__ cmp(r0, Operand::SmiUntag(r4));
__ b(ge, &no_strong_error);
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
__ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
}
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
// Calculate copy start address into r0 and copy end address is fp.
@ -1792,6 +1832,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM

227
deps/v8/src/arm/code-stubs-arm.cc

@ -93,9 +93,8 @@ void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
#define __ ACCESS_MASM(masm)
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
Condition cond);
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
Condition cond, Strength strength);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
@ -113,15 +112,15 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
isolate()->counters()->code_stubs()->Increment();
CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
int param_count = descriptor.GetEnvironmentParameterCount();
int param_count = descriptor.GetRegisterParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
DCHECK(param_count == 0 ||
r0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
r0.is(descriptor.GetRegisterParameter(param_count - 1)));
// Push arguments
for (int i = 0; i < param_count; ++i) {
__ push(descriptor.GetEnvironmentParameterRegister(i));
__ push(descriptor.GetRegisterParameter(i));
}
__ CallExternalReference(miss, param_count);
}
@ -238,9 +237,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
Condition cond) {
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
Condition cond, Strength strength) {
Label not_identical;
Label heap_number, return_equal;
__ cmp(r0, r1);
@ -251,10 +249,20 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// They are both equal and they are not both Smis so both of them are not
// Smis. If it's not a heap number, then return equal.
if (cond == lt || cond == gt) {
// Call runtime on identical JSObjects.
__ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
__ b(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics, since
// we need to throw a TypeError. Smis have already been ruled out.
__ cmp(r4, Operand(HEAP_NUMBER_TYPE));
__ b(eq, &return_equal);
__ tst(r4, Operand(kIsNotStringMask));
__ b(ne, slow);
}
} else {
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ b(eq, &heap_number);
@ -262,8 +270,16 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
if (cond != eq) {
__ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
__ b(ge, slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
__ b(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics,
// since we need to throw a TypeError. Smis and heap numbers have
// already been ruled out.
__ tst(r4, Operand(kIsNotStringMask));
__ b(ne, slow);
}
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@ -561,7 +577,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
EmitIdenticalObjectComparison(masm, &slow, cc);
EmitIdenticalObjectComparison(masm, &slow, cc, strength());
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
@ -663,7 +679,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == eq) {
native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
native = Builtins::COMPARE;
native =
is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
int ncr; // NaN compare result
if (cc == lt || cc == le) {
ncr = GREATER;
@ -1084,10 +1101,10 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ ldr(r1, MemOperand(r1));
__ mov(r2, Operand(pending_handler_offset_address));
__ ldr(r2, MemOperand(r2));
if (FLAG_enable_ool_constant_pool) {
__ ldr(pp, FieldMemOperand(r1, Code::kConstantPoolOffset));
__ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
if (FLAG_enable_embedded_constant_pool) {
__ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r1);
}
__ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ add(pc, r1, r2);
}
@ -1132,8 +1149,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// r3: argc
// r4: argv
int marker = type();
if (FLAG_enable_ool_constant_pool) {
__ mov(r8, Operand(isolate()->factory()->empty_constant_pool_array()));
if (FLAG_enable_embedded_constant_pool) {
__ mov(r8, Operand::Zero());
}
__ mov(r7, Operand(Smi::FromInt(marker)));
__ mov(r6, Operand(Smi::FromInt(marker)));
@ -1142,7 +1159,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ ldr(r5, MemOperand(r5));
__ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
__ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
(FLAG_enable_ool_constant_pool ? r8.bit() : 0) |
(FLAG_enable_embedded_constant_pool ? r8.bit() : 0) |
ip.bit());
// Set up frame pointer for the frame to be pushed.
@ -1331,11 +1348,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ ldr(map_load_offset, MemOperand(map_load_offset));
__ str(map, FieldMemOperand(map_load_offset, Cell::kValueOffset));
__ mov(r8, map);
__ mov(scratch, map);
// |map_load_offset| points at the beginning of the cell. Calculate the
// field containing the map.
__ add(function, map_load_offset, Operand(Cell::kValueOffset - 1));
__ RecordWriteField(map_load_offset, Cell::kValueOffset, r8, function,
__ RecordWriteField(map_load_offset, Cell::kValueOffset, scratch, function,
kLRHasNotBeenSaved, kDontSaveFPRegs,
OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
}
@ -1473,9 +1490,8 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
// Ensure that the vector and slot registers won't be clobbered before
// calling the miss handler.
DCHECK(!FLAG_vector_ics ||
!AreAliased(r4, r5, VectorLoadICDescriptor::VectorRegister(),
VectorLoadICDescriptor::SlotRegister()));
DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::VectorRegister(),
LoadWithVectorDescriptor::SlotRegister()));
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r4,
r5, &miss);
@ -1494,9 +1510,8 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
Register scratch = r5;
Register result = r0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
DCHECK(!FLAG_vector_ics ||
(!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
result.is(VectorLoadICDescriptor::SlotRegister())));
DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
result.is(LoadWithVectorDescriptor::SlotRegister()));
// StringCharAtGenerator doesn't use the result register until it's passed
// the different miss possibilities. If it did, we would have a conflict
@ -1520,7 +1535,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
CHECK(!has_new_target());
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
const int kDisplacement =
@ -1578,8 +1592,6 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[4] : receiver displacement
// sp[8] : function
CHECK(!has_new_target());
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
__ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@ -1608,8 +1620,6 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// r6 : allocated object (tagged)
// r9 : mapped parameter count (tagged)
CHECK(!has_new_target());
__ ldr(r1, MemOperand(sp, 0 * kPointerSize));
// r1 = parameter count (tagged)
@ -1666,7 +1676,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
const int kNormalOffset =
Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
const int kAliasedOffset =
Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
__ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
@ -1850,14 +1860,6 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
if (has_new_target()) {
__ cmp(r1, Operand(Smi::FromInt(0)));
Label skip_decrement;
__ b(eq, &skip_decrement);
// Subtract 1 from smi-tagged arguments count.
__ sub(r1, r1, Operand(2));
__ bind(&skip_decrement);
}
__ str(r1, MemOperand(sp, 0));
__ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
__ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
@ -1939,9 +1941,10 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// Stack layout on entry.
// sp[0] : index of rest parameter
// sp[4] : number of parameters
// sp[8] : receiver displacement
// sp[0] : language mode
// sp[4] : index of rest parameter
// sp[8] : number of parameters
// sp[12] : receiver displacement
Label runtime;
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@ -1951,13 +1954,13 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ str(r1, MemOperand(sp, 1 * kPointerSize));
__ str(r1, MemOperand(sp, 2 * kPointerSize));
__ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
__ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
__ str(r3, MemOperand(sp, 2 * kPointerSize));
__ str(r3, MemOperand(sp, 3 * kPointerSize));
__ bind(&runtime);
__ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
__ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
}
@ -2418,7 +2421,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// this position in a symbol (see static asserts in type-feedback-vector.h).
Label check_allocation_site;
Register feedback_map = r5;
Register weak_value = r8;
Register weak_value = r6;
__ ldr(weak_value, FieldMemOperand(r4, WeakCell::kValueOffset));
__ cmp(r1, weak_value);
__ b(eq, &done);
@ -2703,6 +2706,13 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
__ b(ne, &miss);
// Increment the call count for monomorphic function calls.
__ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
__ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
__ ldr(r3, FieldMemOperand(r2, 0));
__ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ str(r3, FieldMemOperand(r2, 0));
__ mov(r2, r4);
__ mov(r3, r1);
ArrayConstructorStub stub(masm->isolate(), arg_count());
@ -2762,6 +2772,13 @@ void CallICStub::Generate(MacroAssembler* masm) {
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(r1, &extra_checks_or_miss);
// Increment the call count for monomorphic function calls.
__ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
__ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
__ ldr(r3, FieldMemOperand(r2, 0));
__ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ str(r3, FieldMemOperand(r2, 0));
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
@ -2837,6 +2854,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ add(r4, r4, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(r2, with_types_offset));
// Initialize the call counter.
__ Move(r5, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
__ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + kPointerSize));
// Store the function. Use a stub since we need a frame for allocation.
// r2 - vector
// r3 - slot
@ -2937,9 +2959,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
__ Push(VectorLoadICDescriptor::VectorRegister(),
VectorLoadICDescriptor::SlotRegister(), object_, index_);
if (embed_mode == PART_OF_IC_HANDLER) {
__ Push(LoadWithVectorDescriptor::VectorRegister(),
LoadWithVectorDescriptor::SlotRegister(), object_, index_);
} else {
// index_ is consumed by runtime conversion function.
__ Push(object_, index_);
@ -2954,9 +2976,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ Move(index_, r0);
if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
__ Pop(VectorLoadICDescriptor::VectorRegister(),
VectorLoadICDescriptor::SlotRegister(), object_);
if (embed_mode == PART_OF_IC_HANDLER) {
__ Pop(LoadWithVectorDescriptor::VectorRegister(),
LoadWithVectorDescriptor::SlotRegister(), object_);
} else {
__ pop(object_);
}
@ -3567,7 +3589,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@ -4348,15 +4370,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
VectorRawLoadStub stub(isolate(), state());
EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
VectorRawKeyedLoadStub stub(isolate());
EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@ -4375,12 +4397,10 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
}
void VectorRawLoadStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
@ -4474,14 +4494,14 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
}
void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // r1
Register name = VectorLoadICDescriptor::NameRegister(); // r2
Register vector = VectorLoadICDescriptor::VectorRegister(); // r3
Register slot = VectorLoadICDescriptor::SlotRegister(); // r0
void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r1
Register name = LoadWithVectorDescriptor::NameRegister(); // r2
Register vector = LoadWithVectorDescriptor::VectorRegister(); // r3
Register slot = LoadWithVectorDescriptor::SlotRegister(); // r0
Register feedback = r4;
Register receiver_map = r5;
Register scratch1 = r8;
Register scratch1 = r6;
__ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
@ -4521,24 +4541,24 @@ void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
void KeyedLoadICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // r1
Register key = VectorLoadICDescriptor::NameRegister(); // r2
Register vector = VectorLoadICDescriptor::VectorRegister(); // r3
Register slot = VectorLoadICDescriptor::SlotRegister(); // r0
void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r1
Register key = LoadWithVectorDescriptor::NameRegister(); // r2
Register vector = LoadWithVectorDescriptor::VectorRegister(); // r3
Register slot = LoadWithVectorDescriptor::SlotRegister(); // r0
Register feedback = r4;
Register receiver_map = r5;
Register scratch1 = r8;
Register scratch1 = r6;
__ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
@ -4568,7 +4588,7 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ b(ne, &try_poly_name);
Handle<Code> megamorphic_stub =
KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
@ -4592,6 +4612,58 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
VectorStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
VectorKeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void VectorStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Label miss;
// TODO(mvstanton): Implement.
__ bind(&miss);
StoreIC::GenerateMiss(masm);
}
void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Label miss;
// TODO(mvstanton): Implement.
__ bind(&miss);
KeyedStoreIC::GenerateMiss(masm);
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@ -5297,6 +5369,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM

3
deps/v8/src/arm/codegen-arm.cc

@ -946,6 +946,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM

3
deps/v8/src/arm/constants-arm.cc

@ -126,6 +126,7 @@ int Registers::Number(const char* name) {
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM

5
deps/v8/src/arm/constants-arm.h

@ -42,6 +42,11 @@ const int kNumVFPRegisters = kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
const int kPCRegister = 15;
const int kNoRegister = -1;
// Used in embedded constant pool builder - max reach in bits for
// various load instructions (unsigned)
const int kLdrMaxReachBits = 12;
const int kVldrMaxReachBits = 10;
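A sketch of the arithmetic these constants encode: ldr takes a 12-bit unsigned byte offset, while vldr takes an 8-bit offset scaled by 4, so both reaches can be expressed in bits:

static_assert((1 << kLdrMaxReachBits) == 4096, "ldr reach: 4 KB from pp");
static_assert((1 << kVldrMaxReachBits) == 1024, "vldr reach: (1 << 8) * 4 bytes");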
// -----------------------------------------------------------------------------
// Conditions.

3
deps/v8/src/arm/cpu-arm.cc

@ -77,6 +77,7 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
#endif
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM

52
deps/v8/src/arm/debug-arm.cc

@ -140,53 +140,6 @@ void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
}
void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-arm.cc).
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
RegList regs = receiver.bit() | name.bit();
if (FLAG_vector_ics) {
regs |= VectorLoadICTrampolineDescriptor::SlotRegister().bit();
}
Generate_DebugBreakCallHelper(masm, regs, 0);
}
void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC store (from ic-arm.cc).
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.bit() | name.bit() | value.bit(), 0);
}
void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for keyed IC load (from ic-arm.cc).
GenerateLoadICDebugBreak(masm);
}
void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC keyed store call (from ic-arm.cc).
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.bit() | name.bit() | value.bit(), 0);
}
void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
// Register state for CompareNil IC
// ----------- S t a t e -------------
// -- r0 : value
// -----------------------------------
Generate_DebugBreakCallHelper(masm, r0.bit(), 0);
}
void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that r0 is TOS which
// is an object - this is not generally the case so this should be used with
@ -267,7 +220,7 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
StandardFrameConstants::kConstantPoolOffset - kPointerSize));
// Pop return address, frame and constant pool pointer (if
// FLAG_enable_ool_constant_pool).
// FLAG_enable_embedded_constant_pool).
__ LeaveFrame(StackFrame::INTERNAL);
{ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
@ -289,6 +242,7 @@ const bool LiveEdit::kFrameDropperSupported = true;
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM

5
deps/v8/src/arm/deoptimizer-arm.cc

@ -353,11 +353,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
DCHECK(FLAG_enable_ool_constant_pool);
DCHECK(FLAG_enable_embedded_constant_pool);
SetFrameSlot(offset, value);
}
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8

4
deps/v8/src/arm/disasm-arm.cc

@ -1904,8 +1904,8 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
//------------------------------------------------------------------------------

14
deps/v8/src/arm/frames-arm.cc

@ -21,7 +21,7 @@ namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
DCHECK(FLAG_enable_ool_constant_pool);
DCHECK(FLAG_enable_embedded_constant_pool);
return pp;
}
@ -29,18 +29,12 @@ Register JavaScriptFrame::constant_pool_pointer_register() {
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
DCHECK(FLAG_enable_ool_constant_pool);
DCHECK(FLAG_enable_embedded_constant_pool);
return pp;
}
Object*& ExitFrame::constant_pool_slot() const {
DCHECK(FLAG_enable_ool_constant_pool);
const int offset = ExitFrameConstants::kConstantPoolOffset;
return Memory::Object_at(fp() + offset);
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM

54
deps/v8/src/arm/frames-arm.h

@ -66,11 +66,23 @@ const int kNumDoubleCalleeSaved = 8;
// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
const int kNumSafepointRegisters = 16;
// The embedded constant pool pointer (r8/pp) is not included in the safepoint
// since it is not tagged. This register is preserved in the stack frame where
// its value will be updated if GC code movement occurs. Including it in the
// safepoint (where it will not be relocated) would cause a stale value to be
// restored.
const RegList kConstantPointerRegMask =
FLAG_enable_embedded_constant_pool ? (1 << 8) : 0;
const int kNumConstantPoolPointerReg =
FLAG_enable_embedded_constant_pool ? 1 : 0;
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
const RegList kSafepointSavedRegisters =
kJSCallerSaved | (kCalleeSaved & ~kConstantPointerRegMask);
const int kNumSafepointSavedRegisters =
kNumJSCallerSaved + kNumCalleeSaved - kNumConstantPoolPointerReg;
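A small sketch of the mask arithmetic above, assuming the usual ARM encoding where register rN occupies bit N and kCalleeSaved spans r4-r11 (the concrete RegList value is an assumption here, not taken from the diff):

#include <cstdint>

constexpr uint32_t kCalleeSavedSketch = 0x0ff0;  // r4..r11, assumed layout
constexpr uint32_t kPpBit = 1u << 8;             // r8 doubles as pp
constexpr uint32_t kSafepointSavedSketch = kCalleeSavedSketch & ~kPpBit;
static_assert(kSafepointSavedSketch == 0x0ef0,
              "r8/pp drops out of the safepoint set when embedded "
              "constant pools are enabled");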
// ----------------------------------------------------
@ -84,11 +96,11 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
static const int kFrameSize = FLAG_enable_ool_constant_pool ?
3 * kPointerSize : 2 * kPointerSize;
static const int kFrameSize =
FLAG_enable_embedded_constant_pool ? 3 * kPointerSize : 2 * kPointerSize;
static const int kConstantPoolOffset = FLAG_enable_ool_constant_pool ?
-3 * kPointerSize : 0;
static const int kConstantPoolOffset =
FLAG_enable_embedded_constant_pool ? -3 * kPointerSize : 0;
static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
@ -116,36 +128,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
class ConstructFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kImplicitReceiverOffset = -6 * kPointerSize;
static const int kConstructorOffset = -5 * kPointerSize;
static const int kLengthOffset = -4 * kPointerSize;
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
};
class InternalFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
};
inline Object* JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
return Memory::Object_at(fp() + offset);

853
deps/v8/src/arm/full-codegen-arm.cc

File diff suppressed because it is too large

296
deps/v8/src/arm/interface-descriptors-arm.cc

@ -16,12 +16,10 @@ const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
const Register LoadDescriptor::ReceiverRegister() { return r1; }
const Register LoadDescriptor::NameRegister() { return r2; }
const Register LoadDescriptor::SlotRegister() { return r0; }
const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return r0; }
const Register VectorLoadICDescriptor::VectorRegister() { return r3; }
const Register LoadWithVectorDescriptor::VectorRegister() { return r3; }
const Register StoreDescriptor::ReceiverRegister() { return r1; }
@ -29,6 +27,12 @@ const Register StoreDescriptor::NameRegister() { return r2; }
const Register StoreDescriptor::ValueRegister() { return r0; }
const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return r4; }
const Register VectorStoreICDescriptor::VectorRegister() { return r3; }
const Register StoreTransitionDescriptor::MapRegister() { return r3; }
@ -56,109 +60,101 @@ const Register MathPowIntegerDescriptor::exponent() {
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
const Register GrowArrayElementsDescriptor::CapacityRegister() { return r2; }
void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r2};
data->Initialize(arraysize(registers), registers, NULL);
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r1};
data->Initialize(arraysize(registers), registers, NULL);
void FastNewContextDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r0};
data->Initialize(arraysize(registers), registers, NULL);
void ToNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r0};
data->Initialize(arraysize(registers), registers, NULL);
void NumberToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void TypeofDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r3};
data->Initialize(arraysize(registers), registers, NULL);
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneShallowArrayDescriptor::Initialize(
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r3, r2, r1};
Representation representations[] = {
Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
Representation::Tagged()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {r3, r2, r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneShallowObjectDescriptor::Initialize(
void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r3, r2, r1, r0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {r3, r2, r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateAllocationSiteDescriptor::Initialize(
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r2, r3};
Representation representations[] = {Representation::Tagged(),
Representation::Tagged(),
Representation::Smi()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {r2, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r2, r3, r1};
Representation representations[] = {
Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
Representation::Tagged()};
data->Initialize(arraysize(registers), registers, representations);
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2, r3, r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void StoreArrayLiteralElementDescriptor::Initialize(
void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r3, r0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {r3, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r1};
data->Initialize(arraysize(registers), registers, NULL);
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionWithFeedbackDescriptor::Initialize(
void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r1, r3};
Representation representations[] = {Representation::Tagged(),
Representation::Tagged(),
Representation::Smi()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {r1, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r1, r3, r2};
Representation representations[] = {
Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
Representation::Tagged()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {r1, r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments
// r1 : the function to call
// r2 : feedback vector
@@ -166,234 +162,206 @@ void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// vector (Smi)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {cp, r0, r1, r2};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {r0, r1, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void RegExpConstructResultDescriptor::Initialize(
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r2, r1, r0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {r2, r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void TransitionElementsKindDescriptor::Initialize(
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r0, r1};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {r0, r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void AllocateHeapNumberDescriptor::Initialize(
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// cp -- context
Register registers[] = {cp};
data->Initialize(arraysize(registers), registers, nullptr);
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
void ArrayConstructorConstantArgCountDescriptor::Initialize(
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// cp -- context
// r0 -- number of arguments
// r1 -- function
// r2 -- allocation site with elements kind
Register registers[] = {cp, r1, r2};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {r1, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
Register registers[] = {cp, r1, r2, r0};
Representation representations[] = {
Representation::Tagged(), Representation::Tagged(),
Representation::Tagged(), Representation::Integer32()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {r1, r2, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
void InternalArrayConstructorConstantArgCountDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
// register state
// cp -- context
// r0 -- number of arguments
// r1 -- constructor function
Register registers[] = {cp, r1};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InternalArrayConstructorDescriptor::Initialize(
void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
Register registers[] = {cp, r1, r0};
Representation representations[] = {Representation::Tagged(),
Representation::Tagged(),
Representation::Integer32()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r1, r0};
data->Initialize(arraysize(registers), registers, NULL);
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r0};
data->Initialize(arraysize(registers), registers, NULL);
void CompareNilDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r0};
data->Initialize(arraysize(registers), registers, NULL);
void ToBooleanDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r1, r0};
data->Initialize(arraysize(registers), registers, NULL);
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpWithAllocationSiteDescriptor::Initialize(
void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r2, r1, r0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {r2, r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r1, r0};
data->Initialize(arraysize(registers), registers, NULL);
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void KeyedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
r2, // key
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // key
};
data->Initialize(arraysize(registers), registers, representations,
data->InitializePlatformSpecific(arraysize(registers), registers,
&noInlineDescriptor);
}
void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void NamedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
r2, // name
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // name
};
data->Initialize(arraysize(registers), registers, representations,
data->InitializePlatformSpecific(arraysize(registers), registers,
&noInlineDescriptor);
}
void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void CallHandlerDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
r0, // receiver
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // receiver
};
data->Initialize(arraysize(registers), registers, representations,
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
r1, // JSFunction
r0, // actual number of arguments
r2, // expected number of arguments
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // JSFunction
Representation::Integer32(), // actual number of arguments
Representation::Integer32(), // expected number of arguments
};
data->Initialize(arraysize(registers), registers, representations,
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void ApiFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
r0, // callee
r4, // call_data
r2, // holder
r1, // api_function_address
r3, // actual number of arguments
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // callee
Representation::Tagged(), // call_data
Representation::Tagged(), // holder
Representation::External(), // api_function_address
Representation::Integer32(), // actual number of arguments
};
data->Initialize(arraysize(registers), registers, representations,
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void ApiAccessorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
r0, // callee
r4, // call_data
r2, // holder
r1, // api_function_address
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // callee
Representation::Tagged(), // call_data
Representation::Tagged(), // holder
Representation::External(), // api_function_address
};
data->Initialize(arraysize(registers), registers, representations,
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void MathRoundVariantDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r1, // math rounding function
r3, // vector slot id
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
} }  // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM
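
Note: the pattern in this file is uniform — the old Initialize() overloads listed cp plus a parallel Representation array, while the new InitializePlatformSpecific() lists only the argument registers, the context and type information having moved to a platform-independent half of the descriptor. A minimal sketch of the new shape (MyStubDescriptor is hypothetical, not from this diff):

// Per-architecture half: argument registers only; cp (context) is implicit.
void MyStubDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {r1, r0};
  data->InitializePlatformSpecific(arraysize(registers), registers);
}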

79
deps/v8/src/arm/lithium-arm.cc

@@ -1092,10 +1092,18 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
// Target
ops.Add(target, zone());
for (int i = 1; i < instr->OperandCount(); i++) {
LOperand* op =
UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
// Context
LOperand* op = UseFixed(instr->OperandAt(1), cp);
ops.Add(op, zone());
// Other register parameters
for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
i < instr->OperandCount(); i++) {
op =
UseFixed(instr->OperandAt(i),
descriptor.GetRegisterParameter(
i - LCallWithDescriptor::kImplicitRegisterParameterCount));
ops.Add(op, zone());
}
@@ -1105,20 +1113,6 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
}
LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
HTailCallThroughMegamorphicCache* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* receiver_register =
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
context, receiver_register, name_register);
}
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r1);
@@ -1869,7 +1863,7 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), r0);
LDateField* result =
new(zone()) LDateField(object, FixedTemp(r1), instr->index());
return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@@ -2148,7 +2142,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
new(zone()) LLoadGlobalGeneric(context, global_object, vector);
@@ -2197,7 +2191,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LInstruction* result =
@@ -2271,7 +2265,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LInstruction* result =
@@ -2336,8 +2330,16 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
DCHECK(instr->key()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
return MarkAsCall(
new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr);
LOperand* slot = NULL;
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
}
LStoreKeyedGeneric* result =
new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
return MarkAsCall(result, instr);
}
@@ -2369,6 +2371,21 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
}
LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
info()->MarkAsDeferredCalling();
LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = Use(instr->object());
LOperand* elements = Use(instr->elements());
LOperand* key = UseRegisterOrConstant(instr->key());
LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
LMaybeGrowElements* result = new (zone())
LMaybeGrowElements(context, object, elements, key, current_capacity);
DefineFixed(result, r0);
return AssignPointerMap(AssignEnvironment(result));
}
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool is_in_object = instr->access().IsInobject();
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2407,8 +2424,15 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* obj =
UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
LOperand* slot = NULL;
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
}
LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
LStoreNamedGeneric* result =
new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
return MarkAsCall(result, instr);
}
@@ -2485,7 +2509,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CallInterfaceDescriptor descriptor =
info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index());
Register reg = descriptor.GetEnvironmentParameterRegister(index);
Register reg = descriptor.GetRegisterParameter(index);
return DefineFixed(result, reg);
}
}
@@ -2602,7 +2626,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
chunk_->AddInlinedFunction(instr->shared());
return NULL;
}
@@ -2671,4 +2695,5 @@ LInstruction* LChunkBuilder::DoAllocateBlockContext(
return MarkAsCall(DefineFixed(result, cp), instr);
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8

83
deps/v8/src/arm/lithium-arm.h

@@ -117,6 +117,7 @@ class LCodeGen;
V(MathPowHalf) \
V(MathRound) \
V(MathSqrt) \
V(MaybeGrowElements) \
V(ModByConstI) \
V(ModByPowerOf2I) \
V(ModI) \
@@ -153,7 +154,6 @@ class LCodeGen;
V(SubI) \
V(RSubI) \
V(TaggedToI) \
V(TailCallThroughMegamorphicCache) \
V(ThisFunction) \
V(ToFastProperties) \
V(TransitionElementsKind) \
@@ -474,26 +474,6 @@ class LCallStub final : public LTemplateInstruction<1, 1, 0> {
};
class LTailCallThroughMegamorphicCache final
: public LTemplateInstruction<0, 3, 0> {
public:
LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
LOperand* name) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
}
LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")
DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
};
class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -1196,6 +1176,8 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
Strength strength() { return hydrogen()->strength(); }
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1567,7 +1549,7 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
LanguageMode language_mode() { return hydrogen()->language_mode(); }
Strength strength() { return hydrogen()->strength(); }
private:
Token::Value op_;
@@ -1865,8 +1847,12 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
const ZoneList<LOperand*>& operands, Zone* zone)
: descriptor_(descriptor),
inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
inputs_(descriptor.GetRegisterParameterCount() +
kImplicitRegisterParameterCount,
zone) {
DCHECK(descriptor.GetRegisterParameterCount() +
kImplicitRegisterParameterCount ==
operands.length());
inputs_.AddAll(operands, zone);
}
@@ -1876,6 +1862,10 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
// The target and context are passed as implicit parameters that are not
// explicitly listed in the descriptor.
static const int kImplicitRegisterParameterCount = 2;
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
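// Note on kImplicitRegisterParameterCount: the two implicit operands are the
// code target and the context, which DoCallWithDescriptor() prepends before
// the registers the descriptor itself declares. Illustrative layout only:
//   ops[0]     -> target
//   ops[1]     -> context (cp)
//   ops[2 + i] -> descriptor.GetRegisterParameter(i)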
@@ -2188,17 +2178,22 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
};
class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 0> {
class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
public:
LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
LOperand* slot, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = value;
temps_[0] = slot;
temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
LOperand* temp_slot() { return temps_[0]; }
LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
@@ -2247,22 +2242,24 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
};
class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 0> {
class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
public:
LStoreKeyedGeneric(LOperand* context,
LOperand* obj,
LOperand* key,
LOperand* value) {
LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
LOperand* value, LOperand* slot, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = obj;
inputs_[1] = object;
inputs_[2] = key;
inputs_[3] = value;
temps_[0] = slot;
temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
LOperand* value() { return inputs_[3]; }
LOperand* temp_slot() { return temps_[0]; }
LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
@@ -2318,6 +2315,28 @@ class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
};
class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
public:
LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
LOperand* key, LOperand* current_capacity) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = elements;
inputs_[3] = key;
inputs_[4] = current_capacity;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* elements() { return inputs_[2]; }
LOperand* key() { return inputs_[3]; }
LOperand* current_capacity() { return inputs_[4]; }
DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
};
class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {

290
deps/v8/src/arm/lithium-codegen-arm.cc

@@ -113,7 +113,7 @@ bool LCodeGen::GeneratePrologue() {
// r1: Callee's JS function.
// cp: Callee's context.
// pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
// pp: Callee's constant pool pointer (if enabled)
// fp: Caller's frame pointer.
// lr: Caller's pc.
@@ -121,7 +121,7 @@ bool LCodeGen::GeneratePrologue() {
// global proxy when called as functions (without an explicit receiver
// object).
if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
!info_->is_native()) {
!info_->is_native() && info_->scope()->has_this_declaration()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
__ ldr(r2, MemOperand(sp, receiver_offset));
@@ -197,8 +197,9 @@ bool LCodeGen::GeneratePrologue() {
__ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Variable* var = scope()->parameter(i);
int first_parameter = scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -595,52 +596,17 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// The translation includes one command per value in the environment.
int translation_size = environment->translation_size();
// The output frame height does not include the parameters.
int height = translation_size - environment->parameter_count();
WriteTranslation(environment->outer(), translation);
bool has_closure_id = !info()->closure().is_null() &&
!info()->closure().is_identical_to(environment->closure());
int closure_id = has_closure_id
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
switch (environment->frame_type()) {
case JS_FUNCTION:
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
break;
case JS_CONSTRUCT:
translation->BeginConstructStubFrame(closure_id, translation_size);
break;
case JS_GETTER:
DCHECK(translation_size == 1);
DCHECK(height == 0);
translation->BeginGetterStubFrame(closure_id);
break;
case JS_SETTER:
DCHECK(translation_size == 2);
DCHECK(height == 0);
translation->BeginSetterStubFrame(closure_id);
break;
case STUB:
translation->BeginCompiledStubFrame();
break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
}
WriteTranslationFrame(environment, translation);
int object_index = 0;
int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
AddToTranslation(environment,
translation,
value,
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i),
&object_index,
&dematerialized_index);
AddToTranslation(
environment, translation, value, environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
}
}
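// Note: the deleted per-frame-type switch (JS_FUNCTION, JS_CONSTRUCT, ...) is
// presumably not gone but hoisted — the new single call to
// WriteTranslationFrame(environment, translation) suggests the shared
// LCodeGenBase now owns that logic for every architecture. (Inference from
// this hunk; the shared file is not part of this excerpt.)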
@@ -960,28 +926,11 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
}
int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
int result = deoptimization_literals_.length();
for (int i = 0; i < deoptimization_literals_.length(); ++i) {
if (deoptimization_literals_[i].is_identical_to(literal)) return i;
}
deoptimization_literals_.Add(literal, zone());
return result;
}
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
DCHECK(deoptimization_literals_.length() == 0);
const ZoneList<Handle<JSFunction> >* inlined_closures =
chunk()->inlined_closures();
for (int i = 0, length = inlined_closures->length();
i < length;
i++) {
DefineDeoptimizationLiteral(inlined_closures->at(i));
DCHECK_EQ(0, deoptimization_literals_.length());
for (auto function : chunk()->inlined_functions()) {
DefineDeoptimizationLiteral(function);
}
inlined_function_count_ = deoptimization_literals_.length();
}
@@ -1016,10 +965,6 @@ void LCodeGen::RecordSafepoint(
safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
if (FLAG_enable_ool_constant_pool && (kind & Safepoint::kWithRegisters)) {
// Register pp always contains a pointer to the constant pool.
safepoint.DefinePointerRegister(pp, zone());
}
}
@@ -1936,20 +1881,15 @@ void LCodeGen::DoDateField(LDateField* instr) {
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->temp());
Smi* index = instr->index();
Label runtime, done;
DCHECK(object.is(result));
DCHECK(object.is(r0));
DCHECK(!scratch.is(scratch0()));
DCHECK(!scratch.is(object));
__ SmiTst(object);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
__ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
if (index->value() == 0) {
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
} else {
Label runtime, done;
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ mov(scratch, Operand(stamp));
@@ -2174,8 +2114,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(r0));
DCHECK(ToRegister(instr->result()).is(r0));
Handle<Code> code = CodeFactory::BinaryOpIC(
isolate(), instr->op(), instr->language_mode()).code();
Handle<Code> code =
CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
@@ -2611,7 +2551,8 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
Handle<Code> ic =
CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// This instruction also signals no smi code inlined.
__ cmp(r0, Operand::Zero());
@@ -2885,13 +2826,18 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET);
int additional_delta = (call_size / Assembler::kInstrSize) + 4;
{
// Make sure that code size is predictable, since we use specific constant
// offsets in the code to find embedded values.
PredictableCodeSizeScope predictable(
masm_, (additional_delta + 1) * Assembler::kInstrSize);
// Make sure we don't emit any additional entries in the constant pool before
// the call to ensure that the CallCodeSize() calculated the correct number of
// instructions for the constant pool load.
masm_, additional_delta * Assembler::kInstrSize);
// The labels must already be bound since the code has predictable size up
// to the call instruction.
DCHECK(map_check->is_bound());
DCHECK(bool_load->is_bound());
// Make sure we don't emit any additional entries in the constant pool
// before the call to ensure that the CallCodeSize() calculated the
// correct number of instructions for the constant pool load.
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
int map_check_delta =
@@ -2912,10 +2858,9 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
__ nop();
}
}
CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
instr,
CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
}
LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
// Put the result value (r0) into the result register slot and
@@ -2928,7 +2873,8 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
Handle<Code> ic =
CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// This instruction also signals no smi code inlined.
__ cmp(r0, Operand::Zero());
@@ -2986,10 +2932,9 @@ void LCodeGen::DoReturn(LReturn* instr) {
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
Register slot_register = VectorLoadICDescriptor::SlotRegister();
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
Register slot_register = LoadDescriptor::SlotRegister();
DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
DCHECK(slot_register.is(r0));
AllowDeferredHandleDereference vector_structure_check;
@@ -3002,6 +2947,20 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
}
template <class T>
void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
Register vector_register = ToRegister(instr->temp_vector());
Register slot_register = ToRegister(instr->temp_slot());
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
FeedbackVectorICSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ mov(slot_register, Operand(Smi::FromInt(index)));
}
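// Note on the helper above: the store ICs receive the feedback slot as a
// Smi-tagged index into the TypeFeedbackVector (Smi::FromInt(index)),
// mirroring EmitVectorLoadICRegisters. The call sites below only invoke it
// when HasVectorAndSlot() is true, so stores without feedback emit nothing
// extra.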
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->global_object())
@@ -3009,11 +2968,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->result()).is(r0));
__ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
if (FLAG_vector_ics) {
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3108,11 +3065,10 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in r2.
__ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
if (FLAG_vector_ics) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
}
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
isolate(), NOT_CONTEXTUAL,
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(
isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -3269,7 +3225,8 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
case SLOPPY_ARGUMENTS_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3420,9 +3377,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
Handle<Code> ic =
CodeFactory::KeyedLoadICInOptimizedCode(
isolate(), instr->hydrogen()->initialization_state()).code();
Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
isolate(), instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -3961,29 +3918,6 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
}
void LCodeGen::DoTailCallThroughMegamorphicCache(
LTailCallThroughMegamorphicCache* instr) {
Register receiver = ToRegister(instr->receiver());
Register name = ToRegister(instr->name());
DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
DCHECK(name.is(LoadDescriptor::NameRegister()));
DCHECK(receiver.is(r1));
DCHECK(name.is(r2));
Register scratch = r4;
Register extra = r5;
Register extra2 = r6;
Register extra3 = r9;
// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(
masm(), Code::LOAD_IC, instr->hydrogen()->flags(), false, receiver, name,
scratch, extra, extra2, extra3);
// Tail call to miss if we ended up here.
LoadIC::GenerateMiss(masm());
}
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(r0));
@@ -4274,10 +4208,14 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
if (instr->hydrogen()->HasVectorAndSlot()) {
EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
}
__ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
Handle<Code> ic =
StoreIC::initialize_stub(isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state());
Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -4385,7 +4323,8 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
case SLOPPY_ARGUMENTS_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4498,6 +4437,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
if (instr->hydrogen()->HasVectorAndSlot()) {
EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
}
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state()).code();
@@ -4505,6 +4448,100 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
}
void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
class DeferredMaybeGrowElements final : public LDeferredCode {
public:
DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
: LDeferredCode(codegen), instr_(instr) {}
void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
LInstruction* instr() override { return instr_; }
private:
LMaybeGrowElements* instr_;
};
Register result = r0;
DeferredMaybeGrowElements* deferred =
new (zone()) DeferredMaybeGrowElements(this, instr);
LOperand* key = instr->key();
LOperand* current_capacity = instr->current_capacity();
DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
DCHECK(key->IsConstantOperand() || key->IsRegister());
DCHECK(current_capacity->IsConstantOperand() ||
current_capacity->IsRegister());
if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
int32_t constant_capacity =
ToInteger32(LConstantOperand::cast(current_capacity));
if (constant_key >= constant_capacity) {
// Deferred case.
__ jmp(deferred->entry());
}
} else if (key->IsConstantOperand()) {
int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
__ cmp(ToRegister(current_capacity), Operand(constant_key));
__ b(le, deferred->entry());
} else if (current_capacity->IsConstantOperand()) {
int32_t constant_capacity =
ToInteger32(LConstantOperand::cast(current_capacity));
__ cmp(ToRegister(key), Operand(constant_capacity));
__ b(ge, deferred->entry());
} else {
__ cmp(ToRegister(key), ToRegister(current_capacity));
__ b(ge, deferred->entry());
}
if (instr->elements()->IsRegister()) {
__ Move(result, ToRegister(instr->elements()));
} else {
__ ldr(result, ToMemOperand(instr->elements()));
}
__ bind(deferred->exit());
}
void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
Register result = r0;
__ mov(result, Operand::Zero());
// We have to call a stub.
{
PushSafepointRegistersScope scope(this);
if (instr->object()->IsRegister()) {
__ Move(result, ToRegister(instr->object()));
} else {
__ ldr(result, ToMemOperand(instr->object()));
}
LOperand* key = instr->key();
if (key->IsConstantOperand()) {
__ Move(r3, Operand(ToSmi(LConstantOperand::cast(key))));
} else {
__ Move(r3, ToRegister(key));
__ SmiTag(r3);
}
GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
instr->hydrogen()->kind());
__ CallStub(&stub);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
__ StoreToSafepointRegisterSlot(result, result);
}
// Deopt on smi, which means the elements array changed to dictionary mode.
__ SmiTst(result);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
}
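// The fast/deferred split above, summarized as a plain-C++ sketch (not diff
// content):
//   if (key >= current_capacity) {           // any operand may be a constant
//     result = GrowArrayElementsStub(object, SmiTag(key));  // deferred path
//     if (IsSmi(result)) Deopt();  // smi => elements went dictionary-mode
//   } else {
//     result = elements;  // in bounds: keep the current backing store
//   }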
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
Register scratch = scratch0();
@@ -5957,4 +5994,5 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8

6
deps/v8/src/arm/lithium-codegen-arm.h

@@ -27,7 +27,6 @@ class LCodeGen: public LCodeGenBase {
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
translations_(info->zone()),
@@ -112,6 +111,7 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
@@ -241,7 +241,6 @@ class LCodeGen: public LCodeGenBase {
int* object_index_pointer,
int* dematerialized_index_pointer);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -324,10 +323,11 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorLoadICRegisters(T* instr);
template <class T>
void EmitVectorStoreICRegisters(T* instr);
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
TranslationBuffer translations_;

3
deps/v8/src/arm/lithium-gap-resolver-arm.cc

@@ -299,4 +299,5 @@ void LGapResolver::EmitMove(int index) {
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8

76
deps/v8/src/arm/macro-assembler-arm.cc

@@ -691,28 +691,28 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
void MacroAssembler::PushFixedFrame(Register marker_reg) {
DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
cp.bit() |
(FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
fp.bit() |
lr.bit());
stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() |
(FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
fp.bit() | lr.bit());
}
void MacroAssembler::PopFixedFrame(Register marker_reg) {
DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
cp.bit() |
(FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
fp.bit() |
lr.bit());
ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() |
(FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
fp.bit() | lr.bit());
}
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of contiguous register values starting with r0:
DCHECK(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
// Safepoints expect a block of contiguous register values starting with r0,
// except when FLAG_enable_embedded_constant_pool, which omits pp.
DCHECK(kSafepointSavedRegisters ==
(FLAG_enable_embedded_constant_pool
? ((1 << (kNumSafepointSavedRegisters + 1)) - 1) & ~pp.bit()
: (1 << kNumSafepointSavedRegisters) - 1));
// Safepoints expect a block of kNumSafepointRegisters values on the
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
@@ -742,6 +742,10 @@ void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the highest encoding,
// which means that lowest encodings are closest to the stack pointer.
if (FLAG_enable_embedded_constant_pool && reg_code > pp.code()) {
// RegList omits pp.
reg_code -= 1;
}
DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
return reg_code;
}
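// Worked example for the new adjustment, assuming pp is r8 on ARM when
// FLAG_enable_embedded_constant_pool is on (as elsewhere in this port):
//   r5 (code 5) -> slot index 5 (below pp, unchanged)
//   r9 (code 9) -> slot index 8 (pp itself is omitted from the RegList)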
@@ -985,13 +989,20 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
}
void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address) {
DCHECK(FLAG_enable_embedded_constant_pool);
ldr(pp, MemOperand(code_target_address,
Code::kConstantPoolOffset - Code::kHeaderSize));
add(pp, pp, code_target_address);
}
void MacroAssembler::LoadConstantPoolPointerRegister() {
if (FLAG_enable_ool_constant_pool) {
int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize -
pc_offset() - Instruction::kPCReadOffset;
DCHECK(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
ldr(pp, MemOperand(pc, constant_pool_offset));
}
DCHECK(FLAG_enable_embedded_constant_pool);
int entry_offset = pc_offset() + Instruction::kPCReadOffset;
sub(ip, pc, Operand(entry_offset));
LoadConstantPoolPointerRegisterFromCodeTargetAddress(ip);
}
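// Reading of the new sequence: on ARM a pc read yields the current
// instruction plus Instruction::kPCReadOffset, so 'sub ip, pc, #entry_offset'
// recovers the code object's entry address; the helper then adds the pool
// offset stored in the Code header back onto that address. Roughly:
//   ip = address of code entry
//   pp = ip + [ip + Code::kConstantPoolOffset - Code::kHeaderSize]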
@@ -1000,9 +1011,9 @@ void MacroAssembler::StubPrologue() {
Push(Smi::FromInt(StackFrame::STUB));
// Adjust FP to point to saved FP.
add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
LoadConstantPoolPointerRegister();
set_ool_constant_pool_available(true);
set_constant_pool_available(true);
}
}
@@ -1025,9 +1036,9 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
}
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
LoadConstantPoolPointerRegister();
set_ool_constant_pool_available(true);
set_constant_pool_available(true);
}
}
@@ -1036,7 +1047,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// r0-r3: preserved
PushFixedFrame();
if (FLAG_enable_ool_constant_pool && load_constant_pool_pointer_reg) {
if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
LoadConstantPoolPointerRegister();
}
mov(ip, Operand(Smi::FromInt(type)));
@@ -1056,9 +1067,9 @@ int MacroAssembler::LeaveFrame(StackFrame::Type type) {
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer, return address and constant pool pointer
// (if FLAG_enable_ool_constant_pool).
// (if FLAG_enable_embedded_constant_pool).
int frame_ends;
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
frame_ends = pc_offset();
ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
@@ -1084,7 +1095,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
mov(ip, Operand::Zero());
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
}
mov(ip, Operand(CodeObject()));
@@ -1103,7 +1114,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// fp - ExitFrameConstants::kFrameSize -
// DwVfpRegister::kMaxNumRegisters * kDoubleSize,
// since the sp slot, code slot and constant pool slot (if
// FLAG_enable_ool_constant_pool) were pushed after the fp.
// FLAG_enable_embedded_constant_pool) were pushed after the fp.
}
// Reserve place for the return address and stack space and align the frame
@@ -1183,7 +1194,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
#endif
// Tear down the exit frame, pop the arguments, and return.
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
}
mov(sp, Operand(fp));
@@ -1559,6 +1570,7 @@ void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
add(t0, t0, scratch);
// hash = hash ^ (hash >> 16);
eor(t0, t0, Operand(t0, LSR, 16));
bic(t0, t0, Operand(0xc0000000u));
}
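// The added bic clears the top two bits, i.e. the assembly now matches the
// C++ twin of this hash (ComputeIntegerHash in src/utils.h, which ends in
// '& 0x3fffffff' to keep the result in Smi range). Quoted from memory, so
// treat as a sketch:
uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;  // the counterpart of 'bic ... #0xc0000000'
}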
@@ -3162,7 +3174,7 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
bind(&entry);
cmp(start_offset, end_offset);
b(lt, &loop);
b(lo, &loop);
}
@@ -3390,7 +3402,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
if (ActivationFrameAlignment() > kPointerSize) {
ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
}
}
@@ -3401,7 +3413,7 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
Label small_constant_pool_load, load_result;
ldr(result, MemOperand(ldr_location));
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
// Check if this is an extended constant pool load.
and_(scratch, result, Operand(GetConsantPoolLoadMask()));
teq(scratch, Operand(GetConsantPoolLoadPattern()));
@@ -3455,7 +3467,7 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
bind(&load_result);
// Get the address of the constant.
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
add(result, pp, Operand(result));
} else {
add(result, ldr_location, Operand(result));

10
deps/v8/src/arm/macro-assembler-arm.h

@@ -437,7 +437,7 @@ class MacroAssembler: public Assembler {
}
// Push a fixed frame, consisting of lr, fp, constant pool (if
// FLAG_enable_ool_constant_pool), context and JS function / marker id if
// FLAG_enable_embedded_constant_pool), context and JS function / marker id if
// marker_reg is a valid register.
void PushFixedFrame(Register marker_reg = no_reg);
void PopFixedFrame(Register marker_reg = no_reg);
@@ -1441,6 +1441,11 @@ class MacroAssembler: public Assembler {
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
Register scratch1, Label* found);
// Loads the constant pool pointer (pp) register.
void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address);
void LoadConstantPoolPointerRegister();
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,
@@ -1482,9 +1487,6 @@ class MacroAssembler: public Assembler {
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
// Loads the constant pool pointer (pp) register.
void LoadConstantPoolPointerRegister();
bool generating_stub_;
bool has_frame_;
// This handle will be patched with the code object on installation.

3
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@@ -1193,6 +1193,7 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
#endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM

29
deps/v8/src/arm/simulator-arm.cc

@@ -774,8 +774,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
}
Simulator::~Simulator() {
}
Simulator::~Simulator() { free(stack_); }
// When the generated code calls an external reference we need to catch that in
@@ -824,7 +823,7 @@ class Redirection {
static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
char* addr_of_redirection =
addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
addr_of_swi - offsetof(Redirection, swi_instruction_);
return reinterpret_cast<Redirection*>(addr_of_redirection);
}
@@ -834,6 +833,14 @@ class Redirection {
return redirection->external_function();
}
static void DeleteChain(Redirection* redirection) {
while (redirection != nullptr) {
Redirection* next = redirection->next_;
delete redirection;
redirection = next;
}
}
private:
void* external_function_;
uint32_t swi_instruction_;
@@ -842,6 +849,19 @@ };
};
// static
void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
Redirection::DeleteChain(first);
if (i_cache != nullptr) {
for (HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
entry = i_cache->Next(entry)) {
delete static_cast<CachePage*>(entry->value);
}
delete i_cache;
}
}
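// Presumed shutdown call site (not part of this diff; the accessor names are
// assumptions, not verified against this tree):
//   Simulator::TearDown(isolate->simulator_i_cache(),
//                       isolate->simulator_redirection());
// i.e. the isolate frees the Redirection chain and the decoded-instruction
// cache that were previously leaked at process exit.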
void* Simulator::RedirectExternalReference(void* external_function,
ExternalReference::Type type) {
Redirection* redirection = Redirection::Get(external_function, type);
@@ -4131,7 +4151,8 @@ uintptr_t Simulator::PopAddress() {
return address;
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // USE_SIMULATOR

2
deps/v8/src/arm/simulator-arm.h

@@ -194,6 +194,8 @@ class Simulator {
// Call on program start.
static void Initialize(Isolate* isolate);
static void TearDown(HashMap* i_cache, Redirection* first);
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.

25
deps/v8/src/arm64/assembler-arm64-inl.h

@@ -586,14 +586,13 @@ Address Assembler::target_pointer_address_at(Address pc) {
// Read/Modify the code target address in the branch/call instruction at pc.
Address Assembler::target_address_at(Address pc,
ConstantPoolArray* constant_pool) {
Address Assembler::target_address_at(Address pc, Address constant_pool) {
return Memory::Address_at(target_pointer_address_at(pc));
}
Address Assembler::target_address_at(Address pc, Code* code) {
ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
@@ -665,8 +664,7 @@ void Assembler::deserialization_set_target_internal_reference_at(
}
void Assembler::set_target_address_at(Address pc,
ConstantPoolArray* constant_pool,
void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
ICacheFlushMode icache_flush_mode) {
Memory::Address_at(target_pointer_address_at(pc)) = target;
@@ -685,7 +683,7 @@ void Assembler::set_target_address_at(Address pc,
Code* code,
Address target,
ICacheFlushMode icache_flush_mode) {
ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
Address constant_pool = code ? code->constant_pool() : NULL;
set_target_address_at(pc, constant_pool, target, icache_flush_mode);
}
@@ -867,8 +865,8 @@ bool RelocInfo::IsPatchedReturnSequence() {
// See arm64/debug-arm64.cc BreakLocation::SetDebugBreakAtReturn().
Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
Instruction* i2 = i1->following();
return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
i2->IsBranchAndLinkToRegister() && (i2->Rn() == ip0.code());
return i1->IsLdrLiteralX() && (i1->Rt() == kIp0Code) &&
i2->IsBranchAndLinkToRegister() && (i2->Rn() == kIp0Code);
}
@@ -1084,13 +1082,14 @@ Instr Assembler::SF(Register rd) {
}
Instr Assembler::ImmAddSub(int64_t imm) {
Instr Assembler::ImmAddSub(int imm) {
DCHECK(IsImmAddSub(imm));
if (is_uint12(imm)) { // No shift required.
return imm << ImmAddSub_offset;
imm <<= ImmAddSub_offset;
} else {
return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
imm = ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
}
return imm;
}
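// Worked example for the narrowed ImmAddSub(int):
//   imm = 0x123    -> fits in 12 bits, encoded as-is (shift bit clear)
//   imm = 0x123000 -> low 12 bits are zero, encoded as 0x123 with the
//                     'LSL #12' bit set (1 << ShiftAddSub_offset)
// Anything else fails IsImmAddSub() and needs a scratch register first.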
@@ -1239,13 +1238,13 @@ LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
}
Instr Assembler::ImmMoveWide(uint64_t imm) {
Instr Assembler::ImmMoveWide(int imm) {
DCHECK(is_uint16(imm));
return imm << ImmMoveWide_offset;
}
Instr Assembler::ShiftMoveWide(int64_t shift) {
Instr Assembler::ShiftMoveWide(int shift) {
DCHECK(is_uint2(shift));
return shift << ShiftMoveWide_offset;
}

101
deps/v8/src/arm64/assembler-arm64.cc

@@ -580,8 +580,9 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->buffer = reinterpret_cast<byte*>(buffer_);
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) -
reloc_info_writer.pos();
desc->reloc_size =
static_cast<int>((reinterpret_cast<byte*>(buffer_) + buffer_size_) -
reloc_info_writer.pos());
desc->origin = this;
}
}
@@ -600,13 +601,13 @@ void Assembler::CheckLabelLinkChain(Label const * label) {
if (label->is_linked()) {
static const int kMaxLinksToCheck = 64; // Avoid O(n2) behaviour.
int links_checked = 0;
int linkoffset = label->pos();
int64_t linkoffset = label->pos();
bool end_of_chain = false;
while (!end_of_chain) {
if (++links_checked > kMaxLinksToCheck) break;
Instruction * link = InstructionAt(linkoffset);
int linkpcoffset = link->ImmPCOffset();
int prevlinkoffset = linkoffset + linkpcoffset;
int64_t linkpcoffset = link->ImmPCOffset();
int64_t prevlinkoffset = linkoffset + linkpcoffset;
end_of_chain = (linkoffset == prevlinkoffset);
linkoffset = linkoffset + linkpcoffset;
@ -645,7 +646,8 @@ void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
// currently referring to this label.
label->Unuse();
} else {
label->link_to(reinterpret_cast<byte*>(next_link) - buffer_);
label->link_to(
static_cast<int>(reinterpret_cast<byte*>(next_link) - buffer_));
}
} else if (branch == next_link) {
@ -721,7 +723,7 @@ void Assembler::bind(Label* label) {
while (label->is_linked()) {
int linkoffset = label->pos();
Instruction* link = InstructionAt(linkoffset);
int prevlinkoffset = linkoffset + link->ImmPCOffset();
int prevlinkoffset = linkoffset + static_cast<int>(link->ImmPCOffset());
CheckLabelLinkChain(label);
@ -811,12 +813,13 @@ void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
while (!end_of_chain) {
Instruction * link = InstructionAt(link_offset);
link_pcoffset = link->ImmPCOffset();
link_pcoffset = static_cast<int>(link->ImmPCOffset());
// ADR instructions are not handled by veneers.
if (link->IsImmBranch()) {
int max_reachable_pc = InstructionOffset(link) +
Instruction::ImmBranchRange(link->BranchType());
int max_reachable_pc =
static_cast<int>(InstructionOffset(link) +
Instruction::ImmBranchRange(link->BranchType()));
typedef std::multimap<int, FarBranchInfo>::iterator unresolved_info_it;
std::pair<unresolved_info_it, unresolved_info_it> range;
range = unresolved_branches_.equal_range(max_reachable_pc);
@ -888,12 +891,12 @@ bool Assembler::IsConstantPoolAt(Instruction* instr) {
// The constant pool marker is made of two instructions. These instructions
// will never be emitted by the JIT, so checking for the first one is enough:
// 0: ldr xzr, #<size of pool>
bool result = instr->IsLdrLiteralX() && (instr->Rt() == xzr.code());
bool result = instr->IsLdrLiteralX() && (instr->Rt() == kZeroRegCode);
// It is still worth asserting the marker is complete.
// 4: blr xzr
DCHECK(!result || (instr->following()->IsBranchAndLinkToRegister() &&
instr->following()->Rn() == xzr.code()));
instr->following()->Rn() == kZeroRegCode));
return result;
}
@ -909,7 +912,7 @@ int Assembler::ConstantPoolSizeAt(Instruction* instr) {
const char* message =
reinterpret_cast<const char*>(
instr->InstructionAtOffset(kDebugMessageOffset));
int size = kDebugMessageOffset + strlen(message) + 1;
int size = static_cast<int>(kDebugMessageOffset + strlen(message) + 1);
return RoundUp(size, kInstructionSize) / kInstructionSize;
}
// Same for printf support, see MacroAssembler::CallPrintf().
@ -1599,9 +1602,11 @@ void Assembler::LoadStorePair(const CPURegister& rt,
// 'rt' and 'rt2' can only be aliased for stores.
DCHECK(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
DCHECK(AreSameSizeAndType(rt, rt2));
DCHECK(IsImmLSPair(addr.offset(), CalcLSPairDataSize(op)));
int offset = static_cast<int>(addr.offset());
Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
ImmLSPair(addr.offset(), CalcLSPairDataSize(op));
ImmLSPair(offset, CalcLSPairDataSize(op));
Instr addrmodeop;
if (addr.IsImmediateOffset()) {
@ -1645,11 +1650,11 @@ void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
DCHECK(!rt.Is(rt2));
DCHECK(AreSameSizeAndType(rt, rt2));
DCHECK(addr.IsImmediateOffset());
LSDataSize size = CalcLSPairDataSize(
static_cast<LoadStorePairOp>(op & LoadStorePairMask));
Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
ImmLSPair(addr.offset(), size));
DCHECK(IsImmLSPair(addr.offset(), size));
int offset = static_cast<int>(addr.offset());
Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | ImmLSPair(offset, size));
}
@ -2137,13 +2142,13 @@ Instr Assembler::ImmFP64(double imm) {
// 0000.0000.0000.0000.0000.0000.0000.0000
uint64_t bits = double_to_rawbits(imm);
// bit7: a000.0000
uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
// bit6: 0b00.0000
uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
// bit5_to_0: 00cd.efgh
uint32_t bit5_to_0 = (bits >> 48) & 0x3f;
uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
return static_cast<Instr>((bit7 | bit6 | bit5_to_0) << ImmFP_offset);
}
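
Widening bit7/bit6/bit5_to_0 to uint64_t keeps the OR-and-shift in 64-bit arithmetic before the final cast to Instr. The extraction itself, restated stand-alone (field position assumed to be 13, V8's ImmFP_offset):

    #include <cstdint>
    #include <cstring>

    inline uint32_t EncodeImmFP64(double imm) {
      uint64_t bits;
      std::memcpy(&bits, &imm, sizeof(bits));  // double_to_rawbits equivalent
      uint64_t bit7 = ((bits >> 63) & 0x1) << 7;  // a: sign bit
      uint64_t bit6 = ((bits >> 61) & 0x1) << 6;  // b: exponent bit
      uint64_t bit5_to_0 = (bits >> 48) & 0x3f;   // cdefgh: top mantissa bits
      const int kImmFPOffset = 13;                // assumed field position
      return static_cast<uint32_t>((bit7 | bit6 | bit5_to_0) << kImmFPOffset);
    }
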
@ -2188,8 +2193,8 @@ void Assembler::MoveWide(const Register& rd,
DCHECK(is_uint16(imm));
Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
Emit(SF(rd) | MoveWideImmediateFixed | mov_op | Rd(rd) |
ImmMoveWide(static_cast<int>(imm)) | ShiftMoveWide(shift));
}
@ -2205,7 +2210,7 @@ void Assembler::AddSub(const Register& rd,
DCHECK(IsImmAddSub(immediate));
Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
ImmAddSub(immediate) | dest_reg | RnSP(rn));
ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn));
} else if (operand.IsShiftedRegister()) {
DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
DCHECK(operand.shift() != ROR);
@ -2259,7 +2264,7 @@ void Assembler::brk(int code) {
void Assembler::EmitStringData(const char* string) {
size_t len = strlen(string) + 1;
DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
EmitData(string, len);
EmitData(string, static_cast<int>(len));
// Pad with NULL characters until pc_ is aligned.
const char pad[] = {'\0', '\0', '\0', '\0'};
STATIC_ASSERT(sizeof(pad) == kInstructionSize);
@ -2362,7 +2367,8 @@ void Assembler::ConditionalCompare(const Register& rn,
if (operand.IsImmediate()) {
int64_t immediate = operand.ImmediateValue();
DCHECK(IsImmConditionalCompare(immediate));
ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
ccmpop = ConditionalCompareImmediateFixed | op |
ImmCondCmp(static_cast<unsigned>(immediate));
} else {
DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
@ -2502,15 +2508,16 @@ void Assembler::LoadStore(const CPURegister& rt,
const MemOperand& addr,
LoadStoreOp op) {
Instr memop = op | Rt(rt) | RnSP(addr.base());
int64_t offset = addr.offset();
if (addr.IsImmediateOffset()) {
LSDataSize size = CalcLSDataSize(op);
if (IsImmLSScaled(offset, size)) {
if (IsImmLSScaled(addr.offset(), size)) {
int offset = static_cast<int>(addr.offset());
// Use the scaled addressing mode.
Emit(LoadStoreUnsignedOffsetFixed | memop |
ImmLSUnsigned(offset >> size));
} else if (IsImmLSUnscaled(offset)) {
} else if (IsImmLSUnscaled(addr.offset())) {
int offset = static_cast<int>(addr.offset());
// Use the unscaled addressing mode.
Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
} else {
@ -2536,7 +2543,8 @@ void Assembler::LoadStore(const CPURegister& rt,
} else {
// Pre-index and post-index modes.
DCHECK(!rt.Is(addr.base()));
if (IsImmLSUnscaled(offset)) {
if (IsImmLSUnscaled(addr.offset())) {
int offset = static_cast<int>(addr.offset());
if (addr.IsPreIndex()) {
Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
} else {
@ -2568,6 +2576,14 @@ bool Assembler::IsImmLSPair(int64_t offset, LSDataSize size) {
}
bool Assembler::IsImmLLiteral(int64_t offset) {
int inst_size = static_cast<int>(kInstructionSizeLog2);
bool offset_is_inst_multiple =
(((offset >> inst_size) << inst_size) == offset);
return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width);
}
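
The new IsImmLLiteral accepts offsets that are a whole number of instructions and fit the signed load-literal field. The same predicate with V8's names replaced by assumed values (4-byte instructions, 19-bit field):

    #include <cstdint>

    inline bool IsIntN(int64_t x, unsigned n) {
      int64_t limit = int64_t{1} << (n - 1);
      return -limit <= x && x < limit;
    }

    inline bool IsImmLLiteralSketch(int64_t offset) {
      const int kInstrSizeLog2 = 2;     // assumed: 32-bit instructions
      const unsigned kFieldWidth = 19;  // assumed: ImmLLiteral_width
      bool instr_multiple =
          ((offset >> kInstrSizeLog2) << kInstrSizeLog2) == offset;
      return instr_multiple && IsIntN(offset, kFieldWidth);
    }
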
// Test if a given value can be encoded in the immediate field of a logical
// instruction.
// If it can be encoded, the function returns true, and values pointed to by n,
@ -2849,7 +2865,8 @@ void Assembler::GrowBuffer() {
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();
desc.reloc_size = (buffer + buffer_size_) - reloc_info_writer.pos();
desc.reloc_size =
static_cast<int>((buffer + buffer_size_) - reloc_info_writer.pos());
// Copy the data.
intptr_t pc_delta = desc.buffer - buffer;
@ -3065,7 +3082,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
}
// Record the veneer pool size.
int pool_size = SizeOfCodeGeneratedSince(&size_check);
int pool_size = static_cast<int>(SizeOfCodeGeneratedSince(&size_check));
RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size);
if (unresolved_branches_.empty()) {
@ -3113,7 +3130,8 @@ void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
int Assembler::buffer_space() const {
return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_);
return static_cast<int>(reloc_info_writer.pos() -
reinterpret_cast<byte*>(pc_));
}
@ -3124,20 +3142,6 @@ void Assembler::RecordConstPool(int size) {
}
Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
// No out-of-line constant pool support.
DCHECK(!FLAG_enable_ool_constant_pool);
return isolate->factory()->empty_constant_pool_array();
}
void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
// No out-of-line constant pool support.
DCHECK(!FLAG_enable_ool_constant_pool);
return;
}
void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
// The code at the current instruction should be:
// adr rd, 0
@ -3171,6 +3175,7 @@ void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64

40
deps/v8/src/arm64/assembler-arm64.h

@ -764,7 +764,7 @@ class ConstPool {
shared_entries_count(0) {}
void RecordEntry(intptr_t data, RelocInfo::Mode mode);
int EntryCount() const {
return shared_entries_count + unique_entries_.size();
return shared_entries_count + static_cast<int>(unique_entries_.size());
}
bool IsEmpty() const {
return shared_entries_.empty() && unique_entries_.empty();
@ -851,6 +851,9 @@ class Assembler : public AssemblerBase {
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
void Align(int m);
// Insert the smallest number of zero bytes possible to align the pc offset
// to a multiple of m. m must be a power of 2 (>= 2).
void DataAlign(int m);
inline void Unreachable();
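
DataAlign differs from the existing Align in that it pads with zero bytes rather than whole instructions, so it can align to any power of two >= 2. A free-standing model over a byte buffer:

    #include <cstdint>
    #include <vector>

    // Pad with zero bytes until the emission offset is a multiple of m.
    inline void DataAlign(std::vector<uint8_t>* buf, int m) {
      // Precondition (as in the declaration above): m is a power of 2, >= 2.
      while (buf->size() % static_cast<size_t>(m) != 0) buf->push_back(0);
    }
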
@ -871,13 +874,10 @@ class Assembler : public AssemblerBase {
inline static Address target_pointer_address_at(Address pc);
// Read/Modify the code target address in the branch/call instruction at pc.
inline static Address target_address_at(Address pc,
ConstantPoolArray* constant_pool);
inline static void set_target_address_at(Address pc,
ConstantPoolArray* constant_pool,
Address target,
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED);
inline static Address target_address_at(Address pc, Address constant_pool);
inline static void set_target_address_at(
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
static inline Address target_address_at(Address pc, Code* code);
static inline void set_target_address_at(Address pc,
Code* code,
@ -951,7 +951,7 @@ class Assembler : public AssemblerBase {
// Return the number of instructions generated from label to the
// current position.
int InstructionsGeneratedSince(const Label* label) {
uint64_t InstructionsGeneratedSince(const Label* label) {
return SizeOfCodeGeneratedSince(label) / kInstructionSize;
}
@ -1767,6 +1767,8 @@ class Assembler : public AssemblerBase {
// Required by V8.
void dd(uint32_t data) { dc32(data); }
void db(uint8_t data) { dc8(data); }
void dq(uint64_t data) { dc64(data); }
void dp(uintptr_t data) { dc64(data); }
// Code generation helpers --------------------------------------------------
@ -1774,7 +1776,7 @@ class Assembler : public AssemblerBase {
Instruction* pc() const { return Instruction::Cast(pc_); }
Instruction* InstructionAt(int offset) const {
Instruction* InstructionAt(ptrdiff_t offset) const {
return reinterpret_cast<Instruction*>(buffer_ + offset);
}
@ -1841,7 +1843,7 @@ class Assembler : public AssemblerBase {
// Data Processing encoding.
inline static Instr SF(Register rd);
inline static Instr ImmAddSub(int64_t imm);
inline static Instr ImmAddSub(int imm);
inline static Instr ImmS(unsigned imms, unsigned reg_size);
inline static Instr ImmR(unsigned immr, unsigned reg_size);
inline static Instr ImmSetBits(unsigned imms, unsigned reg_size);
@ -1876,10 +1878,11 @@ class Assembler : public AssemblerBase {
static bool IsImmLSUnscaled(int64_t offset);
static bool IsImmLSScaled(int64_t offset, LSDataSize size);
static bool IsImmLLiteral(int64_t offset);
// Move immediates encoding.
inline static Instr ImmMoveWide(uint64_t imm);
inline static Instr ShiftMoveWide(int64_t shift);
inline static Instr ImmMoveWide(int imm);
inline static Instr ShiftMoveWide(int shift);
// FP Immediates.
static Instr ImmFP32(float imm);
@ -1908,11 +1911,12 @@ class Assembler : public AssemblerBase {
// Check if is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
// Allocate a constant pool of the correct size for the generated code.
Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
// Generate the constant pool for the generated code.
void PopulateConstantPool(ConstantPoolArray* constant_pool);
void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type) {
// No embedded constant pool support.
UNREACHABLE();
}
// Returns true if we should emit a veneer as soon as possible for a branch
// which can at most reach to specified pc.

117
deps/v8/src/arm64/builtins-arm64.cc

@ -331,6 +331,7 @@ static void Generate_Runtime_NewObject(MacroAssembler* masm,
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
@ -360,11 +361,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Register argc = x0;
Register constructor = x1;
Register original_constructor = x3;
// x1: constructor function
// Preserve the incoming parameters on the stack.
__ SmiTag(argc);
if (use_new_target) {
__ Push(argc, constructor, original_constructor);
} else {
__ Push(argc, constructor);
// sp[0] : Constructor function.
// sp[1]: number of arguments (smi-tagged)
}
// sp[0]: new.target (if used)
// sp[0/1]: Constructor function.
// sp[1/2]: number of arguments (smi-tagged)
Label rt_call, count_incremented, allocated, normal_new;
__ Cmp(constructor, original_constructor);
@ -522,7 +529,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Add(new_obj, new_obj, kHeapObjectTag);
// Check if a non-empty properties array is needed. Continue with
// allocated object if not, or fall through to runtime call if it is.
// allocated object if not; allocate and initialize a FixedArray if yes.
Register element_count = x3;
__ Ldrb(element_count,
FieldMemOperand(init_map, Map::kUnusedPropertyFieldsOffset));
@ -580,7 +587,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Bind(&allocated);
if (create_memento) {
__ Peek(x10, 2 * kXRegSize);
int offset = (use_new_target ? 3 : 2) * kXRegSize;
__ Peek(x10, offset);
__ JumpIfRoot(x10, Heap::kUndefinedValueRootIndex, &count_incremented);
// r2 is an AllocationSite. We are creating a memento from it, so we
// need to increment the memento create count.
@ -592,18 +600,24 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&count_incremented);
}
__ Push(x4, x4);
// Restore the parameters.
if (use_new_target) {
__ Pop(original_constructor);
}
__ Pop(constructor);
// Reload the number of arguments from the stack.
// Set it up in x0 for the function call below.
// jssp[0]: receiver
// jssp[1]: receiver
// jssp[2]: constructor function
// jssp[3]: number of arguments (smi-tagged)
__ Peek(constructor, 2 * kXRegSize); // Load constructor.
__ Peek(argc, 3 * kXRegSize); // Load number of arguments.
// jssp[0]: number of arguments (smi-tagged)
__ Peek(argc, 0); // Load number of arguments.
__ SmiUntag(argc);
if (use_new_target) {
__ Push(original_constructor, x4, x4);
} else {
__ Push(x4, x4);
}
// Set up pointer to last argument.
__ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
@ -614,8 +628,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// x2: address of last argument (caller sp)
// jssp[0]: receiver
// jssp[1]: receiver
// jssp[2]: constructor function
// jssp[3]: number of arguments (smi-tagged)
// jssp[2]: new.target (if used)
// jssp[2/3]: number of arguments (smi-tagged)
// Compute the start address of the copy in x3.
__ Add(x3, x2, Operand(argc, LSL, kPointerSizeLog2));
Label loop, entry, done_copying_arguments;
@ -646,15 +660,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
if (!is_api_function) {
// TODO(arv): Remove the "!use_new_target" before supporting optimization
// of functions that reference new.target
if (!is_api_function && !use_new_target) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore the context from the frame.
// x0: result
// jssp[0]: receiver
// jssp[1]: constructor function
// jssp[2]: number of arguments (smi-tagged)
// jssp[1]: new.target (if used)
// jssp[1/2]: number of arguments (smi-tagged)
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid
@ -665,8 +681,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the result is a smi, it is *not* an object in the ECMA sense.
// x0: result
// jssp[0]: receiver (newly allocated object)
// jssp[1]: constructor function
// jssp[2]: number of arguments (smi-tagged)
// jssp[1]: number of arguments (smi-tagged)
__ JumpIfSmi(x0, &use_receiver);
// If the type of the result (stored in its map) is less than
@ -683,9 +698,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Bind(&exit);
// x0: result
// jssp[0]: receiver (newly allocated object)
// jssp[1]: constructor function
// jssp[2]: number of arguments (smi-tagged)
__ Peek(x1, 2 * kXRegSize);
// jssp[1]: new.target (if used)
// jssp[1/2]: number of arguments (smi-tagged)
int offset = (use_new_target ? 2 : 1) * kXRegSize;
__ Peek(x1, offset);
// Leave construct frame.
}
@ -698,12 +714,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true, false);
Generate_JSConstructStubHelper(masm, true, false, false);
}
void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
}
@ -731,7 +752,6 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// sp[1]: new.target
// sp[2]: receiver (the hole)
// Set up pointer to last argument.
__ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
@ -759,8 +779,6 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
__ Drop(1);
__ Bind(&done_copying_arguments);
__ Add(x0, x0, Operand(1)); // new.target
// Handle step in.
Label skip_step_in;
ExternalReference debug_step_in_fp =
@ -787,8 +805,8 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// jssp[0]: number of arguments (smi-tagged)
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Load number of arguments (smi).
__ Peek(x1, 0);
// Load number of arguments (smi), skipping over new.target.
__ Peek(x1, kPointerSize);
// Leave construct frame
}
@ -1388,6 +1406,8 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
Label entry, loop;
Register receiver = LoadDescriptor::ReceiverRegister();
Register key = LoadDescriptor::NameRegister();
Register slot = LoadDescriptor::SlotRegister();
Register vector = LoadWithVectorDescriptor::VectorRegister();
__ Ldr(key, MemOperand(fp, indexOffset));
__ B(&entry);
@ -1397,7 +1417,14 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ Ldr(receiver, MemOperand(fp, argumentsOffset));
// Use inline caching to speed up access to arguments.
Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Megamorphic();
FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
Handle<TypeFeedbackVector> feedback_vector =
masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
__ Mov(slot, Smi::FromInt(index));
__ Mov(vector, feedback_vector);
Handle<Code> ic =
KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ Call(ic, RelocInfo::CODE_TARGET);
// Push the nth argument.
@ -1733,13 +1760,38 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected
__ Bind(&too_few);
EnterArgumentsAdaptorFrame(masm);
Register copy_from = x10;
Register copy_end = x11;
Register copy_to = x12;
Register scratch1 = x13, scratch2 = x14;
// If the function is strong we need to throw an error.
Label no_strong_error;
__ Ldr(scratch1,
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(scratch2.W(),
FieldMemOperand(scratch1, SharedFunctionInfo::kCompilerHintsOffset));
__ TestAndBranchIfAllClear(scratch2.W(),
(1 << SharedFunctionInfo::kStrongModeFunction),
&no_strong_error);
// What we really care about is the required number of arguments.
DCHECK_EQ(kPointerSize, kInt64Size);
__ Ldr(scratch2.W(),
FieldMemOperand(scratch1, SharedFunctionInfo::kLengthOffset));
__ Cmp(argc_actual, Operand(scratch2, LSR, 1));
__ B(ge, &no_strong_error);
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
__ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
}
__ Bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
__ Lsl(argc_expected, argc_expected, kPointerSizeLog2);
__ Lsl(argc_actual, argc_actual, kPointerSizeLog2);
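
The new block in the too-few-arguments path implements a strong-mode rule: a strong function may not be invoked with fewer arguments than its declared length. The control flow, restated in plain C++ with hypothetical types:

    #include <stdexcept>

    struct SharedFunctionInfo { bool is_strong; int length; };

    inline void AdaptTooFewArguments(const SharedFunctionInfo& shared,
                                     int argc_actual) {
      if (shared.is_strong && argc_actual < shared.length) {
        // The stub enters an adaptor frame and calls
        // Runtime::kThrowStrongModeTooFewArguments at this point.
        throw std::runtime_error("strong mode: too few arguments");
      }
      // ... otherwise pad the missing arguments with undefined ...
    }
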
@ -1810,6 +1862,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64

234
deps/v8/src/arm64/code-stubs-arm64.cc

@ -102,17 +102,17 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
isolate()->counters()->code_stubs()->Increment();
CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
int param_count = descriptor.GetEnvironmentParameterCount();
int param_count = descriptor.GetRegisterParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
DCHECK((param_count == 0) ||
x0.Is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
x0.Is(descriptor.GetRegisterParameter(param_count - 1)));
// Push arguments
MacroAssembler::PushPopQueue queue(masm);
for (int i = 0; i < param_count; ++i) {
queue.Queue(descriptor.GetEnvironmentParameterRegister(i));
queue.Queue(descriptor.GetRegisterParameter(i));
}
queue.PushQueued();
@ -203,13 +203,11 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// See call site for description.
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Register left,
Register right,
Register scratch,
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
Register right, Register scratch,
FPRegister double_scratch,
Label* slow,
Condition cond) {
Label* slow, Condition cond,
Strength strength) {
DCHECK(!AreAliased(left, right, scratch));
Label not_identical, return_equal, heap_number;
Register result = x0;
@ -223,10 +221,20 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// Smis. If it's not a heap number, then return equal.
Register right_type = scratch;
if ((cond == lt) || (cond == gt)) {
// Call runtime on identical JSObjects. Otherwise return equal.
__ JumpIfObjectType(right, right_type, right_type, FIRST_SPEC_OBJECT_TYPE,
slow, ge);
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
__ B(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics, since
// we need to throw a TypeError. Smis have already been ruled out.
__ Cmp(right_type, Operand(HEAP_NUMBER_TYPE));
__ B(eq, &return_equal);
__ Tst(right_type, Operand(kIsNotStringMask));
__ B(ne, slow);
}
} else if (cond == eq) {
__ JumpIfHeapNumber(right, &heap_number);
} else {
@ -235,8 +243,16 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// Comparing JS objects with <=, >= is complicated.
__ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
__ B(ge, slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
__ B(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics,
// since we need to throw a TypeError. Smis and heap numbers have
// already been ruled out.
__ Tst(right_type, Operand(kIsNotStringMask));
__ B(ne, slow);
}
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@ -513,7 +529,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);
EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond,
strength());
// If either is a smi (we know that at least one is not a smi), then they can
// only be strictly equal if the other is a HeapNumber.
@ -632,7 +649,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cond == eq) {
native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
native = Builtins::COMPARE;
native =
is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
int ncr; // NaN compare result
if ((cond == lt) || (cond == le)) {
ncr = GREATER;
@ -1433,9 +1451,8 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
// Ensure that the vector and slot registers won't be clobbered before
// calling the miss handler.
DCHECK(!FLAG_vector_ics ||
!AreAliased(x10, x11, VectorLoadICDescriptor::VectorRegister(),
VectorLoadICDescriptor::SlotRegister()));
DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::VectorRegister(),
LoadWithVectorDescriptor::SlotRegister()));
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
x11, &miss);
@ -1455,9 +1472,8 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
Register result = x0;
Register scratch = x10;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
DCHECK(!FLAG_vector_ics ||
(!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
result.is(VectorLoadICDescriptor::SlotRegister())));
DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
result.is(LoadWithVectorDescriptor::SlotRegister()));
// StringCharAtGenerator doesn't use the result register until it's passed
// the different miss possibilities. If it did, we would have a conflict
@ -1669,7 +1685,6 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
CHECK(!has_new_target());
Register arg_count = ArgumentsAccessReadDescriptor::parameter_count();
Register key = ArgumentsAccessReadDescriptor::index();
DCHECK(arg_count.is(x0));
@ -1726,8 +1741,6 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// jssp[8]: address of receiver argument
// jssp[16]: function
CHECK(!has_new_target());
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
Register caller_fp = x10;
@ -1759,8 +1772,6 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
//
// Returns pointer to result object in x0.
CHECK(!has_new_target());
// Note: arg_count_smi is an alias of param_count_smi.
Register arg_count_smi = x3;
Register param_count_smi = x3;
@ -1869,8 +1880,9 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ Ldr(sloppy_args_map,
ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
__ Ldr(aliased_args_map,
ContextMemOperand(global_ctx, Context::ALIASED_ARGUMENTS_MAP_INDEX));
__ Ldr(
aliased_args_map,
ContextMemOperand(global_ctx, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX));
__ Cmp(mapped_params, 0);
__ CmovX(sloppy_args_map, aliased_args_map, ne);
@ -2087,15 +2099,6 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
MemOperand(caller_fp,
ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(param_count, param_count_smi);
if (has_new_target()) {
__ Cmp(param_count, Operand(0));
Label skip_decrement;
__ B(eq, &skip_decrement);
// Skip new.target: it is not a part of arguments.
__ Sub(param_count, param_count, Operand(1));
__ SmiTag(param_count_smi, param_count);
__ Bind(&skip_decrement);
}
__ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
__ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
@ -2192,19 +2195,21 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// Stack layout on entry.
// jssp[0]: index of rest parameter (tagged)
// jssp[8]: number of parameters (tagged)
// jssp[16]: address of receiver argument
// jssp[0]: language mode (tagged)
// jssp[8]: index of rest parameter (tagged)
// jssp[16]: number of parameters (tagged)
// jssp[24]: address of receiver argument
//
// Returns pointer to result object in x0.
// Get the stub arguments from the frame, and make an untagged copy of the
// parameter count.
Register rest_index_smi = x1;
Register param_count_smi = x2;
Register params = x3;
Register language_mode_smi = x1;
Register rest_index_smi = x2;
Register param_count_smi = x3;
Register params = x4;
Register param_count = x13;
__ Pop(rest_index_smi, param_count_smi, params);
__ Pop(language_mode_smi, rest_index_smi, param_count_smi, params);
__ SmiUntag(param_count, param_count_smi);
// Test if arguments adaptor needed.
@ -2217,9 +2222,10 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
__ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ B(ne, &runtime);
// x1 rest_index_smi index of rest parameter
// x2 param_count_smi number of parameters passed to function (smi)
// x3 params pointer to parameters
// x1 language_mode_smi language mode
// x2 rest_index_smi index of rest parameter
// x3 param_count_smi number of parameters passed to function (smi)
// x4 params pointer to parameters
// x11 caller_fp caller's frame pointer
// x13 param_count number of parameters passed to function
@ -2232,8 +2238,8 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
__ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
__ Bind(&runtime);
__ Push(params, param_count_smi, rest_index_smi);
__ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
__ Push(params, param_count_smi, rest_index_smi, language_mode_smi);
__ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
}
@ -3086,10 +3092,18 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ Ldr(map, FieldMemOperand(scratch, HeapObject::kMapOffset));
__ JumpIfNotRoot(map, Heap::kAllocationSiteMapRootIndex, &miss);
// Increment the call count for monomorphic function calls.
__ Add(feedback_vector, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ Add(feedback_vector, feedback_vector,
Operand(FixedArray::kHeaderSize + kPointerSize));
__ Ldr(index, FieldMemOperand(feedback_vector, 0));
__ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ Str(index, FieldMemOperand(feedback_vector, 0));
Register allocation_site = feedback_vector;
Register original_constructor = index;
__ Mov(allocation_site, scratch);
Register original_constructor = x3;
__ Mov(original_constructor, function);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
@ -3155,6 +3169,15 @@ void CallICStub::Generate(MacroAssembler* masm) {
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(function, &extra_checks_or_miss);
// Increment the call count for monomorphic function calls.
__ Add(feedback_vector, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ Add(feedback_vector, feedback_vector,
Operand(FixedArray::kHeaderSize + kPointerSize));
__ Ldr(index, FieldMemOperand(feedback_vector, 0));
__ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ Str(index, FieldMemOperand(feedback_vector, 0));
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
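
Both CallIC hunks compute the same slot: the call count is stored one pointer past the feedback element for the slot, inside the vector's FixedArray payload. The address arithmetic, with arm64 sizes assumed:

    #include <cstdint>

    inline intptr_t CallCountAddress(intptr_t vector, intptr_t slot_index) {
      const int kPointerSize = 8;            // arm64
      const int kFixedArrayHeaderSize = 16;  // assumed: map + length fields
      return vector
             + slot_index * kPointerSize  // UntagSmiAndScale in the stub
             + kFixedArrayHeaderSize
             + kPointerSize;  // skip the feedback element itself
    }
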
@ -3230,6 +3253,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Adds(x4, x4, Operand(Smi::FromInt(1)));
__ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
// Initialize the call counter.
__ Mov(x5, Smi::FromInt(CallICNexus::kCallCountIncrement));
__ Adds(x4, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize + kPointerSize));
// Store the function. Use a stub since we need a frame for allocation.
// x2 - vector
// x3 - slot
@ -3324,9 +3353,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
// If index is a heap number, try converting it to an integer.
__ JumpIfNotHeapNumber(index_, index_not_number_);
call_helper.BeforeCall(masm);
if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
__ Push(VectorLoadICDescriptor::VectorRegister(),
VectorLoadICDescriptor::SlotRegister(), object_, index_);
if (embed_mode == PART_OF_IC_HANDLER) {
__ Push(LoadWithVectorDescriptor::VectorRegister(),
LoadWithVectorDescriptor::SlotRegister(), object_, index_);
} else {
// Save object_ on the stack and pass index_ as argument for runtime call.
__ Push(object_, index_);
@ -3341,9 +3370,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ Mov(index_, x0);
if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
__ Pop(object_, VectorLoadICDescriptor::SlotRegister(),
VectorLoadICDescriptor::VectorRegister());
if (embed_mode == PART_OF_IC_HANDLER) {
__ Pop(object_, LoadWithVectorDescriptor::SlotRegister(),
LoadWithVectorDescriptor::VectorRegister());
} else {
__ Pop(object_);
}
@ -3471,7 +3500,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ Ret();
__ Bind(&unordered);
CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@ -4467,15 +4496,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
VectorRawLoadStub stub(isolate(), state());
EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
VectorRawKeyedLoadStub stub(isolate());
EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@ -4494,12 +4523,10 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
}
void VectorRawLoadStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
@ -4596,11 +4623,11 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
}
void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // x1
Register name = VectorLoadICDescriptor::NameRegister(); // x2
Register vector = VectorLoadICDescriptor::VectorRegister(); // x3
Register slot = VectorLoadICDescriptor::SlotRegister(); // x0
void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // x1
Register name = LoadWithVectorDescriptor::NameRegister(); // x2
Register vector = LoadWithVectorDescriptor::VectorRegister(); // x3
Register slot = LoadWithVectorDescriptor::SlotRegister(); // x0
Register feedback = x4;
Register receiver_map = x5;
Register scratch1 = x6;
@ -4640,21 +4667,21 @@ void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
void KeyedLoadICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // x1
Register key = VectorLoadICDescriptor::NameRegister(); // x2
Register vector = VectorLoadICDescriptor::VectorRegister(); // x3
Register slot = VectorLoadICDescriptor::SlotRegister(); // x0
void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // x1
Register key = LoadWithVectorDescriptor::NameRegister(); // x2
Register vector = LoadWithVectorDescriptor::VectorRegister(); // x3
Register slot = LoadWithVectorDescriptor::SlotRegister(); // x0
Register feedback = x4;
Register receiver_map = x5;
Register scratch1 = x6;
@ -4686,7 +4713,7 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
&try_poly_name);
Handle<Code> megamorphic_stub =
KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ Bind(&try_poly_name);
@ -4710,6 +4737,58 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
VectorStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
VectorKeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void VectorStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Label miss;
// TODO(mvstanton): Implement.
__ Bind(&miss);
StoreIC::GenerateMiss(masm);
}
void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Label miss;
// TODO(mvstanton): Implement.
__ Bind(&miss);
KeyedStoreIC::GenerateMiss(masm);
}
// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
// a "Push lr" instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
@ -5412,7 +5491,7 @@ static const int kCallApiFunctionSpillSpace = 4;
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
return static_cast<int>(ref0.address() - ref1.address());
}
@ -5751,6 +5830,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64

6
deps/v8/src/arm64/code-stubs-arm64.h

@ -138,8 +138,10 @@ class RecordWriteStub: public PlatformCodeStub {
DCHECK(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
DCHECK(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
// Retrieve the offsets to the labels.
int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();
auto offset_to_incremental_noncompacting =
static_cast<int32_t>(instr1->ImmPCOffset());
auto offset_to_incremental_compacting =
static_cast<int32_t>(instr2->ImmPCOffset());
switch (mode) {
case STORE_BUFFER_ONLY:

3
deps/v8/src/arm64/codegen-arm64.cc

@ -634,6 +634,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64

2
deps/v8/src/arm64/constants-arm64.h

@ -84,6 +84,8 @@ const int64_t kXMaxInt = 0x7fffffffffffffffL;
const int64_t kXMinInt = 0x8000000000000000L;
const int32_t kWMaxInt = 0x7fffffff;
const int32_t kWMinInt = 0x80000000;
const unsigned kIp0Code = 16;
const unsigned kIp1Code = 17;
const unsigned kFramePointerRegCode = 29;
const unsigned kLinkRegCode = 30;
const unsigned kZeroRegCode = 31;

3
deps/v8/src/arm64/cpu-arm64.cc

@ -120,6 +120,7 @@ void CpuFeatures::FlushICache(void* address, size_t length) {
#endif
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64

50
deps/v8/src/arm64/debug-arm64.cc

@ -203,53 +203,6 @@ void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
}
void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-arm.cc).
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
RegList regs = receiver.Bit() | name.Bit();
if (FLAG_vector_ics) {
regs |= VectorLoadICTrampolineDescriptor::SlotRegister().Bit();
}
Generate_DebugBreakCallHelper(masm, regs, 0, x10);
}
void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC store (from ic-arm64.cc).
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
}
void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for keyed IC load (from ic-arm.cc).
GenerateLoadICDebugBreak(masm);
}
void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC keyed store call (from ic-arm64.cc).
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
}
void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
// Register state for CompareNil IC
// ----------- S t a t e -------------
// -- r0 : value
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
}
void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that r0 is TOS which
// is an object - this is not generally the case so this should be used with
@ -346,6 +299,7 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
const bool LiveEdit::kFrameDropperSupported = true;
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64

3
deps/v8/src/arm64/decoder-arm64.cc

@ -81,6 +81,7 @@ VISITOR_LIST(DEFINE_VISITOR_CALLERS)
#undef DEFINE_VISITOR_CALLERS
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64

3
deps/v8/src/arm64/delayed-masm-arm64.cc

@ -193,6 +193,7 @@ void DelayedMasm::EmitPending() {
ResetPending();
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64

5
deps/v8/src/arm64/deoptimizer-arm64.cc

@ -354,11 +354,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
// No out-of-line constant pool support.
// No embedded constant pool support.
UNREACHABLE();
}
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8

24
deps/v8/src/arm64/disasm-arm64.cc

@ -1369,11 +1369,12 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
switch (format[1]) {
case 'M': { // IMoveImm or IMoveLSL.
if (format[5] == 'I') {
uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
uint64_t imm = static_cast<uint64_t>(instr->ImmMoveWide())
<< (16 * instr->ShiftMoveWide());
AppendToOutput("#0x%" PRIx64, imm);
} else {
DCHECK(format[5] == 'L');
AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
AppendToOutput("#0x%" PRIx32, instr->ImmMoveWide());
if (instr->ShiftMoveWide() > 0) {
AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide());
}
@ -1383,13 +1384,13 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
case 'L': {
switch (format[2]) {
case 'L': { // ILLiteral - Immediate Load Literal.
AppendToOutput("pc%+" PRId64,
instr->ImmLLiteral() << kLoadLiteralScaleLog2);
AppendToOutput("pc%+" PRId32, instr->ImmLLiteral()
<< kLoadLiteralScaleLog2);
return 9;
}
case 'S': { // ILS - Immediate Load/Store.
if (instr->ImmLS() != 0) {
AppendToOutput(", #%" PRId64, instr->ImmLS());
AppendToOutput(", #%" PRId32, instr->ImmLS());
}
return 3;
}
@ -1397,14 +1398,14 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
if (instr->ImmLSPair() != 0) {
// format[3] is the scale value. Convert to a number.
int scale = format[3] - 0x30;
AppendToOutput(", #%" PRId64, instr->ImmLSPair() * scale);
AppendToOutput(", #%" PRId32, instr->ImmLSPair() * scale);
}
return 4;
}
case 'U': { // ILU - Immediate Load/Store Unsigned.
if (instr->ImmLSUnsigned() != 0) {
AppendToOutput(", #%" PRIu64,
instr->ImmLSUnsigned() << instr->SizeLS());
AppendToOutput(", #%" PRId32, instr->ImmLSUnsigned()
<< instr->SizeLS());
}
return 3;
}
@ -1427,7 +1428,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
AppendToOutput("#%d", 64 - instr->FPScale());
return 8;
} else {
AppendToOutput("#0x%" PRIx64 " (%.4f)", instr->ImmFP(),
AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmFP(),
format[3] == 'S' ? instr->ImmFP32() : instr->ImmFP64());
return 9;
}
@ -1538,7 +1539,7 @@ int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
case 'L': { // HLo.
if (instr->ImmDPShift() != 0) {
const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};
AppendToOutput(", %s #%" PRId64, shift_type[instr->ShiftDP()],
AppendToOutput(", %s #%" PRId32, shift_type[instr->ShiftDP()],
instr->ImmDPShift());
}
return 3;
@ -1729,7 +1730,8 @@ void PrintDisassembler::ProcessOutput(Instruction* instr) {
GetOutput());
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
namespace disasm {

9
deps/v8/src/arm64/frames-arm64.cc

@ -31,12 +31,7 @@ Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
}
Object*& ExitFrame::constant_pool_slot() const {
UNREACHABLE();
return Memory::Object_at(NULL);
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64

30
deps/v8/src/arm64/frames-arm64.h

@ -63,36 +63,6 @@ class JavaScriptFrameConstants : public AllStatic {
};
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
class ConstructFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
static const int kLengthOffset = -4 * kPointerSize;
static const int kConstructorOffset = -5 * kPointerSize;
static const int kImplicitReceiverOffset = -6 * kPointerSize;
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
};
class InternalFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
};
inline Object* JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
return Memory::Object_at(fp() + offset);

855
deps/v8/src/arm64/full-codegen-arm64.cc

File diff suppressed because it is too large

30
deps/v8/src/arm64/instructions-arm64.cc

@ -93,9 +93,9 @@ static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
// met.
uint64_t Instruction::ImmLogical() {
unsigned reg_size = SixtyFourBits() ? kXRegSizeInBits : kWRegSizeInBits;
int64_t n = BitN();
int64_t imm_s = ImmSetBits();
int64_t imm_r = ImmRotate();
int32_t n = BitN();
int32_t imm_s = ImmSetBits();
int32_t imm_r = ImmRotate();
// An integer is constructed from the n, imm_s and imm_r bits according to
// the following table:
@ -211,7 +211,7 @@ Instruction* Instruction::ImmPCOffsetTarget() {
bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
int32_t offset) {
ptrdiff_t offset) {
return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
}
@ -242,7 +242,7 @@ void Instruction::SetPCRelImmTarget(Instruction* target) {
ptrdiff_t target_offset = DistanceTo(target);
Instr imm;
if (Instruction::IsValidPCRelOffset(target_offset)) {
imm = Assembler::ImmPCRelAddress(target_offset);
imm = Assembler::ImmPCRelAddress(static_cast<int>(target_offset));
SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
} else {
PatchingAssembler patcher(this,
@ -254,9 +254,11 @@ void Instruction::SetPCRelImmTarget(Instruction* target) {
void Instruction::SetBranchImmTarget(Instruction* target) {
DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
DCHECK(IsValidImmPCOffset(BranchType(),
DistanceTo(target) >> kInstructionSizeLog2));
int offset = static_cast<int>(DistanceTo(target) >> kInstructionSizeLog2);
Instr branch_imm = 0;
uint32_t imm_mask = 0;
ptrdiff_t offset = DistanceTo(target) >> kInstructionSizeLog2;
switch (BranchType()) {
case CondBranchType: {
branch_imm = Assembler::ImmCondBranch(offset);
@ -287,9 +289,9 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
void Instruction::SetUnresolvedInternalReferenceImmTarget(Instruction* target) {
DCHECK(IsUnresolvedInternalReference());
DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
ptrdiff_t target_offset = DistanceTo(target) >> kInstructionSizeLog2;
DCHECK(is_int32(target_offset));
DCHECK(is_int32(DistanceTo(target) >> kInstructionSizeLog2));
int32_t target_offset =
static_cast<int32_t>(DistanceTo(target) >> kInstructionSizeLog2);
uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);
@ -302,8 +304,9 @@ void Instruction::SetUnresolvedInternalReferenceImmTarget(Instruction* target) {
void Instruction::SetImmLLiteral(Instruction* source) {
DCHECK(IsLdrLiteral());
DCHECK(IsAligned(DistanceTo(source), kInstructionSize));
ptrdiff_t offset = DistanceTo(source) >> kLoadLiteralScaleLog2;
Instr imm = Assembler::ImmLLiteral(offset);
DCHECK(Assembler::IsImmLLiteral(DistanceTo(source)));
Instr imm = Assembler::ImmLLiteral(
static_cast<int>(DistanceTo(source) >> kLoadLiteralScaleLog2));
Instr mask = ImmLLiteral_mask;
SetInstructionBits(Mask(~mask) | imm);
@ -316,7 +319,7 @@ void Instruction::SetImmLLiteral(Instruction* source) {
bool InstructionSequence::IsInlineData() const {
// Inline data is encoded as a single movz instruction which writes to xzr
// (x31).
return IsMovz() && SixtyFourBits() && (Rd() == xzr.code());
return IsMovz() && SixtyFourBits() && (Rd() == kZeroRegCode);
// TODO(all): If we extend ::InlineData() to support bigger data, we need
// to update this method too.
}
@ -334,6 +337,7 @@ uint64_t InstructionSequence::InlineData() const {
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64

14
deps/v8/src/arm64/instructions-arm64.h

@ -137,8 +137,8 @@ class Instruction {
return following(-count);
}
#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
int64_t Name() const { return Func(HighBit, LowBit); }
#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
int32_t Name() const { return Func(HighBit, LowBit); }
INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
#undef DEFINE_GETTER
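
This getter change is what drives the PRId64 -> PRId32 swaps in disasm-arm64.cc above: every field accessor generated by DEFINE_GETTER now returns int32_t. A toy decoder with one such getter (field position assumed) and the matching format usage:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    class Insn {
     public:
      explicit Insn(uint32_t bits) : bits_(bits) {}
      // Assumed layout: signed 9-bit ImmLS field in bits 20:12.
      int32_t ImmLS() const {
        int32_t raw = (bits_ >> 12) & 0x1ff;
        if (raw & 0x100) raw -= 0x200;  // sign-extend 9 -> 32 bits
        return raw;
      }
     private:
      uint32_t bits_;
    };

    int main() {
      Insn insn(0x1ffu << 12);  // all-ones field decodes as -1
      std::printf(", #%" PRId32 "\n", insn.ImmLS());
    }
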
@ -146,8 +146,8 @@ class Instruction {
// formed from ImmPCRelLo and ImmPCRelHi.
int ImmPCRel() const {
DCHECK(IsPCRelAddressing());
int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
int const width = ImmPCRelLo_width + ImmPCRelHi_width;
int offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
int width = ImmPCRelLo_width + ImmPCRelHi_width;
return signed_bitextract_32(width - 1, 0, offset);
}
@ -369,7 +369,7 @@ class Instruction {
// PC-relative addressing instruction.
Instruction* ImmPCOffsetTarget();
static bool IsValidImmPCOffset(ImmBranchType branch_type, int32_t offset);
static bool IsValidImmPCOffset(ImmBranchType branch_type, ptrdiff_t offset);
bool IsTargetInImmPCOffsetRange(Instruction* target);
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
@ -409,9 +409,7 @@ class Instruction {
static const int ImmPCRelRangeBitwidth = 21;
static bool IsValidPCRelOffset(int offset) {
return is_int21(offset);
}
static bool IsValidPCRelOffset(ptrdiff_t offset) { return is_int21(offset); }
void SetPCRelImmTarget(Instruction* target);
void SetBranchImmTarget(Instruction* target);
};

3
deps/v8/src/arm64/instrument-arm64.cc

@ -591,4 +591,5 @@ void Instrument::VisitUnimplemented(Instruction* instr) {
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8

311
deps/v8/src/arm64/interface-descriptors-arm64.cc

@ -16,12 +16,10 @@ const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
const Register LoadDescriptor::ReceiverRegister() { return x1; }
const Register LoadDescriptor::NameRegister() { return x2; }
const Register LoadDescriptor::SlotRegister() { return x0; }
const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return x0; }
const Register VectorLoadICDescriptor::VectorRegister() { return x3; }
const Register LoadWithVectorDescriptor::VectorRegister() { return x3; }
const Register StoreDescriptor::ReceiverRegister() { return x1; }
@ -29,6 +27,12 @@ const Register StoreDescriptor::NameRegister() { return x2; }
const Register StoreDescriptor::ValueRegister() { return x0; }
const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return x4; }
const Register VectorStoreICDescriptor::VectorRegister() { return x3; }
const Register StoreTransitionDescriptor::MapRegister() { return x3; }
@ -62,389 +66,338 @@ const Register MathPowIntegerDescriptor::exponent() { return x12; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
const Register GrowArrayElementsDescriptor::CapacityRegister() { return x2; }
void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: function info
Register registers[] = {cp, x2};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
void FastNewContextDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: function
Register registers[] = {cp, x1};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
void ToNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0: value
Register registers[] = {cp, x0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
void NumberToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0: value
Register registers[] = {cp, x0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void TypeofDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, x3};
data->Initialize(arraysize(registers), registers, NULL);
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {x3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneShallowArrayDescriptor::Initialize(
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// cp: context
// x3: array literals array
// x2: array literal index
// x1: constant elements
Register registers[] = {cp, x3, x2, x1};
Representation representations[] = {
Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
Representation::Tagged()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {x3, x2, x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneShallowObjectDescriptor::Initialize(
void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// cp: context
// x3: object literals array
// x2: object literal index
// x1: constant properties
// x0: object literal flags
Register registers[] = {cp, x3, x2, x1, x0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x3, x2, x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateAllocationSiteDescriptor::Initialize(
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// cp: context
// x2: feedback vector
// x3: call feedback slot
Register registers[] = {cp, x2, x3};
Representation representations[] = {Representation::Tagged(),
Representation::Tagged(),
Representation::Smi()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {x2, x3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: feedback vector
// x3: call feedback slot
// x1: tagged value to put in the weak cell
Register registers[] = {cp, x2, x3, x1};
Representation representations[] = {
Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
Representation::Tagged()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {x2, x3, x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void StoreArrayLiteralElementDescriptor::Initialize(
void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, x3, x0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x3, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1 : the function to call
Register registers[] = {cp, x1};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionWithFeedbackDescriptor::Initialize(
void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, x1, x3};
Representation representations[] = {Representation::Tagged(),
Representation::Tagged(),
Representation::Smi()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {x1, x3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, x1, x3, x2};
Representation representations[] = {
Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
Representation::Tagged()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {x1, x3, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0 : number of arguments
// x1 : the function to call
// x2 : feedback vector
// x3 : slot in feedback vector (smi) (if x2 is not the megamorphic symbol)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {cp, x0, x1, x2};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x0, x1, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void RegExpConstructResultDescriptor::Initialize(
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// cp: context
// x2: length
// x1: index (of last match)
// x0: string
Register registers[] = {cp, x2, x1, x0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x2, x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void TransitionElementsKindDescriptor::Initialize(
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// cp: context
// x0: value (js_array)
// x1: to_map
Register registers[] = {cp, x0, x1};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x0, x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void AllocateHeapNumberDescriptor::Initialize(
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// cp: context
Register registers[] = {cp};
data->Initialize(arraysize(registers), registers, nullptr);
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
void ArrayConstructorConstantArgCountDescriptor::Initialize(
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// cp: context
// x1: function
// x2: allocation site with elements kind
// x0: number of arguments to the constructor function
Register registers[] = {cp, x1, x2};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x1, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// The stack param count includes the constructor pointer and a single argument.
Register registers[] = {cp, x1, x2, x0};
Representation representations[] = {
Representation::Tagged(), Representation::Tagged(),
Representation::Tagged(), Representation::Integer32()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {x1, x2, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
// cp: context
void InternalArrayConstructorConstantArgCountDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
// x1: constructor function
// x0: number of arguments to the constructor function
Register registers[] = {cp, x1};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InternalArrayConstructorDescriptor::Initialize(
void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// The stack param count includes the constructor pointer and a single argument.
Register registers[] = {cp, x1, x0};
Representation representations[] = {Representation::Tagged(),
Representation::Tagged(),
Representation::Integer32()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: left operand
// x0: right operand
Register registers[] = {cp, x1, x0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
void CompareNilDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0: value to compare
Register registers[] = {cp, x0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
void ToBooleanDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0: value
Register registers[] = {cp, x0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: left operand
// x0: right operand
Register registers[] = {cp, x1, x0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpWithAllocationSiteDescriptor::Initialize(
void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// cp: context
// x2: allocation site
// x1: left operand
// x0: right operand
Register registers[] = {cp, x2, x1, x0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x2, x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: left operand
// x0: right operand
Register registers[] = {cp, x1, x0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void KeyedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
x2, // key
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // key
};
data->Initialize(arraysize(registers), registers, representations,
data->InitializePlatformSpecific(arraysize(registers), registers,
&noInlineDescriptor);
}
void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void NamedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
x2, // name
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // name
};
data->Initialize(arraysize(registers), registers, representations,
data->InitializePlatformSpecific(arraysize(registers), registers,
&noInlineDescriptor);
}
void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void CallHandlerDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
x0, // receiver
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // receiver
};
data->Initialize(arraysize(registers), registers, representations,
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
x1, // JSFunction
x0, // actual number of arguments
x2, // expected number of arguments
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // JSFunction
Representation::Integer32(), // actual number of arguments
Representation::Integer32(), // expected number of arguments
};
data->Initialize(arraysize(registers), registers, representations,
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void ApiFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
x0, // callee
x4, // call_data
x2, // holder
x1, // api_function_address
x3, // actual number of arguments
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // callee
Representation::Tagged(), // call_data
Representation::Tagged(), // holder
Representation::External(), // api_function_address
Representation::Integer32(), // actual number of arguments
};
data->Initialize(arraysize(registers), registers, representations,
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void ApiAccessorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
x0, // callee
x4, // call_data
x2, // holder
x1, // api_function_address
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // callee
Representation::Tagged(), // call_data
Representation::Tagged(), // holder
Representation::External(), // api_function_address
};
data->Initialize(arraysize(registers), registers, representations,
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void MathRoundVariantDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
x1, // math rounding function
x3, // vector slot id
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
} // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64

80
deps/v8/src/arm64/lithium-arm64.cc

@ -1050,10 +1050,18 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
// Target
ops.Add(target, zone());
for (int i = 1; i < instr->OperandCount(); i++) {
LOperand* op =
UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
// Context
LOperand* op = UseFixed(instr->OperandAt(1), cp);
ops.Add(op, zone());
// Other register parameters
for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
i < instr->OperandCount(); i++) {
op =
UseFixed(instr->OperandAt(i),
descriptor.GetRegisterParameter(
i - LCallWithDescriptor::kImplicitRegisterParameterCount));
ops.Add(op, zone());
}
@ -1391,7 +1399,7 @@ LInstruction* LChunkBuilder::DoContext(HContext* instr) {
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), x0);
LDateField* result = new(zone()) LDateField(object, instr->index());
return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
return MarkAsCall(DefineFixed(result, x0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@ -1504,7 +1512,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
chunk_->AddInlinedFunction(instr->shared());
return NULL;
}
@ -1588,20 +1596,6 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
HTailCallThroughMegamorphicCache* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* receiver_register =
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
context, receiver_register, name_register);
}
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
// The function is required (by MacroAssembler::InvokeFunction) to be in x1.
@ -1700,7 +1694,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
@ -1766,7 +1760,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LInstruction* result =
@ -1788,7 +1782,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LInstruction* result =
@ -2028,7 +2022,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CallInterfaceDescriptor descriptor =
info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index());
Register reg = descriptor.GetEnvironmentParameterRegister(index);
Register reg = descriptor.GetRegisterParameter(index);
return DefineFixed(result, reg);
}
}
@ -2402,8 +2396,16 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
DCHECK(instr->key()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
return MarkAsCall(
new(zone()) LStoreKeyedGeneric(context, object, key, value), instr);
LOperand* slot = NULL;
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
}
LStoreKeyedGeneric* result = new (zone())
LStoreKeyedGeneric(context, object, key, value, slot, vector);
return MarkAsCall(result, instr);
}
@ -2442,7 +2444,15 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
LInstruction* result = new(zone()) LStoreNamedGeneric(context, object, value);
LOperand* slot = NULL;
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
}
LStoreNamedGeneric* result =
new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
return MarkAsCall(result, instr);
}
@ -2567,6 +2577,21 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
}
LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
info()->MarkAsDeferredCalling();
LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseRegister(instr->object());
LOperand* elements = UseRegister(instr->elements());
LOperand* key = UseRegisterOrConstant(instr->key());
LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
LMaybeGrowElements* result = new (zone())
LMaybeGrowElements(context, object, elements, key, current_capacity);
DefineFixed(result, x0);
return AssignPointerMap(AssignEnvironment(result));
}
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* value = UseFixed(instr->value(), x3);
@ -2763,4 +2788,5 @@ LInstruction* LChunkBuilder::DoAllocateBlockContext(
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8

83
deps/v8/src/arm64/lithium-arm64.h

@ -125,6 +125,7 @@ class LCodeGen;
V(MathRoundD) \
V(MathRoundI) \
V(MathSqrt) \
V(MaybeGrowElements) \
V(ModByConstI) \
V(ModByPowerOf2I) \
V(ModI) \
@ -164,7 +165,6 @@ class LCodeGen;
V(SubI) \
V(SubS) \
V(TaggedToI) \
V(TailCallThroughMegamorphicCache) \
V(ThisFunction) \
V(ToFastProperties) \
V(TransitionElementsKind) \
@ -318,26 +318,6 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {
};
class LTailCallThroughMegamorphicCache final
: public LTemplateInstruction<0, 3, 0> {
public:
LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
LOperand* name) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
}
LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")
DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
};
class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@ -739,7 +719,7 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
LanguageMode language_mode() { return hydrogen()->language_mode(); }
Strength strength() { return hydrogen()->strength(); }
private:
Token::Value op_;
@ -1181,6 +1161,8 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
Strength strength() { return hydrogen()->strength(); }
Token::Value op() const { return hydrogen()->token(); }
};
@ -1550,8 +1532,12 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
const ZoneList<LOperand*>& operands, Zone* zone)
: descriptor_(descriptor),
inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
inputs_(descriptor.GetRegisterParameterCount() +
kImplicitRegisterParameterCount,
zone) {
DCHECK(descriptor.GetRegisterParameterCount() +
kImplicitRegisterParameterCount ==
operands.length());
inputs_.AddAll(operands, zone);
}
@ -1561,6 +1547,10 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
// The target and context are passed as implicit parameters that are not
// explicitly listed in the descriptor.
static const int kImplicitRegisterParameterCount = 2;
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
@ -2563,22 +2553,24 @@ class LStoreKeyedFixedDouble final : public LStoreKeyed<1> {
};
class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 0> {
class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
public:
LStoreKeyedGeneric(LOperand* context,
LOperand* obj,
LOperand* key,
LOperand* value) {
LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
LOperand* value, LOperand* slot, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = obj;
inputs_[1] = object;
inputs_[2] = key;
inputs_[3] = value;
temps_[0] = slot;
temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
LOperand* value() { return inputs_[3]; }
LOperand* temp_slot() { return temps_[0]; }
LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
@ -2615,17 +2607,22 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 2> {
};
class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 0> {
class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
public:
LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
LOperand* slot, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = value;
temps_[0] = slot;
temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
LOperand* temp_slot() { return temps_[0]; }
LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
@ -2637,6 +2634,28 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 0> {
};
class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
public:
LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
LOperand* key, LOperand* current_capacity) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = elements;
inputs_[3] = key;
inputs_[4] = current_capacity;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* elements() { return inputs_[2]; }
LOperand* key() { return inputs_[3]; }
LOperand* current_capacity() { return inputs_[4]; }
DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
};
class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {

329
deps/v8/src/arm64/lithium-codegen-arm64.cc

@ -224,55 +224,17 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// The translation includes one command per value in the environment.
int translation_size = environment->translation_size();
// The output frame height does not include the parameters.
int height = translation_size - environment->parameter_count();
WriteTranslation(environment->outer(), translation);
bool has_closure_id = !info()->closure().is_null() &&
!info()->closure().is_identical_to(environment->closure());
int closure_id = has_closure_id
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
switch (environment->frame_type()) {
case JS_FUNCTION:
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
break;
case JS_CONSTRUCT:
translation->BeginConstructStubFrame(closure_id, translation_size);
break;
case JS_GETTER:
DCHECK(translation_size == 1);
DCHECK(height == 0);
translation->BeginGetterStubFrame(closure_id);
break;
case JS_SETTER:
DCHECK(translation_size == 2);
DCHECK(height == 0);
translation->BeginSetterStubFrame(closure_id);
break;
case STUB:
translation->BeginCompiledStubFrame();
break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
default:
UNREACHABLE();
}
WriteTranslationFrame(environment, translation);
int object_index = 0;
int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
AddToTranslation(environment,
translation,
value,
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i),
&object_index,
&dematerialized_index);
AddToTranslation(
environment, translation, value, environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
}
}
@ -345,16 +307,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
int result = deoptimization_literals_.length();
for (int i = 0; i < deoptimization_literals_.length(); ++i) {
if (deoptimization_literals_[i].is_identical_to(literal)) return i;
}
deoptimization_literals_.Add(literal, zone());
return result;
}
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode) {
environment->set_has_been_used();
@ -435,6 +387,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
CallFunctionStub stub(isolate(), arity, flags);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
@ -449,6 +402,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
DCHECK(ToRegister(instr->result()).is(x0));
}
@ -504,6 +458,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
DCHECK(ToRegister(instr->result()).is(x0));
}
@ -525,7 +480,7 @@ void LCodeGen::LoadContextFromDeferred(LOperand* context) {
if (context->IsRegister()) {
__ Mov(cp, ToRegister(context));
} else if (context->IsStackSlot()) {
__ Ldr(cp, ToMemOperand(context));
__ Ldr(cp, ToMemOperand(context, kMustUseFramePointer));
} else if (context->IsConstantOperand()) {
HConstant* constant =
chunk_->LookupConstant(LConstantOperand::cast(context));
@ -669,7 +624,7 @@ bool LCodeGen::GeneratePrologue() {
// global proxy when called as functions (without an explicit receiver
// object).
if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
!info_->is_native()) {
!info()->is_native() && info()->scope()->has_this_declaration()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
__ Peek(x10, receiver_offset);
@ -728,8 +683,9 @@ bool LCodeGen::GeneratePrologue() {
__ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Variable* var = scope()->parameter(i);
int first_parameter = scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
if (var->IsContextSlot()) {
Register value = x0;
Register scratch = x3;
@ -743,8 +699,9 @@ bool LCodeGen::GeneratePrologue() {
__ Str(value, target);
// Update the write barrier. This clobbers value and scratch.
if (need_write_barrier) {
__ RecordWriteContextSlot(cp, target.offset(), value, scratch,
GetLinkRegisterState(), kSaveFPRegs);
__ RecordWriteContextSlot(cp, static_cast<int>(target.offset()),
value, scratch, GetLinkRegisterState(),
kSaveFPRegs);
} else if (FLAG_debug_code) {
Label done;
__ JumpIfInNewSpace(cp, &done);
@ -995,15 +952,10 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
DCHECK(deoptimization_literals_.length() == 0);
const ZoneList<Handle<JSFunction> >* inlined_closures =
chunk()->inlined_closures();
for (int i = 0, length = inlined_closures->length(); i < length; i++) {
DefineDeoptimizationLiteral(inlined_closures->at(i));
DCHECK_EQ(0, deoptimization_literals_.length());
for (auto function : chunk()->inlined_functions()) {
DefineDeoptimizationLiteral(function);
}
inlined_function_count_ = deoptimization_literals_.length();
}
@ -1281,13 +1233,37 @@ static int64_t ArgumentsOffsetWithoutFrame(int index) {
}
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
DCHECK(op != NULL);
DCHECK(!op->IsRegister());
DCHECK(!op->IsDoubleRegister());
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
return MemOperand(fp, StackSlotOffset(op->index()));
int fp_offset = StackSlotOffset(op->index());
// Loads and stores have a larger reach with positive offsets than with
// negative ones.
// We try to access using jssp (positive offset) first, then fall back to
// fp (negative offset) if that fails.
//
// We can reference a stack slot from jssp only if we know how much we've
// put on the stack. We don't know this in the following cases:
// - stack_mode != kCanUseStackPointer: this is the case when deferred
// code has saved the registers.
// - saves_caller_doubles(): some double registers have been pushed, jssp
// references the end of the double registers and not the end of the stack
// slots.
// In both of the cases above, we _could_ add the tracking information
// required so that we can use jssp here, but in practice it isn't worth it.
if ((stack_mode == kCanUseStackPointer) &&
!info()->saves_caller_doubles()) {
int jssp_offset_to_fp =
StandardFrameConstants::kFixedFrameSizeFromFp +
(pushed_arguments_ + GetStackSlotCount()) * kPointerSize;
int jssp_offset = fp_offset + jssp_offset_to_fp;
if (masm()->IsImmLSScaled(jssp_offset, LSDoubleWord)) {
return MemOperand(masm()->StackPointer(), jssp_offset);
}
}
return MemOperand(fp, fp_offset);
} else {
// Without an eager frame, retrieve the parameter relative to the stack
// pointer.
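
A minimal sketch of the offset check this fallback depends on. The encoding limits (unsigned 12-bit scaled immediate for positive offsets, signed 9-bit for negative ones) are assumptions from the AArch64 load/store forms, and both helper names are hypothetical:

#include <cstdint>

// Assumed from the AArch64 ISA: LDR/STR with an unsigned scaled offset can
// encode 0..4095 times the access size, while negative offsets fall back to
// LDUR/STUR with only a signed 9-bit range (-256..255).
bool IsScaledDoubleWordOffset(int64_t offset) {
  return offset >= 0 && (offset % 8) == 0 && (offset / 8) < 4096;
}

// Hypothetical mirror of the rebasing above: a negative fp-relative slot
// offset may become a small positive jssp-relative offset once the distance
// from jssp up to fp is added in.
bool TryRebaseOnStackPointer(int fp_offset, int jssp_offset_to_fp,
                             int* jssp_offset) {
  *jssp_offset = fp_offset + jssp_offset_to_fp;
  return IsScaledDoubleWordOffset(*jssp_offset);
}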
@ -1772,8 +1748,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(x0));
DCHECK(ToRegister(instr->result()).is(x0));
Handle<Code> code = CodeFactory::BinaryOpIC(
isolate(), instr->op(), instr->language_mode()).code();
Handle<Code> code =
CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@ -2021,29 +1997,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
void LCodeGen::DoTailCallThroughMegamorphicCache(
LTailCallThroughMegamorphicCache* instr) {
Register receiver = ToRegister(instr->receiver());
Register name = ToRegister(instr->name());
DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
DCHECK(name.is(LoadDescriptor::NameRegister()));
DCHECK(receiver.is(x1));
DCHECK(name.is(x2));
Register scratch = x4;
Register extra = x5;
Register extra2 = x6;
Register extra3 = x7;
// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(
masm(), Code::LOAD_IC, instr->hydrogen()->flags(), false, receiver, name,
scratch, extra, extra2, extra3);
// Tail call to miss if we ended up here.
LoadIC::GenerateMiss(masm());
}
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(ToRegister(instr->result()).Is(x0));
@ -2085,6 +2038,8 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
}
generator.AfterCall();
}
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
@ -2104,11 +2059,13 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
__ Call(x10);
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
CallRuntime(instr->function(), instr->arity(), instr);
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
@ -2134,6 +2091,7 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
default:
UNREACHABLE();
}
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
@ -2554,7 +2512,8 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->left()).Is(x1));
DCHECK(ToRegister(instr->right()).Is(x0));
Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
Handle<Code> ic =
CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// Signal that we don't inline smi code before this stub.
InlineSmiCheckInfo::EmitNotInlined(masm());
@ -2653,18 +2612,14 @@ void LCodeGen::DoDateField(LDateField* instr) {
Register temp1 = x10;
Register temp2 = x11;
Smi* index = instr->index();
Label runtime, done;
DCHECK(object.is(result) && object.Is(x0));
DCHECK(instr->IsMarkedAsCall());
DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
__ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
if (index->value() == 0) {
__ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
} else {
Label runtime, done;
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ Mov(temp1, Operand(stamp));
@ -2680,9 +2635,8 @@ void LCodeGen::DoDateField(LDateField* instr) {
__ Bind(&runtime);
__ Mov(x1, Operand(index));
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
}
__ Bind(&done);
}
}
@ -3196,6 +3150,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
instr->hydrogen()->formal_parameter_count(),
instr->arity(), instr);
}
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
@ -3309,6 +3264,16 @@ void LCodeGen::DoLabel(LLabel* label) {
label->block_id(),
LabelType(label));
// Inherit pushed_arguments_ from the predecessor's argument count.
if (label->block()->HasPredecessor()) {
pushed_arguments_ = label->block()->predecessors()->at(0)->argument_count();
#ifdef DEBUG
for (auto p : *label->block()->predecessors()) {
DCHECK_EQ(p->argument_count(), pushed_arguments_);
}
#endif
}
__ Bind(label->label());
current_block_ = label->block_id();
DoGap(label);
@ -3361,10 +3326,9 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
Register slot_register = VectorLoadICDescriptor::SlotRegister();
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
Register slot_register = LoadWithVectorDescriptor::SlotRegister();
DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
DCHECK(slot_register.is(x0));
AllowDeferredHandleDereference vector_structure_check;
@ -3377,17 +3341,29 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
}
template <class T>
void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
Register vector_register = ToRegister(instr->temp_vector());
Register slot_register = ToRegister(instr->temp_slot());
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Mov(vector_register, vector);
FeedbackVectorICSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ Mov(slot_register, Smi::FromInt(index));
}
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->global_object())
.is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).Is(x0));
__ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
if (FLAG_vector_ics) {
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@ -3505,7 +3481,8 @@ void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
case SLOPPY_ARGUMENTS_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@ -3657,9 +3634,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
Handle<Code> ic =
CodeFactory::KeyedLoadICInOptimizedCode(
isolate(), instr->hydrogen()->initialization_state()).code();
Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
isolate(), instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).Is(x0));
@ -3712,12 +3689,10 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// LoadIC expects name and receiver in registers.
DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
__ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
if (FLAG_vector_ics) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
}
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
isolate(), NOT_CONTEXTUAL,
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(
isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@ -4754,6 +4729,8 @@ void LCodeGen::DoPushArguments(LPushArguments* instr) {
// The preamble was done by LPreparePushArguments.
args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE);
RecordPushedArgumentsDelta(instr->ArgumentCount());
}
@ -5137,14 +5114,9 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
SmiCheck check_needed =
instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
__ RecordWriteContextSlot(context,
target.offset(),
value,
scratch,
GetLinkRegisterState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
__ RecordWriteContextSlot(context, static_cast<int>(target.offset()), value,
scratch, GetLinkRegisterState(), kSaveFPRegs,
EMIT_REMEMBERED_SET, check_needed);
}
__ Bind(&skip_assignment);
}
@ -5221,7 +5193,8 @@ void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
case SLOPPY_ARGUMENTS_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@ -5322,6 +5295,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
if (instr->hydrogen()->HasVectorAndSlot()) {
EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
}
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state()).code();
@ -5329,6 +5306,91 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
}
void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
class DeferredMaybeGrowElements final : public LDeferredCode {
public:
DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
: LDeferredCode(codegen), instr_(instr) {}
void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
LInstruction* instr() override { return instr_; }
private:
LMaybeGrowElements* instr_;
};
Register result = x0;
DeferredMaybeGrowElements* deferred =
new (zone()) DeferredMaybeGrowElements(this, instr);
LOperand* key = instr->key();
LOperand* current_capacity = instr->current_capacity();
DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
DCHECK(key->IsConstantOperand() || key->IsRegister());
DCHECK(current_capacity->IsConstantOperand() ||
current_capacity->IsRegister());
if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
int32_t constant_capacity =
ToInteger32(LConstantOperand::cast(current_capacity));
if (constant_key >= constant_capacity) {
// Deferred case.
__ B(deferred->entry());
}
} else if (key->IsConstantOperand()) {
int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
__ Cmp(ToRegister(current_capacity), Operand(constant_key));
__ B(le, deferred->entry());
} else if (current_capacity->IsConstantOperand()) {
int32_t constant_capacity =
ToInteger32(LConstantOperand::cast(current_capacity));
__ Cmp(ToRegister(key), Operand(constant_capacity));
__ B(ge, deferred->entry());
} else {
__ Cmp(ToRegister(key), ToRegister(current_capacity));
__ B(ge, deferred->entry());
}
__ Mov(result, ToRegister(instr->elements()));
__ Bind(deferred->exit());
}
void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
Register result = x0;
__ Mov(result, 0);
// We have to call a stub.
{
PushSafepointRegistersScope scope(this);
__ Move(result, ToRegister(instr->object()));
LOperand* key = instr->key();
if (key->IsConstantOperand()) {
__ Mov(x3, Operand(ToSmi(LConstantOperand::cast(key))));
} else {
__ Mov(x3, ToRegister(key));
__ SmiTag(x3);
}
GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
instr->hydrogen()->kind());
__ CallStub(&stub);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
__ StoreToSafepointRegisterSlot(result, result);
}
// Deopt on smi, which means the elements array changed to dictionary mode.
DeoptimizeIfSmi(result, instr, Deoptimizer::kSmi);
}
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Representation representation = instr->representation();
@ -5433,10 +5495,14 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
if (instr->hydrogen()->HasVectorAndSlot()) {
EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
}
__ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
Handle<Code> ic =
StoreIC::initialize_stub(isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state());
Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@ -5548,7 +5614,8 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
Handle<Code> ic =
CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
InlineSmiCheckInfo::EmitNotInlined(masm());
@ -6054,5 +6121,5 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8

22
deps/v8/src/arm64/lithium-codegen-arm64.h

@ -28,7 +28,6 @@ class LCodeGen: public LCodeGenBase {
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
translations_(info->zone()),
@ -37,7 +36,8 @@ class LCodeGen: public LCodeGenBase {
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
expected_safepoint_kind_(Safepoint::kSimple),
pushed_arguments_(0) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@ -81,7 +81,9 @@ class LCodeGen: public LCodeGenBase {
Register ToRegister32(LOperand* op) const;
Operand ToOperand(LOperand* op);
Operand ToOperand32(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
enum StackMode { kMustUseFramePointer, kCanUseStackPointer };
MemOperand ToMemOperand(LOperand* op,
StackMode stack_mode = kCanUseStackPointer) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
template <class LI>
@ -114,6 +116,7 @@ class LCodeGen: public LCodeGenBase {
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
@ -190,6 +193,8 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorLoadICRegisters(T* instr);
template <class T>
void EmitVectorStoreICRegisters(T* instr);
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
@ -197,7 +202,6 @@ class LCodeGen: public LCodeGenBase {
Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
SmiCheck check_needed);
int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationData(Handle<Code> code);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
@ -341,7 +345,6 @@ class LCodeGen: public LCodeGenBase {
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
TranslationBuffer translations_;
@ -358,6 +361,15 @@ class LCodeGen: public LCodeGenBase {
Safepoint::Kind expected_safepoint_kind_;
// The number of arguments pushed onto the stack, either by this block or by a
// predecessor.
int pushed_arguments_;
void RecordPushedArgumentsDelta(int delta) {
pushed_arguments_ += delta;
DCHECK(pushed_arguments_ >= 0);
}
int old_position_;
class PushSafepointRegistersScope BASE_EMBEDDED {

3
deps/v8/src/arm64/lithium-gap-resolver-arm64.cc

@ -292,4 +292,5 @@ void LGapResolver::EmitMove(int index) {
moves_[index].Eliminate();
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8

24
deps/v8/src/arm64/macro-assembler-arm64.cc

@ -926,8 +926,8 @@ void MacroAssembler::PushPopQueue::PushQueued(
masm_->PushPreamble(size_);
}
int count = queued_.size();
int index = 0;
size_t count = queued_.size();
size_t index = 0;
while (index < count) {
// PushHelper can only handle registers with the same size and type, and it
// can handle only four at a time. Batch them up accordingly.
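
The batching constraint described in the comment above, restated as a self-contained sketch; CPURegister and ProcessQueued are simplified stand-ins for the real types, not V8 API:

#include <cstddef>
#include <iostream>
#include <vector>

// Simplified stand-in for v8::internal::CPURegister.
struct CPURegister { int size_in_bits; };

// Each helper call receives at most four registers, all of the same size,
// mirroring the PushHelper/PopHelper restriction. The size_t count/index
// types match the change in this hunk (queued_.size() returns size_t).
template <typename Helper>
void ProcessQueued(const std::vector<CPURegister>& queued, Helper helper) {
  size_t count = queued.size();
  size_t index = 0;
  while (index < count) {
    size_t batch_end = index + 1;
    while (batch_end < count && batch_end - index < 4 &&
           queued[batch_end].size_in_bits == queued[index].size_in_bits) {
      ++batch_end;
    }
    helper(&queued[index], batch_end - index);  // one homogeneous batch
    index = batch_end;
  }
}

int main() {
  std::vector<CPURegister> regs = {{64}, {64}, {64}, {64}, {64}, {32}};
  ProcessQueued(regs, [](const CPURegister*, size_t n) {
    std::cout << "batch of " << n << "\n";  // prints 4, then 1, then 1
  });
  return 0;
}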
@ -949,8 +949,8 @@ void MacroAssembler::PushPopQueue::PushQueued(
void MacroAssembler::PushPopQueue::PopQueued() {
if (queued_.empty()) return;
int count = queued_.size();
int index = 0;
size_t count = queued_.size();
size_t index = 0;
while (index < count) {
// PopHelper can only handle registers with the same size and type, and it
// can handle only four at a time. Batch them up accordingly.
@ -1263,7 +1263,7 @@ void MacroAssembler::PushCalleeSavedRegisters() {
// system stack pointer (csp).
DCHECK(csp.Is(StackPointer()));
MemOperand tos(csp, -2 * kXRegSize, PreIndex);
MemOperand tos(csp, -2 * static_cast<int>(kXRegSize), PreIndex);
stp(d14, d15, tos);
stp(d12, d13, tos);
@ -3928,6 +3928,7 @@ void MacroAssembler::GetNumberHash(Register key, Register scratch) {
Add(key, key, scratch);
// hash = hash ^ (hash >> 16);
Eor(key, key, Operand(key, LSR, 16));
Bic(key, key, Operand(0xc0000000u));
}
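
In scalar form, the tail of the hash sequence now reads as below. This is a paraphrase of the assembler above; the suggestion that the Bic keeps the value in a positive 30-bit (Smi-sized) range is an inference, not stated in the diff:

#include <cstdint>

// Scalar paraphrase of the final steps emitted above.
uint32_t FinishNumberHash(uint32_t hash) {
  hash ^= hash >> 16;    // Eor(key, key, Operand(key, LSR, 16));
  hash &= ~0xc0000000u;  // Bic(key, key, Operand(0xc0000000u)): clear the
                         // top two bits, leaving a positive 30-bit value.
  return hash;
}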
@ -4693,7 +4694,7 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
// Check that the function's map is the same as the expected cached map.
Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
int offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
Ldr(scratch2, FieldMemOperand(scratch1, offset));
Cmp(map_in_out, scratch2);
B(ne, no_map_match);
@ -5115,7 +5116,8 @@ void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
// 'check' in the other bits. The possible offset is limited in that we
// use BitField to pack the data, and the underlying data type is a
// uint32_t.
uint32_t delta = __ InstructionsGeneratedSince(smi_check);
uint32_t delta =
static_cast<uint32_t>(__ InstructionsGeneratedSince(smi_check));
__ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
} else {
DCHECK(!smi_check->is_bound());
@ -5136,9 +5138,10 @@ InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
// 32-bit values.
DCHECK(is_uint32(payload));
if (payload != 0) {
int reg_code = RegisterBits::decode(payload);
uint32_t payload32 = static_cast<uint32_t>(payload);
int reg_code = RegisterBits::decode(payload32);
reg_ = Register::XRegFromCode(reg_code);
uint64_t smi_check_delta = DeltaBits::decode(payload);
int smi_check_delta = DeltaBits::decode(payload32);
DCHECK(smi_check_delta != 0);
smi_check_ = inline_data->preceding(smi_check_delta);
}
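
Both the old and new decode paths rely on V8's BitField packing. A self-contained round-trip sketch, with the shift/size layout of RegisterBits and DeltaBits assumed for illustration (the real definitions live in the macro-assembler header):

#include <cassert>
#include <cstdint>

// Minimal model of BitField: pack a value into bits [shift, shift + size).
template <int shift, int size>
struct BitField {
  static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(uint32_t value) { return value << shift; }
  static uint32_t decode(uint32_t packed) { return (packed & kMask) >> shift; }
};

using RegisterBits = BitField<0, 5>;  // assumed: register code in bits 0-4
using DeltaBits = BitField<5, 27>;    // assumed: instruction delta in bits 5-31

int main() {
  uint32_t payload = RegisterBits::encode(17) | DeltaBits::encode(42);
  assert(RegisterBits::decode(payload) == 17);
  assert(DeltaBits::decode(payload) == 42);
  return 0;
}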
@ -5149,6 +5152,7 @@ InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64

4
deps/v8/src/arm64/macro-assembler-arm64.h

@ -886,8 +886,8 @@ class MacroAssembler : public Assembler {
template<typename Field>
void DecodeField(Register dst, Register src) {
static const uint64_t shift = Field::kShift;
static const uint64_t setbits = CountSetBits(Field::kMask, 32);
static const int shift = Field::kShift;
static const int setbits = CountSetBits(Field::kMask, 32);
Ubfx(dst, src, shift, setbits);
}
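
For reference, a scalar model of the UBFX that DecodeField emits, where shift and setbits mirror Field::kShift and CountSetBits(Field::kMask, 32); it assumes 0 < setbits < 64:

#include <cstdint>

// Unsigned bitfield extract: take 'setbits' bits of 'src' starting at bit
// 'shift', zero-extended. This is what Ubfx(dst, src, shift, setbits) does.
uint64_t UbfxModel(uint64_t src, int shift, int setbits) {
  return (src >> shift) & ((uint64_t{1} << setbits) - 1);
}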

3
deps/v8/src/arm64/regexp-macro-assembler-arm64.cc

@ -1611,6 +1611,7 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
#endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64

1
deps/v8/src/arm64/regexp-macro-assembler-arm64.h

@ -20,6 +20,7 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
RegExpMacroAssemblerARM64(Isolate* isolate, Zone* zone, Mode mode,
int registers_to_save);
virtual ~RegExpMacroAssemblerARM64();
virtual void AbortedCodeGeneration() { masm_->AbortedCodeGeneration(); }
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
virtual void AdvanceRegister(int reg, int by);

169
deps/v8/src/arm64/simulator-arm64.cc

@ -490,7 +490,7 @@ class Redirection {
static Redirection* FromHltInstruction(Instruction* redirect_call) {
char* addr_of_hlt = reinterpret_cast<char*>(redirect_call);
char* addr_of_redirection =
addr_of_hlt - OFFSET_OF(Redirection, redirect_call_);
addr_of_hlt - offsetof(Redirection, redirect_call_);
return reinterpret_cast<Redirection*>(addr_of_redirection);
}
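
The OFFSET_OF-to-offsetof change keeps the classic container-of pattern: recover an enclosing object from a pointer to one of its members. A hypothetical minimal version (Container and FromMember are illustrative names only):

#include <cstddef>

struct Container {
  int before;
  int member;
};

// Given a pointer to Container::member, recover the Container, exactly as
// FromHltInstruction recovers the Redirection from its redirect_call_ field.
Container* FromMember(int* member_ptr) {
  char* addr = reinterpret_cast<char*>(member_ptr);
  return reinterpret_cast<Container*>(addr - offsetof(Container, member));
}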
@ -500,6 +500,14 @@ class Redirection {
return redirection->external_function<void*>();
}
static void DeleteChain(Redirection* redirection) {
while (redirection != nullptr) {
Redirection* next = redirection->next_;
delete redirection;
redirection = next;
}
}
private:
void* external_function_;
Instruction redirect_call_;
@ -508,6 +516,12 @@ class Redirection {
};
// static
void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
Redirection::DeleteChain(first);
}
// Calls into the V8 runtime are based on this very simple interface.
// Note: To be able to return two values from some calls, the code in
// runtime.cc uses the ObjectPair structure.
@ -903,10 +917,11 @@ T Simulator::ShiftOperand(T value, Shift shift_type, unsigned amount) {
return static_cast<unsignedT>(value) >> amount;
case ASR:
return value >> amount;
case ROR:
case ROR: {
unsignedT mask = (static_cast<unsignedT>(1) << amount) - 1;
return (static_cast<unsignedT>(value) >> amount) |
((value & ((1L << amount) - 1L)) <<
(sizeof(unsignedT) * 8 - amount));
((value & mask) << (sizeof(mask) * 8 - amount));
}
default:
UNIMPLEMENTED();
return 0;
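
The ROR fix restated as a generic helper: building the mask in the operand's own unsigned type (instead of the removed code's 64-bit literal 1L) keeps 32-bit rotations correct. A sketch assuming 0 < amount < bit width:

#include <cassert>
#include <cstdint>

template <typename unsignedT>
unsignedT RotateRight(unsignedT value, unsigned amount) {
  // Mask built in the operand's own width, as in the fixed code above.
  unsignedT mask = (static_cast<unsignedT>(1) << amount) - 1;
  return (value >> amount) |
         ((value & mask) << (sizeof(unsignedT) * 8 - amount));
}

int main() {
  assert(RotateRight<uint32_t>(0x80000001u, 1) == 0xc0000000u);
  assert(RotateRight<uint64_t>(1, 1) == 0x8000000000000000ull);
  return 0;
}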
@@ -1399,7 +1414,8 @@ void Simulator::VisitAddSubShifted(Instruction* instr) {
     int64_t op2 = ShiftOperand(xreg(instr->Rm()), shift_type, shift_amount);
     AddSubHelper(instr, op2);
   } else {
-    int32_t op2 = ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount);
+    int32_t op2 = static_cast<int32_t>(
+        ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount));
     AddSubHelper(instr, op2);
   }
 }
@@ -1410,7 +1426,7 @@ void Simulator::VisitAddSubImmediate(Instruction* instr) {
   if (instr->SixtyFourBits()) {
     AddSubHelper<int64_t>(instr, op2);
   } else {
-    AddSubHelper<int32_t>(instr, op2);
+    AddSubHelper<int32_t>(instr, static_cast<int32_t>(op2));
   }
 }
@@ -1457,7 +1473,7 @@ void Simulator::VisitLogicalImmediate(Instruction* instr) {
   if (instr->SixtyFourBits()) {
     LogicalHelper<int64_t>(instr, instr->ImmLogical());
   } else {
-    LogicalHelper<int32_t>(instr, instr->ImmLogical());
+    LogicalHelper<int32_t>(instr, static_cast<int32_t>(instr->ImmLogical()));
   }
 }
@@ -1879,7 +1895,7 @@ void Simulator::VisitMoveWideImmediate(Instruction* instr) {
   // Get the shifted immediate.
   int64_t shift = instr->ShiftMoveWide() * 16;
-  int64_t shifted_imm16 = instr->ImmMoveWide() << shift;
+  int64_t shifted_imm16 = static_cast<int64_t>(instr->ImmMoveWide()) << shift;
 
   // Compute the new value.
   switch (mov_op) {
@@ -1912,25 +1928,32 @@
 void Simulator::VisitConditionalSelect(Instruction* instr) {
+  uint64_t new_val = xreg(instr->Rn());
   if (ConditionFailed(static_cast<Condition>(instr->Condition()))) {
-    uint64_t new_val = xreg(instr->Rm());
+    new_val = xreg(instr->Rm());
     switch (instr->Mask(ConditionalSelectMask)) {
-      case CSEL_w: set_wreg(instr->Rd(), new_val); break;
-      case CSEL_x: set_xreg(instr->Rd(), new_val); break;
-      case CSINC_w: set_wreg(instr->Rd(), new_val + 1); break;
-      case CSINC_x: set_xreg(instr->Rd(), new_val + 1); break;
-      case CSINV_w: set_wreg(instr->Rd(), ~new_val); break;
-      case CSINV_x: set_xreg(instr->Rd(), ~new_val); break;
-      case CSNEG_w: set_wreg(instr->Rd(), -new_val); break;
-      case CSNEG_x: set_xreg(instr->Rd(), -new_val); break;
+      case CSEL_w:
+      case CSEL_x:
+        break;
+      case CSINC_w:
+      case CSINC_x:
+        new_val++;
+        break;
+      case CSINV_w:
+      case CSINV_x:
+        new_val = ~new_val;
+        break;
+      case CSNEG_w:
+      case CSNEG_x:
+        new_val = -new_val;
+        break;
       default: UNIMPLEMENTED();
     }
-  } else {
+  }
   if (instr->SixtyFourBits()) {
-    set_xreg(instr->Rd(), xreg(instr->Rn()));
+    set_xreg(instr->Rd(), new_val);
   } else {
-    set_wreg(instr->Rd(), wreg(instr->Rn()));
+    set_wreg(instr->Rd(), static_cast<uint32_t>(new_val));
   }
-  }
 }
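
The rewrite computes the selected value once into a 64-bit temporary and performs a single width-dispatched write at the end, instead of duplicating every case for W and X registers; the W path truncates explicitly. A sketch of the resulting control flow, with hypothetical enum names standing in for V8's instruction masks:

    #include <cassert>
    #include <cstdint>

    enum CselOp { kCsel, kCsinc, kCsinv, kCsneg };  // hypothetical encodings

    // Value a conditional-select instruction writes to Rd.
    uint64_t ConditionalSelect(bool cond_passed, uint64_t rn, uint64_t rm,
                               CselOp op) {
      uint64_t new_val = rn;   // condition passed: plain Rn
      if (!cond_passed) {      // condition failed: derive the value from Rm
        new_val = rm;
        switch (op) {
          case kCsel:  break;
          case kCsinc: new_val++; break;
          case kCsinv: new_val = ~new_val; break;
          case kCsneg: new_val = -new_val; break;  // modular negation
        }
      }
      return new_val;  // caller truncates via static_cast<uint32_t> for W regs
    }

    int main() {
      assert(ConditionalSelect(true, 7, 9, kCsinc) == 7);
      assert(ConditionalSelect(false, 7, 9, kCsinc) == 10);
      assert(static_cast<uint32_t>(ConditionalSelect(false, 7, ~0ull, kCsneg)) == 1u);
      return 0;
    }
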
@@ -1940,13 +1963,27 @@ void Simulator::VisitDataProcessing1Source(Instruction* instr) {
   unsigned src = instr->Rn();
 
   switch (instr->Mask(DataProcessing1SourceMask)) {
-    case RBIT_w: set_wreg(dst, ReverseBits(wreg(src), kWRegSizeInBits)); break;
-    case RBIT_x: set_xreg(dst, ReverseBits(xreg(src), kXRegSizeInBits)); break;
-    case REV16_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse16)); break;
-    case REV16_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse16)); break;
-    case REV_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse32)); break;
-    case REV32_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse32)); break;
-    case REV_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse64)); break;
+    case RBIT_w:
+      set_wreg(dst, ReverseBits(wreg(src)));
+      break;
+    case RBIT_x:
+      set_xreg(dst, ReverseBits(xreg(src)));
+      break;
+    case REV16_w:
+      set_wreg(dst, ReverseBytes(wreg(src), 1));
+      break;
+    case REV16_x:
+      set_xreg(dst, ReverseBytes(xreg(src), 1));
+      break;
+    case REV_w:
+      set_wreg(dst, ReverseBytes(wreg(src), 2));
+      break;
+    case REV32_x:
+      set_xreg(dst, ReverseBytes(xreg(src), 2));
+      break;
+    case REV_x:
+      set_xreg(dst, ReverseBytes(xreg(src), 3));
+      break;
     case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src), kWRegSizeInBits));
       break;
     case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src), kXRegSizeInBits));
@@ -1964,44 +2001,6 @@ void Simulator::VisitDataProcessing1Source(Instruction* instr) {
 }
 
-uint64_t Simulator::ReverseBits(uint64_t value, unsigned num_bits) {
-  DCHECK((num_bits == kWRegSizeInBits) || (num_bits == kXRegSizeInBits));
-  uint64_t result = 0;
-  for (unsigned i = 0; i < num_bits; i++) {
-    result = (result << 1) | (value & 1);
-    value >>= 1;
-  }
-  return result;
-}
-
-uint64_t Simulator::ReverseBytes(uint64_t value, ReverseByteMode mode) {
-  // Split the 64-bit value into an 8-bit array, where b[0] is the least
-  // significant byte, and b[7] is the most significant.
-  uint8_t bytes[8];
-  uint64_t mask = 0xff00000000000000UL;
-  for (int i = 7; i >= 0; i--) {
-    bytes[i] = (value & mask) >> (i * 8);
-    mask >>= 8;
-  }
-
-  // Permutation tables for REV instructions.
-  //  permute_table[Reverse16] is used by REV16_x, REV16_w
-  //  permute_table[Reverse32] is used by REV32_x, REV_w
-  //  permute_table[Reverse64] is used by REV_x
-  DCHECK((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2));
-  static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
-                                               {4, 5, 6, 7, 0, 1, 2, 3},
-                                               {0, 1, 2, 3, 4, 5, 6, 7} };
-  uint64_t result = 0;
-  for (int i = 0; i < 8; i++) {
-    result <<= 8;
-    result |= bytes[permute_table[mode][i]];
-  }
-  return result;
-}
-
 template <typename T>
 void Simulator::DataProcessing2Source(Instruction* instr) {
   Shift shift_op = NO_SHIFT;
@@ -2121,7 +2120,7 @@ void Simulator::VisitDataProcessing3Source(Instruction* instr) {
   if (instr->SixtyFourBits()) {
     set_xreg(instr->Rd(), result);
   } else {
-    set_wreg(instr->Rd(), result);
+    set_wreg(instr->Rd(), static_cast<int32_t>(result));
   }
 }
@@ -2138,8 +2137,9 @@ void Simulator::BitfieldHelper(Instruction* instr) {
     mask = diff < reg_size - 1 ? (static_cast<T>(1) << (diff + 1)) - 1
                                : static_cast<T>(-1);
   } else {
-    mask = ((1L << (S + 1)) - 1);
-    mask = (static_cast<uint64_t>(mask) >> R) | (mask << (reg_size - R));
+    uint64_t umask = ((1L << (S + 1)) - 1);
+    umask = (umask >> R) | (umask << (reg_size - R));
+    mask = static_cast<T>(umask);
     diff += reg_size;
   }
@@ -2563,7 +2563,7 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
   // Bail out early for zero inputs.
   if (mantissa == 0) {
-    return sign << sign_offset;
+    return static_cast<T>(sign << sign_offset);
   }
 
   // If all bits in the exponent are set, the value is infinite or NaN.
@@ -2580,9 +2580,9 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
       // FPTieEven rounding mode handles overflows using infinities.
       exponent = infinite_exponent;
       mantissa = 0;
-      return (sign << sign_offset) |
-             (exponent << exponent_offset) |
-             (mantissa << mantissa_offset);
+      return static_cast<T>((sign << sign_offset) |
+                            (exponent << exponent_offset) |
+                            (mantissa << mantissa_offset));
     }
 
   // Calculate the shift required to move the top mantissa bit to the proper
@@ -2605,7 +2605,7 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
   // non-zero result after rounding.
   if (shift > (highest_significant_bit + 1)) {
     // The result will always be +/-0.0.
-    return sign << sign_offset;
+    return static_cast<T>(sign << sign_offset);
   }
 
   // Properly encode the exponent for a subnormal output.
@@ -2624,9 +2624,9 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
     uint64_t adjusted = mantissa - (halfbit_mantissa & ~onebit_mantissa);
     T halfbit_adjusted = (adjusted >> (shift-1)) & 1;
 
-    T result = (sign << sign_offset) |
-               (exponent << exponent_offset) |
-               ((mantissa >> shift) << mantissa_offset);
+    T result =
+        static_cast<T>((sign << sign_offset) | (exponent << exponent_offset) |
+                       ((mantissa >> shift) << mantissa_offset));
 
     // A very large mantissa can overflow during rounding. If this happens, the
     // exponent should be incremented and the mantissa set to 1.0 (encoded as
@@ -2641,9 +2641,9 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
     // We have to shift the mantissa to the left (or not at all). The input
     // mantissa is exactly representable in the output mantissa, so apply no
     // rounding correction.
-    return (sign << sign_offset) |
-           (exponent << exponent_offset) |
-           ((mantissa << -shift) << mantissa_offset);
+    return static_cast<T>((sign << sign_offset) |
+                          (exponent << exponent_offset) |
+                          ((mantissa << -shift) << mantissa_offset));
   }
 }
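
Each of these FPRound returns assembles the result from 64-bit sign/exponent/mantissa working values, so when `T` is `uint32_t` (single precision) the conversion narrows; the `static_cast<T>` makes that narrowing explicit rather than implicit, with no change in behavior. A sketch of packing single-precision bits from wide components, using fixed example values rather than FPRound's rounding logic:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Pack float bits from 64-bit working values, narrowing explicitly.
    uint32_t PackFloat(int64_t sign, int64_t exponent, uint64_t mantissa) {
      return static_cast<uint32_t>((sign << 31) | (exponent << 23) | mantissa);
    }

    int main() {
      // 1.5f = sign 0, biased exponent 127, fraction bits 0x400000 (0.5).
      uint32_t bits = PackFloat(0, 127, 0x400000);
      float f;
      std::memcpy(&f, &bits, sizeof(f));
      assert(f == 1.5f);
      return 0;
    }
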
@@ -2838,7 +2838,8 @@ float Simulator::FPToFloat(double value, FPRounding round_mode) {
       uint32_t sign = raw >> 63;
       uint32_t exponent = (1 << 8) - 1;
-      uint32_t payload = unsigned_bitextract_64(50, 52 - 23, raw);
+      uint32_t payload =
+          static_cast<uint32_t>(unsigned_bitextract_64(50, 52 - 23, raw));
       payload |= (1 << 22);  // Force a quiet NaN.
 
       return rawbits_to_float((sign << 31) | (exponent << 23) | payload);
@@ -2859,7 +2860,8 @@ float Simulator::FPToFloat(double value, FPRounding round_mode) {
       // Extract the IEEE-754 double components.
       uint32_t sign = raw >> 63;
       // Extract the exponent and remove the IEEE-754 encoding bias.
-      int32_t exponent = unsigned_bitextract_64(62, 52, raw) - 1023;
+      int32_t exponent =
+          static_cast<int32_t>(unsigned_bitextract_64(62, 52, raw)) - 1023;
       // Extract the mantissa and add the implicit '1' bit.
       uint64_t mantissa = unsigned_bitextract_64(51, 0, raw);
       if (std::fpclassify(value) == FP_NORMAL) {
@@ -3210,11 +3212,11 @@ void Simulator::VisitSystem(Instruction* instr) {
     case MSR: {
       switch (instr->ImmSystemRegister()) {
         case NZCV:
-          nzcv().SetRawValue(xreg(instr->Rt()));
+          nzcv().SetRawValue(wreg(instr->Rt()));
           LogSystemRegister(NZCV);
           break;
         case FPCR:
-          fpcr().SetRawValue(xreg(instr->Rt()));
+          fpcr().SetRawValue(wreg(instr->Rt()));
           LogSystemRegister(FPCR);
           break;
         default: UNIMPLEMENTED();
@@ -3835,6 +3837,7 @@ void Simulator::DoPrintf(Instruction* instr) {
 #endif  // USE_SIMULATOR
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 #endif  // V8_TARGET_ARCH_ARM64

17
deps/v8/src/arm64/simulator-arm64.h

@@ -72,12 +72,6 @@ class SimulatorStack : public v8::internal::AllStatic {
 #else  // !defined(USE_SIMULATOR)
 
-enum ReverseByteMode {
-  Reverse16 = 0,
-  Reverse32 = 1,
-  Reverse64 = 2
-};
-
 // The proper way to initialize a simulated system register (such as NZCV) is as
 // follows:
@@ -169,6 +163,8 @@ class Simulator : public DecoderVisitor {
   static void Initialize(Isolate* isolate);
 
+  static void TearDown(HashMap* i_cache, Redirection* first);
+
   static Simulator* current(v8::internal::Isolate* isolate);
 
   class CallArgument;
@@ -706,9 +702,6 @@ class Simulator : public DecoderVisitor {
   template <typename T>
   void BitfieldHelper(Instruction* instr);
 
-  uint64_t ReverseBits(uint64_t value, unsigned num_bits);
-  uint64_t ReverseBytes(uint64_t value, ReverseByteMode mode);
-
   template <typename T>
   T FPDefaultNaN() const;
@@ -885,9 +878,9 @@
                                                   p0, p1, p2, p3, p4))
 
 #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
-  Simulator::current(Isolate::Current())->CallRegExp( \
-      entry, \
-      p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)
+  static_cast<int>( \
+      Simulator::current(Isolate::Current()) \
+          ->CallRegExp(entry, p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
 
 // The simulator has its own stack. Thus it has a different stack limit from

5
deps/v8/src/arm64/utils-arm64.cc

@@ -74,7 +74,7 @@ int CountSetBits(uint64_t value, int width) {
   value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
   value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
 
-  return value;
+  return static_cast<int>(value);
 }
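
The lines above are the tail of a SWAR population count: each step adds adjacent groups of bits in parallel, so after the final 32-bit fold the whole count (at most 64) sits in the low bits and the narrowing cast is lossless. A complete standalone version of the technique, omitting the `width` parameter of V8's variant:

    #include <cassert>
    #include <cstdint>

    // SWAR popcount: pairwise sums over 1-, 2-, 4-, 8-, 16-, 32-bit groups.
    int CountSetBits(uint64_t value) {
      value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
      value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
      value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
      value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
      value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
      value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
      return static_cast<int>(value);  // at most 64, so the cast is lossless
    }

    int main() {
      assert(CountSetBits(0) == 0);
      assert(CountSetBits(0xffffffffffffffffull) == 64);
      assert(CountSetBits(0x8000000000000001ull) == 2);
      return 0;
    }
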
@@ -89,6 +89,7 @@ int MaskToBit(uint64_t mask) {
 }
 
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 #endif  // V8_TARGET_ARCH_ARM64

43
deps/v8/src/arm64/utils-arm64.h

@@ -61,6 +61,49 @@ uint64_t LargestPowerOf2Divisor(uint64_t value);
 int MaskToBit(uint64_t mask);
 
+template <typename T>
+T ReverseBits(T value) {
+  DCHECK((sizeof(value) == 1) || (sizeof(value) == 2) || (sizeof(value) == 4) ||
+         (sizeof(value) == 8));
+  T result = 0;
+  for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
+    result = (result << 1) | (value & 1);
+    value >>= 1;
+  }
+  return result;
+}
+
+
+template <typename T>
+T ReverseBytes(T value, int block_bytes_log2) {
+  DCHECK((sizeof(value) == 4) || (sizeof(value) == 8));
+  DCHECK((1U << block_bytes_log2) <= sizeof(value));
+  // Split the 64-bit value into an 8-bit array, where b[0] is the least
+  // significant byte, and b[7] is the most significant.
+  uint8_t bytes[8];
+  uint64_t mask = 0xff00000000000000;
+  for (int i = 7; i >= 0; i--) {
+    bytes[i] = (static_cast<uint64_t>(value) & mask) >> (i * 8);
+    mask >>= 8;
+  }
+
+  // Permutation tables for REV instructions.
+  //  permute_table[0] is used by REV16_x, REV16_w
+  //  permute_table[1] is used by REV32_x, REV_w
+  //  permute_table[2] is used by REV_x
+  DCHECK((0 < block_bytes_log2) && (block_bytes_log2 < 4));
+  static const uint8_t permute_table[3][8] = {{6, 7, 4, 5, 2, 3, 0, 1},
+                                              {4, 5, 6, 7, 0, 1, 2, 3},
+                                              {0, 1, 2, 3, 4, 5, 6, 7}};
+  T result = 0;
+  for (int i = 0; i < 8; i++) {
+    result <<= 8;
+    result |= bytes[permute_table[block_bytes_log2 - 1][i]];
+  }
+  return result;
+}
+
+
 // NaN tests.
 inline bool IsSignallingNaN(double num) {
   uint64_t raw = double_to_rawbits(num);
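
These templates replace the simulator-only member functions removed above; `block_bytes_log2` selects the REV granularity (1 swaps bytes within 16-bit lanes as REV16 does, 2 within 32-bit lanes for REV32/REV_w, and 3 reverses all eight bytes for REV_x). A standalone model of the same lane behavior, written with an index computation instead of the permutation table:

    #include <cassert>
    #include <cstdint>

    // Reverse bytes within 2^block_bytes_log2-byte lanes of a 64-bit value.
    uint64_t ReverseBytes(uint64_t value, int block_bytes_log2) {
      int block = 1 << block_bytes_log2;  // lane size in bytes: 2, 4 or 8
      uint64_t result = 0;
      for (int i = 0; i < 8; i++) {
        int lane = i / block;  // byte i comes from the mirrored slot in its lane
        int swapped = lane * block + (block - 1 - i % block);
        result |= ((value >> (swapped * 8)) & 0xff) << (i * 8);
      }
      return result;
    }

    int main() {
      uint64_t v = 0x0102030405060708ull;
      assert(ReverseBytes(v, 1) == 0x0201040306050807ull);  // REV16
      assert(ReverseBytes(v, 2) == 0x0403020108070605ull);  // REV32 / REV_w
      assert(ReverseBytes(v, 3) == 0x0807060504030201ull);  // REV_x
      return 0;
    }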

20
deps/v8/src/array-iterator.js

@@ -2,17 +2,15 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-var $iteratorCreateResultObject;
 var $arrayValues;
 
-(function(global, shared, exports) {
+(function(global, utils) {
 
 "use strict";
 
 %CheckIsBootstrapping();
 
 var GlobalArray = global.Array;
-var GlobalObject = global.Object;
 
 macro TYPED_ARRAYS(FUNCTION)
 FUNCTION(Uint8Array)
@@ -122,19 +120,19 @@ function ArrayKeys() {
 }
 
-%FunctionSetPrototype(ArrayIterator, new GlobalObject());
+%FunctionSetPrototype(ArrayIterator, {__proto__: $iteratorPrototype});
 %FunctionSetInstanceClassName(ArrayIterator, 'Array Iterator');
 
-$installFunctions(ArrayIterator.prototype, DONT_ENUM, [
+utils.InstallFunctions(ArrayIterator.prototype, DONT_ENUM, [
   'next', ArrayIteratorNext
 ]);
-$setFunctionName(ArrayIteratorIterator, symbolIterator);
+utils.SetFunctionName(ArrayIteratorIterator, symbolIterator);
 %AddNamedProperty(ArrayIterator.prototype, symbolIterator,
                   ArrayIteratorIterator, DONT_ENUM);
 %AddNamedProperty(ArrayIterator.prototype, symbolToStringTag,
                   "Array Iterator", READ_ONLY | DONT_ENUM);
 
-$installFunctions(GlobalArray.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
   // No 'values' since it breaks webcompat: http://crbug.com/409858
   'entries', ArrayEntries,
   'keys', ArrayKeys
@@ -153,7 +151,13 @@ endmacro
 
 TYPED_ARRAYS(EXTEND_TYPED_ARRAY)
 
-$iteratorCreateResultObject = CreateIteratorResultObject;
+// -------------------------------------------------------------------
+// Exports
+
+utils.Export(function(to) {
+  to.ArrayIteratorCreateResultObject = CreateIteratorResultObject;
+});
+
 $arrayValues = ArrayValues;
 
 })

Some files were not shown because too many files changed in this diff
