deps: update V8 to 5.1.281.69

Pick up the latest branch head for V8 5.1. This branch brings in
improved language support and better performance. For full
details: http://v8project.blogspot.com/2016/04/v8-release-51.html

* Picks up the latest branch head for 5.1 [1]
* Edit v8 gitignore to allow trace_event copy
* Update V8 DEP trace_event as per deps/v8/DEPS [2]

[1] https://chromium.googlesource.com/v8/v8.git/+/dc81244
[2] https://chromium.googlesource.com/chromium/src/base/trace_event/common/+/c8c8665

PR-URL: https://github.com/nodejs/node/pull/7016
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Branch: v7.x
Author: Michaël Zasso
Commit: 2cc2951796
Changed files (lines changed per file):
  1. deps/v8/.ycm_extra_conf.py (2)
  2. deps/v8/AUTHORS (2)
  3. deps/v8/BUILD.gn (444)
  4. deps/v8/CODE_OF_CONDUCT.md (8)
  5. deps/v8/ChangeLog (1761)
  6. deps/v8/DEPS (16)
  7. deps/v8/Makefile (7)
  8. deps/v8/OWNERS (7)
  9. deps/v8/base/trace_event/common/trace_event_common.h (16)
  10. deps/v8/build/coverage_wrapper.py (36)
  11. deps/v8/build/get_landmines.py (1)
  12. deps/v8/build/isolate.gypi (2)
  13. deps/v8/build/standalone.gypi (64)
  14. deps/v8/build/toolchain.gypi (24)
  15. deps/v8/include/libplatform/libplatform.h (2)
  16. deps/v8/include/v8-debug.h (14)
  17. deps/v8/include/v8-experimental.h (2)
  18. deps/v8/include/v8-platform.h (6)
  19. deps/v8/include/v8-version.h (6)
  20. deps/v8/include/v8.h (255)
  21. deps/v8/include/v8config.h (8)
  22. deps/v8/infra/config/cq.cfg (7)
  23. deps/v8/src/DEPS (3)
  24. deps/v8/src/accessors.cc (3)
  25. deps/v8/src/api-arguments.cc (31)
  26. deps/v8/src/api-arguments.h (254)
  27. deps/v8/src/api-experimental.cc (15)
  28. deps/v8/src/api-natives.cc (74)
  29. deps/v8/src/api.cc (415)
  30. deps/v8/src/api.h (49)
  31. deps/v8/src/arguments.cc (85)
  32. deps/v8/src/arguments.h (222)
  33. deps/v8/src/arm/assembler-arm-inl.h (16)
  34. deps/v8/src/arm/assembler-arm.cc (41)
  35. deps/v8/src/arm/assembler-arm.h (8)
  36. deps/v8/src/arm/builtins-arm.cc (147)
  37. deps/v8/src/arm/code-stubs-arm.cc (248)
  38. deps/v8/src/arm/codegen-arm.cc (4)
  39. deps/v8/src/arm/constants-arm.h (2)
  40. deps/v8/src/arm/deoptimizer-arm.cc (13)
  41. deps/v8/src/arm/disasm-arm.cc (30)
  42. deps/v8/src/arm/frames-arm.h (15)
  43. deps/v8/src/arm/interface-descriptors-arm.cc (72)
  44. deps/v8/src/arm/macro-assembler-arm.cc (371)
  45. deps/v8/src/arm/macro-assembler-arm.h (39)
  46. deps/v8/src/arm/simulator-arm.cc (61)
  47. deps/v8/src/arm/simulator-arm.h (5)
  48. deps/v8/src/arm64/assembler-arm64-inl.h (16)
  49. deps/v8/src/arm64/assembler-arm64.cc (10)
  50. deps/v8/src/arm64/assembler-arm64.h (8)
  51. deps/v8/src/arm64/builtins-arm64.cc (142)
  52. deps/v8/src/arm64/code-stubs-arm64.cc (240)
  53. deps/v8/src/arm64/cpu-arm64.cc (2)
  54. deps/v8/src/arm64/deoptimizer-arm64.cc (16)
  55. deps/v8/src/arm64/frames-arm64.h (15)
  56. deps/v8/src/arm64/interface-descriptors-arm64.cc (80)
  57. deps/v8/src/arm64/macro-assembler-arm64.cc (229)
  58. deps/v8/src/arm64/macro-assembler-arm64.h (16)
  59. deps/v8/src/assembler.cc (247)
  60. deps/v8/src/assembler.h (89)
  61. deps/v8/src/ast/ast-numbering.cc (14)
  62. deps/v8/src/ast/ast-value-factory.h (1)
  63. deps/v8/src/ast/ast.cc (55)
  64. deps/v8/src/ast/ast.h (136)
  65. deps/v8/src/ast/prettyprinter.cc (7)
  66. deps/v8/src/ast/scopes.cc (68)
  67. deps/v8/src/ast/scopes.h (32)
  68. deps/v8/src/background-parsing-task.cc (5)
  69. deps/v8/src/bailout-reason.h (16)
  70. deps/v8/src/base/accounting-allocator.cc (33)
  71. deps/v8/src/base/accounting-allocator.h (34)
  72. deps/v8/src/base/atomicops_internals_arm_gcc.h (5)
  73. deps/v8/src/base/cpu.cc (7)
  74. deps/v8/src/base/logging.cc (11)
  75. deps/v8/src/base/logging.h (2)
  76. deps/v8/src/base/macros.h (11)
  77. deps/v8/src/base/platform/platform-linux.cc (6)
  78. deps/v8/src/base/platform/platform-posix.cc (17)
  79. deps/v8/src/base/platform/platform-win32.cc (1)
  80. deps/v8/src/base/platform/platform.h (5)
  81. deps/v8/src/base/platform/semaphore.cc (3)
  82. deps/v8/src/base/platform/time.cc (84)
  83. deps/v8/src/base/platform/time.h (7)
  84. deps/v8/src/base/win32-headers.h (2)
  85. deps/v8/src/bootstrapper.cc (420)
  86. deps/v8/src/builtins.cc (1155)
  87. deps/v8/src/builtins.h (62)
  88. deps/v8/src/code-factory.cc (180)
  89. deps/v8/src/code-factory.h (34)
  90. deps/v8/src/code-stubs-hydrogen.cc (284)
  91. deps/v8/src/code-stubs.cc (2748)
  92. deps/v8/src/code-stubs.h (573)
  93. deps/v8/src/codegen.h (2)
  94. deps/v8/src/collector.h (247)
  95. deps/v8/src/compiler.cc (853)
  96. deps/v8/src/compiler.h (274)
  97. deps/v8/src/compiler/access-info.cc (8)
  98. deps/v8/src/compiler/arm/code-generator-arm.cc (180)
  99. deps/v8/src/compiler/arm/instruction-codes-arm.h (6)
  100. deps/v8/src/compiler/arm/instruction-scheduler-arm.cc (6)

deps/v8/.ycm_extra_conf.py (2 lines changed)

@ -42,7 +42,7 @@ import sys
# Flags from YCM's default config.
flags = [
'-DUSE_CLANG_COMPLETER',
'-std=gnu++0x',
'-std=gnu++11',
'-x',
'c++',
]

deps/v8/AUTHORS (2 lines changed)

@ -71,6 +71,7 @@ Jianghua Yang <jianghua.yjh@alibaba-inc.com>
Joel Stanley <joel@jms.id.au>
Johan Bergström <johan@bergstroem.nu>
Jonathan Liu <net147@gmail.com>
Julien Brianceau <jbriance@cisco.com>
JunHo Seo <sejunho@gmail.com>
Kang-Hao (Kenny) Lu <kennyluck@csail.mit.edu>
Karl Skomski <karl@skomski.com>
@ -87,6 +88,7 @@ Michael Smith <mike@w3.org>
Mike Gilbert <floppymaster@gmail.com>
Mike Pennisi <mike@mikepennisi.com>
Milton Chiang <milton.chiang@mediatek.com>
Myeong-bo Shim <m0609.shim@samsung.com>
Nicolas Antonius Ernst Leopold Maria Kaiser <nikai@nikai.net>
Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>

deps/v8/BUILD.gn (444 lines changed)

@ -21,6 +21,12 @@ declare_args() {
# Enable the snapshot feature, for fast context creation.
# http://v8project.blogspot.com/2015/09/custom-startup-snapshots.html
v8_use_snapshot = true
# Similar to vfp but on MIPS.
v8_can_use_fpu_instructions = true
# Similar to the ARM hard float ABI but on MIPS.
v8_use_mips_abi_hardfloat = true
}
# TODO(jochen): These will need to be user-settable to support standalone V8
@ -28,7 +34,7 @@ declare_args() {
v8_deprecation_warnings = false
v8_enable_disassembler = false
v8_enable_gdbjit = false
v8_enable_handle_zapping = is_debug
v8_enable_handle_zapping = false
v8_enable_i18n_support = true
v8_enable_verify_heap = false
v8_interpreted_regexp = false
@ -77,6 +83,11 @@ config("internal_config_base") {
include_dirs = [ "." ]
}
# This config should be applied to code using the libplatform.
config("libplatform_config") {
include_dirs = [ "include" ]
}
# This config should only be applied to code using V8 and not any V8 code
# itself.
config("external_config") {
@ -137,48 +148,93 @@ config("toolchain") {
defines = []
cflags = []
# TODO(jochen): Add support for arm subarchs, mips, mipsel, mips64el.
if (v8_target_arch == "arm") {
defines += [ "V8_TARGET_ARCH_ARM" ]
if (current_cpu == "arm") {
if (arm_version == 7) {
defines += [ "CAN_USE_ARMV7_INSTRUCTIONS" ]
}
if (arm_fpu == "vfpv3-d16") {
defines += [ "CAN_USE_VFP3_INSTRUCTIONS" ]
} else if (arm_fpu == "vfpv3") {
defines += [
"CAN_USE_VFP3_INSTRUCTIONS",
"CAN_USE_VFP32DREGS",
]
} else if (arm_fpu == "neon") {
defines += [
"CAN_USE_VFP3_INSTRUCTIONS",
"CAN_USE_VFP32DREGS",
"CAN_USE_NEON",
]
}
} else {
# These defines are used for the ARM simulator.
if (arm_version == 7) {
defines += [ "CAN_USE_ARMV7_INSTRUCTIONS" ]
}
if (arm_fpu == "vfpv3-d16") {
defines += [ "CAN_USE_VFP3_INSTRUCTIONS" ]
} else if (arm_fpu == "vfpv3") {
defines += [
"CAN_USE_VFP3_INSTRUCTIONS",
"CAN_USE_VFP32DREGS",
]
} else if (arm_fpu == "neon") {
defines += [
"CAN_USE_ARMV7_INSTRUCTIONS",
"CAN_USE_VFP3_INSTRUCTIONS",
"CAN_USE_VFP32DREGS",
"USE_EABI_HARDFLOAT=0",
"CAN_USE_NEON",
]
}
# TODO(jochen): Add support for arm_test_noprobe.
if (current_cpu != "arm") {
# These defines are used for the ARM simulator.
if (arm_float_abi == "hard") {
defines += [ "USE_EABI_HARDFLOAT=1" ]
} else if (arm_float_abi == "softfp") {
defines += [ "USE_EABI_HARDFLOAT=0" ]
}
}
}
if (v8_target_arch == "arm64") {
defines += [ "V8_TARGET_ARCH_ARM64" ]
}
# TODO(jochen): Add support for mips.
if (v8_target_arch == "mipsel") {
defines += [ "V8_TARGET_ARCH_MIPS" ]
if (v8_can_use_fpu_instructions) {
defines += [ "CAN_USE_FPU_INSTRUCTIONS" ]
}
if (v8_use_mips_abi_hardfloat) {
defines += [
"__mips_hard_float=1",
"CAN_USE_FPU_INSTRUCTIONS",
]
} else {
defines += [ "__mips_soft_float=1" ]
}
if (mips_arch_variant == "r6") {
defines += [
"_MIPS_ARCH_MIPS32R6",
"FPU_MODE_FP64",
]
} else if (mips_arch_variant == "r2") {
defines += [ "_MIPS_ARCH_MIPS32R2" ]
if (mips_fpu_mode == "fp64") {
defines += [ "FPU_MODE_FP64" ]
} else if (mips_fpu_mode == "fpxx") {
defines += [ "FPU_MODE_FPXX" ]
} else if (mips_fpu_mode == "fp32") {
defines += [ "FPU_MODE_FP32" ]
}
} else if (mips_arch_variant == "r1") {
defines += [ "FPU_MODE_FP32" ]
}
# TODO(jochen): Add support for mips_arch_variant rx and loongson.
}
# TODO(jochen): Add support for mips64.
if (v8_target_arch == "mips64el") {
defines += [ "V8_TARGET_ARCH_MIPS64" ]
if (v8_can_use_fpu_instructions) {
defines += [ "CAN_USE_FPU_INSTRUCTIONS" ]
}
# TODO(jochen): Add support for big endian host byteorder.
defines += [ "V8_TARGET_ARCH_MIPS64_LE" ]
if (v8_use_mips_abi_hardfloat) {
defines += [
"__mips_hard_float=1",
"CAN_USE_FPU_INSTRUCTIONS",
]
} else {
defines += [ "__mips_soft_float=1" ]
}
if (mips_arch_variant == "r6") {
defines += [ "_MIPS_ARCH_MIPS64R6" ]
} else if (mips_arch_variant == "r2") {
defines += [ "_MIPS_ARCH_MIPS64R2" ]
}
}
if (v8_target_arch == "s390") {
defines += [ "V8_TARGET_ARCH_S390" ]
@ -227,8 +283,11 @@ action("js2c") {
# The script depends on this other script, this rule causes a rebuild if it
# changes.
inputs = [ "tools/jsmin.py" ]
inputs = [
"tools/jsmin.py",
]
# NOSORT
sources = [
"src/js/macros.py",
"src/messages.h",
@ -257,6 +316,7 @@ action("js2c") {
"src/js/string-iterator.js",
"src/js/templates.js",
"src/js/spread.js",
"src/js/proxy.js",
"src/debug/mirrors.js",
"src/debug/debug.js",
"src/debug/liveedit.js",
@ -291,21 +351,24 @@ action("js2c_experimental") {
# The script depends on this other script, this rule causes a rebuild if it
# changes.
inputs = [ "tools/jsmin.py" ]
inputs = [
"tools/jsmin.py",
]
# NOSORT
sources = [
"src/js/macros.py",
"src/messages.h",
"src/js/proxy.js",
"src/js/generator.js",
"src/js/harmony-atomics.js",
"src/js/harmony-regexp.js",
"src/js/harmony-regexp-exec.js",
"src/js/harmony-object-observe.js",
"src/js/harmony-sharedarraybuffer.js",
"src/js/harmony-simd.js",
"src/js/harmony-species.js",
"src/js/harmony-unicode-regexps.js",
"src/js/promise-extra.js"
"src/js/harmony-string-padding.js",
"src/js/promise-extra.js",
]
outputs = [
@ -334,7 +397,9 @@ action("js2c_extras") {
# The script depends on this other script, this rule causes a rebuild if it
# changes.
inputs = [ "tools/jsmin.py" ]
inputs = [
"tools/jsmin.py",
]
sources = v8_extra_library_files
@ -343,8 +408,7 @@ action("js2c_extras") {
]
args = [
rebase_path("$target_gen_dir/extras-libraries.cc",
root_build_dir),
rebase_path("$target_gen_dir/extras-libraries.cc", root_build_dir),
"EXTRAS",
] + rebase_path(sources, root_build_dir)
@ -364,7 +428,9 @@ action("js2c_experimental_extras") {
# The script depends on this other script, this rule causes a rebuild if it
# changes.
inputs = [ "tools/jsmin.py" ]
inputs = [
"tools/jsmin.py",
]
sources = v8_experimental_extra_library_files
@ -382,7 +448,8 @@ action("js2c_experimental_extras") {
outputs += [ "$target_gen_dir/libraries_experimental_extras.bin" ]
args += [
"--startup_blob",
rebase_path("$target_gen_dir/libraries_experimental_extras.bin", root_build_dir),
rebase_path("$target_gen_dir/libraries_experimental_extras.bin",
root_build_dir),
]
}
}
@ -392,6 +459,7 @@ action("d8_js2c") {
script = "tools/js2c.py"
# NOSORT
inputs = [
"src/d8.js",
"src/js/macros.py",
@ -425,10 +493,11 @@ if (v8_use_external_startup_data) {
deps = [
":js2c",
":js2c_experimental",
":js2c_extras",
":js2c_experimental_extras",
":js2c_extras",
]
# NOSORT
sources = [
"$target_gen_dir/libraries.bin",
"$target_gen_dir/libraries_experimental.bin",
@ -456,6 +525,7 @@ action("postmortem-metadata") {
script = "tools/gen-postmortem-metadata.py"
# NOSORT
sources = [
"src/objects.h",
"src/objects-inl.h",
@ -486,9 +556,6 @@ action("run_mksnapshot") {
"./" + rebase_path(get_label_info(":mksnapshot($snapshot_toolchain)",
"root_out_dir") + "/mksnapshot",
root_build_dir),
"--log-snapshot-positions",
"--logfile",
rebase_path("$target_gen_dir/snapshot.log", root_build_dir),
"--startup_src",
rebase_path("$target_gen_dir/snapshot.cc", root_build_dir),
]
@ -519,16 +586,16 @@ source_set("v8_nosnapshot") {
deps = [
":js2c",
":js2c_experimental",
":js2c_extras",
":js2c_experimental_extras",
":js2c_extras",
":v8_base",
]
sources = [
"$target_gen_dir/libraries.cc",
"$target_gen_dir/experimental-extras-libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
"$target_gen_dir/experimental-extras-libraries.cc",
"$target_gen_dir/libraries.cc",
"src/snapshot/snapshot-empty.cc",
]
@ -552,8 +619,8 @@ source_set("v8_snapshot") {
deps = [
":js2c",
":js2c_experimental",
":js2c_extras",
":js2c_experimental_extras",
":js2c_extras",
":v8_base",
]
public_deps = [
@ -563,10 +630,10 @@ source_set("v8_snapshot") {
]
sources = [
"$target_gen_dir/libraries.cc",
"$target_gen_dir/experimental-extras-libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
"$target_gen_dir/experimental-extras-libraries.cc",
"$target_gen_dir/libraries.cc",
"$target_gen_dir/snapshot.cc",
]
@ -586,8 +653,8 @@ if (v8_use_external_startup_data) {
deps = [
":js2c",
":js2c_experimental",
":js2c_extras",
":js2c_experimental_extras",
":js2c_extras",
":v8_base",
]
public_deps = [
@ -629,22 +696,24 @@ source_set("v8_base") {
"src/accessors.h",
"src/address-map.cc",
"src/address-map.h",
"src/allocation.cc",
"src/allocation.h",
"src/allocation-site-scopes.cc",
"src/allocation-site-scopes.h",
"src/api.cc",
"src/api.h",
"src/allocation.cc",
"src/allocation.h",
"src/api-arguments.cc",
"src/api-arguments.h",
"src/api-experimental.cc",
"src/api-experimental.h",
"src/api-natives.cc",
"src/api-natives.h",
"src/api.cc",
"src/api.h",
"src/arguments.cc",
"src/arguments.h",
"src/assembler.cc",
"src/assembler.h",
"src/assert-scope.h",
"src/assert-scope.cc",
"src/assert-scope.h",
"src/ast/ast-expression-rewriter.cc",
"src/ast/ast-expression-rewriter.h",
"src/ast/ast-expression-visitor.cc",
@ -684,27 +753,30 @@ source_set("v8_base") {
"src/bootstrapper.h",
"src/builtins.cc",
"src/builtins.h",
"src/cancelable-task.cc",
"src/cancelable-task.h",
"src/cached-powers.cc",
"src/cached-powers.h",
"src/char-predicates.cc",
"src/cancelable-task.cc",
"src/cancelable-task.h",
"src/char-predicates-inl.h",
"src/char-predicates.cc",
"src/char-predicates.h",
"src/checks.h",
"src/code-factory.cc",
"src/code-factory.h",
"src/code-stubs-hydrogen.cc",
"src/code-stubs.cc",
"src/code-stubs.h",
"src/code-stubs-hydrogen.cc",
"src/codegen.cc",
"src/codegen.h",
"src/collector.h",
"src/compilation-cache.cc",
"src/compilation-cache.h",
"src/compilation-dependencies.cc",
"src/compilation-dependencies.h",
"src/compilation-statistics.cc",
"src/compilation-statistics.h",
"src/compiler.cc",
"src/compiler.h",
"src/compiler/access-builder.cc",
"src/compiler/access-builder.h",
"src/compiler/access-info.cc",
@ -723,9 +795,9 @@ source_set("v8_base") {
"src/compiler/bytecode-branch-analysis.h",
"src/compiler/bytecode-graph-builder.cc",
"src/compiler/bytecode-graph-builder.h",
"src/compiler/c-linkage.cc",
"src/compiler/change-lowering.cc",
"src/compiler/change-lowering.h",
"src/compiler/c-linkage.cc",
"src/compiler/coalesced-live-ranges.cc",
"src/compiler/coalesced-live-ranges.h",
"src/compiler/code-generator-impl.h",
@ -748,18 +820,16 @@ source_set("v8_base") {
"src/compiler/dead-code-elimination.cc",
"src/compiler/dead-code-elimination.h",
"src/compiler/diamond.h",
"src/compiler/escape-analysis.cc",
"src/compiler/escape-analysis.h",
"src/compiler/escape-analysis-reducer.cc",
"src/compiler/escape-analysis-reducer.h",
"src/compiler/fast-accessor-assembler.cc",
"src/compiler/fast-accessor-assembler.h",
"src/compiler/frame.cc",
"src/compiler/frame.h",
"src/compiler/escape-analysis.cc",
"src/compiler/escape-analysis.h",
"src/compiler/frame-elider.cc",
"src/compiler/frame-elider.h",
"src/compiler/frame-states.cc",
"src/compiler/frame-states.h",
"src/compiler/frame.cc",
"src/compiler/frame.h",
"src/compiler/gap-resolver.cc",
"src/compiler/gap-resolver.h",
"src/compiler/graph-reducer.cc",
@ -800,10 +870,10 @@ source_set("v8_base") {
"src/compiler/js-global-object-specialization.h",
"src/compiler/js-graph.cc",
"src/compiler/js-graph.h",
"src/compiler/js-inlining.cc",
"src/compiler/js-inlining.h",
"src/compiler/js-inlining-heuristic.cc",
"src/compiler/js-inlining-heuristic.h",
"src/compiler/js-inlining.cc",
"src/compiler/js-inlining.h",
"src/compiler/js-intrinsic-lowering.cc",
"src/compiler/js-intrinsic-lowering.h",
"src/compiler/js-native-context-specialization.cc",
@ -822,9 +892,9 @@ source_set("v8_base") {
"src/compiler/liveness-analyzer.h",
"src/compiler/load-elimination.cc",
"src/compiler/load-elimination.h",
"src/compiler/loop-peeling.cc",
"src/compiler/loop-analysis.cc",
"src/compiler/loop-analysis.h",
"src/compiler/loop-peeling.cc",
"src/compiler/machine-operator-reducer.cc",
"src/compiler/machine-operator-reducer.h",
"src/compiler/machine-operator.cc",
@ -850,16 +920,16 @@ source_set("v8_base") {
"src/compiler/operator.h",
"src/compiler/osr.cc",
"src/compiler/osr.h",
"src/compiler/pipeline.cc",
"src/compiler/pipeline.h",
"src/compiler/pipeline-statistics.cc",
"src/compiler/pipeline-statistics.h",
"src/compiler/pipeline.cc",
"src/compiler/pipeline.h",
"src/compiler/raw-machine-assembler.cc",
"src/compiler/raw-machine-assembler.h",
"src/compiler/register-allocator.cc",
"src/compiler/register-allocator.h",
"src/compiler/register-allocator-verifier.cc",
"src/compiler/register-allocator-verifier.h",
"src/compiler/register-allocator.cc",
"src/compiler/register-allocator.h",
"src/compiler/representation-change.cc",
"src/compiler/representation-change.h",
"src/compiler/schedule.cc",
@ -895,8 +965,6 @@ source_set("v8_base") {
"src/compiler/wasm-linkage.cc",
"src/compiler/zone-pool.cc",
"src/compiler/zone-pool.h",
"src/compiler.cc",
"src/compiler.h",
"src/context-measure.cc",
"src/context-measure.h",
"src/contexts-inl.h",
@ -907,11 +975,11 @@ source_set("v8_base") {
"src/conversions.h",
"src/counters.cc",
"src/counters.h",
"src/crankshaft/compilation-phase.cc",
"src/crankshaft/compilation-phase.h",
"src/crankshaft/hydrogen-alias-analysis.h",
"src/crankshaft/hydrogen-bce.cc",
"src/crankshaft/hydrogen-bce.h",
"src/crankshaft/hydrogen-bch.cc",
"src/crankshaft/hydrogen-bch.h",
"src/crankshaft/hydrogen-canonicalize.cc",
"src/crankshaft/hydrogen-canonicalize.h",
"src/crankshaft/hydrogen-check-elimination.cc",
@ -1011,12 +1079,16 @@ source_set("v8_base") {
"src/extensions/statistics-extension.h",
"src/extensions/trigger-failure-extension.cc",
"src/extensions/trigger-failure-extension.h",
"src/external-reference-table.cc",
"src/external-reference-table.h",
"src/factory.cc",
"src/factory.h",
"src/fast-accessor-assembler.cc",
"src/fast-accessor-assembler.h",
"src/fast-dtoa.cc",
"src/fast-dtoa.h",
"src/field-index.h",
"src/field-index-inl.h",
"src/field-index.h",
"src/field-type.cc",
"src/field-type.h",
"src/fixed-dtoa.cc",
@ -1064,47 +1136,43 @@ source_set("v8_base") {
"src/heap/objects-visiting-inl.h",
"src/heap/objects-visiting.cc",
"src/heap/objects-visiting.h",
"src/heap/page-parallel-job.h",
"src/heap/remembered-set.cc",
"src/heap/remembered-set.h",
"src/heap/scavenge-job.h",
"src/heap/scavenge-job.cc",
"src/heap/scavenge-job.h",
"src/heap/scavenger-inl.h",
"src/heap/scavenger.cc",
"src/heap/scavenger.h",
"src/heap/slot-set.h",
"src/heap/slots-buffer.cc",
"src/heap/slots-buffer.h",
"src/heap/spaces-inl.h",
"src/heap/spaces.cc",
"src/heap/spaces.h",
"src/heap/store-buffer-inl.h",
"src/heap/store-buffer.cc",
"src/heap/store-buffer.h",
"src/i18n.cc",
"src/i18n.h",
"src/icu_util.cc",
"src/icu_util.h",
"src/ic/access-compiler.cc",
"src/ic/access-compiler.h",
"src/ic/call-optimization.cc",
"src/ic/call-optimization.h",
"src/ic/handler-compiler.cc",
"src/ic/handler-compiler.h",
"src/ic/ic-compiler.cc",
"src/ic/ic-compiler.h",
"src/ic/ic-inl.h",
"src/ic/ic-state.cc",
"src/ic/ic-state.h",
"src/ic/ic.cc",
"src/ic/ic.h",
"src/ic/ic-compiler.cc",
"src/ic/ic-compiler.h",
"src/ic/stub-cache.cc",
"src/ic/stub-cache.h",
"src/icu_util.cc",
"src/icu_util.h",
"src/identity-map.cc",
"src/identity-map.h",
"src/interface-descriptors.cc",
"src/interface-descriptors.h",
"src/interpreter/bytecodes.cc",
"src/interpreter/bytecodes.h",
"src/interpreter/bytecode-array-builder.cc",
"src/interpreter/bytecode-array-builder.h",
"src/interpreter/bytecode-array-iterator.cc",
@ -1114,18 +1182,20 @@ source_set("v8_base") {
"src/interpreter/bytecode-register-allocator.cc",
"src/interpreter/bytecode-register-allocator.h",
"src/interpreter/bytecode-traits.h",
"src/interpreter/bytecodes.cc",
"src/interpreter/bytecodes.h",
"src/interpreter/constant-array-builder.cc",
"src/interpreter/constant-array-builder.h",
"src/interpreter/control-flow-builders.cc",
"src/interpreter/control-flow-builders.h",
"src/interpreter/handler-table-builder.cc",
"src/interpreter/handler-table-builder.h",
"src/interpreter/interpreter.cc",
"src/interpreter/interpreter.h",
"src/interpreter/interpreter-assembler.cc",
"src/interpreter/interpreter-assembler.h",
"src/interpreter/register-translator.cc",
"src/interpreter/register-translator.h",
"src/interpreter/interpreter-intrinsics.cc",
"src/interpreter/interpreter-intrinsics.h",
"src/interpreter/interpreter.cc",
"src/interpreter/interpreter.h",
"src/interpreter/source-position-table.cc",
"src/interpreter/source-position-table.h",
"src/isolate-inl.h",
@ -1133,8 +1203,8 @@ source_set("v8_base") {
"src/isolate.h",
"src/json-parser.h",
"src/json-stringifier.h",
"src/key-accumulator.h",
"src/key-accumulator.cc",
"src/keys.cc",
"src/keys.h",
"src/layout-descriptor-inl.h",
"src/layout-descriptor.cc",
"src/layout-descriptor.h",
@ -1147,9 +1217,9 @@ source_set("v8_base") {
"src/log.h",
"src/lookup.cc",
"src/lookup.h",
"src/macro-assembler.h",
"src/machine-type.cc",
"src/machine-type.h",
"src/macro-assembler.h",
"src/messages.cc",
"src/messages.h",
"src/msan.h",
@ -1188,6 +1258,8 @@ source_set("v8_base") {
"src/parsing/token.h",
"src/pending-compilation-error-handler.cc",
"src/pending-compilation-error-handler.h",
"src/perf-jit.cc",
"src/perf-jit.h",
"src/profiler/allocation-tracker.cc",
"src/profiler/allocation-tracker.h",
"src/profiler/circular-queue-inl.h",
@ -1279,19 +1351,29 @@ source_set("v8_base") {
"src/signature.h",
"src/simulator.h",
"src/small-pointer-list.h",
"src/snapshot/natives.h",
"src/snapshot/code-serializer.cc",
"src/snapshot/code-serializer.h",
"src/snapshot/deserializer.cc",
"src/snapshot/deserializer.h",
"src/snapshot/natives-common.cc",
"src/snapshot/serialize.cc",
"src/snapshot/serialize.h",
"src/snapshot/natives.h",
"src/snapshot/partial-serializer.cc",
"src/snapshot/partial-serializer.h",
"src/snapshot/serializer-common.cc",
"src/snapshot/serializer-common.h",
"src/snapshot/serializer.cc",
"src/snapshot/serializer.h",
"src/snapshot/snapshot-common.cc",
"src/snapshot/snapshot-source-sink.cc",
"src/snapshot/snapshot-source-sink.h",
"src/snapshot/snapshot.h",
"src/snapshot/startup-serializer.cc",
"src/snapshot/startup-serializer.h",
"src/source-position.h",
"src/splay-tree.h",
"src/splay-tree-inl.h",
"src/snapshot/snapshot.h",
"src/startup-data-util.h",
"src/splay-tree.h",
"src/startup-data-util.cc",
"src/startup-data-util.h",
"src/string-builder.cc",
"src/string-builder.h",
"src/string-search.h",
@ -1299,6 +1381,8 @@ source_set("v8_base") {
"src/string-stream.h",
"src/strtod.cc",
"src/strtod.h",
"src/third_party/fdlibm/fdlibm.cc",
"src/third_party/fdlibm/fdlibm.h",
"src/tracing/trace-event.cc",
"src/tracing/trace-event.h",
"src/transitions-inl.h",
@ -1317,13 +1401,13 @@ source_set("v8_base") {
"src/typing-asm.h",
"src/typing-reset.cc",
"src/typing-reset.h",
"src/unicode-inl.h",
"src/unicode.cc",
"src/unicode.h",
"src/unicode-cache-inl.h",
"src/unicode-cache.h",
"src/unicode-decoder.cc",
"src/unicode-decoder.h",
"src/unicode-inl.h",
"src/unicode.cc",
"src/unicode.h",
"src/utils-inl.h",
"src/utils.cc",
"src/utils.h",
@ -1345,6 +1429,7 @@ source_set("v8_base") {
"src/wasm/encoder.h",
"src/wasm/module-decoder.cc",
"src/wasm/module-decoder.h",
"src/wasm/wasm-external-refs.h",
"src/wasm/wasm-js.cc",
"src/wasm/wasm-js.h",
"src/wasm/wasm-macro-gen.h",
@ -1354,26 +1439,24 @@ source_set("v8_base") {
"src/wasm/wasm-opcodes.h",
"src/wasm/wasm-result.cc",
"src/wasm/wasm-result.h",
"src/zone.cc",
"src/zone.h",
"src/zone-allocator.h",
"src/zone-containers.h",
"src/third_party/fdlibm/fdlibm.cc",
"src/third_party/fdlibm/fdlibm.h",
"src/zone.cc",
"src/zone.h",
]
if (v8_target_arch == "x86") {
sources += [
"src/compiler/ia32/code-generator-ia32.cc",
"src/compiler/ia32/instruction-codes-ia32.h",
"src/compiler/ia32/instruction-scheduler-ia32.cc",
"src/compiler/ia32/instruction-selector-ia32.cc",
"src/crankshaft/ia32/lithium-codegen-ia32.cc",
"src/crankshaft/ia32/lithium-codegen-ia32.h",
"src/crankshaft/ia32/lithium-gap-resolver-ia32.cc",
"src/crankshaft/ia32/lithium-gap-resolver-ia32.h",
"src/crankshaft/ia32/lithium-ia32.cc",
"src/crankshaft/ia32/lithium-ia32.h",
"src/compiler/ia32/code-generator-ia32.cc",
"src/compiler/ia32/instruction-codes-ia32.h",
"src/compiler/ia32/instruction-scheduler-ia32.cc",
"src/compiler/ia32/instruction-selector-ia32.cc",
"src/debug/ia32/debug-ia32.cc",
"src/full-codegen/ia32/full-codegen-ia32.cc",
"src/ia32/assembler-ia32-inl.h",
@ -1394,8 +1477,8 @@ source_set("v8_base") {
"src/ia32/macro-assembler-ia32.h",
"src/ic/ia32/access-compiler-ia32.cc",
"src/ic/ia32/handler-compiler-ia32.cc",
"src/ic/ia32/ic-ia32.cc",
"src/ic/ia32/ic-compiler-ia32.cc",
"src/ic/ia32/ic-ia32.cc",
"src/ic/ia32/stub-cache-ia32.cc",
"src/regexp/ia32/regexp-macro-assembler-ia32.cc",
"src/regexp/ia32/regexp-macro-assembler-ia32.h",
@ -1416,8 +1499,8 @@ source_set("v8_base") {
"src/full-codegen/x64/full-codegen-x64.cc",
"src/ic/x64/access-compiler-x64.cc",
"src/ic/x64/handler-compiler-x64.cc",
"src/ic/x64/ic-x64.cc",
"src/ic/x64/ic-compiler-x64.cc",
"src/ic/x64/ic-x64.cc",
"src/ic/x64/stub-cache-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.h",
@ -1448,8 +1531,8 @@ source_set("v8_base") {
"src/arm/code-stubs-arm.h",
"src/arm/codegen-arm.cc",
"src/arm/codegen-arm.h",
"src/arm/constants-arm.h",
"src/arm/constants-arm.cc",
"src/arm/constants-arm.h",
"src/arm/cpu-arm.cc",
"src/arm/deoptimizer-arm.cc",
"src/arm/disasm-arm.cc",
@ -1483,19 +1566,19 @@ source_set("v8_base") {
]
} else if (v8_target_arch == "arm64") {
sources += [
"src/arm64/assembler-arm64-inl.h",
"src/arm64/assembler-arm64.cc",
"src/arm64/assembler-arm64.h",
"src/arm64/assembler-arm64-inl.h",
"src/arm64/builtins-arm64.cc",
"src/arm64/codegen-arm64.cc",
"src/arm64/codegen-arm64.h",
"src/arm64/code-stubs-arm64.cc",
"src/arm64/code-stubs-arm64.h",
"src/arm64/codegen-arm64.cc",
"src/arm64/codegen-arm64.h",
"src/arm64/constants-arm64.h",
"src/arm64/cpu-arm64.cc",
"src/arm64/decoder-arm64-inl.h",
"src/arm64/decoder-arm64.cc",
"src/arm64/decoder-arm64.h",
"src/arm64/decoder-arm64-inl.h",
"src/arm64/deoptimizer-arm64.cc",
"src/arm64/disasm-arm64.cc",
"src/arm64/disasm-arm64.h",
@ -1507,9 +1590,9 @@ source_set("v8_base") {
"src/arm64/instrument-arm64.h",
"src/arm64/interface-descriptors-arm64.cc",
"src/arm64/interface-descriptors-arm64.h",
"src/arm64/macro-assembler-arm64-inl.h",
"src/arm64/macro-assembler-arm64.cc",
"src/arm64/macro-assembler-arm64.h",
"src/arm64/macro-assembler-arm64-inl.h",
"src/arm64/simulator-arm64.cc",
"src/arm64/simulator-arm64.h",
"src/arm64/utils-arm64.cc",
@ -1518,9 +1601,9 @@ source_set("v8_base") {
"src/compiler/arm64/instruction-codes-arm64.h",
"src/compiler/arm64/instruction-scheduler-arm64.cc",
"src/compiler/arm64/instruction-selector-arm64.cc",
"src/crankshaft/arm64/delayed-masm-arm64-inl.h",
"src/crankshaft/arm64/delayed-masm-arm64.cc",
"src/crankshaft/arm64/delayed-masm-arm64.h",
"src/crankshaft/arm64/delayed-masm-arm64-inl.h",
"src/crankshaft/arm64/lithium-arm64.cc",
"src/crankshaft/arm64/lithium-arm64.h",
"src/crankshaft/arm64/lithium-codegen-arm64.cc",
@ -1553,17 +1636,17 @@ source_set("v8_base") {
"src/full-codegen/mips/full-codegen-mips.cc",
"src/ic/mips/access-compiler-mips.cc",
"src/ic/mips/handler-compiler-mips.cc",
"src/ic/mips/ic-mips.cc",
"src/ic/mips/ic-compiler-mips.cc",
"src/ic/mips/ic-mips.cc",
"src/ic/mips/stub-cache-mips.cc",
"src/mips/assembler-mips-inl.h",
"src/mips/assembler-mips.cc",
"src/mips/assembler-mips.h",
"src/mips/assembler-mips-inl.h",
"src/mips/builtins-mips.cc",
"src/mips/codegen-mips.cc",
"src/mips/codegen-mips.h",
"src/mips/code-stubs-mips.cc",
"src/mips/code-stubs-mips.h",
"src/mips/codegen-mips.cc",
"src/mips/codegen-mips.h",
"src/mips/constants-mips.cc",
"src/mips/constants-mips.h",
"src/mips/cpu-mips.cc",
@ -1595,17 +1678,17 @@ source_set("v8_base") {
"src/full-codegen/mips64/full-codegen-mips64.cc",
"src/ic/mips64/access-compiler-mips64.cc",
"src/ic/mips64/handler-compiler-mips64.cc",
"src/ic/mips64/ic-mips64.cc",
"src/ic/mips64/ic-compiler-mips64.cc",
"src/ic/mips64/ic-mips64.cc",
"src/ic/mips64/stub-cache-mips64.cc",
"src/mips64/assembler-mips64-inl.h",
"src/mips64/assembler-mips64.cc",
"src/mips64/assembler-mips64.h",
"src/mips64/assembler-mips64-inl.h",
"src/mips64/builtins-mips64.cc",
"src/mips64/codegen-mips64.cc",
"src/mips64/codegen-mips64.h",
"src/mips64/code-stubs-mips64.cc",
"src/mips64/code-stubs-mips64.h",
"src/mips64/codegen-mips64.cc",
"src/mips64/codegen-mips64.h",
"src/mips64/constants-mips64.cc",
"src/mips64/constants-mips64.h",
"src/mips64/cpu-mips64.cc",
@ -1621,6 +1704,48 @@ source_set("v8_base") {
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
"src/regexp/mips64/regexp-macro-assembler-mips64.h",
]
} else if (v8_target_arch == "s390" || v8_target_arch == "s390x") {
sources += [
"src/compiler/s390/code-generator-s390.cc",
"src/compiler/s390/instruction-codes-s390.h",
"src/compiler/s390/instruction-scheduler-s390.cc",
"src/compiler/s390/instruction-selector-s390.cc",
"src/crankshaft/s390/lithium-codegen-s390.cc",
"src/crankshaft/s390/lithium-codegen-s390.h",
"src/crankshaft/s390/lithium-gap-resolver-s390.cc",
"src/crankshaft/s390/lithium-gap-resolver-s390.h",
"src/crankshaft/s390/lithium-s390.cc",
"src/crankshaft/s390/lithium-s390.h",
"src/debug/s390/debug-s390.cc",
"src/full-codegen/s390/full-codegen-s390.cc",
"src/ic/s390/access-compiler-s390.cc",
"src/ic/s390/handler-compiler-s390.cc",
"src/ic/s390/ic-compiler-s390.cc",
"src/ic/s390/ic-s390.cc",
"src/ic/s390/stub-cache-s390.cc",
"src/regexp/s390/regexp-macro-assembler-s390.cc",
"src/regexp/s390/regexp-macro-assembler-s390.h",
"src/s390/assembler-s390-inl.h",
"src/s390/assembler-s390.cc",
"src/s390/assembler-s390.h",
"src/s390/builtins-s390.cc",
"src/s390/code-stubs-s390.cc",
"src/s390/code-stubs-s390.h",
"src/s390/codegen-s390.cc",
"src/s390/codegen-s390.h",
"src/s390/constants-s390.cc",
"src/s390/constants-s390.h",
"src/s390/cpu-s390.cc",
"src/s390/deoptimizer-s390.cc",
"src/s390/disasm-s390.cc",
"src/s390/frames-s390.cc",
"src/s390/frames-s390.h",
"src/s390/interface-descriptors-s390.cc",
"src/s390/macro-assembler-s390.cc",
"src/s390/macro-assembler-s390.h",
"src/s390/simulator-s390.cc",
"src/s390/simulator-s390.h",
]
}
configs -= [ "//build/config/compiler:chromium_code" ]
@ -1671,14 +1796,16 @@ source_set("v8_libbase") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
"src/base/accounting-allocator.cc",
"src/base/accounting-allocator.h",
"src/base/adapters.h",
"src/base/atomicops.h",
"src/base/atomicops_internals_arm64_gcc.h",
"src/base/atomicops_internals_arm_gcc.h",
"src/base/atomicops_internals_atomicword_compat.h",
"src/base/atomicops_internals_mac.h",
"src/base/atomicops_internals_mips_gcc.h",
"src/base/atomicops_internals_mips64_gcc.h",
"src/base/atomicops_internals_mips_gcc.h",
"src/base/atomicops_internals_portable.h",
"src/base/atomicops_internals_s390_gcc.h",
"src/base/atomicops_internals_tsan.h",
@ -1702,16 +1829,16 @@ source_set("v8_libbase") {
"src/base/macros.h",
"src/base/once.cc",
"src/base/once.h",
"src/base/platform/elapsed-timer.h",
"src/base/platform/time.cc",
"src/base/platform/time.h",
"src/base/platform/condition-variable.cc",
"src/base/platform/condition-variable.h",
"src/base/platform/elapsed-timer.h",
"src/base/platform/mutex.cc",
"src/base/platform/mutex.h",
"src/base/platform/platform.h",
"src/base/platform/semaphore.cc",
"src/base/platform/semaphore.h",
"src/base/platform/time.cc",
"src/base/platform/time.h",
"src/base/safe_conversions.h",
"src/base/safe_conversions_impl.h",
"src/base/safe_math.h",
@ -1745,10 +1872,16 @@ source_set("v8_libbase") {
if (is_linux) {
sources += [ "src/base/platform/platform-linux.cc" ]
libs = [ "dl", "rt" ]
libs = [
"dl",
"rt",
]
} else if (is_android) {
if (current_toolchain == host_toolchain) {
libs = [ "dl", "rt" ]
libs = [
"dl",
"rt",
]
if (host_os == "mac") {
sources += [ "src/base/platform/platform-macos.cc" ]
} else {
@ -1818,6 +1951,7 @@ source_set("fuzzer_support") {
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [
":internal_config_base",
":libplatform_config",
":features",
":toolchain",
]
@ -1844,6 +1978,7 @@ if (current_toolchain == snapshot_toolchain) {
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [
":internal_config",
":libplatform_config",
":features",
":toolchain",
]
@ -1933,9 +2068,7 @@ if ((current_toolchain == host_toolchain && v8_toolset_for_d8 == "host") ||
}
if (!is_component_build) {
sources += [
"$target_gen_dir/d8-js.cc",
]
sources += [ "$target_gen_dir/d8-js.cc" ]
}
if (v8_enable_i18n_support) {
deps += [ "//third_party/icu" ]
@ -1956,6 +2089,7 @@ source_set("json_fuzzer") {
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [
":internal_config",
":libplatform_config",
":features",
":toolchain",
]
@ -1974,6 +2108,7 @@ source_set("parser_fuzzer") {
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [
":internal_config",
":libplatform_config",
":features",
":toolchain",
]
@ -1992,6 +2127,45 @@ source_set("regexp_fuzzer") {
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [
":internal_config",
":libplatform_config",
":features",
":toolchain",
]
}
source_set("wasm_fuzzer") {
sources = [
"test/fuzzer/wasm.cc",
]
deps = [
":fuzzer_support",
]
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [
":internal_config",
":libplatform_config",
":features",
":toolchain",
]
}
source_set("wasm_asmjs_fuzzer") {
sources = [
"test/fuzzer/wasm-asmjs.cc",
]
deps = [
":fuzzer_support",
]
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [
":internal_config",
":libplatform_config",
":features",
":toolchain",
]

deps/v8/CODE_OF_CONDUCT.md (8 lines changed)

@ -0,0 +1,8 @@
# V8 Code of Conduct
As part of the Chromium team, the V8 team is committed to preserving and
fostering a diverse, welcoming community. To this end, the [Chromium Code of
Conduct](https://chromium.googlesource.com/chromium/src/+/master/CODE_OF_CONDUCT.md)
applies to our repos and organizations, mailing lists, blog content, and any
other Chromium-supported communication group, as well as any private
communication initiated in the context of these spaces.

deps/v8/ChangeLog (1761 lines changed)

File diff suppressed because it is too large

deps/v8/DEPS (16 lines changed)

@ -8,15 +8,15 @@ vars = {
deps = {
"v8/build/gyp":
Var("git_url") + "/external/gyp.git" + "@" + "ed163ce233f76a950dce1751ac851dbe4b1c00cc",
Var("git_url") + "/external/gyp.git" + "@" + "4ec6c4e3a94bd04a6da2858163d40b2429b8aad1",
"v8/third_party/icu":
Var("git_url") + "/chromium/deps/icu.git" + "@" + "e466f6ac8f60bb9697af4a91c6911c6fc4aec95f",
Var("git_url") + "/chromium/deps/icu.git" + "@" + "c291cde264469b20ca969ce8832088acb21e0c48",
"v8/buildtools":
Var("git_url") + "/chromium/buildtools.git" + "@" + "97b5c485707335dd2952c05bf11412ada3f4fb6f",
Var("git_url") + "/chromium/buildtools.git" + "@" + "80b5126f91be4eb359248d28696746ef09d5be67",
"v8/base/trace_event/common":
Var("git_url") + "/chromium/src/base/trace_event/common.git" + "@" + "4b09207e447ae5bd34643b4c6321bee7b76d35f9",
Var("git_url") + "/chromium/src/base/trace_event/common.git" + "@" + "c8c8665c2deaf1cc749d9f8e153256d4f67bf1b8",
"v8/tools/swarming_client":
Var('git_url') + '/external/swarming.client.git' + '@' + "0b908f18767c8304dc089454bc1c91755d21f1f5",
Var('git_url') + '/external/swarming.client.git' + '@' + "df6e95e7669883c8fe9ef956c69a544154701a49",
"v8/testing/gtest":
Var("git_url") + "/external/github.com/google/googletest.git" + "@" + "6f8a66431cb592dad629028a50b3dd418a408c87",
"v8/testing/gmock":
@ -27,15 +27,15 @@ deps = {
Var("git_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be",
"v8/test/simdjs/data": Var("git_url") + "/external/github.com/tc39/ecmascript_simd.git" + "@" + "c8ef63c728283debc25891123eb00482fee4b8cd",
"v8/test/test262/data":
Var("git_url") + "/external/github.com/tc39/test262.git" + "@" + "738a24b109f3fa71be44d5c3701d73141d494510",
Var("git_url") + "/external/github.com/tc39/test262.git" + "@" + "57d3e2216fa86ad63b6c0a54914ba9dcbff96003",
"v8/tools/clang":
Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "a8adb78c8eda9bddb2aa9c51f3fee60296de1ad4",
Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "faee82e064e04e5cbf60cc7327e7a81d2a4557ad",
}
deps_os = {
"android": {
"v8/third_party/android_tools":
Var("git_url") + "/android_tools.git" + "@" + "f4c36ad89b2696b37d9cd7ca7d984b691888b188",
Var("git_url") + "/android_tools.git" + "@" + "adfd31794011488cd0fc716b53558b2d8a67af8b",
},
"win": {
"v8/third_party/cygwin":

deps/v8/Makefile (7 lines changed)

@ -162,6 +162,9 @@ endif
ifdef embedscript
GYPFLAGS += -Dembed_script=$(embedscript)
endif
ifdef warmupscript
GYPFLAGS += -Dwarmup_script=$(warmupscript)
endif
ifeq ($(goma), on)
GYPFLAGS += -Duse_goma=1
endif
@ -219,6 +222,10 @@ endif
ifeq ($(arm_test_noprobe), on)
GYPFLAGS += -Darm_test_noprobe=on
endif
# Do not omit the frame pointer, needed for profiling with perf
ifeq ($(no_omit_framepointer), on)
GYPFLAGS += -Drelease_extra_cflags=-fno-omit-frame-pointer
endif
# ----------------- available targets: --------------------
# - "grokdump": rebuilds heap constants lists used by grokdump

deps/v8/OWNERS (7 lines changed)

@ -1,5 +1,7 @@
adamk@chromium.org
ahaas@chromium.org
bmeurer@chromium.org
cbruni@chromium.org
danno@chromium.org
epertoso@chromium.org
hablich@chromium.org
@ -10,10 +12,13 @@ jkummerow@chromium.org
jochen@chromium.org
littledan@chromium.org
machenbach@chromium.org
mlippautz@chromium.org
marja@chromium.org
mlippautz@chromium.org
mstarzinger@chromium.org
mvstanton@chromium.org
mythria@chromium.org
neis@chromium.org
oth@chromium.org
rmcilroy@chromium.org
rossberg@chromium.org
titzer@chromium.org

deps/v8/base/trace_event/common/trace_event_common.h (16 lines changed)

@ -156,7 +156,7 @@
// };
//
// TRACE_EVENT1("foo", "bar", "data",
// scoped_refptr<ConvertableToTraceFormat>(new MyData()));
// std::unique_ptr<ConvertableToTraceFormat>(new MyData()));
//
// The trace framework will take ownership of the passed pointer and it will
// be free'd when the trace buffer is flushed.
@ -926,6 +926,20 @@
name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
arg1_val, arg2_name, arg2_val)
// Special trace event macro to trace task execution with the location where it
// was posted from.
#define TRACE_TASK_EXECUTION(run_function, task) \
TRACE_EVENT2("toplevel", run_function, "src_file", \
(task).posted_from.file_name(), "src_func", \
(task).posted_from.function_name()); \
TRACE_EVENT_API_SCOPED_TASK_EXECUTION_EVENT INTERNAL_TRACE_EVENT_UID( \
task_event)((task).posted_from.file_name());
// TRACE_EVENT_METADATA* events are information related to other
// injected events, not events in their own right.
#define TRACE_EVENT_METADATA1(category_group, name, arg1_name, arg1_val) \
INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, arg1_name, arg1_val)
// Records a clock sync event.
#define TRACE_EVENT_CLOCK_SYNC_RECEIVER(sync_id) \
INTERNAL_TRACE_EVENT_ADD( \

deps/v8/build/coverage_wrapper.py (36 lines changed)

@ -0,0 +1,36 @@
#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# CC/CXX wrapper script that excludes certain file patterns from coverage
# instrumentation.
import re
import subprocess
import sys
exclusions = [
'buildtools',
'src/third_party',
'third_party',
'test',
'testing',
]
def remove_if_exists(string_list, item):
if item in string_list:
string_list.remove(item)
args = sys.argv[1:]
text = ' '.join(sys.argv[2:])
for exclusion in exclusions:
if re.search(r'\-o obj/%s[^ ]*\.o' % exclusion, text):
remove_if_exists(args, '-fprofile-arcs')
remove_if_exists(args, '-ftest-coverage')
remove_if_exists(args, '-fsanitize-coverage=func')
remove_if_exists(args, '-fsanitize-coverage=bb')
remove_if_exists(args, '-fsanitize-coverage=edge')
break
sys.exit(subprocess.check_call(args))

deps/v8/build/get_landmines.py (1 line changed)

@ -26,6 +26,7 @@ def main():
print 'Cleanup after windows ninja switch attempt.'
print 'Switching to pinned msvs toolchain.'
print 'Clobbering to hopefully resolve problem with mksnapshot'
print 'Clobber after ICU roll.'
return 0

deps/v8/build/isolate.gypi (2 lines changed)

@ -76,6 +76,8 @@
'--config-variable', 'icu_use_data_file_flag=0',
'--config-variable', 'msan=<(msan)',
'--config-variable', 'tsan=<(tsan)',
'--config-variable', 'coverage=<(coverage)',
'--config-variable', 'sanitizer_coverage=<(sanitizer_coverage)',
'--config-variable', 'component=<(component)',
'--config-variable', 'target_arch=<(target_arch)',
'--config-variable', 'use_custom_libcxx=<(use_custom_libcxx)',

deps/v8/build/standalone.gypi (64 lines changed)

@ -44,7 +44,7 @@
'v8_deprecation_warnings': 1,
'v8_imminent_deprecation_warnings': 1,
'msvs_multi_core_compile%': '1',
'mac_deployment_target%': '10.5',
'mac_deployment_target%': '10.7',
'release_extra_cflags%': '',
'variables': {
'variables': {
@ -68,7 +68,9 @@
'target_arch%': '<(host_arch)',
'base_dir%': '<!(cd <(DEPTH) && python -c "import os; print os.getcwd()")',
# Instrument for code coverage with gcov.
# Instrument for code coverage and use coverage wrapper to exclude some
# files. Uses gcov if clang=0 is set explicitly. Otherwise,
# sanitizer_coverage must be set too.
'coverage%': 0,
},
'base_dir%': '<(base_dir)',
@ -113,6 +115,8 @@
# Check if valgrind directories are present.
'has_valgrind%': '<!pymod_do_main(has_valgrind)',
'test_isolation_mode%': 'noop',
'conditions': [
# Set default gomadir.
['OS=="win"', {
@ -120,8 +124,7 @@
}, {
'gomadir': '<!(/bin/echo -n ${HOME}/goma)',
}],
['host_arch!="ppc" and host_arch!="ppc64" and host_arch!="ppc64le" and host_arch!="s390" and host_arch!="s390x" and \
coverage==0', {
['host_arch!="ppc" and host_arch!="ppc64" and host_arch!="ppc64le" and host_arch!="s390" and host_arch!="s390x"', {
'host_clang%': 1,
}, {
'host_clang%': 0,
@ -136,14 +139,6 @@
}, {
'linux_use_bundled_gold%': 0,
}],
# TODO(machenbach): Remove the conditions as more configurations are
# supported.
['OS=="linux" or OS=="win"', {
'test_isolation_mode%': 'check',
}, {
'test_isolation_mode%': 'noop',
}],
],
},
'base_dir%': '<(base_dir)',
@ -234,7 +229,7 @@
'v8_enable_gdbjit%': 0,
}],
['(OS=="linux" or OS=="mac") and (target_arch=="ia32" or target_arch=="x64") and \
(v8_target_arch!="x87" and v8_target_arch!="x32") and coverage==0', {
(v8_target_arch!="x87" and v8_target_arch!="x32")', {
'clang%': 1,
}, {
'clang%': 0,
@ -706,7 +701,7 @@
'-Wnon-virtual-dtor',
'-fno-exceptions',
'-fno-rtti',
'-std=gnu++0x',
'-std=gnu++11',
],
'ldflags': [ '-pthread', ],
'conditions': [
@ -733,7 +728,7 @@
[ 'component=="shared_library"', {
'cflags': [ '-fPIC', ],
}],
[ 'coverage==1', {
[ 'clang==0 and coverage==1', {
'cflags': [ '-fprofile-arcs', '-ftest-coverage'],
'ldflags': [ '-fprofile-arcs'],
}],
@ -756,7 +751,7 @@
'-Wnon-virtual-dtor',
'-fno-exceptions',
'-fno-rtti',
'-std=gnu++0x',
'-std=gnu++11',
],
'conditions': [
[ 'visibility=="hidden"', {
@ -986,7 +981,7 @@
['clang==1', {
'xcode_settings': {
'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
'CLANG_CXX_LANGUAGE_STANDARD': 'gnu++0x', # -std=gnu++0x
'CLANG_CXX_LANGUAGE_STANDARD': 'c++11', # -std=c++11
},
'conditions': [
['v8_target_arch=="x64" or v8_target_arch=="arm64" \
@ -1262,11 +1257,36 @@
# make generator doesn't support CC_wrapper without CC
# in make_global_settings yet.
['use_goma==1 and ("<(GENERATOR)"=="ninja" or clang==1)', {
'make_global_settings': [
['CC_wrapper', '<(gomadir)/gomacc'],
['CXX_wrapper', '<(gomadir)/gomacc'],
['CC.host_wrapper', '<(gomadir)/gomacc'],
['CXX.host_wrapper', '<(gomadir)/gomacc'],
'conditions': [
['coverage==1', {
# Wrap goma with coverage wrapper.
'make_global_settings': [
['CC_wrapper', '<(base_dir)/build/coverage_wrapper.py <(gomadir)/gomacc'],
['CXX_wrapper', '<(base_dir)/build/coverage_wrapper.py <(gomadir)/gomacc'],
['CC.host_wrapper', '<(base_dir)/build/coverage_wrapper.py <(gomadir)/gomacc'],
['CXX.host_wrapper', '<(base_dir)/build/coverage_wrapper.py <(gomadir)/gomacc'],
],
}, {
# Use only goma wrapper.
'make_global_settings': [
['CC_wrapper', '<(gomadir)/gomacc'],
['CXX_wrapper', '<(gomadir)/gomacc'],
['CC.host_wrapper', '<(gomadir)/gomacc'],
['CXX.host_wrapper', '<(gomadir)/gomacc'],
],
}],
],
}, {
'conditions': [
['coverage==1', {
# Use only coverage wrapper.
'make_global_settings': [
['CC_wrapper', '<(base_dir)/build/coverage_wrapper.py'],
['CXX_wrapper', '<(base_dir)/build/coverage_wrapper.py'],
['CC.host_wrapper', '<(base_dir)/build/coverage_wrapper.py'],
['CXX.host_wrapper', '<(base_dir)/build/coverage_wrapper.py'],
],
}],
],
}],
['use_lto==1', {

deps/v8/build/toolchain.gypi (24 lines changed)

@ -1287,7 +1287,8 @@
}],
],
}, # Debug
'Release': {
'ReleaseBase': {
'abstract': 1,
'variables': {
'v8_enable_slow_dchecks%': 0,
},
@ -1367,6 +1368,27 @@
}],
], # conditions
}, # Release
'Release': {
'inherit_from': ['ReleaseBase'],
}, # Release
'conditions': [
[ 'OS=="win"', {
# TODO(bradnelson): add a gyp mechanism to make this more graceful.
'Debug_x64': {
'inherit_from': ['DebugBaseCommon'],
'conditions': [
['v8_optimized_debug==0', {
'inherit_from': ['DebugBase0'],
}, {
'inherit_from': ['DebugBase1'],
}],
],
},
'Release_x64': {
'inherit_from': ['ReleaseBase'],
},
}],
],
}, # configurations
}, # target_defaults
}

deps/v8/include/libplatform/libplatform.h (2 lines changed)

@ -5,7 +5,7 @@
#ifndef V8_LIBPLATFORM_LIBPLATFORM_H_
#define V8_LIBPLATFORM_LIBPLATFORM_H_
#include "include/v8-platform.h"
#include "v8-platform.h" // NOLINT(build/include)
namespace v8 {
namespace platform {

deps/v8/include/v8-debug.h (14 lines changed)

@ -18,13 +18,11 @@ enum DebugEvent {
Exception = 2,
NewFunction = 3,
BeforeCompile = 4,
AfterCompile = 5,
AfterCompile = 5,
CompileError = 6,
PromiseEvent = 7,
AsyncTaskEvent = 8,
AsyncTaskEvent = 7,
};
class V8_EXPORT Debug {
public:
/**
@ -276,6 +274,14 @@ class V8_EXPORT Debug {
*/
static MaybeLocal<Array> GetInternalProperties(Isolate* isolate,
Local<Value> value);
/**
* Defines if the ES2015 tail call elimination feature is enabled or not.
* The change of this flag triggers deoptimization of all functions that
* contain calls at tail position.
*/
static bool IsTailCallEliminationEnabled(Isolate* isolate);
static void SetTailCallEliminationEnabled(Isolate* isolate, bool enabled);
};
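These two declarations are the whole embedder-facing surface of the new
feature. A minimal sketch of how an embedder might use it (the helper name
and the already-initialized isolate are assumptions, not part of the commit):

    #include "include/v8-debug.h"

    // Sketch: enable ES2015 tail call elimination for an isolate. Per the
    // comment above, flipping the flag deoptimizes all functions that
    // contain calls at tail position.
    void EnableTailCalls(v8::Isolate* isolate) {
      if (!v8::Debug::IsTailCallEliminationEnabled(isolate)) {
        v8::Debug::SetTailCallEliminationEnabled(isolate, true);
      }
    }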

deps/v8/include/v8-experimental.h (2 lines changed)

@ -10,7 +10,7 @@
#ifndef V8_INCLUDE_V8_EXPERIMENTAL_H_
#define V8_INCLUDE_V8_EXPERIMENTAL_H_
#include "include/v8.h"
#include "v8.h" // NOLINT(build/include)
namespace v8 {
namespace experimental {

deps/v8/include/v8-platform.h (6 lines changed)

@ -152,9 +152,9 @@ class Platform {
*/
virtual uint64_t AddTraceEvent(
char phase, const uint8_t* category_enabled_flag, const char* name,
uint64_t id, uint64_t bind_id, int32_t num_args, const char** arg_names,
const uint8_t* arg_types, const uint64_t* arg_values,
unsigned int flags) {
const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
const char** arg_names, const uint8_t* arg_types,
const uint64_t* arg_values, unsigned int flags) {
return 0;
}

deps/v8/include/v8-version.h (6 lines changed)

@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 5
#define V8_MINOR_VERSION 0
#define V8_BUILD_NUMBER 71
#define V8_PATCH_LEVEL 52
#define V8_MINOR_VERSION 1
#define V8_BUILD_NUMBER 281
#define V8_PATCH_LEVEL 69
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
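With these macros bumped, a rebuilt node should report the new version via
node -p process.versions.v8 (expected output: 5.1.281.69). An embedder can
also pin the expectation at compile time; a minimal sketch against the
macros above:

    #include "include/v8-version.h"

    // Compile-time guard against building with an unexpected bundled V8.
    static_assert(V8_MAJOR_VERSION == 5 && V8_MINOR_VERSION == 1 &&
                      V8_BUILD_NUMBER == 281 && V8_PATCH_LEVEL == 69,
                  "expected V8 5.1.281.69");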

deps/v8/include/v8.h (255 lines changed)

@ -18,6 +18,8 @@
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <utility>
#include <vector>
#include "v8-version.h" // NOLINT(build/include)
#include "v8config.h" // NOLINT(build/include)
@ -328,6 +330,8 @@ class Local {
template <class F1, class F2, class F3>
friend class PersistentValueMapBase;
template<class F1, class F2> friend class PersistentValueVector;
template <class F>
friend class ReturnValue;
explicit V8_INLINE Local(T* that) : val_(that) {}
V8_INLINE static Local<T> New(Isolate* isolate, T* that);
@ -593,6 +597,13 @@ template <class T> class PersistentBase {
// TODO(dcarney): remove this.
V8_INLINE void ClearWeak() { ClearWeak<void>(); }
/**
* Allows the embedder to tell the v8 garbage collector that a certain object
* is alive. Only allowed when the embedder is asked to trace its heap by
* EmbedderHeapTracer.
*/
V8_INLINE void RegisterExternalReference(Isolate* isolate);
/**
* Marks the reference to this object independent. Garbage collector is free
* to ignore any object groups containing this object. Weak callback for an
@ -2628,6 +2639,10 @@ enum AccessControl {
PROHIBITS_OVERWRITING = 1 << 2
};
/**
* Integrity level for objects.
*/
enum class IntegrityLevel { kFrozen, kSealed };
/**
* A JavaScript object (ECMA-262, 4.3.3)
@ -2819,6 +2834,11 @@ class V8_EXPORT Object : public Value {
*/
Local<String> GetConstructorName();
/**
* Sets the integrity level of the object.
*/
Maybe<bool> SetIntegrityLevel(Local<Context> context, IntegrityLevel level);
/** Gets the number of internal fields for this Object. */
int InternalFieldCount();
@ -3118,12 +3138,17 @@ class ReturnValue {
V8_INLINE void SetUndefined();
V8_INLINE void SetEmptyString();
// Convenience getter for Isolate
V8_INLINE Isolate* GetIsolate();
V8_INLINE Isolate* GetIsolate() const;
// Pointer setter: Uncompilable to prevent inadvertent misuse.
template <typename S>
V8_INLINE void Set(S* whatever);
// Getter. Creates a new Local<> so it comes with a certain performance
// hit. If the ReturnValue was not yet set, this will return the undefined
// value.
V8_INLINE Local<Value> Get() const;
private:
template<class F> friend class ReturnValue;
template<class F> friend class FunctionCallbackInfo;
@ -4886,7 +4911,6 @@ V8_INLINE Local<Primitive> Null(Isolate* isolate);
V8_INLINE Local<Boolean> True(Isolate* isolate);
V8_INLINE Local<Boolean> False(Isolate* isolate);
/**
* A set of constraints that specifies the limits of the runtime's memory use.
* You must set the heap size before initializing the VM - the size cannot be
@ -4895,6 +4919,9 @@ V8_INLINE Local<Boolean> False(Isolate* isolate);
* If you are using threads then you should hold the V8::Locker lock while
* setting the stack limit and you must set a non-default stack limit separately
* for each thread.
*
* The arguments for set_max_semi_space_size, set_max_old_space_size,
* set_max_executable_size, set_code_range_size specify limits in MB.
*/
class V8_EXPORT ResourceConstraints {
public:
@ -4913,17 +4940,23 @@ class V8_EXPORT ResourceConstraints {
uint64_t virtual_memory_limit);
int max_semi_space_size() const { return max_semi_space_size_; }
void set_max_semi_space_size(int value) { max_semi_space_size_ = value; }
void set_max_semi_space_size(int limit_in_mb) {
max_semi_space_size_ = limit_in_mb;
}
int max_old_space_size() const { return max_old_space_size_; }
void set_max_old_space_size(int value) { max_old_space_size_ = value; }
void set_max_old_space_size(int limit_in_mb) {
max_old_space_size_ = limit_in_mb;
}
int max_executable_size() const { return max_executable_size_; }
void set_max_executable_size(int value) { max_executable_size_ = value; }
void set_max_executable_size(int limit_in_mb) {
max_executable_size_ = limit_in_mb;
}
uint32_t* stack_limit() const { return stack_limit_; }
// Sets an address beyond which the VM's stack may not grow.
void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
size_t code_range_size() const { return code_range_size_; }
void set_code_range_size(size_t value) {
code_range_size_ = value;
void set_code_range_size(size_t limit_in_mb) {
code_range_size_ = limit_in_mb;
}
private:
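The renamed parameters only make the existing unit explicit: these limits
are all in MB. A hedged sketch of configuring an isolate with them (the
values are illustrative; error handling and the array-buffer allocator
setup are elided):

    #include "include/v8.h"

    // Illustrative limits, all in MB per the setters above.
    v8::Isolate* NewConstrainedIsolate() {
      v8::Isolate::CreateParams params;
      // A real embedder must also set params.array_buffer_allocator.
      params.constraints.set_max_semi_space_size(8);
      params.constraints.set_max_old_space_size(256);
      params.constraints.set_code_range_size(128);
      return v8::Isolate::New(params);
    }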
@ -5047,9 +5080,57 @@ class PromiseRejectMessage {
typedef void (*PromiseRejectCallback)(PromiseRejectMessage message);
// --- Microtask Callback ---
// --- Microtasks Callbacks ---
typedef void (*MicrotasksCompletedCallback)(Isolate*);
typedef void (*MicrotaskCallback)(void* data);
/**
* Policy for running microtasks:
* - explicit: microtasks are invoked with Isolate::RunMicrotasks() method;
* - scoped: microtasks invocation is controlled by MicrotasksScope objects;
* - auto: microtasks are invoked when the script call depth decrements
* to zero.
*/
enum class MicrotasksPolicy { kExplicit, kScoped, kAuto };
/**
* This scope is used to control microtasks when kScopeMicrotasksInvocation
* is used on Isolate. In this mode every non-primitive call to V8 should be
* done inside some MicrotasksScope.
* Microtasks are executed when topmost MicrotasksScope marked as kRunMicrotasks
* exits.
* kDoNotRunMicrotasks should be used to annotate calls not intended to trigger
* microtasks.
*/
class V8_EXPORT MicrotasksScope {
public:
enum Type { kRunMicrotasks, kDoNotRunMicrotasks };
MicrotasksScope(Isolate* isolate, Type type);
~MicrotasksScope();
/**
* Runs microtasks if no kRunMicrotasks scope is currently active.
*/
static void PerformCheckpoint(Isolate* isolate);
/**
* Returns current depth of nested kRunMicrotasks scopes.
*/
static int GetCurrentDepth(Isolate* isolate);
private:
internal::Isolate* const isolate_;
bool run_;
// Prevent copying.
MicrotasksScope(const MicrotasksScope&);
MicrotasksScope& operator=(const MicrotasksScope&);
};
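A sketch of the scoped mode in practice (the isolate, context, and function
setup is assumed; a real embedder would normally choose the policy once at
startup rather than per call):

    #include "include/v8.h"

    // With MicrotasksPolicy::kScoped, queued microtasks (e.g. promise
    // reactions) run when the outermost kRunMicrotasks scope exits.
    void CallWithCheckpoint(v8::Isolate* isolate,
                            v8::Local<v8::Context> context,
                            v8::Local<v8::Function> fn) {
      isolate->SetMicrotasksPolicy(v8::MicrotasksPolicy::kScoped);
      v8::MicrotasksScope scope(isolate, v8::MicrotasksScope::kRunMicrotasks);
      v8::MaybeLocal<v8::Value> result =
          fn->Call(context, context->Global(), 0, nullptr);
      (void)result;  // Error handling elided in this sketch.
    }  // Microtasks run here as the scope unwinds.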
// --- Failed Access Check Callback ---
typedef void (*FailedAccessCheckCallback)(Local<Object> target,
AccessType type,
@ -5121,6 +5202,7 @@ class V8_EXPORT HeapStatistics {
size_t total_available_size() { return total_available_size_; }
size_t used_heap_size() { return used_heap_size_; }
size_t heap_size_limit() { return heap_size_limit_; }
size_t malloced_memory() { return malloced_memory_; }
size_t does_zap_garbage() { return does_zap_garbage_; }
private:
@ -5130,6 +5212,7 @@ class V8_EXPORT HeapStatistics {
size_t total_available_size_;
size_t used_heap_size_;
size_t heap_size_limit_;
size_t malloced_memory_;
bool does_zap_garbage_;
friend class V8;
@ -5294,6 +5377,52 @@ class V8_EXPORT PersistentHandleVisitor { // NOLINT
uint16_t class_id) {}
};
/**
* Memory pressure level for the MemoryPressureNotification.
* kNone hints V8 that there is no memory pressure.
* kModerate hints V8 to speed up incremental garbage collection at the cost
* of higher latency due to garbage collection pauses.
* kCritical hints V8 to free memory as soon as possible. Garbage collection
* pauses at this level will be large.
*/
enum class MemoryPressureLevel { kNone, kModerate, kCritical };
/**
* Interface for tracing through the embedder heap. During the v8 garbage
* collection, v8 collects hidden fields of all potential wrappers, and at the
* end of its marking phase iterates the collection and asks the embedder to
* trace through its heap and call PersistentBase::RegisterExternalReference on
* each js object reachable from any of the given wrappers.
*
* Before the first call to the TraceWrappableFrom function v8 will call
* TraceRoots. When the v8 garbage collection is finished, v8 will call
* ClearTracingMarks.
*/
class EmbedderHeapTracer {
public:
/**
* V8 will call this method at the beginning of the gc cycle.
*/
virtual void TraceRoots(Isolate* isolate) = 0;
/**
* V8 will call this method with the internal fields of potential wrappers.
* Embedder is expected to trace its heap (synchronously) and call
* PersistentBase::RegisterExternalReference() on all wrappers reachable from
* any of the given wrappers.
*/
virtual void TraceWrappableFrom(
Isolate* isolate,
const std::vector<std::pair<void*, void*> >& internal_fields) = 0;
/**
* V8 will call this method at the end of the gc cycle. Allocation is *not*
* allowed in ClearTracingMarks.
*/
virtual void ClearTracingMarks(Isolate* isolate) = 0;
protected:
virtual ~EmbedderHeapTracer() = default;
};
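As a rough sketch (not part of the diff), the smallest conforming tracer is a no-op; a real embedder would walk its heap in TraceWrappableFrom and register every reachable wrapper:

  class NoopHeapTracer : public v8::EmbedderHeapTracer {
   public:
    void TraceRoots(v8::Isolate* isolate) override {}
    void TraceWrappableFrom(
        v8::Isolate* isolate,
        const std::vector<std::pair<void*, void*> >& internal_fields) override {
      // A real embedder traces its own heap here (synchronously) and calls
      // PersistentBase::RegisterExternalReference on each reachable wrapper.
    }
    void ClearTracingMarks(v8::Isolate* isolate) override {}  // must not allocate
  };
  // Registered via Isolate::SetEmbedderHeapTracer (added later in this diff).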
/**
* Isolate represents an isolated instance of the V8 engine. V8 isolates have
@ -5489,6 +5618,9 @@ class V8_EXPORT Isolate {
kArrayPrototypeConstructorModified = 26,
kArrayInstanceProtoModified = 27,
kArrayInstanceConstructorModified = 28,
kLegacyFunctionDeclaration = 29,
kRegExpPrototypeSourceGetter = 30,
kRegExpPrototypeOldFlagGetter = 31,
// If you add new values here, you'll also need to update V8Initializer.cpp
// in Chromium.
@ -5531,6 +5663,14 @@ class V8_EXPORT Isolate {
void SetAbortOnUncaughtExceptionCallback(
AbortOnUncaughtExceptionCallback callback);
/**
* Optional notification that the system is running low on memory.
* V8 uses these notifications to guide heuristics.
* It is allowed to call this function from another thread while
* the isolate is executing long-running JavaScript code.
*/
void MemoryPressureNotification(MemoryPressureLevel level);
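A one-line sketch, assuming the embedder forwards a low-memory signal from the OS:

  // Ask V8 to free memory as soon as possible; safe from another thread.
  isolate->MemoryPressureNotification(v8::MemoryPressureLevel::kCritical);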
/**
* Methods below this point require holding a lock (using Locker) in
* a multi-threaded environment.
@ -5752,6 +5892,11 @@ class V8_EXPORT Isolate {
*/
void RemoveGCPrologueCallback(GCCallback callback);
/**
* Sets the embedder heap tracer for the isolate.
*/
void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
/**
* Enables the host application to receive a notification after a
* garbage collection. Allocations are allowed in the callback function,
@ -5888,17 +6033,39 @@ class V8_EXPORT Isolate {
*/
void EnqueueMicrotask(MicrotaskCallback microtask, void* data = NULL);
/**
* Experimental: Controls how Microtasks are invoked. See MicrotasksPolicy
* for details.
*/
void SetMicrotasksPolicy(MicrotasksPolicy policy);
V8_DEPRECATE_SOON("Use SetMicrotasksPolicy",
void SetAutorunMicrotasks(bool autorun));
/**
* Experimental: Returns the policy controlling how Microtasks are invoked.
*/
MicrotasksPolicy GetMicrotasksPolicy() const;
V8_DEPRECATE_SOON("Use GetMicrotasksPolicy",
bool WillAutorunMicrotasks() const);
/**
* Experimental: adds a callback to notify the host application after
* microtasks were run. The callback is triggered by an explicit RunMicrotasks
* call or by automatic microtasks execution (see SetMicrotasksPolicy).
*
* The callback will trigger even if microtasks were attempted to run,
* but the microtasks queue was empty and no single microtask was actually
* executed.
*
* Executing scripts inside the callback will not re-trigger microtasks or
* the callback.
*/
void AddMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
/**
* Removes callback that was installed by AddMicrotasksCompletedCallback.
*/
void RemoveMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
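A small sketch of the callback pair; the observer function is hypothetical:

  void OnMicrotasksCompleted(v8::Isolate* isolate) {
    // Fires after every microtask checkpoint, even one that found the
    // queue empty; e.g. pump embedder-side queues here.
  }
  isolate->AddMicrotasksCompletedCallback(OnMicrotasksCompleted);
  // ...
  isolate->RemoveMicrotasksCompletedCallback(OnMicrotasksCompleted);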
/**
* Sets a callback for counting the number of times a feature of V8 is used.
@ -6195,11 +6362,23 @@ class V8_EXPORT V8 {
static void SetSnapshotDataBlob(StartupData* startup_blob);
/**
* Bootstrap an isolate and a context from scratch to create a startup
* snapshot. Include the side-effects of running the optional script.
* Returns { NULL, 0 } on failure.
* The caller acquires ownership of the data array in the return value.
*/
static StartupData CreateSnapshotDataBlob(const char* embedded_source = NULL);
/**
* Bootstrap an isolate and a context from the cold startup blob, run the
* warm-up script to trigger code compilation. The side effects are then
* discarded. The resulting startup snapshot will include compiled code.
* Returns { NULL, 0 } on failure.
* The caller acquires ownership of the data array in the return value.
* The passed-in startup blob remains untouched.
*/
static StartupData WarmUpSnapshotDataBlob(StartupData cold_startup_blob,
const char* warmup_source);
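Putting the two together, a hedged sketch of the cold-then-warm flow (script contents are placeholders; assumes V8 and the platform are already initialized):

  v8::StartupData cold =
      v8::V8::CreateSnapshotDataBlob("function hot() { return 6 * 7; }");
  v8::StartupData warm = v8::V8::WarmUpSnapshotDataBlob(cold, "hot();");
  delete[] cold.data;  // the caller owns both blobs; cold is untouched above
  // ... create isolates from `warm`, then delete[] warm.data ...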
/**
* Adds a message listener.
@ -6475,6 +6654,8 @@ class V8_EXPORT V8 {
static internal::Object** CopyPersistent(internal::Object** handle);
static void DisposeGlobal(internal::Object** global_handle);
typedef WeakCallbackData<Value, void>::Callback WeakCallback;
static void RegisterExternallyReferencedObject(internal::Object** object,
internal::Isolate* isolate);
static void MakeWeak(internal::Object** global_handle, void* data,
WeakCallback weak_callback);
static void MakeWeak(internal::Object** global_handle, void* data,
@ -7149,7 +7330,7 @@ class Internals {
1 * kApiPointerSize + kApiIntSize;
static const int kStringResourceOffset = 3 * kApiPointerSize;
static const int kOddballKindOffset = 5 * kApiPointerSize;
static const int kForeignAddressOffset = kApiPointerSize;
static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
@ -7168,11 +7349,12 @@ class Internals {
static const int kIsolateRootsOffset =
kAmountOfExternalAllocatedMemoryAtLastGlobalGCOffset + kApiInt64Size +
kApiPointerSize;
static const int kUndefinedValueRootIndex = 4;
static const int kTheHoleValueRootIndex = 5;
static const int kNullValueRootIndex = 6;
static const int kTrueValueRootIndex = 7;
static const int kFalseValueRootIndex = 8;
static const int kEmptyStringRootIndex = 9;
// The external allocation limit should be below 256 MB on all architectures
// so that resource-constrained embedders do not run low on memory.
@ -7188,7 +7370,7 @@ class Internals {
static const int kNodeIsPartiallyDependentShift = 4;
static const int kNodeIsActiveShift = 4;
static const int kJSObjectType = 0xb8;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
@ -7492,6 +7674,13 @@ P* PersistentBase<T>::ClearWeak() {
V8::ClearWeak(reinterpret_cast<internal::Object**>(this->val_)));
}
template <class T>
void PersistentBase<T>::RegisterExternalReference(Isolate* isolate) {
if (IsEmpty()) return;
V8::RegisterExternallyReferencedObject(
reinterpret_cast<internal::Object**>(this->val_),
reinterpret_cast<internal::Isolate*>(isolate));
}
template <class T>
void PersistentBase<T>::MarkIndependent() {
@ -7641,14 +7830,22 @@ void ReturnValue<T>::SetEmptyString() {
*value_ = *I::GetRoot(GetIsolate(), I::kEmptyStringRootIndex);
}
template <typename T>
Isolate* ReturnValue<T>::GetIsolate() const {
// Isolate is always the pointer below the default value on the stack.
return *reinterpret_cast<Isolate**>(&value_[-2]);
}
template <typename T>
Local<Value> ReturnValue<T>::Get() const {
typedef internal::Internals I;
if (*value_ == *I::GetRoot(GetIsolate(), I::kTheHoleValueRootIndex))
return Local<Value>(*Undefined(GetIsolate()));
return Local<Value>::New(GetIsolate(), reinterpret_cast<Value*>(value_));
}
template <typename T>
template <typename S>
void ReturnValue<T>::Set(S* whatever) {
// Uncompilable to prevent inadvertent misuse.
TYPE_CHECK(S*, Primitive);

8
deps/v8/include/v8config.h

@ -266,13 +266,7 @@
# define V8_HAS_BUILTIN_FRAME_ADDRESS (V8_GNUC_PREREQ(2, 96, 0))
# define V8_HAS_BUILTIN_POPCOUNT (V8_GNUC_PREREQ(3, 4, 0))
# if __cplusplus >= 201103L
# define V8_HAS_CXX11_ALIGNAS (V8_GNUC_PREREQ(4, 8, 0))
# define V8_HAS_CXX11_ALIGNOF (V8_GNUC_PREREQ(4, 8, 0))
# endif

7
deps/v8/infra/config/cq.cfg

@ -65,13 +65,14 @@ verifiers {
name: "v8_win_rel_ng_triggered"
triggered_by: "v8_win_rel_ng"
}
}
buckets {
name: "tryserver.v8"
builders {
name: "v8_linux_blink_rel"
experiment_percentage: 20
}
builders {
name: "v8_linux64_sanitizer_coverage_rel"
experiment_percentage: 100
}
}
}

3
deps/v8/src/DEPS

@ -21,7 +21,4 @@ specific_include_rules = {
"d8\.cc": [
"+include/libplatform/libplatform.h",
],
"api-experimental\.cc": [
"+src/compiler/fast-accessor-assembler.h",
],
}

3
deps/v8/src/accessors.cc

@ -32,6 +32,7 @@ Handle<AccessorInfo> Accessors::MakeAccessor(
info->set_all_can_read(false);
info->set_all_can_write(false);
info->set_is_special_data_property(true);
info->set_is_sloppy(false);
name = factory->InternalizeName(name);
info->set_name(*name);
Handle<Object> get = v8::FromCData(isolate, getter);
@ -817,7 +818,7 @@ void Accessors::FunctionLengthGetter(
} else {
// If the function isn't compiled yet, the length is not computed
// correctly yet. Compile it now and return the right length.
if (Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
length = function->shared()->length();
}
if (isolate->has_pending_exception()) {

31
deps/v8/src/api-arguments.cc

@ -0,0 +1,31 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/api-arguments.h"
namespace v8 {
namespace internal {
Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
Isolate* isolate = this->isolate();
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
FunctionCallbackInfo<v8::Value> info(begin(), argv_, argc_,
is_construct_call_);
f(info);
return GetReturnValue<Object>(isolate);
}
Handle<JSObject> PropertyCallbackArguments::Call(
IndexedPropertyEnumeratorCallback f) {
Isolate* isolate = this->isolate();
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<v8::Array> info(begin());
f(info);
return GetReturnValue<JSObject>(isolate);
}
} // namespace internal
} // namespace v8
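For orientation (not part of the patch): the FunctionCallback dispatched here is the ordinary embedder callback; a minimal one shows where the return value read back by GetReturnValue<Object>() comes from:

  void Answer(const v8::FunctionCallbackInfo<v8::Value>& info) {
    // Arguments are marshalled in by FunctionCallbackArguments::Call above;
    // the value set here is what GetReturnValue<Object>(isolate) extracts.
    info.GetReturnValue().Set(42);
  }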

254
deps/v8/src/api-arguments.h

@ -0,0 +1,254 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_API_ARGUMENTS_H_
#define V8_API_ARGUMENTS_H_
#include "src/api.h"
#include "src/isolate.h"
#include "src/tracing/trace-event.h"
#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
// Custom arguments replicate a small segment of stack that can be
// accessed through an Arguments object the same way the actual stack
// can.
template <int kArrayLength>
class CustomArgumentsBase : public Relocatable {
public:
virtual inline void IterateInstance(ObjectVisitor* v) {
v->VisitPointers(values_, values_ + kArrayLength);
}
protected:
inline Object** begin() { return values_; }
explicit inline CustomArgumentsBase(Isolate* isolate)
: Relocatable(isolate) {}
Object* values_[kArrayLength];
};
template <typename T>
class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
public:
static const int kReturnValueOffset = T::kReturnValueIndex;
typedef CustomArgumentsBase<T::kArgsLength> Super;
~CustomArguments() {
this->begin()[kReturnValueOffset] =
reinterpret_cast<Object*>(kHandleZapValue);
}
protected:
explicit inline CustomArguments(Isolate* isolate) : Super(isolate) {}
template <typename V>
Handle<V> GetReturnValue(Isolate* isolate);
inline Isolate* isolate() {
return reinterpret_cast<Isolate*>(this->begin()[T::kIsolateIndex]);
}
};
template <typename T>
template <typename V>
Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
// Check the ReturnValue.
Object** handle = &this->begin()[kReturnValueOffset];
// Nothing was set, return empty handle as per previous behaviour.
if ((*handle)->IsTheHole()) return Handle<V>();
Handle<V> result = Handle<V>::cast(Handle<Object>(handle));
result->VerifyApiCallResultType();
return result;
}
class PropertyCallbackArguments
: public CustomArguments<PropertyCallbackInfo<Value> > {
public:
typedef PropertyCallbackInfo<Value> T;
typedef CustomArguments<T> Super;
static const int kArgsLength = T::kArgsLength;
static const int kThisIndex = T::kThisIndex;
static const int kHolderIndex = T::kHolderIndex;
static const int kDataIndex = T::kDataIndex;
static const int kReturnValueDefaultValueIndex =
T::kReturnValueDefaultValueIndex;
static const int kIsolateIndex = T::kIsolateIndex;
static const int kShouldThrowOnErrorIndex = T::kShouldThrowOnErrorIndex;
PropertyCallbackArguments(Isolate* isolate, Object* data, Object* self,
JSObject* holder, Object::ShouldThrow should_throw)
: Super(isolate) {
Object** values = this->begin();
values[T::kThisIndex] = self;
values[T::kHolderIndex] = holder;
values[T::kDataIndex] = data;
values[T::kIsolateIndex] = reinterpret_cast<Object*>(isolate);
values[T::kShouldThrowOnErrorIndex] =
Smi::FromInt(should_throw == Object::THROW_ON_ERROR ? 1 : 0);
// Here the hole is set as the default value.
// It cannot escape into JS as it's removed in Call below.
values[T::kReturnValueDefaultValueIndex] =
isolate->heap()->the_hole_value();
values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
DCHECK(values[T::kHolderIndex]->IsHeapObject());
DCHECK(values[T::kIsolateIndex]->IsSmi());
}
/*
* The following Call functions wrap the calling of all callbacks to handle
* calling either the old or the new style callbacks depending on which one
* has been registered.
* For old callbacks which return an empty handle, the ReturnValue is checked
* and used if it's been set to anything inside the callback.
* New style callbacks always use the return value.
*/
Handle<JSObject> Call(IndexedPropertyEnumeratorCallback f);
#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(F) \
F(AccessorNameGetterCallback, "get", v8::Value, Object) \
F(GenericNamedPropertyQueryCallback, "has", v8::Integer, Object) \
F(GenericNamedPropertyDeleterCallback, "delete", v8::Boolean, Object)
#define WRITE_CALL_1_NAME(Function, type, ApiReturn, InternalReturn) \
Handle<InternalReturn> Call(Function f, Handle<Name> name) { \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
PropertyCallbackInfo<ApiReturn> info(begin()); \
LOG(isolate, \
ApiNamedPropertyAccess("interceptor-named-" type, holder(), *name)); \
f(v8::Utils::ToLocal(name), info); \
return GetReturnValue<InternalReturn>(isolate); \
}
FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(WRITE_CALL_1_NAME)
#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME
#undef WRITE_CALL_1_NAME
#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(F) \
F(IndexedPropertyGetterCallback, "get", v8::Value, Object) \
F(IndexedPropertyQueryCallback, "has", v8::Integer, Object) \
F(IndexedPropertyDeleterCallback, "delete", v8::Boolean, Object)
#define WRITE_CALL_1_INDEX(Function, type, ApiReturn, InternalReturn) \
Handle<InternalReturn> Call(Function f, uint32_t index) { \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
PropertyCallbackInfo<ApiReturn> info(begin()); \
LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-" type, \
holder(), index)); \
f(index, info); \
return GetReturnValue<InternalReturn>(isolate); \
}
FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(WRITE_CALL_1_INDEX)
#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX
#undef WRITE_CALL_1_INDEX
Handle<Object> Call(GenericNamedPropertySetterCallback f, Handle<Name> name,
Handle<Object> value) {
Isolate* isolate = this->isolate();
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<v8::Value> info(begin());
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
return GetReturnValue<Object>(isolate);
}
Handle<Object> Call(IndexedPropertySetterCallback f, uint32_t index,
Handle<Object> value) {
Isolate* isolate = this->isolate();
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<v8::Value> info(begin());
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-set", holder(), index));
f(index, v8::Utils::ToLocal(value), info);
return GetReturnValue<Object>(isolate);
}
void Call(AccessorNameSetterCallback f, Handle<Name> name,
Handle<Object> value) {
Isolate* isolate = this->isolate();
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<void> info(begin());
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
}
private:
inline JSObject* holder() {
return JSObject::cast(this->begin()[T::kHolderIndex]);
}
};
class FunctionCallbackArguments
: public CustomArguments<FunctionCallbackInfo<Value> > {
public:
typedef FunctionCallbackInfo<Value> T;
typedef CustomArguments<T> Super;
static const int kArgsLength = T::kArgsLength;
static const int kHolderIndex = T::kHolderIndex;
static const int kDataIndex = T::kDataIndex;
static const int kReturnValueDefaultValueIndex =
T::kReturnValueDefaultValueIndex;
static const int kIsolateIndex = T::kIsolateIndex;
static const int kCalleeIndex = T::kCalleeIndex;
static const int kContextSaveIndex = T::kContextSaveIndex;
FunctionCallbackArguments(internal::Isolate* isolate, internal::Object* data,
internal::HeapObject* callee,
internal::Object* holder, internal::Object** argv,
int argc, bool is_construct_call)
: Super(isolate),
argv_(argv),
argc_(argc),
is_construct_call_(is_construct_call) {
Object** values = begin();
values[T::kDataIndex] = data;
values[T::kCalleeIndex] = callee;
values[T::kHolderIndex] = holder;
values[T::kContextSaveIndex] = isolate->heap()->the_hole_value();
values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
// Here the hole is set as the default value.
// It cannot escape into JS as it's removed in Call below.
values[T::kReturnValueDefaultValueIndex] =
isolate->heap()->the_hole_value();
values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
DCHECK(values[T::kCalleeIndex]->IsJSFunction() ||
values[T::kCalleeIndex]->IsFunctionTemplateInfo());
DCHECK(values[T::kHolderIndex]->IsHeapObject());
DCHECK(values[T::kIsolateIndex]->IsSmi());
}
/*
* The following Call function wraps the calling of all callbacks to handle
* calling either the old or the new style callbacks depending on which one
* has been registered.
* For old callbacks which return an empty handle, the ReturnValue is checked
* and used if it's been set to anything inside the callback.
* New style callbacks always use the return value.
*/
Handle<Object> Call(FunctionCallback f);
private:
internal::Object** argv_;
int argc_;
bool is_construct_call_;
};
} // namespace internal
} // namespace v8
#endif // V8_API_ARGUMENTS_H_

15
deps/v8/src/api-experimental.cc

@ -11,20 +11,17 @@
#include "include/v8.h"
#include "include/v8-experimental.h"
#include "src/api.h"
#include "src/compiler/fast-accessor-assembler.h"
#include "src/fast-accessor-assembler.h"
namespace {
v8::internal::FastAccessorAssembler* FromApi(
v8::experimental::FastAccessorBuilder* builder) {
  return reinterpret_cast<v8::internal::FastAccessorAssembler*>(builder);
}
v8::experimental::FastAccessorBuilder* FromInternal(
    v8::internal::FastAccessorAssembler* fast_accessor_assembler) {
return reinterpret_cast<v8::experimental::FastAccessorBuilder*>(
fast_accessor_assembler);
}
@ -57,8 +54,8 @@ namespace experimental {
FastAccessorBuilder* FastAccessorBuilder::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
  internal::FastAccessorAssembler* faa =
      new internal::FastAccessorAssembler(i_isolate);
return FromInternal(faa);
}

74
deps/v8/src/api-natives.cc

@ -266,28 +266,45 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
return obj;
}
void CacheTemplateInstantiation(Isolate* isolate, uint32_t serial_number,
Handle<JSObject> object) {
auto cache = isolate->template_instantiations_cache();
  auto new_cache =
      UnseededNumberDictionary::AtNumberPut(cache, serial_number, object);
isolate->native_context()->set_template_instantiations_cache(*new_cache);
}
void UncacheTemplateInstantiation(Isolate* isolate, uint32_t serial_number) {
auto cache = isolate->template_instantiations_cache();
int entry = cache->FindEntry(serial_number);
DCHECK(entry != UnseededNumberDictionary::kNotFound);
Handle<Object> result =
UnseededNumberDictionary::DeleteProperty(cache, entry);
USE(result);
DCHECK(result->IsTrue());
auto new_cache = UnseededNumberDictionary::Shrink(cache, entry);
isolate->native_context()->set_template_instantiations_cache(*new_cache);
}
MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
Handle<ObjectTemplateInfo> info,
bool is_hidden_prototype) {
// Fast path.
Handle<JSObject> result;
uint32_t serial_number =
static_cast<uint32_t>(Smi::cast(info->serial_number())->value());
if (serial_number) {
// Probe cache.
auto cache = isolate->template_instantiations_cache();
int entry = cache->FindEntry(serial_number);
if (entry != UnseededNumberDictionary::kNotFound) {
Object* boilerplate = cache->ValueAt(entry);
result = handle(JSObject::cast(boilerplate), isolate);
return isolate->factory()->CopyJSObject(result);
}
}
// Enter a new scope. Recursion could otherwise create a lot of handles.
HandleScope scope(isolate);
auto constructor = handle(info->constructor(), isolate);
Handle<JSFunction> cons;
if (constructor->IsUndefined()) {
@ -297,18 +314,6 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, cons, InstantiateFunction(isolate, cons_templ), JSFunction);
}
auto object = isolate->factory()->NewJSObject(cons);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result,
@ -317,10 +322,9 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
// TODO(dcarney): is this necessary?
JSObject::MigrateSlowToFast(result, 0, "ApiNatives::InstantiateObject");
  if (serial_number) {
CacheTemplateInstantiation(isolate, serial_number, result);
    result = isolate->factory()->CopyJSObject(result);
}
return scope.CloseAndEscape(result);
}
@ -329,12 +333,14 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
Handle<FunctionTemplateInfo> data,
Handle<Name> name) {
  uint32_t serial_number =
      static_cast<uint32_t>(Smi::cast(data->serial_number())->value());
  if (serial_number) {
// Probe cache.
auto cache = isolate->template_instantiations_cache();
    int entry = cache->FindEntry(serial_number);
    if (entry != UnseededNumberDictionary::kNotFound) {
      Object* element = cache->ValueAt(entry);
return handle(JSFunction::cast(element), isolate);
}
}
@ -378,7 +384,7 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
if (!name.is_null() && name->IsString()) {
function->shared()->set_name(*name);
}
  if (serial_number) {
// Cache the function.
CacheTemplateInstantiation(isolate, serial_number, function);
}
@ -386,7 +392,7 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
ConfigureInstance(isolate, function, data, data->hidden_prototype());
if (result.is_null()) {
// Uncache on error.
    if (serial_number) {
UncacheTemplateInstantiation(isolate, serial_number);
}
return MaybeHandle<JSFunction>();
@ -536,7 +542,13 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
InstanceType type;
switch (instance_type) {
case JavaScriptObjectType:
if (!obj->needs_access_check() &&
obj->named_property_handler()->IsUndefined() &&
obj->indexed_property_handler()->IsUndefined()) {
type = JS_OBJECT_TYPE;
} else {
type = JS_SPECIAL_API_OBJECT_TYPE;
}
instance_size += JSObject::kHeaderSize;
break;
case GlobalObjectType:
@ -564,7 +576,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
result->shared()->set_instance_class_name(*class_name);
result->shared()->set_name(*class_name);
}
result->shared()->set_api_func_data(*obj);
result->shared()->set_construct_stub(*construct_stub);
result->shared()->DontAdaptArguments();
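The embedder-visible effect of the serial-number cache above, sketched under the assumption of a live isolate and entered context: instantiating the same template twice yields the cached object rather than a rebuilt one.

  v8::Local<v8::FunctionTemplate> tmpl = v8::FunctionTemplate::New(isolate);
  v8::Local<v8::Function> f1 = tmpl->GetFunction(context).ToLocalChecked();
  v8::Local<v8::Function> f2 = tmpl->GetFunction(context).ToLocalChecked();
  // f1 and f2 refer to the same cached function object.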

415
deps/v8/src/api.cc

@ -156,6 +156,18 @@ class InternalEscapableScope : public v8::EscapableHandleScope {
};
#ifdef DEBUG
void CheckMicrotasksScopesConsistency(i::Isolate* isolate) {
auto handle_scope_implementer = isolate->handle_scope_implementer();
if (handle_scope_implementer->microtasks_policy() ==
v8::MicrotasksPolicy::kScoped) {
DCHECK(handle_scope_implementer->GetMicrotasksScopeDepth() ||
!handle_scope_implementer->DebugMicrotasksScopeDepthIsZero());
}
}
#endif
class CallDepthScope {
public:
explicit CallDepthScope(i::Isolate* isolate, Local<Context> context,
@ -175,6 +187,9 @@ class CallDepthScope {
if (!context_.IsEmpty()) context_->Exit();
if (!escaped_) isolate_->handle_scope_implementer()->DecrementCallDepth();
if (do_callback_) isolate_->FireCallCompletedCallback();
#ifdef DEBUG
if (do_callback_) CheckMicrotasksScopesConsistency(isolate_);
#endif
}
void Escape() {
@ -226,7 +241,7 @@ void i::FatalProcessOutOfMemory(const char* location) {
// When V8 cannot allocate memory, FatalProcessOutOfMemory is called.
// The default fatal error handler is called and execution is stopped.
void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
i::Isolate* isolate = i::Isolate::Current();
char last_few_messages[Heap::kTraceRingBufferSize + 1];
char js_stacktrace[Heap::kStacktraceBufferSize + 1];
@ -288,9 +303,7 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
PrintF("\n<--- Last few GCs --->\n%s\n", first_newline);
PrintF("\n<--- JS stacktrace --->\n%s\n", js_stacktrace);
}
  Utils::ApiCheck(false, location, "Allocation failed - process out of memory");
// If the fatal error handler returns, we stop execution.
FATAL("API fatal error handler returned after process out of memory");
}
@ -328,12 +341,23 @@ void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) {
i::V8::SetSnapshotBlob(snapshot_blob);
}
namespace {
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
virtual void* Allocate(size_t length) {
void* data = AllocateUninitialized(length);
return data == NULL ? data : memset(data, 0, length);
}
virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
virtual void Free(void* data, size_t) { free(data); }
};
bool RunExtraCode(Isolate* isolate, Local<Context> context,
                  const char* utf8_source, const char* name) {
base::ElapsedTimer timer;
timer.Start();
Context::Scope context_scope(context);
TryCatch try_catch(isolate);
Local<String> source_string;
if (!String::NewFromUtf8(isolate, utf8_source, NewStringType::kNormal)
@ -341,7 +365,7 @@ bool RunExtraCode(Isolate* isolate, Local<Context> context,
return false;
}
Local<String> resource_name =
String::NewFromUtf8(isolate, "<embedded script>", NewStringType::kNormal)
String::NewFromUtf8(isolate, name, NewStringType::kNormal)
.ToLocalChecked();
ScriptOrigin origin(resource_name);
ScriptCompiler::Source source(source_string, origin);
@ -349,7 +373,7 @@ bool RunExtraCode(Isolate* isolate, Local<Context> context,
if (!ScriptCompiler::Compile(context, &source).ToLocal(&script)) return false;
if (script->Run(context).IsEmpty()) return false;
if (i::FLAG_profile_deserialization) {
i::PrintF("Executing custom snapshot script took %0.3f ms\n",
i::PrintF("Executing custom snapshot script %s took %0.3f ms\n", name,
timer.Elapsed().InMillisecondsF());
}
timer.Stop();
@ -357,92 +381,152 @@ bool RunExtraCode(Isolate* isolate, Local<Context> context,
return true;
}
StartupData SerializeIsolateAndContext(
    Isolate* isolate, Persistent<Context>* context,
    i::Snapshot::Metadata metadata,
    i::StartupSerializer::FunctionCodeHandling function_code_handling) {
  if (context->IsEmpty()) return {NULL, 0};
  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
  // If we don't do this then we end up with a stray root pointing at the
  // context even after we have disposed of the context.
  internal_isolate->heap()->CollectAllAvailableGarbage("mksnapshot");
  // GC may have cleared weak cells, so compact any WeakFixedArrays
  // found on the heap.
  i::HeapIterator iterator(internal_isolate->heap(),
                           i::HeapIterator::kFilterUnreachable);
  for (i::HeapObject* o = iterator.next(); o != NULL; o = iterator.next()) {
    if (o->IsPrototypeInfo()) {
      i::Object* prototype_users = i::PrototypeInfo::cast(o)->prototype_users();
      if (prototype_users->IsWeakFixedArray()) {
        i::WeakFixedArray* array = i::WeakFixedArray::cast(prototype_users);
        array->Compact<i::JSObject::PrototypeRegistryCompactionCallback>();
      }
    } else if (o->IsScript()) {
      i::Object* shared_list = i::Script::cast(o)->shared_function_infos();
      if (shared_list->IsWeakFixedArray()) {
        i::WeakFixedArray* array = i::WeakFixedArray::cast(shared_list);
        array->Compact<i::WeakFixedArray::NullCallback>();
      }
    }
  }
  i::Object* raw_context = *v8::Utils::OpenPersistent(*context);
  context->Reset();
  i::SnapshotByteSink snapshot_sink;
  i::StartupSerializer ser(internal_isolate, &snapshot_sink,
                           function_code_handling);
  ser.SerializeStrongReferences();
  i::SnapshotByteSink context_sink;
  i::PartialSerializer context_ser(internal_isolate, &ser, &context_sink);
  context_ser.Serialize(&raw_context);
  ser.SerializeWeakReferencesAndDeferred();
  return i::Snapshot::CreateSnapshotBlob(ser, context_ser, metadata);
}

}  // namespace

StartupData V8::CreateSnapshotDataBlob(const char* embedded_source) {
  // Create a new isolate and a new context from scratch, optionally run
  // a script to embed, and serialize to create a snapshot blob.
  StartupData result = {NULL, 0};
  base::ElapsedTimer timer;
  timer.Start();
  ArrayBufferAllocator allocator;
  i::Isolate* internal_isolate = new i::Isolate(true);
  internal_isolate->set_array_buffer_allocator(&allocator);
  Isolate* isolate = reinterpret_cast<Isolate*>(internal_isolate);
  {
    Isolate::Scope isolate_scope(isolate);
    internal_isolate->Init(NULL);
    Persistent<Context> context;
    {
      HandleScope handle_scope(isolate);
      Local<Context> new_context = Context::New(isolate);
      context.Reset(isolate, new_context);
      if (embedded_source != NULL &&
          !RunExtraCode(isolate, new_context, embedded_source, "<embedded>")) {
        context.Reset();
      }
    }
    i::Snapshot::Metadata metadata;
    metadata.set_embeds_script(embedded_source != NULL);
    result = SerializeIsolateAndContext(
        isolate, &context, metadata, i::StartupSerializer::CLEAR_FUNCTION_CODE);
    DCHECK(context.IsEmpty());
  }
  isolate->Dispose();
  if (i::FLAG_profile_deserialization) {
    i::PrintF("Creating snapshot took %0.3f ms\n",
              timer.Elapsed().InMillisecondsF());
  }
  timer.Stop();
  return result;
}

StartupData V8::WarmUpSnapshotDataBlob(StartupData cold_snapshot_blob,
                                       const char* warmup_source) {
  CHECK(cold_snapshot_blob.raw_size > 0 && cold_snapshot_blob.data != NULL);
  CHECK(warmup_source != NULL);
  // Use following steps to create a warmed up snapshot blob from a cold one:
  //  - Create a new isolate from the cold snapshot.
  //  - Create a new context to run the warmup script. This will trigger
  //    compilation of executed functions.
  //  - Create a new context. This context will be unpolluted.
  //  - Serialize the isolate and the second context into a new snapshot blob.
  StartupData result = {NULL, 0};
  base::ElapsedTimer timer;
  timer.Start();
  ArrayBufferAllocator allocator;
  i::Isolate* internal_isolate = new i::Isolate(true);
  internal_isolate->set_array_buffer_allocator(&allocator);
  internal_isolate->set_snapshot_blob(&cold_snapshot_blob);
  Isolate* isolate = reinterpret_cast<Isolate*>(internal_isolate);
  {
    Isolate::Scope isolate_scope(isolate);
    i::Snapshot::Initialize(internal_isolate);
    Persistent<Context> context;
    bool success;
    {
      HandleScope handle_scope(isolate);
      Local<Context> new_context = Context::New(isolate);
      success = RunExtraCode(isolate, new_context, warmup_source, "<warm-up>");
    }
    if (success) {
      HandleScope handle_scope(isolate);
      isolate->ContextDisposedNotification(false);
      Local<Context> new_context = Context::New(isolate);
      context.Reset(isolate, new_context);
    }
    i::Snapshot::Metadata metadata;
    metadata.set_embeds_script(i::Snapshot::EmbedsScript(internal_isolate));
    result = SerializeIsolateAndContext(
        isolate, &context, metadata, i::StartupSerializer::KEEP_FUNCTION_CODE);
    DCHECK(context.IsEmpty());
  }
  isolate->Dispose();
  if (i::FLAG_profile_deserialization) {
    i::PrintF("Warming up snapshot took %0.3f ms\n",
              timer.Elapsed().InMillisecondsF());
  }
  timer.Stop();
  return result;
}
@ -593,6 +677,10 @@ i::Object** V8::CopyPersistent(i::Object** obj) {
return result.location();
}
void V8::RegisterExternallyReferencedObject(i::Object** object,
i::Isolate* isolate) {
isolate->heap()->RegisterExternallyReferencedObject(object);
}
void V8::MakeWeak(i::Object** object, void* parameter,
WeakCallback weak_callback) {
@ -940,19 +1028,15 @@ void Template::Set(v8::Local<Name> name, v8::Local<Data> value,
ENTER_V8(isolate);
i::HandleScope scope(isolate);
auto value_obj = Utils::OpenHandle(*value);
if (value_obj->IsObjectTemplateInfo()) {
templ->set_serial_number(i::Smi::FromInt(0));
if (templ->IsFunctionTemplateInfo()) {
i::Handle<i::FunctionTemplateInfo>::cast(templ)->set_do_not_cache(true);
}
}
// TODO(dcarney): split api to allow values of v8::Value or v8::TemplateInfo.
i::ApiNatives::AddDataProperty(isolate, templ, Utils::OpenHandle(*name),
                                 value_obj,
static_cast<i::PropertyAttributes>(attribute));
}
@ -1772,7 +1856,7 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
if (!source->source_map_url.IsEmpty()) {
source_map_url = Utils::OpenHandle(*(source->source_map_url));
}
  result = i::Compiler::GetSharedFunctionInfoForScript(
str, name_obj, line_offset, column_offset, source->resource_options,
source_map_url, isolate->native_context(), NULL, &script_data, options,
i::NOT_NATIVES_CODE, is_module);
@ -1841,7 +1925,6 @@ Local<Script> ScriptCompiler::Compile(
MaybeLocal<Script> ScriptCompiler::CompileModule(Local<Context> context,
Source* source,
CompileOptions options) {
auto isolate = context->GetIsolate();
auto maybe = CompileUnboundInternal(isolate, source, options, true);
Local<UnboundScript> generic;
@ -2038,8 +2121,8 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
i::Handle<i::SharedFunctionInfo> result;
if (source->info->literal() != nullptr) {
// Parsing has succeeded.
    result = i::Compiler::GetSharedFunctionInfoForStreamedScript(
        script, source->info.get(), str->length());
}
has_pending_exception = result.is_null();
if (has_pending_exception) isolate->ReportPendingMessages();
@ -2213,7 +2296,7 @@ MaybeLocal<Value> v8::TryCatch::StackTrace(Local<Context> context) const {
if (!maybe.FromJust()) return v8::Local<Value>();
Local<Value> result;
has_pending_exception =
      !ToLocal<Value>(i::JSReceiver::GetProperty(obj, name), &result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
}
@ -2442,7 +2525,7 @@ Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
ENTER_V8(isolate);
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
auto self = Utils::OpenHandle(this);
  auto obj = i::JSReceiver::GetElement(isolate, self, index).ToHandleChecked();
auto jsobj = i::Handle<i::JSObject>::cast(obj);
return scope.Escape(Utils::StackFrameToLocal(jsobj));
}
@ -2482,7 +2565,7 @@ static int getIntProperty(const StackFrame* f, const char* propertyName,
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(f);
i::Handle<i::Object> obj =
      i::JSReceiver::GetProperty(isolate, self, propertyName).ToHandleChecked();
return obj->IsSmi() ? i::Smi::cast(*obj)->value() : defaultValue;
}
@ -2509,7 +2592,7 @@ static Local<String> getStringProperty(const StackFrame* f,
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(f);
i::Handle<i::Object> obj =
      i::JSReceiver::GetProperty(isolate, self, propertyName).ToHandleChecked();
return obj->IsString()
? scope.Escape(Local<String>::Cast(Utils::ToLocal(obj)))
: Local<String>();
@ -2537,7 +2620,7 @@ static bool getBoolProperty(const StackFrame* f, const char* propertyName) {
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(f);
i::Handle<i::Object> obj =
      i::JSReceiver::GetProperty(isolate, self, propertyName).ToHandleChecked();
return obj->IsTrue();
}
@ -3485,7 +3568,7 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
i::LookupIterator it = i::LookupIterator::PropertyOrElement(
      isolate, self, key_obj, self, i::LookupIterator::OWN);
Maybe<bool> result =
i::JSReceiver::CreateDataProperty(&it, value_obj, i::Object::DONT_THROW);
has_pending_exception = result.IsNothing();
@ -3502,7 +3585,7 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
  i::LookupIterator it(isolate, self, index, self, i::LookupIterator::OWN);
Maybe<bool> result =
i::JSReceiver::CreateDataProperty(&it, value_obj, i::Object::DONT_THROW);
has_pending_exception = result.IsNothing();
@ -3608,7 +3691,7 @@ Maybe<bool> v8::Object::SetPrivate(Local<Context> context, Local<Private> key,
i::Handle<i::Symbol>::cast(key_obj), &desc, i::Object::DONT_THROW);
}
auto js_object = i::Handle<i::JSObject>::cast(self);
  i::LookupIterator it(js_object, key_obj, js_object);
has_pending_exception = i::JSObject::DefineOwnPropertyIgnoreAttributes(
&it, value_obj, i::DONT_ENUM)
.is_null();
@ -3641,7 +3724,7 @@ MaybeLocal<Value> v8::Object::Get(Local<Context> context, uint32_t index) {
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
has_pending_exception =
      !i::JSReceiver::GetElement(isolate, self, index).ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(Utils::ToLocal(result));
}
@ -3768,11 +3851,10 @@ MaybeLocal<Array> v8::Object::GetPropertyNames(Local<Context> context) {
!i::JSReceiver::GetKeys(self, i::INCLUDE_PROTOS, i::ENUMERABLE_STRINGS)
.ToHandle(&value);
RETURN_ON_FAILED_EXECUTION(Array);
DCHECK(self->map()->EnumLength() == i::kInvalidEnumCacheSentinel ||
self->map()->EnumLength() == 0 ||
self->map()->instance_descriptors()->GetEnumCache() != *value);
auto result = isolate->factory()->NewJSArrayWithElements(value);
RETURN_ESCAPED(Utils::ToLocal(result));
}
@ -3791,11 +3873,10 @@ MaybeLocal<Array> v8::Object::GetOwnPropertyNames(Local<Context> context) {
!i::JSReceiver::GetKeys(self, i::OWN_ONLY, i::ENUMERABLE_STRINGS)
.ToHandle(&value);
RETURN_ON_FAILED_EXECUTION(Array);
DCHECK(self->map()->EnumLength() == i::kInvalidEnumCacheSentinel ||
self->map()->EnumLength() == 0 ||
self->map()->instance_descriptors()->GetEnumCache() != *value);
auto result = isolate->factory()->NewJSArrayWithElements(value);
RETURN_ESCAPED(Utils::ToLocal(result));
}
@ -3829,6 +3910,19 @@ Local<String> v8::Object::GetConstructorName() {
return Utils::ToLocal(name);
}
Maybe<bool> v8::Object::SetIntegrityLevel(Local<Context> context,
IntegrityLevel level) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::SetIntegrityLevel()",
bool);
auto self = Utils::OpenHandle(this);
i::JSReceiver::IntegrityLevel i_level =
level == IntegrityLevel::kFrozen ? i::FROZEN : i::SEALED;
Maybe<bool> result =
i::JSReceiver::SetIntegrityLevel(self, i_level, i::Object::DONT_THROW);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
}
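From the embedder side the new method mirrors Object.freeze()/Object.seal(); a hedged sketch, assuming live `context` and `obj` handles:

  v8::Maybe<bool> frozen =
      obj->SetIntegrityLevel(context, v8::IntegrityLevel::kFrozen);
  // frozen.FromJust() is true on success; kSealed selects seal semantics.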
Maybe<bool> v8::Object::Delete(Local<Context> context, Local<Value> key) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Delete()", bool);
@ -4159,7 +4253,7 @@ MaybeLocal<Value> v8::Object::GetRealNamedProperty(Local<Context> context,
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
i::LookupIterator it = i::LookupIterator::PropertyOrElement(
      isolate, self, key_obj, self,
i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
Local<Value> result;
has_pending_exception = !ToLocal<Value>(i::Object::GetProperty(&it), &result);
@ -4183,7 +4277,7 @@ Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
i::LookupIterator it = i::LookupIterator::PropertyOrElement(
      isolate, self, key_obj, self,
i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
auto result = i::JSReceiver::GetPropertyAttributes(&it);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(PropertyAttribute);
@ -5413,13 +5507,15 @@ bool v8::V8::Dispose() {
return true;
}
HeapStatistics::HeapStatistics()
: total_heap_size_(0),
total_heap_size_executable_(0),
total_physical_size_(0),
total_available_size_(0),
used_heap_size_(0),
heap_size_limit_(0),
malloced_memory_(0),
does_zap_garbage_(0) {}
HeapSpaceStatistics::HeapSpaceStatistics(): space_name_(0),
space_size_(0),
@ -6861,7 +6957,7 @@ static i::Handle<i::Symbol> SymbolFor(i::Isolate* isolate,
else
symbol = isolate->factory()->NewSymbol();
i::Handle<i::Symbol>::cast(symbol)->set_name(*name);
    i::Object::SetPropertyOrElement(symbols, name, symbol, i::STRICT).Assert();
}
return i::Handle<i::Symbol>::cast(symbol);
}
@ -7097,6 +7193,10 @@ void V8::AddGCEpilogueCallback(GCCallback callback, GCType gc_type) {
reinterpret_cast<v8::Isolate::GCCallback>(callback), gc_type, false);
}
void Isolate::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->heap()->SetEmbedderHeapTracer(tracer);
}
void Isolate::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
ObjectSpace space,
@ -7296,10 +7396,12 @@ Isolate::SuppressMicrotaskExecutionScope::SuppressMicrotaskExecutionScope(
Isolate* isolate)
: isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
isolate_->handle_scope_implementer()->IncrementCallDepth();
isolate_->handle_scope_implementer()->IncrementMicrotasksSuppressions();
}
Isolate::SuppressMicrotaskExecutionScope::~SuppressMicrotaskExecutionScope() {
isolate_->handle_scope_implementer()->DecrementMicrotasksSuppressions();
isolate_->handle_scope_implementer()->DecrementCallDepth();
}
@ -7314,6 +7416,8 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
heap_statistics->total_available_size_ = heap->Available();
heap_statistics->used_heap_size_ = heap->SizeOfObjects();
heap_statistics->heap_size_limit_ = heap->MaxReserved();
heap_statistics->malloced_memory_ =
isolate->allocator()->GetCurrentMemoryUsage();
heap_statistics->does_zap_garbage_ = heap->ShouldZapGarbage();
}
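A short sketch of reading the new counter (assuming a live isolate):

  v8::HeapStatistics stats;
  isolate->GetHeapStatistics(&stats);
  // malloced_memory() reports off-heap memory held by V8's internal
  // allocator, alongside the existing on-heap numbers.
  printf("used heap: %zu, malloced: %zu\n",
         stats.used_heap_size(), stats.malloced_memory());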
@ -7441,6 +7545,7 @@ void Isolate::SetPromiseRejectCallback(PromiseRejectCallback callback) {
void Isolate::RunMicrotasks() {
DCHECK(MicrotasksPolicy::kScoped != GetMicrotasksPolicy());
reinterpret_cast<i::Isolate*>(this)->RunMicrotasks();
}
@ -7464,12 +7569,41 @@ void Isolate::EnqueueMicrotask(MicrotaskCallback microtask, void* data) {
void Isolate::SetAutorunMicrotasks(bool autorun) {
SetMicrotasksPolicy(
autorun ? MicrotasksPolicy::kAuto : MicrotasksPolicy::kExplicit);
}
bool Isolate::WillAutorunMicrotasks() const {
return GetMicrotasksPolicy() == MicrotasksPolicy::kAuto;
}
void Isolate::SetMicrotasksPolicy(MicrotasksPolicy policy) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->handle_scope_implementer()->set_microtasks_policy(policy);
}
MicrotasksPolicy Isolate::GetMicrotasksPolicy() const {
i::Isolate* isolate =
reinterpret_cast<i::Isolate*>(const_cast<Isolate*>(this));
return isolate->handle_scope_implementer()->microtasks_policy();
}
void Isolate::AddMicrotasksCompletedCallback(
MicrotasksCompletedCallback callback) {
DCHECK(callback);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->AddMicrotasksCompletedCallback(callback);
}
void Isolate::RemoveMicrotasksCompletedCallback(
MicrotasksCompletedCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->RemoveMicrotasksCompletedCallback(callback);
}
@ -7548,6 +7682,11 @@ void Isolate::IsolateInBackgroundNotification() {
return isolate->heap()->SetOptimizeForMemoryUsage();
}
void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
return isolate->heap()->MemoryPressureNotification(level,
Locker::IsLocked(this));
}
void Isolate::SetJitCodeEventHandler(JitCodeEventOptions options,
JitCodeEventHandler event_handler) {
@ -7693,6 +7832,49 @@ void Isolate::VisitWeakHandles(PersistentHandleVisitor* visitor) {
}
MicrotasksScope::MicrotasksScope(Isolate* isolate, MicrotasksScope::Type type)
: isolate_(reinterpret_cast<i::Isolate*>(isolate)),
run_(type == MicrotasksScope::kRunMicrotasks) {
auto handle_scope_implementer = isolate_->handle_scope_implementer();
if (run_) handle_scope_implementer->IncrementMicrotasksScopeDepth();
#ifdef DEBUG
if (!run_) handle_scope_implementer->IncrementDebugMicrotasksScopeDepth();
#endif
}
MicrotasksScope::~MicrotasksScope() {
auto handle_scope_implementer = isolate_->handle_scope_implementer();
if (run_) {
handle_scope_implementer->DecrementMicrotasksScopeDepth();
if (MicrotasksPolicy::kScoped ==
handle_scope_implementer->microtasks_policy()) {
PerformCheckpoint(reinterpret_cast<Isolate*>(isolate_));
}
}
#ifdef DEBUG
if (!run_) handle_scope_implementer->DecrementDebugMicrotasksScopeDepth();
#endif
}
void MicrotasksScope::PerformCheckpoint(Isolate* v8Isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8Isolate);
if (IsExecutionTerminatingCheck(isolate)) return;
auto handle_scope_implementer = isolate->handle_scope_implementer();
if (!handle_scope_implementer->GetMicrotasksScopeDepth() &&
!handle_scope_implementer->HasMicrotasksSuppressions()) {
isolate->RunMicrotasks();
}
}
int MicrotasksScope::GetCurrentDepth(Isolate* v8Isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8Isolate);
return isolate->handle_scope_implementer()->GetMicrotasksScopeDepth();
}
String::Utf8Value::Utf8Value(v8::Local<v8::Value> obj)
: str_(NULL), length_(0) {
if (obj.IsEmpty()) return;
@ -7887,7 +8069,7 @@ MaybeLocal<Value> Debug::GetMirror(Local<Context> context,
RETURN_ON_FAILED_EXECUTION(Value);
i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global_object());
auto name = isolate->factory()->NewStringFromStaticChars("MakeMirror");
  auto fun_obj = i::JSReceiver::GetProperty(debug, name).ToHandleChecked();
auto v8_fun = Utils::CallableToLocal(i::Handle<i::JSFunction>::cast(fun_obj));
const int kArgc = 1;
v8::Local<v8::Value> argv[kArgc] = {obj};
@ -7932,6 +8114,15 @@ void Debug::SetLiveEditEnabled(Isolate* isolate, bool enable) {
internal_isolate->debug()->set_live_edit_enabled(enable);
}
bool Debug::IsTailCallEliminationEnabled(Isolate* isolate) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
return internal_isolate->is_tail_call_elimination_enabled();
}
void Debug::SetTailCallEliminationEnabled(Isolate* isolate, bool enabled) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
internal_isolate->SetTailCallEliminationEnabled(enabled);
}
MaybeLocal<Array> Debug::GetInternalProperties(Isolate* v8_isolate,
Local<Value> value) {

49
deps/v8/src/api.h

@ -452,6 +452,12 @@ class HandleScopeImplementer {
saved_contexts_(0),
spare_(NULL),
call_depth_(0),
microtasks_depth_(0),
microtasks_suppressions_(0),
#ifdef DEBUG
debug_microtasks_depth_(0),
#endif
microtasks_policy_(v8::MicrotasksPolicy::kAuto),
last_handle_before_deferred_block_(NULL) { }
~HandleScopeImplementer() {
@ -472,10 +478,36 @@ class HandleScopeImplementer {
inline internal::Object** GetSpareOrNewBlock();
inline void DeleteExtensions(internal::Object** prev_limit);
// Call depth represents nested v8 api calls.
inline void IncrementCallDepth() {call_depth_++;}
inline void DecrementCallDepth() {call_depth_--;}
inline bool CallDepthIsZero() { return call_depth_ == 0; }
// Microtasks scope depth represents nested scopes controlling microtasks
// invocation, which happens when depth reaches zero.
inline void IncrementMicrotasksScopeDepth() {microtasks_depth_++;}
inline void DecrementMicrotasksScopeDepth() {microtasks_depth_--;}
inline int GetMicrotasksScopeDepth() { return microtasks_depth_; }
// Possibly nested microtasks suppression scopes prevent microtasks
// from running.
inline void IncrementMicrotasksSuppressions() {microtasks_suppressions_++;}
inline void DecrementMicrotasksSuppressions() {microtasks_suppressions_--;}
inline bool HasMicrotasksSuppressions() { return !!microtasks_suppressions_; }
#ifdef DEBUG
// In debug we check that calls not intended to invoke microtasks are
// still correctly wrapped with microtask scopes.
inline void IncrementDebugMicrotasksScopeDepth() {debug_microtasks_depth_++;}
inline void DecrementDebugMicrotasksScopeDepth() {debug_microtasks_depth_--;}
inline bool DebugMicrotasksScopeDepthIsZero() {
return debug_microtasks_depth_ == 0;
}
#endif
inline void set_microtasks_policy(v8::MicrotasksPolicy policy);
inline v8::MicrotasksPolicy microtasks_policy() const;
inline void EnterContext(Handle<Context> context);
inline void LeaveContext();
inline bool LastEnteredContextWas(Handle<Context> context);
@ -532,6 +564,12 @@ class HandleScopeImplementer {
List<Context*> saved_contexts_;
Object** spare_;
int call_depth_;
int microtasks_depth_;
int microtasks_suppressions_;
#ifdef DEBUG
int debug_microtasks_depth_;
#endif
v8::MicrotasksPolicy microtasks_policy_;
Object** last_handle_before_deferred_block_;
// This is only used for threading support.
HandleScopeData handle_scope_data_;
@ -550,6 +588,17 @@ class HandleScopeImplementer {
const int kHandleBlockSize = v8::internal::KB - 2; // fit in one page
void HandleScopeImplementer::set_microtasks_policy(
v8::MicrotasksPolicy policy) {
microtasks_policy_ = policy;
}
v8::MicrotasksPolicy HandleScopeImplementer::microtasks_policy() const {
return microtasks_policy_;
}
void HandleScopeImplementer::SaveContext(Context* context) {
saved_contexts_.Add(context);
}
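The depth and suppression counters above back the new v8::MicrotasksScope embedder API. A minimal sketch of how nesting drives microtask execution, assuming an initialized isolate running under MicrotasksPolicy::kScoped:

#include "v8.h"

void NestedScopes(v8::Isolate* isolate) {
  v8::MicrotasksScope outer(isolate, v8::MicrotasksScope::kRunMicrotasks);
  {
    v8::MicrotasksScope inner(isolate, v8::MicrotasksScope::kRunMicrotasks);
    // MicrotasksScope::GetCurrentDepth(isolate) == 2 here; nothing runs
    // when `inner` closes.
  }
  // `outer` closing brings the depth back to zero, so pending microtasks run.
}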

85
deps/v8/src/arguments.cc

@ -4,93 +4,9 @@
#include "src/arguments.h"
#include "src/api.h"
#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
template <typename T>
template <typename V>
v8::Local<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
// Check the ReturnValue.
Object** handle = &this->begin()[kReturnValueOffset];
// Nothing was set; return an empty handle, as per previous behaviour.
if ((*handle)->IsTheHole()) return v8::Local<V>();
return Utils::Convert<Object, V>(Handle<Object>(handle));
}
v8::Local<v8::Value> FunctionCallbackArguments::Call(FunctionCallback f) {
Isolate* isolate = this->isolate();
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
FunctionCallbackInfo<v8::Value> info(begin(),
argv_,
argc_,
is_construct_call_);
f(info);
return GetReturnValue<v8::Value>(isolate);
}
#define WRITE_CALL_0(Function, ReturnValue) \
v8::Local<ReturnValue> PropertyCallbackArguments::Call(Function f) { \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
PropertyCallbackInfo<ReturnValue> info(begin()); \
f(info); \
return GetReturnValue<ReturnValue>(isolate); \
}
#define WRITE_CALL_1(Function, ReturnValue, Arg1) \
v8::Local<ReturnValue> PropertyCallbackArguments::Call(Function f, \
Arg1 arg1) { \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
PropertyCallbackInfo<ReturnValue> info(begin()); \
f(arg1, info); \
return GetReturnValue<ReturnValue>(isolate); \
}
#define WRITE_CALL_2(Function, ReturnValue, Arg1, Arg2) \
v8::Local<ReturnValue> PropertyCallbackArguments::Call( \
Function f, Arg1 arg1, Arg2 arg2) { \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
PropertyCallbackInfo<ReturnValue> info(begin()); \
f(arg1, arg2, info); \
return GetReturnValue<ReturnValue>(isolate); \
}
#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2) \
void PropertyCallbackArguments::Call(Function f, Arg1 arg1, Arg2 arg2) { \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
PropertyCallbackInfo<ReturnValue> info(begin()); \
f(arg1, arg2, info); \
}
FOR_EACH_CALLBACK_TABLE_MAPPING_0(WRITE_CALL_0)
FOR_EACH_CALLBACK_TABLE_MAPPING_1(WRITE_CALL_1)
FOR_EACH_CALLBACK_TABLE_MAPPING_2(WRITE_CALL_2)
FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID)
#undef WRITE_CALL_0
#undef WRITE_CALL_1
#undef WRITE_CALL_2
#undef WRITE_CALL_2_VOID
double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
// TODO(ulan): This clobbers only a subset of registers, depending on the
// compiler. Rewrite this in assembly to really clobber all registers.
@ -98,6 +14,5 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
return x1 * 1.01 + x2 * 2.02 + x3 * 3.03 + x4 * 4.04;
}
} // namespace internal
} // namespace v8
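For context, the hole-vs-set check in GetReturnValue above is what the embedder-side ReturnValue API feeds. An illustrative callback (not part of this patch):

#include "v8.h"

void Answer(const v8::FunctionCallbackInfo<v8::Value>& info) {
  // Set() overwrites the hole placed in the kReturnValueOffset slot;
  // leaving it unset makes FunctionCallbackArguments::Call return an
  // empty Local, as per the previous behaviour.
  info.GetReturnValue().Set(42);
}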

222
deps/v8/src/arguments.h

@ -6,7 +6,8 @@
#define V8_ARGUMENTS_H_
#include "src/allocation.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/tracing/trace-event.h"
namespace v8 {
namespace internal {
@ -70,217 +71,30 @@ class Arguments BASE_EMBEDDED {
Object** arguments_;
};
// For each type of callback we have a list of arguments; they are used to
// generate the Call() functions below.
// These aren't included in the list as they have duplicate signatures:
// F(GenericNamedPropertyEnumeratorCallback, ...)
// F(GenericNamedPropertyGetterCallback, ...)
#define FOR_EACH_CALLBACK_TABLE_MAPPING_0(F) \
F(IndexedPropertyEnumeratorCallback, v8::Array)
#define FOR_EACH_CALLBACK_TABLE_MAPPING_1(F) \
F(AccessorNameGetterCallback, v8::Value, v8::Local<v8::Name>) \
F(GenericNamedPropertyQueryCallback, v8::Integer, v8::Local<v8::Name>) \
F(GenericNamedPropertyDeleterCallback, v8::Boolean, v8::Local<v8::Name>) \
F(IndexedPropertyGetterCallback, v8::Value, uint32_t) \
F(IndexedPropertyQueryCallback, v8::Integer, uint32_t) \
F(IndexedPropertyDeleterCallback, v8::Boolean, uint32_t)
#define FOR_EACH_CALLBACK_TABLE_MAPPING_2(F) \
F(GenericNamedPropertySetterCallback, v8::Value, v8::Local<v8::Name>, \
v8::Local<v8::Value>) \
F(IndexedPropertySetterCallback, v8::Value, uint32_t, v8::Local<v8::Value>)
#define FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(F) \
F(AccessorNameSetterCallback, \
void, \
v8::Local<v8::Name>, \
v8::Local<v8::Value>) \
// Custom arguments replicate a small segment of stack that can be
// accessed through an Arguments object the same way the actual stack
// can.
template<int kArrayLength>
class CustomArgumentsBase : public Relocatable {
public:
virtual inline void IterateInstance(ObjectVisitor* v) {
v->VisitPointers(values_, values_ + kArrayLength);
}
protected:
inline Object** begin() { return values_; }
explicit inline CustomArgumentsBase(Isolate* isolate)
: Relocatable(isolate) {}
Object* values_[kArrayLength];
};
template<typename T>
class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
public:
static const int kReturnValueOffset = T::kReturnValueIndex;
typedef CustomArgumentsBase<T::kArgsLength> Super;
~CustomArguments() {
this->begin()[kReturnValueOffset] =
reinterpret_cast<Object*>(kHandleZapValue);
}
protected:
explicit inline CustomArguments(Isolate* isolate) : Super(isolate) {}
template <typename V>
v8::Local<V> GetReturnValue(Isolate* isolate);
inline Isolate* isolate() {
return reinterpret_cast<Isolate*>(this->begin()[T::kIsolateIndex]);
}
};
class PropertyCallbackArguments
: public CustomArguments<PropertyCallbackInfo<Value> > {
public:
typedef PropertyCallbackInfo<Value> T;
typedef CustomArguments<T> Super;
static const int kArgsLength = T::kArgsLength;
static const int kThisIndex = T::kThisIndex;
static const int kHolderIndex = T::kHolderIndex;
static const int kDataIndex = T::kDataIndex;
static const int kReturnValueDefaultValueIndex =
T::kReturnValueDefaultValueIndex;
static const int kIsolateIndex = T::kIsolateIndex;
static const int kShouldThrowOnErrorIndex = T::kShouldThrowOnErrorIndex;
PropertyCallbackArguments(Isolate* isolate, Object* data, Object* self,
JSObject* holder, Object::ShouldThrow should_throw)
: Super(isolate) {
Object** values = this->begin();
values[T::kThisIndex] = self;
values[T::kHolderIndex] = holder;
values[T::kDataIndex] = data;
values[T::kIsolateIndex] = reinterpret_cast<Object*>(isolate);
values[T::kShouldThrowOnErrorIndex] =
Smi::FromInt(should_throw == Object::THROW_ON_ERROR ? 1 : 0);
// Here the hole is set as the default value.
// It cannot escape into JS as it's removed in Call() below.
values[T::kReturnValueDefaultValueIndex] =
isolate->heap()->the_hole_value();
values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
DCHECK(values[T::kHolderIndex]->IsHeapObject());
DCHECK(values[T::kIsolateIndex]->IsSmi());
}
/*
* The following Call functions wrap the invocation of all callbacks and
* dispatch to either the old- or the new-style callback, depending on
* which one has been registered.
* For old callbacks that return an empty handle, the ReturnValue is
* checked and used if it was set to anything inside the callback.
* New-style callbacks always use the return value.
*/
#define WRITE_CALL_0(Function, ReturnValue) \
v8::Local<ReturnValue> Call(Function f);
#define WRITE_CALL_1(Function, ReturnValue, Arg1) \
v8::Local<ReturnValue> Call(Function f, Arg1 arg1);
#define WRITE_CALL_2(Function, ReturnValue, Arg1, Arg2) \
v8::Local<ReturnValue> Call(Function f, Arg1 arg1, Arg2 arg2);
#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2) \
void Call(Function f, Arg1 arg1, Arg2 arg2); \
FOR_EACH_CALLBACK_TABLE_MAPPING_0(WRITE_CALL_0)
FOR_EACH_CALLBACK_TABLE_MAPPING_1(WRITE_CALL_1)
FOR_EACH_CALLBACK_TABLE_MAPPING_2(WRITE_CALL_2)
FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID)
#undef WRITE_CALL_0
#undef WRITE_CALL_1
#undef WRITE_CALL_2
#undef WRITE_CALL_2_VOID
};
class FunctionCallbackArguments
: public CustomArguments<FunctionCallbackInfo<Value> > {
public:
typedef FunctionCallbackInfo<Value> T;
typedef CustomArguments<T> Super;
static const int kArgsLength = T::kArgsLength;
static const int kHolderIndex = T::kHolderIndex;
static const int kDataIndex = T::kDataIndex;
static const int kReturnValueDefaultValueIndex =
T::kReturnValueDefaultValueIndex;
static const int kIsolateIndex = T::kIsolateIndex;
static const int kCalleeIndex = T::kCalleeIndex;
static const int kContextSaveIndex = T::kContextSaveIndex;
FunctionCallbackArguments(internal::Isolate* isolate, internal::Object* data,
internal::HeapObject* callee,
internal::Object* holder, internal::Object** argv,
int argc, bool is_construct_call)
: Super(isolate),
argv_(argv),
argc_(argc),
is_construct_call_(is_construct_call) {
Object** values = begin();
values[T::kDataIndex] = data;
values[T::kCalleeIndex] = callee;
values[T::kHolderIndex] = holder;
values[T::kContextSaveIndex] = isolate->heap()->the_hole_value();
values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
// Here the hole is set as the default value.
// It cannot escape into JS as it's removed in Call() below.
values[T::kReturnValueDefaultValueIndex] =
isolate->heap()->the_hole_value();
values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
DCHECK(values[T::kCalleeIndex]->IsJSFunction() ||
values[T::kCalleeIndex]->IsFunctionTemplateInfo());
DCHECK(values[T::kHolderIndex]->IsHeapObject());
DCHECK(values[T::kIsolateIndex]->IsSmi());
}
/*
* The following Call function wraps the invocation of all callbacks and
* dispatches to either the old- or the new-style callback, depending on
* which one has been registered.
* For old callbacks that return an empty handle, the ReturnValue is
* checked and used if it was set to anything inside the callback.
* New-style callbacks always use the return value.
*/
v8::Local<v8::Value> Call(FunctionCallback f);
private:
internal::Object** argv_;
int argc_;
bool is_construct_call_;
};
double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
#ifdef DEBUG
#define CLOBBER_DOUBLE_REGISTERS() ClobberDoubleRegisters(1, 2, 3, 4);
#else
#define CLOBBER_DOUBLE_REGISTERS()
#endif
#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name) \
static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate)); \
Type Name(int args_length, Object** args_object, Isolate* isolate) { \
CLOBBER_DOUBLE_REGISTERS(); \
RuntimeCallStats* stats = isolate->counters()->runtime_call_stats(); \
RuntimeCallTimerScope timer(isolate, &stats->Name); \
Arguments args(args_length, args_object); \
Type value = __RT_impl_##Name(args, isolate); \
return value; \
} \
#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name) \
static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate)); \
Type Name(int args_length, Object** args_object, Isolate* isolate) { \
CLOBBER_DOUBLE_REGISTERS(); \
Type value; \
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), "V8." #Name); \
Arguments args(args_length, args_object); \
if (FLAG_runtime_call_stats) { \
RuntimeCallStats* stats = isolate->counters()->runtime_call_stats(); \
RuntimeCallTimerScope timer(isolate, &stats->Name); \
value = __RT_impl_##Name(args, isolate); \
} else { \
value = __RT_impl_##Name(args, isolate); \
} \
return value; \
} \
static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
#define RUNTIME_FUNCTION(Name) RUNTIME_FUNCTION_RETURNS_TYPE(Object*, Name)
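A hedged sketch of how the macro is consumed; Runtime_AddOne is an invented name (real runtime functions live in src/runtime/), and the expansion yields the traced, optionally timed wrapper above plus this body as __RT_impl_Runtime_AddOne:

RUNTIME_FUNCTION(Runtime_AddOne) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  CONVERT_SMI_ARG_CHECKED(x, 0);  // from src/runtime/runtime-utils.h
  return Smi::FromInt(x + 1);
}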

16
deps/v8/src/arm/assembler-arm-inl.h

@ -71,6 +71,10 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::wasm_memory_reference() {
DCHECK(IsWasmMemoryReference(rmode_));
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@ -114,6 +118,18 @@ void RelocInfo::set_target_address(Address target,
}
}
void RelocInfo::update_wasm_memory_reference(
Address old_base, Address new_base, size_t old_size, size_t new_size,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmMemoryReference(rmode_));
DCHECK(old_base <= wasm_memory_reference() &&
wasm_memory_reference() < old_base + old_size);
Address updated_reference = new_base + (wasm_memory_reference() - old_base);
DCHECK(new_base <= updated_reference &&
updated_reference < new_base + new_size);
Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
icache_flush_mode);
}
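The update is plain pointer translation: preserve the reference's offset into the wasm memory while swapping the base. A standalone model with invented addresses:

#include <cassert>
#include <cstdint>

uintptr_t Rebase(uintptr_t ref, uintptr_t old_base, uintptr_t new_base) {
  return new_base + (ref - old_base);  // keep the offset into the buffer
}

int main() {
  // A reference 0x40 bytes into the old memory lands 0x40 bytes into the
  // new one, exactly the arithmetic update_wasm_memory_reference performs.
  assert(Rebase(0x1040, 0x1000, 0x8000) == 0x8040);
  return 0;
}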
Object* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);

41
deps/v8/src/arm/assembler-arm.cc

@ -145,7 +145,10 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (cpu.implementer() == base::CPU::NVIDIA &&
cpu.variant() == base::CPU::NVIDIA_DENVER &&
cpu.part() <= base::CPU::NVIDIA_DENVER_V10) {
supported_ |= 1u << COHERENT_CACHE;
// TODO(jkummerow): This is turned off as an experiment to see if it
// affects crash rates. Keep an eye on crash reports and either remove
// coherent cache support permanently, or re-enable it!
// supported_ |= 1u << COHERENT_CACHE;
}
#endif
@ -1966,7 +1969,8 @@ void Assembler::mrs(Register dst, SRegister s, Condition cond) {
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
Condition cond) {
DCHECK(fields >= B16 && fields < B20); // at least one field set
DCHECK((fields & 0x000f0000) != 0); // At least one field must be set.
DCHECK(((fields & 0xfff0ffff) == CPSR) || ((fields & 0xfff0ffff) == SPSR));
Instr instr;
if (!src.rm_.is_valid()) {
// Immediate.
@ -2546,12 +2550,6 @@ void Assembler::vstm(BlockAddrMode am,
}
void Assembler::vmov(const SwVfpRegister dst, float imm) {
mov(ip, Operand(bit_cast<int32_t>(imm)));
vmov(dst, ip);
}
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
memcpy(&i, &d, 8);
@ -2563,7 +2561,7 @@ static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
// Only works for little-endian floating-point formats.
// We don't support VFP on mixed-endian floating-point platforms.
static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
static bool FitsVmovFPImmediate(double d, uint32_t* encoding) {
DCHECK(CpuFeatures::IsSupported(VFP3));
// VMOV can accept an immediate of the form:
@ -2592,12 +2590,12 @@ static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
return false;
}
// Bits 62:55 must be all clear or all set.
// Bits 61:54 must be all clear or all set.
if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
return false;
}
// Bit 63 must be NOT bit 62.
// Bit 62 must be NOT bit 61.
if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
return false;
}
@ -2612,6 +2610,25 @@ static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
}
void Assembler::vmov(const SwVfpRegister dst, float imm) {
uint32_t enc;
if (CpuFeatures::IsSupported(VFP3) && FitsVmovFPImmediate(imm, &enc)) {
// The float can be encoded in the instruction.
//
// Sd = immediate
// Instruction details available in ARM DDI 0406C.b, A8-936.
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
// Vd(15-12) | 101(11-9) | sz=0(8) | imm4L(3-0)
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | enc);
} else {
mov(ip, Operand(bit_cast<int32_t>(imm)));
vmov(dst, ip);
}
}
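Only a small family of values fits the VFP immediate form (sign bit, 4-bit exponent field, 4-bit fraction). A host-side mirror of the bit tests in FitsVmovFPImmediate, offered as a sketch rather than V8 code:

#include <cstdint>
#include <cstring>

bool FitsVfpImmediate(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint32_t lo = static_cast<uint32_t>(bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  if (lo != 0 || (hi & 0xffff) != 0) return false;  // bits 47:0 must be zero
  uint32_t mid = hi & 0x3fc00000;                   // bits 61:54
  if (mid != 0 && mid != 0x3fc00000) return false;  // all clear or all set
  return ((hi ^ (hi << 1)) & 0x40000000) != 0;      // bit 62 must differ from 61
}

For example, 1.0, -0.5 and 31.0 pass; 0.0 and 1e10 fall back to the mov/vmov pair.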
void Assembler::vmov(const DwVfpRegister dst,
double imm,
const Register scratch) {
@ -2622,7 +2639,7 @@ void Assembler::vmov(const DwVfpRegister dst,
// pointer (pp) is valid.
bool can_use_pool =
!FLAG_enable_embedded_constant_pool || is_constant_pool_available();
if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
if (CpuFeatures::IsSupported(VFP3) && FitsVmovFPImmediate(imm, &enc)) {
// The double can be encoded in the instruction.
//
// Dd = immediate

8
deps/v8/src/arm/assembler-arm.h

@ -1390,7 +1390,9 @@ class Assembler : public AssemblerBase {
// Emits the address of the code stub's first instruction.
void emit_code_stub_address(Code* stub);
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
AssemblerPositionsRecorder* positions_recorder() {
return &positions_recorder_;
}
// Read/patch instructions
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
@ -1637,8 +1639,8 @@ class Assembler : public AssemblerBase {
friend class RelocInfo;
friend class CodePatcher;
friend class BlockConstPoolScope;
PositionsRecorder positions_recorder_;
friend class PositionsRecorder;
AssemblerPositionsRecorder positions_recorder_;
friend class AssemblerPositionsRecorder;
friend class EnsureSpace;
};

147
deps/v8/src/arm/builtins-arm.cc

@ -531,6 +531,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- r1 : constructor function
// -- r2 : allocation site or undefined
// -- r3 : new target
// -- cp : context
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@ -543,6 +544,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(r2, r4);
__ Push(cp);
__ SmiTag(r0);
__ Push(r2, r0);
@ -622,7 +624,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r0: result
// sp[0]: receiver
// sp[1]: number of arguments (smi-tagged)
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
if (create_implicit_receiver) {
// If the result is an object (in the ECMA sense), we should get rid
@ -751,9 +753,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r5-r6, r8 (if !FLAG_enable_embedded_constant_pool) and cp may be clobbered
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Clear the context before we push it when entering the internal frame.
__ mov(cp, Operand::Zero());
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
@ -855,8 +854,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushFixedFrame(r1);
__ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
__ PushStandardFrame(r1);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeRegister.
@ -1192,8 +1190,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ ldm(ia_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
// Perform prologue operations usually performed by the young code stub.
__ PushFixedFrame(r1);
__ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
__ PushStandardFrame(r1);
// Jump to point after the code-age stub.
__ add(r0, r0, Operand(kNoCodeAgeSequenceLength));
@ -1430,24 +1427,6 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
// We check the stack limit as indicator that recompilation might be done.
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kStackGuard);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
__ bind(&ok);
__ Ret();
}
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
@ -1494,6 +1473,27 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
__ TailCallRuntime(Runtime::kThrowNotDateError);
}
// static
void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc
// -- sp[0] : first argument (left-hand side)
// -- sp[4] : receiver (right-hand side)
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ ldr(InstanceOfDescriptor::LeftRegister(),
MemOperand(fp, 2 * kPointerSize)); // Load left-hand side.
__ ldr(InstanceOfDescriptor::RightRegister(),
MemOperand(fp, 3 * kPointerSize)); // Load right-hand side.
InstanceOfStub stub(masm->isolate(), true);
__ CallStub(&stub);
}
// Pop the argument and the receiver.
__ Ret(2);
}
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@ -1933,19 +1933,21 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
Comment cmnt(masm, "[ PrepareForTailCall");
// Prepare for tail call only if the debugger is not active.
// Prepare for tail call only if ES2015 tail call elimination is enabled.
Label done;
ExternalReference debug_is_active =
ExternalReference::debug_is_active_address(masm->isolate());
__ mov(scratch1, Operand(debug_is_active));
ExternalReference is_tail_call_elimination_enabled =
ExternalReference::is_tail_call_elimination_enabled_address(
masm->isolate());
__ mov(scratch1, Operand(is_tail_call_elimination_enabled));
__ ldrb(scratch1, MemOperand(scratch1));
__ cmp(scratch1, Operand(0));
__ b(ne, &done);
__ b(eq, &done);
// Drop possible interpreter handler/stub frame.
{
Label no_interpreter_frame;
__ ldr(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
__ ldr(scratch3,
MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(scratch3, Operand(Smi::FromInt(StackFrame::STUB)));
__ b(ne, &no_interpreter_frame);
__ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@ -1953,73 +1955,37 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
}
// Check if next frame is an arguments adaptor frame.
Register caller_args_count_reg = scratch1;
Label no_arguments_adaptor, formal_parameter_count_loaded;
__ ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(scratch3,
MemOperand(scratch2, StandardFrameConstants::kContextOffset));
MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &no_arguments_adaptor);
// Drop arguments adaptor frame and load arguments count.
// Drop current frame and load arguments count from arguments adaptor frame.
__ mov(fp, scratch2);
__ ldr(scratch1,
__ ldr(caller_args_count_reg,
MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(scratch1);
__ SmiUntag(caller_args_count_reg);
__ b(&formal_parameter_count_loaded);
__ bind(&no_arguments_adaptor);
// Load caller's formal parameter count
__ ldr(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(scratch1,
FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
__ ldr(scratch1,
FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(caller_args_count_reg,
FieldMemOperand(scratch1,
SharedFunctionInfo::kFormalParameterCountOffset));
__ SmiUntag(scratch1);
__ SmiUntag(caller_args_count_reg);
__ bind(&formal_parameter_count_loaded);
// Calculate the end of destination area where we will put the arguments
// after we drop current frame. We add kPointerSize to count the receiver
// argument which is not included into formal parameters count.
Register dst_reg = scratch2;
__ add(dst_reg, fp, Operand(scratch1, LSL, kPointerSizeLog2));
__ add(dst_reg, dst_reg,
Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
Register src_reg = scratch1;
__ add(src_reg, sp, Operand(args_reg, LSL, kPointerSizeLog2));
// Count receiver argument as well (not included in args_reg).
__ add(src_reg, src_reg, Operand(kPointerSize));
if (FLAG_debug_code) {
__ cmp(src_reg, dst_reg);
__ Check(lo, kStackAccessBelowStackPointer);
}
// Restore caller's frame pointer and return address now as they will be
// overwritten by the copying loop.
__ ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
__ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Now copy callee arguments to the caller frame going backwards to avoid
// callee arguments corruption (source and destination areas could overlap).
// Both src_reg and dst_reg are pointing to the word after the one to copy,
// so they must be pre-decremented in the loop.
Register tmp_reg = scratch3;
Label loop, entry;
__ b(&entry);
__ bind(&loop);
__ ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
__ str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
__ bind(&entry);
__ cmp(sp, src_reg);
__ b(ne, &loop);
// Leave current frame.
__ mov(sp, dst_reg);
ParameterCount callee_args_count(args_reg);
__ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
scratch3);
__ bind(&done);
}
} // namespace
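The removed copy loop now lives behind MacroAssembler::PrepareForTailCall. Its core is a backwards overlapping copy, modelled here in host C++ (sketch only):

#include <algorithm>

// The callee's arguments (plus receiver) slide up over the dropped frame.
// The destination sits above the source on the downward-growing stack, so
// copying backwards keeps the overlap safe.
void SlideArguments(void** dst, void** src, int count) {
  std::copy_backward(src, src + count, dst + count);
}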
@ -2473,27 +2439,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected
__ bind(&too_few);
// If the function is strong we need to throw an error.
Label no_strong_error;
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
__ tst(r5, Operand(1 << (SharedFunctionInfo::kStrongModeFunction +
kSmiTagSize)));
__ b(eq, &no_strong_error);
// What we really care about is the required number of arguments.
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kLengthOffset));
__ cmp(r0, Operand::SmiUntag(r4));
__ b(ge, &no_strong_error);
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
__ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
}
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
ArgumentAdaptorStackCheck(masm, &stack_overflow);

248
deps/v8/src/arm/code-stubs-arm.cc

@ -4,9 +4,10 @@
#if V8_TARGET_ARCH_ARM
#include "src/code-stubs.h"
#include "src/api-arguments.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
@ -77,6 +78,10 @@ void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
@ -477,7 +482,9 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
}
// Fast negative check for internalized-to-internalized equality.
// Fast negative check for internalized-to-internalized equality or receiver
// equality. Also handles the undetectable receiver to null/undefined
// comparison.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
Register lhs, Register rhs,
Label* possible_strings,
@ -486,7 +493,7 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
(lhs.is(r1) && rhs.is(r0)));
// r2 is object type of rhs.
Label object_test, return_unequal, undetectable;
Label object_test, return_equal, return_unequal, undetectable;
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ tst(r2, Operand(kIsNotStringMask));
__ b(ne, &object_test);
@ -524,6 +531,16 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ bind(&undetectable);
__ tst(r5, Operand(1 << Map::kIsUndetectable));
__ b(eq, &return_unequal);
// If both sides are JSReceivers, then the result is false according to
// the HTML specification, which says that only comparisons with null or
// undefined are affected by special casing for document.all.
__ CompareInstanceType(r2, r2, ODDBALL_TYPE);
__ b(eq, &return_equal);
__ CompareInstanceType(r3, r3, ODDBALL_TYPE);
__ b(ne, &return_unequal);
__ bind(&return_equal);
__ mov(r0, Operand(EQUAL));
__ Ret();
}
@ -1049,9 +1066,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
if (result_size() > 2) {
DCHECK_EQ(3, result_size());
// Read result values stored on stack.
__ ldr(r2, MemOperand(r0, 2 * kPointerSize));
__ ldr(r1, MemOperand(r0, 1 * kPointerSize));
__ ldr(r0, MemOperand(r0, 0 * kPointerSize));
__ ldr(r2, MemOperand(sp, 2 * kPointerSize));
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
__ ldr(r0, MemOperand(sp, 0 * kPointerSize));
}
// Result returned in r0, r1:r0 or r2:r1:r0 - do not destroy these registers!
@ -1358,8 +1375,12 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
__ b(ne, &slow_case);
// Ensure that {function} has an instance prototype.
// Go to the runtime if the function is not a constructor.
__ ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
__ tst(scratch, Operand(1 << Map::kIsConstructor));
__ b(eq, &slow_case);
// Ensure that {function} has an instance prototype.
__ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
__ b(ne, &slow_case);
@ -1427,7 +1448,8 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
__ TailCallRuntime(Runtime::kInstanceOf);
__ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
: Runtime::kInstanceOf);
}
@ -1480,29 +1502,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
}
void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
// Return address is in lr.
Label slow;
Register receiver = LoadDescriptor::ReceiverRegister();
Register key = LoadDescriptor::NameRegister();
// Check that the key is an array index, that is Uint32.
__ NonNegativeSmiTst(key);
__ b(ne, &slow);
// Everything is fine, call runtime.
__ Push(receiver, key); // Receiver, key.
// Perform tail call to the entry.
__ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if the regexp entry in generated code is turned off by a runtime switch or
@ -2633,29 +2632,28 @@ void SubStringStub::Generate(MacroAssembler* masm) {
void ToNumberStub::Generate(MacroAssembler* masm) {
// The ToNumber stub takes one argument in r0.
Label not_smi;
__ JumpIfNotSmi(r0, &not_smi);
__ Ret();
__ bind(&not_smi);
STATIC_ASSERT(kSmiTag == 0);
__ tst(r0, Operand(kSmiTagMask));
__ Ret(eq);
__ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
// r0: receiver
// r1: receiver instance type
__ Ret(eq);
Label not_string, slow_string;
__ cmp(r1, Operand(FIRST_NONSTRING_TYPE));
__ b(hs, &not_string);
// Check if string has a cached array index.
__ ldr(r2, FieldMemOperand(r0, String::kHashFieldOffset));
__ tst(r2, Operand(String::kContainsCachedArrayIndexMask));
__ b(ne, &slow_string);
__ IndexFromHash(r2, r0);
__ Ret();
__ bind(&slow_string);
__ push(r0); // Push argument.
__ TailCallRuntime(Runtime::kStringToNumber);
__ bind(&not_string);
NonNumberToNumberStub stub(masm->isolate());
__ TailCallStub(&stub);
}
void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
// The NonNumberToNumber stub takes one argument in r0.
__ AssertNotNumber(r0);
__ CompareObjectType(r0, r1, r1, FIRST_NONSTRING_TYPE);
// r0: receiver
// r1: receiver instance type
StringToNumberStub stub(masm->isolate());
__ TailCallStub(&stub, lo);
Label not_oddball;
__ cmp(r1, Operand(ODDBALL_TYPE));
@ -2664,26 +2662,27 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ Ret();
__ bind(&not_oddball);
__ push(r0); // Push argument.
__ Push(r0); // Push argument.
__ TailCallRuntime(Runtime::kToNumber);
}
void StringToNumberStub::Generate(MacroAssembler* masm) {
// The StringToNumber stub takes one argument in r0.
__ AssertString(r0);
void ToLengthStub::Generate(MacroAssembler* masm) {
// The ToLength stub takes one argument in r0.
Label not_smi;
__ JumpIfNotSmi(r0, &not_smi);
STATIC_ASSERT(kSmiTag == 0);
__ tst(r0, r0);
__ mov(r0, Operand(0), LeaveCC, lt);
// Check if string has a cached array index.
Label runtime;
__ ldr(r2, FieldMemOperand(r0, String::kHashFieldOffset));
__ tst(r2, Operand(String::kContainsCachedArrayIndexMask));
__ b(ne, &runtime);
__ IndexFromHash(r2, r0);
__ Ret();
__ bind(&not_smi);
__ push(r0); // Push argument.
__ TailCallRuntime(Runtime::kToLength);
__ bind(&runtime);
__ Push(r0); // Push argument.
__ TailCallRuntime(Runtime::kStringToNumber);
}
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in r0.
Label is_number;
@ -2839,42 +2838,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
}
void StringCompareStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : left
// -- r0 : right
// -- lr : return address
// -----------------------------------
__ AssertString(r1);
__ AssertString(r0);
Label not_same;
__ cmp(r0, r1);
__ b(ne, &not_same);
__ mov(r0, Operand(Smi::FromInt(EQUAL)));
__ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r1,
r2);
__ Ret();
__ bind(&not_same);
// Check that both objects are sequential one-byte strings.
Label runtime;
__ JumpIfNotBothSequentialOneByteStrings(r1, r0, r2, r3, &runtime);
// Compare flat one-byte strings natively.
__ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2,
r3);
StringHelper::GenerateCompareFlatOneByteStrings(masm, r1, r0, r2, r3, r4, r5);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
__ Push(r1, r0);
__ TailCallRuntime(Runtime::kStringCompare);
}
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : left
@ -3168,10 +3131,17 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
// Handle more complex cases in runtime.
__ bind(&runtime);
__ Push(left, right);
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(left, right);
__ CallRuntime(Runtime::kStringEqual);
}
__ LoadRoot(r1, Heap::kTrueValueRootIndex);
__ sub(r0, r0, r1);
__ Ret();
} else {
__ Push(left, right);
__ TailCallRuntime(Runtime::kStringCompare);
}
@ -3710,7 +3680,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
__ ldr(r1, MemOperand(fp, parameter_count_offset));
if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ add(r1, r1, Operand(1));
@ -4703,7 +4673,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ bind(&loop);
__ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
__ bind(&loop_entry);
__ ldr(ip, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
__ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
__ cmp(ip, r1);
__ b(ne, &loop);
}
@ -4712,7 +4682,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// arguments adaptor frame below the function frame).
Label no_rest_parameters;
__ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
__ ldr(ip, MemOperand(r2, StandardFrameConstants::kContextOffset));
__ ldr(ip, MemOperand(r2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &no_rest_parameters);
@ -4851,7 +4821,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
__ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(r0, MemOperand(r4, StandardFrameConstants::kContextOffset));
__ ldr(r0, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &adaptor_frame);
@ -5050,7 +5020,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ bind(&loop);
__ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
__ bind(&loop_entry);
__ ldr(ip, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
__ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
__ cmp(ip, r1);
__ b(ne, &loop);
}
@ -5058,7 +5028,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
__ ldr(ip, MemOperand(r3, StandardFrameConstants::kContextOffset));
__ ldr(ip, MemOperand(r3, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &arguments_adaptor);
{
@ -5424,16 +5394,12 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ jmp(&leave_exit_frame);
}
static void CallApiFunctionStubHelper(MacroAssembler* masm,
const ParameterCount& argc,
bool return_first_arg,
bool call_data_undefined, bool is_lazy) {
void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : callee
// -- r4 : call_data
// -- r2 : holder
// -- r1 : api_function_address
// -- r3 : number of arguments if argc is a register
// -- cp : context
// --
// -- sp[0] : last argument
@ -5459,11 +5425,9 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
DCHECK(argc.is_immediate() || r3.is(argc.reg()));
// context save
__ push(context);
if (!is_lazy) {
if (!is_lazy()) {
// load context from callee
__ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
}
@ -5475,7 +5439,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
__ push(call_data);
Register scratch = call_data;
if (!call_data_undefined) {
if (!call_data_undefined()) {
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
}
// return value
@ -5504,29 +5468,15 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
__ add(r0, sp, Operand(1 * kPointerSize));
// FunctionCallbackInfo::implicit_args_
__ str(scratch, MemOperand(r0, 0 * kPointerSize));
if (argc.is_immediate()) {
// FunctionCallbackInfo::values_
__ add(ip, scratch,
Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
__ str(ip, MemOperand(r0, 1 * kPointerSize));
// FunctionCallbackInfo::length_ = argc
__ mov(ip, Operand(argc.immediate()));
__ str(ip, MemOperand(r0, 2 * kPointerSize));
// FunctionCallbackInfo::is_construct_call_ = 0
__ mov(ip, Operand::Zero());
__ str(ip, MemOperand(r0, 3 * kPointerSize));
} else {
// FunctionCallbackInfo::values_
__ add(ip, scratch, Operand(argc.reg(), LSL, kPointerSizeLog2));
__ add(ip, ip, Operand((FCA::kArgsLength - 1) * kPointerSize));
__ str(ip, MemOperand(r0, 1 * kPointerSize));
// FunctionCallbackInfo::length_ = argc
__ str(argc.reg(), MemOperand(r0, 2 * kPointerSize));
// FunctionCallbackInfo::is_construct_call_
__ add(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
__ mov(ip, Operand(argc.reg(), LSL, kPointerSizeLog2));
__ str(ip, MemOperand(r0, 3 * kPointerSize));
}
// FunctionCallbackInfo::values_
__ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
__ str(ip, MemOperand(r0, 1 * kPointerSize));
// FunctionCallbackInfo::length_ = argc
__ mov(ip, Operand(argc()));
__ str(ip, MemOperand(r0, 2 * kPointerSize));
// FunctionCallbackInfo::is_construct_call_ = 0
__ mov(ip, Operand::Zero());
__ str(ip, MemOperand(r0, 3 * kPointerSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());
@ -5536,7 +5486,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first JS argument
int return_value_offset = 0;
if (return_first_arg) {
if (is_store()) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
@ -5545,33 +5495,15 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
int stack_space = 0;
MemOperand is_construct_call_operand = MemOperand(sp, 4 * kPointerSize);
MemOperand* stack_space_operand = &is_construct_call_operand;
if (argc.is_immediate()) {
stack_space = argc.immediate() + FCA::kArgsLength + 1;
stack_space_operand = NULL;
}
stack_space = argc() + FCA::kArgsLength + 1;
stack_space_operand = NULL;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
stack_space_operand, return_value_operand,
&context_restore_operand);
}
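The four stack stores above fill a structure the C++ side reads back as FunctionCallbackInfo. A rough model of its shape (illustrative only; the field names follow v8.h's private members):

struct FunctionCallbackInfoModel {
  void** implicit_args;   // -> the FCA slots: holder ... context save
  void** values;          // -> last JS argument (arguments grow downwards)
  int length;             // argc()
  int is_construct_call;  // always 0 for CallApiCallbackStub
};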
void CallApiFunctionStub::Generate(MacroAssembler* masm) {
bool call_data_undefined = this->call_data_undefined();
CallApiFunctionStubHelper(masm, ParameterCount(r3), false,
call_data_undefined, false);
}
void CallApiAccessorStub::Generate(MacroAssembler* masm) {
bool is_store = this->is_store();
int argc = this->argc();
bool call_data_undefined = this->call_data_undefined();
bool is_lazy = this->is_lazy();
CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
call_data_undefined, is_lazy);
}
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- sp[0] : name

4
deps/v8/src/arm/codegen-arm.cc

@ -898,10 +898,8 @@ CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
young_sequence_.length() / Assembler::kInstrSize,
CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->PushFixedFrame(r1);
patcher->masm()->PushStandardFrame(r1);
patcher->masm()->nop(ip.code());
patcher->masm()->add(
fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}

2
deps/v8/src/arm/constants-arm.h

@ -654,7 +654,7 @@ class Instruction {
inline bool HasH() const { return HValue() == 1; }
inline bool HasLink() const { return LinkValue() == 1; }
// Decoding the double immediate in the vmov instruction.
// Decode the double immediate from a vmov instruction.
double DoubleImmedVmov() const;
// Instructions are read out of a code stream. The only way to get a

13
deps/v8/src/arm/deoptimizer-arm.cc

@ -103,12 +103,6 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
}
bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
// There is no dynamic alignment padding on ARM in the input frame.
return false;
}
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
@ -162,7 +156,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Allocate a new deoptimizer object.
// Pass four arguments in r0 to r3 and the fifth argument on the stack.
__ PrepareCallCFunction(6, r5);
__ mov(r0, Operand(0));
Label context_check;
__ ldr(r1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(r1, &context_check);
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ mov(r1, Operand(type())); // bailout type,
// r2: bailout id already loaded.
// r3: code address or 0 already loaded.
@ -235,6 +234,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
__ ldr(sp, MemOperand(r0, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;

30
deps/v8/src/arm/disasm-arm.cc

@ -604,6 +604,26 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
Print("s");
}
return 4;
} else if (format[1] == 'p') {
if (format[8] == '_') { // 'spec_reg_fields
DCHECK(STRING_STARTS_WITH(format, "spec_reg_fields"));
Print("_");
int mask = instr->Bits(19, 16);
if (mask == 0) Print("(none)");
if ((mask & 0x8) != 0) Print("f");
if ((mask & 0x4) != 0) Print("s");
if ((mask & 0x2) != 0) Print("x");
if ((mask & 0x1) != 0) Print("c");
return 15;
} else { // 'spec_reg
DCHECK(STRING_STARTS_WITH(format, "spec_reg"));
if (instr->Bit(22) == 0) {
Print("CPSR");
} else {
Print("SPSR");
}
return 8;
}
}
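Decoded masks print as suffixes on the special register, one letter per field bit. A standalone model of the mapping (not V8 code):

#include <cstdio>

void PrintSpecRegFields(int mask) {  // mask = instruction bits 19:16
  std::printf("_");
  if (mask == 0) std::printf("(none)");
  if (mask & 0x8) std::printf("f");
  if (mask & 0x4) std::printf("s");
  if (mask & 0x2) std::printf("x");
  if (mask & 0x1) std::printf("c");
}

int main() {
  PrintSpecRegFields(0x9);  // prints "_fc", as in "msr CPSR_fc, r0"
  return 0;
}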
// 's: S field of data processing instructions
if (instr->HasS()) {
@ -822,7 +842,13 @@ void Decoder::DecodeType01(Instruction* instr) {
return;
}
} else if ((type == 0) && instr->IsMiscType0()) {
if (instr->Bits(22, 21) == 1) {
if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 2) &&
(instr->Bits(15, 4) == 0xf00)) {
Format(instr, "msr'cond 'spec_reg'spec_reg_fields, 'rm");
} else if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 0) &&
(instr->Bits(11, 0) == 0)) {
Format(instr, "mrs'cond 'rd, 'spec_reg");
} else if (instr->Bits(22, 21) == 1) {
switch (instr->BitField(7, 4)) {
case BX:
Format(instr, "bx'cond 'rm");
@ -1404,7 +1430,7 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
if (instr->SzValue() == 0x1) {
Format(instr, "vmov'cond.f64 'Dd, 'd");
} else {
Unknown(instr); // Not used by V8.
Format(instr, "vmov'cond.f32 'Sd, 'd");
}
} else if (((instr->Opc2Value() == 0x6)) && instr->Opc3Value() == 0x3) {
// vrintz - round towards zero (truncate)

15
deps/v8/src/arm/frames-arm.h

@ -93,16 +93,11 @@ class EntryFrameConstants : public AllStatic {
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
class ExitFrameConstants : public AllStatic {
class ExitFrameConstants : public TypedFrameConstants {
public:
static const int kFrameSize =
FLAG_enable_embedded_constant_pool ? 3 * kPointerSize : 2 * kPointerSize;
static const int kConstantPoolOffset =
FLAG_enable_embedded_constant_pool ? -3 * kPointerSize : 0;
static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
DEFINE_TYPED_FRAME_SIZES(2);
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = 0 * kPointerSize;
@ -120,7 +115,7 @@ class JavaScriptFrameConstants : public AllStatic {
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kLastParameterOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;

72
deps/v8/src/arm/interface-descriptors-arm.cc

@ -111,35 +111,8 @@ void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
}
void ToNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
// static
const Register ToLengthDescriptor::ReceiverRegister() { return r0; }
// static
const Register ToStringDescriptor::ReceiverRegister() { return r0; }
// static
const Register ToNameDescriptor::ReceiverRegister() { return r0; }
// static
const Register ToObjectDescriptor::ReceiverRegister() { return r0; }
void NumberToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
const Register TypeConversionDescriptor::ArgumentRegister() { return r0; }
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@ -267,6 +240,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
void Allocate##Type##Descriptor::InitializePlatformSpecific( \
CallInterfaceDescriptorData* data) { \
data->InitializePlatformSpecific(0, nullptr, nullptr); \
}
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@ -311,24 +291,16 @@ void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CompareNilDescriptor::InitializePlatformSpecific(
void FastArrayPushDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (arg count)
Register registers[] = {r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ToBooleanDescriptor::InitializePlatformSpecific(
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r0};
Register registers[] = {r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@ -408,25 +380,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
&default_descriptor);
}
void ApiFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
r0, // callee
r4, // call_data
r2, // holder
r1, // api_function_address
r3, // actual number of arguments
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void ApiAccessorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptorBase::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);

371
deps/v8/src/arm/macro-assembler-arm.cc

@ -738,12 +738,12 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
str(scratch, MemOperand(ip));
// Call stub on end of buffer.
// Check for end of buffer.
tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
tst(scratch, Operand(StoreBuffer::kStoreBufferMask));
if (and_then == kFallThroughAtEnd) {
b(eq, &done);
b(ne, &done);
} else {
DCHECK(and_then == kReturnAtEnd);
Ret(eq);
Ret(ne);
}
push(lr);
StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
@ -755,20 +755,65 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
}
}
void MacroAssembler::PushFixedFrame(Register marker_reg) {
DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() |
(FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
fp.bit() | lr.bit());
void MacroAssembler::PushCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
if (FLAG_enable_embedded_constant_pool) {
if (marker_reg.code() > pp.code()) {
stm(db_w, sp, pp.bit() | fp.bit() | lr.bit());
add(fp, sp, Operand(kPointerSize));
Push(marker_reg);
} else {
stm(db_w, sp, marker_reg.bit() | pp.bit() | fp.bit() | lr.bit());
add(fp, sp, Operand(2 * kPointerSize));
}
} else {
if (marker_reg.code() > fp.code()) {
stm(db_w, sp, fp.bit() | lr.bit());
mov(fp, Operand(sp));
Push(marker_reg);
} else {
stm(db_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
add(fp, sp, Operand(kPointerSize));
}
}
} else {
stm(db_w, sp, (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
fp.bit() | lr.bit());
add(fp, sp, Operand(FLAG_enable_embedded_constant_pool ? kPointerSize : 0));
}
}
void MacroAssembler::PopCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
if (FLAG_enable_embedded_constant_pool) {
if (marker_reg.code() > pp.code()) {
pop(marker_reg);
ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
} else {
ldm(ia_w, sp, marker_reg.bit() | pp.bit() | fp.bit() | lr.bit());
}
} else {
if (marker_reg.code() > fp.code()) {
pop(marker_reg);
ldm(ia_w, sp, fp.bit() | lr.bit());
} else {
ldm(ia_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
}
}
} else {
ldm(ia_w, sp, (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
fp.bit() | lr.bit());
}
}
void MacroAssembler::PopFixedFrame(Register marker_reg) {
DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() |
void MacroAssembler::PushStandardFrame(Register function_reg) {
DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code());
stm(db_w, sp, (function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() |
(FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
fp.bit() | lr.bit());
int offset = -StandardFrameConstants::kContextOffset;
offset += function_reg.is_valid() ? kPointerSize : 0;
add(fp, sp, Operand(offset));
}
@ -1056,7 +1101,144 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
vmov(dst, VmovIndexLo, src);
}
}
void MacroAssembler::LslPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
DCHECK(!AreAliased(dst_high, src_low));
DCHECK(!AreAliased(dst_high, shift));
Label less_than_32;
Label done;
rsb(scratch, shift, Operand(32), SetCC);
b(gt, &less_than_32);
// If shift >= 32
and_(scratch, shift, Operand(0x1f));
lsl(dst_high, src_low, Operand(scratch));
mov(dst_low, Operand(0));
jmp(&done);
bind(&less_than_32);
// If shift < 32
lsl(dst_high, src_high, Operand(shift));
orr(dst_high, dst_high, Operand(src_low, LSR, scratch));
lsl(dst_low, src_low, Operand(shift));
bind(&done);
}
void MacroAssembler::LslPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK(!AreAliased(dst_high, src_low));
Label less_than_32;
Label done;
if (shift == 0) {
Move(dst_high, src_high);
Move(dst_low, src_low);
} else if (shift == 32) {
Move(dst_high, src_low);
Move(dst_low, Operand(0));
} else if (shift >= 32) {
shift &= 0x1f;
lsl(dst_high, src_low, Operand(shift));
mov(dst_low, Operand(0));
} else {
lsl(dst_high, src_high, Operand(shift));
orr(dst_high, dst_high, Operand(src_low, LSR, 32 - shift));
lsl(dst_low, src_low, Operand(shift));
}
}
void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
DCHECK(!AreAliased(dst_low, src_high));
DCHECK(!AreAliased(dst_low, shift));
Label less_than_32;
Label done;
rsb(scratch, shift, Operand(32), SetCC);
b(gt, &less_than_32);
// If shift >= 32
and_(scratch, shift, Operand(0x1f));
lsr(dst_low, src_high, Operand(scratch));
mov(dst_high, Operand(0));
jmp(&done);
bind(&less_than_32);
// If shift < 32
lsr(dst_low, src_low, Operand(shift));
orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
lsr(dst_high, src_high, Operand(shift));
bind(&done);
}
void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK(!AreAliased(dst_low, src_high));
Label less_than_32;
Label done;
if (shift == 32) {
mov(dst_low, src_high);
mov(dst_high, Operand(0));
} else if (shift > 32) {
shift &= 0x1f;
lsr(dst_low, src_high, Operand(shift));
mov(dst_high, Operand(0));
} else if (shift == 0) {
Move(dst_low, src_low);
Move(dst_high, src_high);
} else {
lsr(dst_low, src_low, Operand(shift));
orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
lsr(dst_high, src_high, Operand(shift));
}
}
void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
DCHECK(!AreAliased(dst_low, src_high));
DCHECK(!AreAliased(dst_low, shift));
Label less_than_32;
Label done;
rsb(scratch, shift, Operand(32), SetCC);
b(gt, &less_than_32);
// If shift >= 32
and_(scratch, shift, Operand(0x1f));
asr(dst_low, src_high, Operand(scratch));
asr(dst_high, src_high, Operand(31));
jmp(&done);
bind(&less_than_32);
// If shift < 32
lsr(dst_low, src_low, Operand(shift));
orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
asr(dst_high, src_high, Operand(shift));
bind(&done);
}
void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
uint32_t shift) {
DCHECK(!AreAliased(dst_low, src_high));
Label less_than_32;
Label done;
if (shift == 32) {
mov(dst_low, src_high);
asr(dst_high, src_high, Operand(31));
} else if (shift > 32) {
shift &= 0x1f;
asr(dst_low, src_high, Operand(shift));
asr(dst_high, src_high, Operand(31));
} else if (shift == 0) {
Move(dst_low, src_low);
Move(dst_high, src_high);
} else {
lsr(dst_low, src_low, Operand(shift));
orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
asr(dst_high, src_high, Operand(shift));
}
}
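Taken together, the immediate variants implement 64-bit shifts on a value split across a (low, high) register pair, with the same shift == 0 / >= 32 / < 32 split in each. For reference, their intended semantics in portable C++ (a sketch assuming arithmetic right shift for signed values; not code from this commit):

#include <cstdint>

struct Pair { uint32_t lo, hi; };  // one 64-bit value in two 32-bit registers

Pair LslPair(Pair v, uint32_t shift) {
  if (shift == 0) return v;
  if (shift >= 32) return {0, v.lo << (shift & 0x1f)};  // low half crosses over
  return {v.lo << shift, (v.hi << shift) | (v.lo >> (32 - shift))};
}

Pair LsrPair(Pair v, uint32_t shift) {
  if (shift == 0) return v;
  if (shift >= 32) return {v.hi >> (shift & 0x1f), 0};
  return {(v.lo >> shift) | (v.hi << (32 - shift)), v.hi >> shift};
}

Pair AsrPair(Pair v, uint32_t shift) {
  int32_t hi = static_cast<int32_t>(v.hi);
  uint32_t sign = static_cast<uint32_t>(hi >> 31);  // all ones when negative
  if (shift == 0) return v;
  if (shift >= 32) return {static_cast<uint32_t>(hi >> (shift & 0x1f)), sign};
  return {(v.lo >> shift) | (v.hi << (32 - shift)),
          static_cast<uint32_t>(hi >> shift)};
}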
void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address) {
@ -1074,19 +1256,15 @@ void MacroAssembler::LoadConstantPoolPointerRegister() {
LoadConstantPoolPointerRegisterFromCodeTargetAddress(ip);
}
void MacroAssembler::StubPrologue() {
PushFixedFrame();
Push(Smi::FromInt(StackFrame::STUB));
// Adjust FP to point to saved FP.
add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
void MacroAssembler::StubPrologue(StackFrame::Type type) {
mov(ip, Operand(Smi::FromInt(type)));
PushCommonFrame(ip);
if (FLAG_enable_embedded_constant_pool) {
LoadConstantPoolPointerRegister();
set_constant_pool_available(true);
}
}
void MacroAssembler::Prologue(bool code_pre_aging) {
{ PredictableCodeSizeScope predictible_code_size_scope(
this, kNoCodeAgeSequenceLength);
@ -1099,10 +1277,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
ldr(pc, MemOperand(pc, -4));
emit_code_stub_address(stub);
} else {
PushFixedFrame(r1);
PushStandardFrame(r1);
nop(ip.code());
// Adjust FP to point to saved FP.
add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
}
if (FLAG_enable_embedded_constant_pool) {
@ -1123,17 +1299,15 @@ void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// r0-r3: preserved
PushFixedFrame();
mov(ip, Operand(Smi::FromInt(type)));
PushCommonFrame(ip);
if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
LoadConstantPoolPointerRegister();
}
mov(ip, Operand(Smi::FromInt(type)));
push(ip);
mov(ip, Operand(CodeObject()));
push(ip);
// Adjust FP to point to saved FP.
add(fp, sp,
Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
if (type == StackFrame::INTERNAL) {
mov(ip, Operand(CodeObject()));
push(ip);
}
}
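The pattern behind these frame changes: the slot that used to hold the context now holds either a context (a tagged heap pointer) or a smi frame-type marker, only INTERNAL frames still push the code object, and a stack walker can recognise typed frames such as STUB from the tag bit of that single slot. A toy illustration, assuming V8's smi tagging (low bit 0 for smis):

#include <cstdint>

// Hypothetical slot test: smi => frame-type marker (typed frame),
// heap pointer (low bit set) => a real context, i.e. a standard frame.
inline bool IsTypedFrameMarker(uintptr_t context_or_marker_slot) {
  return (context_or_marker_slot & 1) == 0;
}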
@ -1164,10 +1338,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
Push(lr, fp);
mov(fp, Operand(sp)); // Set up new frame pointer.
mov(ip, Operand(Smi::FromInt(StackFrame::EXIT)));
PushCommonFrame(ip);
// Reserve room for saved entry sp and code object.
sub(sp, sp, Operand(ExitFrameConstants::kFrameSize));
sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
if (emit_debug_code()) {
mov(ip, Operand::Zero());
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
@ -1249,7 +1423,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
// Optionally restore all double registers.
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
const int offset = ExitFrameConstants::kFrameSize;
const int offset = ExitFrameConstants::kFixedFrameSizeFromFp;
sub(r3, fp,
Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
RestoreFPRegs(r3, ip);
@ -1300,6 +1474,64 @@ void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
MovFromFloatResult(dst);
}
void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg,
Register scratch0, Register scratch1) {
#if DEBUG
if (callee_args_count.is_reg()) {
DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
scratch1));
} else {
DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
}
#endif
// Calculate the end of destination area where we will put the arguments
// after we drop current frame. We add kPointerSize to count the receiver
// argument which is not included into formal parameters count.
Register dst_reg = scratch0;
add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
add(dst_reg, dst_reg,
Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
Register src_reg = caller_args_count_reg;
// Calculate the end of source area. +kPointerSize is for the receiver.
if (callee_args_count.is_reg()) {
add(src_reg, sp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
add(src_reg, src_reg, Operand(kPointerSize));
} else {
add(src_reg, sp,
Operand((callee_args_count.immediate() + 1) * kPointerSize));
}
if (FLAG_debug_code) {
cmp(src_reg, dst_reg);
Check(lo, kStackAccessBelowStackPointer);
}
// Restore caller's frame pointer and return address now as they will be
// overwritten by the copying loop.
ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Now copy callee arguments to the caller frame going backwards to avoid
// callee arguments corruption (source and destination areas could overlap).
// Both src_reg and dst_reg are pointing to the word after the one to copy,
// so they must be pre-decremented in the loop.
Register tmp_reg = scratch1;
Label loop, entry;
b(&entry);
bind(&loop);
ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
bind(&entry);
cmp(sp, src_reg);
b(ne, &loop);
// Leave current frame.
mov(sp, dst_reg);
}
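In plain terms, the loop above slides the callee's arguments (plus receiver) up over the current frame so that the tail-called function reuses the caller's stack slots, copying backwards because the source and destination ranges may overlap. A stand-alone sketch with invented names:

#include <cstddef>

// sp points at the lowest live slot; dst_end points one past the highest
// destination slot, computed from the caller's argument count.
void** SlideArgumentsForTailCall(void** sp, size_t callee_argc,
                                 void** dst_end) {
  void** src = sp + callee_argc + 1;   // +1 counts the receiver
  void** dst = dst_end;
  while (src != sp) *--dst = *--src;   // pre-decrement copy handles overlap
  return dst;                          // becomes the new stack pointer
}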
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
@ -1578,8 +1810,19 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
DCHECK(!holder_reg.is(ip));
DCHECK(!scratch.is(ip));
// Load current lexical context from the stack frame.
ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Load current lexical context from the active StandardFrame, which
// may require crawling past STUB frames.
Label load_context;
Label has_context;
DCHECK(!ip.is(scratch));
mov(ip, fp);
bind(&load_context);
ldr(scratch, MemOperand(ip, CommonFrameConstants::kContextOrFrameTypeOffset));
JumpIfNotSmi(scratch, &has_context);
ldr(ip, MemOperand(ip, CommonFrameConstants::kCallerFPOffset));
b(&load_context);
bind(&has_context);
// In debug mode, make sure the lexical context is set.
#ifdef DEBUG
cmp(scratch, Operand::Zero());
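The new loop crawls caller frame pointers until it reaches a frame whose marker slot is not a smi, i.e. holds an actual context. The same walk in stand-alone C++ (a sketch; the real offsets come from CommonFrameConstants, abstracted here into a struct):

#include <cstdint>

struct Frame {
  Frame* caller_fp;              // at kCallerFPOffset
  uintptr_t context_or_marker;   // at kContextOrFrameTypeOffset
};

uintptr_t FindLexicalContext(const Frame* fp) {
  while ((fp->context_or_marker & 1) == 0)  // smi marker: a STUB-style frame
    fp = fp->caller_fp;                     // keep crawling toward the caller
  return fp->context_or_marker;             // tagged context pointer
}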
@ -2803,6 +3046,17 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1,
b(eq, on_either_smi);
}
void MacroAssembler::AssertNotNumber(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, kOperandIsANumber);
push(object);
CompareObjectType(object, object, object, HEAP_NUMBER_TYPE);
pop(object);
Check(ne, kOperandIsANumber);
}
}
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
@ -3510,29 +3764,46 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
b(ne, &next);
}
void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
Register scratch_reg,
Label* no_memento_found) {
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
Label map_check;
Label top_check;
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
add(scratch_reg, receiver_reg,
Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
cmp(scratch_reg, Operand(new_space_start));
b(lt, no_memento_found);
mov(ip, Operand(new_space_allocation_top));
ldr(ip, MemOperand(ip));
cmp(scratch_reg, ip);
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
// Bail out if the object is not in new space.
JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
eor(scratch_reg, scratch_reg, Operand(new_space_allocation_top));
tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
b(eq, &top_check);
// The object is on a different page than allocation top. Bail out if the
// object sits on the page boundary as no memento can follow and we cannot
// touch the memory following it.
add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
eor(scratch_reg, scratch_reg, Operand(receiver_reg));
tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
b(ne, no_memento_found);
// Continue with the actual map check.
jmp(&map_check);
// If top is on the same page as the current object, we need to check whether
// we are below top.
bind(&top_check);
add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
cmp(scratch_reg, Operand(new_space_allocation_top));
b(gt, no_memento_found);
ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
cmp(scratch_reg,
Operand(isolate()->factory()->allocation_memento_map()));
// Memento map check.
bind(&map_check);
ldr(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
cmp(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()));
}
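The page arithmetic above rests on one identity: two addresses lie on the same page exactly when their XOR has no bits set above the page-alignment mask. A runnable restatement (the page size here is a placeholder; the real constant is Page::kPageAlignmentMask):

#include <cstdint>

constexpr uintptr_t kPageSize = uintptr_t{1} << 20;   // placeholder: 1 MB
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;

inline bool OnSamePage(uintptr_t a, uintptr_t b) {
  return ((a ^ b) & ~kPageAlignmentMask) == 0;        // equal page numbers
}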
Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg2,
Register reg3,

39
deps/v8/src/arm/macro-assembler-arm.h

@ -457,10 +457,14 @@ class MacroAssembler: public Assembler {
}
// Push a fixed frame, consisting of lr, fp, constant pool (if
// FLAG_enable_embedded_constant_pool), context and JS function / marker id if
// marker_reg is a valid register.
void PushFixedFrame(Register marker_reg = no_reg);
void PopFixedFrame(Register marker_reg = no_reg);
// FLAG_enable_embedded_constant_pool)
void PushCommonFrame(Register marker_reg = no_reg);
// Push a standard frame, consisting of lr, fp, constant pool (if
// FLAG_enable_embedded_constant_pool), context and JS function
void PushStandardFrame(Register function_reg);
void PopCommonFrame(Register marker_reg = no_reg);
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
@ -545,6 +549,19 @@ class MacroAssembler: public Assembler {
void VmovLow(Register dst, DwVfpRegister src);
void VmovLow(DwVfpRegister dst, Register src);
void LslPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
void LslPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
void LsrPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
void LsrPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
void AsrPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
void AsrPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
// Loads the number from object into dst register.
// If |object| is neither smi nor heap number, |not_number| is jumped to
// with |object| still intact.
@ -580,7 +597,7 @@ class MacroAssembler: public Assembler {
Label* not_int32);
// Generates function and stub prologue code.
void StubPrologue();
void StubPrologue(StackFrame::Type type);
void Prologue(bool code_pre_aging);
// Enter exit frame.
@ -637,6 +654,15 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// JavaScript invokes
// Removes current frame and its arguments from the stack preserving
// the arguments and a return address pushed to the stack for the next call.
// Both |callee_args_count| and |caller_args_count_reg| do not include
// receiver. |callee_args_count| is not modified, |caller_args_count_reg|
// is trashed.
void PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg, Register scratch0,
Register scratch1);
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
@ -1280,6 +1306,9 @@ class MacroAssembler: public Assembler {
// Jump if either of the registers contain a smi.
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
// Abort execution if argument is a number, enabled via --debug-code.
void AssertNotNumber(Register object);
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
void AssertSmi(Register object);

61
deps/v8/src/arm/simulator-arm.cc

@ -1041,6 +1041,32 @@ ReturnType Simulator::GetFromVFPRegister(int reg_index) {
return value;
}
void Simulator::SetSpecialRegister(SRegisterFieldMask reg_and_mask,
uint32_t value) {
// Only CPSR_f is implemented. Of that, only N, Z, C and V are implemented.
if ((reg_and_mask == CPSR_f) && ((value & ~kSpecialCondition) == 0)) {
n_flag_ = ((value & (1 << 31)) != 0);
z_flag_ = ((value & (1 << 30)) != 0);
c_flag_ = ((value & (1 << 29)) != 0);
v_flag_ = ((value & (1 << 28)) != 0);
} else {
UNIMPLEMENTED();
}
}
uint32_t Simulator::GetFromSpecialRegister(SRegister reg) {
uint32_t result = 0;
// Only CPSR_f is implemented.
if (reg == CPSR) {
if (n_flag_) result |= (1 << 31);
if (z_flag_) result |= (1 << 30);
if (c_flag_) result |= (1 << 29);
if (v_flag_) result |= (1 << 28);
} else {
UNIMPLEMENTED();
}
return result;
}
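The new MSR/MRS support models only the four condition flags, which occupy the top four bits of the CPSR. The packing used by both helpers, as a small sketch:

#include <cstdint>

uint32_t PackNZCV(bool n, bool z, bool c, bool v) {
  return (uint32_t{n} << 31) |  // Negative
         (uint32_t{z} << 30) |  // Zero
         (uint32_t{c} << 29) |  // Carry
         (uint32_t{v} << 28);   // oVerflow
}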
// Runtime FP routines take:
// - two double arguments
@ -1307,11 +1333,12 @@ bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
// Calculate C flag value for subtractions.
bool Simulator::BorrowFrom(int32_t left, int32_t right) {
bool Simulator::BorrowFrom(int32_t left, int32_t right, int32_t carry) {
uint32_t uleft = static_cast<uint32_t>(left);
uint32_t uright = static_cast<uint32_t>(right);
return (uright > uleft);
return (uright > uleft) ||
(!carry && (((uright + 1) > uleft) || (uright > (uleft - 1))));
}
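BorrowFrom now takes the incoming carry because SBC computes rd = rn - operand - (1 - C), and the resulting C flag must reflect whether that whole expression borrowed. Widening to 64 bits makes the rule explicit (a sketch, not the simulator's code):

#include <cstdint>

uint32_t Sbc(uint32_t rn, uint32_t operand, bool carry_in, bool* carry_out) {
  uint64_t wide = uint64_t{rn} - operand - (carry_in ? 0 : 1);
  *carry_out = (wide >> 32) == 0;  // no borrow out of bit 31 => C stays set
  return static_cast<uint32_t>(wide);
}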
@ -2312,7 +2339,22 @@ void Simulator::DecodeType01(Instruction* instr) {
return;
}
} else if ((type == 0) && instr->IsMiscType0()) {
if (instr->Bits(22, 21) == 1) {
if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 2) &&
(instr->Bits(15, 4) == 0xf00)) {
// MSR
int rm = instr->RmValue();
DCHECK_NE(pc, rm); // UNPREDICTABLE
SRegisterFieldMask sreg_and_mask =
instr->BitField(22, 22) | instr->BitField(19, 16);
SetSpecialRegister(sreg_and_mask, get_register(rm));
} else if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 0) &&
(instr->Bits(11, 0) == 0)) {
// MRS
int rd = instr->RdValue();
DCHECK_NE(pc, rd); // UNPREDICTABLE
SRegister sreg = static_cast<SRegister>(instr->BitField(22, 22));
set_register(rd, GetFromSpecialRegister(sreg));
} else if (instr->Bits(22, 21) == 1) {
int rm = instr->RmValue();
switch (instr->BitField(7, 4)) {
case BX:
@ -2452,8 +2494,15 @@ void Simulator::DecodeType01(Instruction* instr) {
}
case SBC: {
Format(instr, "sbc'cond's 'rd, 'rn, 'shift_rm");
Format(instr, "sbc'cond's 'rd, 'rn, 'imm");
// Format(instr, "sbc'cond's 'rd, 'rn, 'shift_rm");
// Format(instr, "sbc'cond's 'rd, 'rn, 'imm");
alu_out = (rn_val - shifter_operand) - (GetCarry() ? 0 : 1);
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);
SetCFlag(!BorrowFrom(rn_val, shifter_operand, GetCarry()));
SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
}
break;
}
@ -3215,7 +3264,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
if (instr->SzValue() == 0x1) {
set_d_register_from_double(vd, instr->DoubleImmedVmov());
} else {
UNREACHABLE(); // Not used by v8.
set_s_register_from_float(d, instr->DoubleImmedVmov());
}
} else if (((instr->Opc2Value() == 0x6)) && (instr->Opc3Value() == 0x3)) {
// vrintz - truncate

5
deps/v8/src/arm/simulator-arm.h

@ -262,7 +262,7 @@ class Simulator {
void SetCFlag(bool val);
void SetVFlag(bool val);
bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
bool BorrowFrom(int32_t left, int32_t right);
bool BorrowFrom(int32_t left, int32_t right, int32_t carry = 1);
bool OverflowFrom(int32_t alu_out,
int32_t left,
int32_t right,
@ -363,6 +363,9 @@ class Simulator {
template<class InputType, int register_size>
void SetVFPRegister(int reg_index, const InputType& value);
void SetSpecialRegister(SRegisterFieldMask reg_and_mask, uint32_t value);
uint32_t GetFromSpecialRegister(SRegister reg);
void CallInternal(byte* entry);
// Architecture state.

16
deps/v8/src/arm64/assembler-arm64-inl.h

@ -41,6 +41,18 @@ void RelocInfo::set_target_address(Address target,
}
}
void RelocInfo::update_wasm_memory_reference(
Address old_base, Address new_base, size_t old_size, size_t new_size,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmMemoryReference(rmode_));
DCHECK(old_base <= wasm_memory_reference() &&
wasm_memory_reference() < old_base + old_size);
Address updated_reference = new_base + (wasm_memory_reference() - old_base);
DCHECK(new_base <= updated_reference &&
updated_reference < new_base + new_size);
Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
icache_flush_mode);
}
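update_wasm_memory_reference is pointer rebasing: keep the offset into the old memory buffer and re-apply it to the new base, with the DCHECKs bounding both ends. Reduced to its core (illustrative):

#include <cassert>
#include <cstddef>
#include <cstdint>

uintptr_t Rebase(uintptr_t ref, uintptr_t old_base, size_t old_size,
                 uintptr_t new_base, size_t new_size) {
  assert(old_base <= ref && ref < old_base + old_size);
  uintptr_t updated = new_base + (ref - old_base);
  assert(new_base <= updated && updated < new_base + new_size);
  return updated;
}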
inline int CPURegister::code() const {
DCHECK(IsValid());
@ -693,6 +705,10 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::wasm_memory_reference() {
DCHECK(IsWasmMemoryReference(rmode_));
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)

10
deps/v8/src/arm64/assembler-arm64.cc

@ -56,7 +56,10 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (cpu.implementer() == base::CPU::NVIDIA &&
cpu.variant() == base::CPU::NVIDIA_DENVER &&
cpu.part() <= base::CPU::NVIDIA_DENVER_V10) {
supported_ |= 1u << COHERENT_CACHE;
// TODO(jkummerow): This is turned off as an experiment to see if it
// affects crash rates. Keep an eye on crash reports and either remove
// coherent cache support permanently, or re-enable it!
// supported_ |= 1u << COHERENT_CACHE;
}
}
@ -437,7 +440,8 @@ bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
DCHECK(mode != RelocInfo::NONE32);
return RelocInfo::IsNone(mode) ||
(!assm_->serializer_enabled() && (mode >= RelocInfo::CELL));
(!assm_->serializer_enabled() &&
(mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE));
}
@ -2871,7 +2875,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(isolate(), reinterpret_cast<byte*>(pc_), rmode, data, NULL);
if (((rmode >= RelocInfo::COMMENT) &&
(rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CALL)) ||
(rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL)) ||
(rmode == RelocInfo::INTERNAL_REFERENCE) ||
(rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
(rmode == RelocInfo::DEOPT_REASON) ||

8
deps/v8/src/arm64/assembler-arm64.h

@ -922,7 +922,9 @@ class Assembler : public AssemblerBase {
}
// Debugging ----------------------------------------------------------------
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
AssemblerPositionsRecorder* positions_recorder() {
return &positions_recorder_;
}
void RecordComment(const char* msg);
// Record a deoptimization reason that can be used by a log or cpu profiler.
@ -2135,8 +2137,8 @@ class Assembler : public AssemblerBase {
void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label);
private:
PositionsRecorder positions_recorder_;
friend class PositionsRecorder;
AssemblerPositionsRecorder positions_recorder_;
friend class AssemblerPositionsRecorder;
friend class EnsureSpace;
friend class ConstPool;
};

142
deps/v8/src/arm64/builtins-arm64.cc

@ -518,6 +518,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- x2 : allocation site or undefined
// -- x3 : new target
// -- lr : return address
// -- cp : context pointer
// -- sp[...]: constructor arguments
// -----------------------------------
@ -537,6 +538,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(allocation_site, x10);
__ Push(cp);
__ SmiTag(argc);
__ Push(allocation_site, argc);
@ -623,7 +625,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// x0: result
// jssp[0]: receiver
// jssp[1]: number of arguments (smi-tagged)
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
if (create_implicit_receiver) {
// If the result is an object (in the ECMA sense), we should get rid
@ -763,9 +765,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Clear the context before we push it when entering the internal frame.
__ Mov(cp, 0);
{
// Enter an internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
@ -1394,23 +1393,6 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
// We check the stack limit as indicator that recompilation might be done.
Label ok;
__ CompareRoot(jssp, Heap::kStackLimitRootIndex);
__ B(hs, &ok);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kStackGuard);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
__ Bind(&ok);
__ Ret();
}
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
@ -1456,6 +1438,29 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
__ TailCallRuntime(Runtime::kThrowNotDateError);
}
// static
void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
// -- jssp[0] : first argument (left-hand side)
// -- jssp[8] : receiver (right-hand side)
// -----------------------------------
ASM_LOCATION("Builtins::Generate_FunctionHasInstance");
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Ldr(InstanceOfDescriptor::LeftRegister(),
MemOperand(fp, 2 * kPointerSize)); // Load left-hand side.
__ Ldr(InstanceOfDescriptor::RightRegister(),
MemOperand(fp, 3 * kPointerSize)); // Load right-hand side.
InstanceOfStub stub(masm->isolate(), true);
__ CallStub(&stub);
}
// Pop the argument and the receiver.
__ Drop(2);
__ Ret();
}
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
@ -1972,19 +1977,21 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
Comment cmnt(masm, "[ PrepareForTailCall");
// Prepare for tail call only if the debugger is not active.
// Prepare for tail call only if ES2015 tail call elimination is enabled.
Label done;
ExternalReference debug_is_active =
ExternalReference::debug_is_active_address(masm->isolate());
__ Mov(scratch1, Operand(debug_is_active));
ExternalReference is_tail_call_elimination_enabled =
ExternalReference::is_tail_call_elimination_enabled_address(
masm->isolate());
__ Mov(scratch1, Operand(is_tail_call_elimination_enabled));
__ Ldrb(scratch1, MemOperand(scratch1));
__ Cmp(scratch1, Operand(0));
__ B(ne, &done);
__ B(eq, &done);
// Drop possible interpreter handler/stub frame.
{
Label no_interpreter_frame;
__ Ldr(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
__ Ldr(scratch3,
MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::STUB)));
__ B(ne, &no_interpreter_frame);
__ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@ -1992,18 +1999,19 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
}
// Check if next frame is an arguments adaptor frame.
Register caller_args_count_reg = scratch1;
Label no_arguments_adaptor, formal_parameter_count_loaded;
__ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ Ldr(scratch3,
MemOperand(scratch2, StandardFrameConstants::kContextOffset));
MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ B(ne, &no_arguments_adaptor);
// Drop arguments adaptor frame and load arguments count.
// Drop current frame and load arguments count from arguments adaptor frame.
__ mov(fp, scratch2);
__ Ldr(scratch1,
__ Ldr(caller_args_count_reg,
MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(scratch1);
__ SmiUntag(caller_args_count_reg);
__ B(&formal_parameter_count_loaded);
__ bind(&no_arguments_adaptor);
@ -2011,54 +2019,14 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
__ Ldr(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Ldr(scratch1,
FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
__ Ldrsw(scratch1,
__ Ldrsw(caller_args_count_reg,
FieldMemOperand(scratch1,
SharedFunctionInfo::kFormalParameterCountOffset));
__ bind(&formal_parameter_count_loaded);
// Calculate the end of destination area where we will put the arguments
// after we drop current frame. We add kPointerSize to count the receiver
// argument which is not included into formal parameters count.
Register dst_reg = scratch2;
__ add(dst_reg, fp, Operand(scratch1, LSL, kPointerSizeLog2));
__ add(dst_reg, dst_reg,
Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
Register src_reg = scratch1;
__ add(src_reg, jssp, Operand(args_reg, LSL, kPointerSizeLog2));
// Count receiver argument as well (not included in args_reg).
__ add(src_reg, src_reg, Operand(kPointerSize));
if (FLAG_debug_code) {
__ Cmp(src_reg, dst_reg);
__ Check(lo, kStackAccessBelowStackPointer);
}
// Restore caller's frame pointer and return address now as they will be
// overwritten by the copying loop.
__ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
__ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Now copy callee arguments to the caller frame going backwards to avoid
// callee arguments corruption (source and destination areas could overlap).
// Both src_reg and dst_reg are pointing to the word after the one to copy,
// so they must be pre-decremented in the loop.
Register tmp_reg = scratch3;
Label loop, entry;
__ B(&entry);
__ bind(&loop);
__ Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
__ Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
__ bind(&entry);
__ Cmp(jssp, src_reg);
__ B(ne, &loop);
// Leave current frame.
__ Mov(jssp, dst_reg);
__ SetStackPointer(jssp);
__ AssertStackConsistency();
ParameterCount callee_args_count(args_reg);
__ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
scratch3);
__ bind(&done);
}
} // namespace
@ -2610,30 +2578,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Register copy_to = x12;
Register scratch1 = x13, scratch2 = x14;
// If the function is strong we need to throw an error.
Label no_strong_error;
__ Ldr(scratch1,
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(scratch2.W(),
FieldMemOperand(scratch1, SharedFunctionInfo::kCompilerHintsOffset));
__ TestAndBranchIfAllClear(scratch2.W(),
(1 << SharedFunctionInfo::kStrongModeFunction),
&no_strong_error);
// What we really care about is the required number of arguments.
DCHECK_EQ(kPointerSize, kInt64Size);
__ Ldr(scratch2.W(),
FieldMemOperand(scratch1, SharedFunctionInfo::kLengthOffset));
__ Cmp(argc_actual, Operand(scratch2, LSR, 1));
__ B(ge, &no_strong_error);
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
__ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
}
__ Bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
ArgumentAdaptorStackCheck(masm, &stack_overflow);

240
deps/v8/src/arm64/code-stubs-arm64.cc

@ -4,8 +4,9 @@
#if V8_TARGET_ARCH_ARM64
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/api-arguments.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
@ -81,6 +82,10 @@ void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
@ -425,7 +430,9 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
}
// Fast negative check for internalized-to-internalized equality.
// Fast negative check for internalized-to-internalized equality or receiver
// equality. Also handles the undetectable receiver to null/undefined
// comparison.
// See call site for description.
static void EmitCheckForInternalizedStringsOrObjects(
MacroAssembler* masm, Register left, Register right, Register left_map,
@ -435,7 +442,7 @@ static void EmitCheckForInternalizedStringsOrObjects(
Register result = x0;
DCHECK(left.is(x0) || right.is(x0));
Label object_test, return_unequal, undetectable;
Label object_test, return_equal, return_unequal, undetectable;
STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
// TODO(all): reexamine this branch sequence for optimisation wrt branch
// prediction.
@ -463,12 +470,22 @@ static void EmitCheckForInternalizedStringsOrObjects(
__ CompareInstanceType(left_map, left_type, FIRST_JS_RECEIVER_TYPE);
__ B(lt, runtime_call);
__ bind(&return_unequal);
__ Bind(&return_unequal);
// Return non-equal by returning the non-zero object pointer in x0.
__ Ret();
__ bind(&undetectable);
__ Bind(&undetectable);
__ Tbz(left_bitfield, MaskToBit(1 << Map::kIsUndetectable), &return_unequal);
// If both sides are JSReceivers, then the result is false according to
// the HTML specification, which says that only comparisons with null or
// undefined are affected by special casing for document.all.
__ CompareInstanceType(right_map, right_type, ODDBALL_TYPE);
__ B(eq, &return_equal);
__ CompareInstanceType(left_map, left_type, ODDBALL_TYPE);
__ B(ne, &return_unequal);
__ Bind(&return_equal);
__ Mov(result, EQUAL);
__ Ret();
}
@ -1324,7 +1341,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
__ Ldr(x10, MemOperand(x11));
__ Push(x13, xzr, x12, x10);
__ Push(x13, x12, xzr, x10);
// Set up fp.
__ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
@ -1544,8 +1561,11 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ JumpIfNotObjectType(function, function_map, scratch, JS_FUNCTION_TYPE,
&slow_case);
// Ensure that {function} has an instance prototype.
// Go to the runtime if the function is not a constructor.
__ Ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
__ Tbz(scratch, Map::kIsConstructor, &slow_case);
// Ensure that {function} has an instance prototype.
__ Tbnz(scratch, Map::kHasNonInstancePrototype, &slow_case);
// Get the "prototype" (or initial map) of the {function}.
@ -1612,27 +1632,8 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
__ TailCallRuntime(Runtime::kInstanceOf);
}
void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
// Return address is in lr.
Label slow;
Register receiver = LoadDescriptor::ReceiverRegister();
Register key = LoadDescriptor::NameRegister();
// Check that the key is an array index, that is Uint32.
__ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
// Everything is fine, call runtime.
__ Push(receiver, key);
__ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ Bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
__ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
: Runtime::kInstanceOf);
}
@ -2856,10 +2857,17 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
// Handle more complex cases in runtime.
__ Bind(&runtime);
__ Push(lhs, rhs);
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(lhs, rhs);
__ CallRuntime(Runtime::kStringEqual);
}
__ LoadRoot(x1, Heap::kTrueValueRootIndex);
__ Sub(x0, x0, x1);
__ Ret();
} else {
__ Push(lhs, rhs);
__ TailCallRuntime(Runtime::kStringCompare);
}
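Note the post-call fixup in the equality arm: the runtime returns the true or false oddball, and subtracting the true root turns that into 0 exactly on equality, the value CompareIC callers interpret as EQUAL. As a one-liner (sketch):

#include <cstdint>

// result is the address of the returned oddball (true or false object).
int32_t BooleanRootToCompareResult(uintptr_t result, uintptr_t true_root) {
  return static_cast<int32_t>(result - true_root);  // 0 iff "true"
}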
@ -3227,27 +3235,28 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ Bind(&not_smi);
Label not_heap_number;
__ Ldr(x1, FieldMemOperand(x0, HeapObject::kMapOffset));
__ Ldrb(x1, FieldMemOperand(x1, Map::kInstanceTypeOffset));
// x0: object
// x1: instance type
__ Cmp(x1, HEAP_NUMBER_TYPE);
__ CompareObjectType(x0, x1, x1, HEAP_NUMBER_TYPE);
// x0: receiver
// x1: receiver instance type
__ B(ne, &not_heap_number);
__ Ret();
__ Bind(&not_heap_number);
Label not_string, slow_string;
__ Cmp(x1, FIRST_NONSTRING_TYPE);
NonNumberToNumberStub stub(masm->isolate());
__ TailCallStub(&stub);
}
void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
// The NonNumberToNumber stub takes one argument in x0.
__ AssertNotNumber(x0);
Label not_string;
__ CompareObjectType(x0, x1, x1, FIRST_NONSTRING_TYPE);
// x0: receiver
// x1: receiver instance type
__ B(hs, &not_string);
// Check if string has a cached array index.
__ Ldr(x2, FieldMemOperand(x0, String::kHashFieldOffset));
__ Tst(x2, Operand(String::kContainsCachedArrayIndexMask));
__ B(ne, &slow_string);
__ IndexFromHash(x2, x0);
__ Ret();
__ Bind(&slow_string);
__ Push(x0); // Push argument.
__ TailCallRuntime(Runtime::kStringToNumber);
StringToNumberStub stub(masm->isolate());
__ TailCallStub(&stub);
__ Bind(&not_string);
Label not_oddball;
@ -3261,22 +3270,23 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kToNumber);
}
void StringToNumberStub::Generate(MacroAssembler* masm) {
// The StringToNumber stub takes one argument in x0.
__ AssertString(x0);
void ToLengthStub::Generate(MacroAssembler* masm) {
// The ToLength stub takes one argument in x0.
Label not_smi;
__ JumpIfNotSmi(x0, &not_smi);
STATIC_ASSERT(kSmiTag == 0);
__ Tst(x0, x0);
__ Csel(x0, x0, Operand(0), ge);
// Check if string has a cached array index.
Label runtime;
__ Ldr(x2, FieldMemOperand(x0, String::kHashFieldOffset));
__ Tst(x2, Operand(String::kContainsCachedArrayIndexMask));
__ B(ne, &runtime);
__ IndexFromHash(x2, x0);
__ Ret();
__ Bind(&not_smi);
__ Bind(&runtime);
__ Push(x0); // Push argument.
__ TailCallRuntime(Runtime::kToLength);
__ TailCallRuntime(Runtime::kStringToNumber);
}
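The ToNumber path is now split into stubs that tail-call one another: ToNumber peels off smis and heap numbers, NonNumberToNumber forwards strings to StringToNumber, and everything else falls through to the runtime. The dispatch chain, modelled with an invented tagged-value stand-in:

#include <string>

enum class Kind { kSmi, kHeapNumber, kString, kOther };
struct Value { Kind kind; double number; std::string str; };

double StringToNumber(const Value& v) {
  return std::stod(v.str);  // cached-index and runtime paths elided
}

double NonNumberToNumber(const Value& v) {
  if (v.kind == Kind::kString) return StringToNumber(v);  // TailCallStub
  return 0;  // oddballs/objects: Runtime::kToNumber in the real code
}

double ToNumber(const Value& v) {
  if (v.kind == Kind::kSmi || v.kind == Kind::kHeapNumber) return v.number;
  return NonNumberToNumber(v);  // TailCallStub(NonNumberToNumberStub)
}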
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in x0.
Label is_number;
@ -3449,43 +3459,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
}
void StringCompareStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x1 : left
// -- x0 : right
// -- lr : return address
// -----------------------------------
__ AssertString(x1);
__ AssertString(x0);
Label not_same;
__ Cmp(x0, x1);
__ B(ne, &not_same);
__ Mov(x0, Smi::FromInt(EQUAL));
__ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x3,
x4);
__ Ret();
__ Bind(&not_same);
// Check that both objects are sequential one-byte strings.
Label runtime;
__ JumpIfEitherIsNotSequentialOneByteStrings(x1, x0, x12, x13, &runtime);
// Compare flat one-byte strings natively.
__ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x3,
x4);
StringHelper::GenerateCompareFlatOneByteStrings(masm, x1, x0, x12, x13, x14,
x15);
// Call the runtime.
// Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
__ Bind(&runtime);
__ Push(x1, x0);
__ TailCallRuntime(Runtime::kStringCompare);
}
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x1 : left
@ -3682,7 +3655,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
__ Ldr(x1, MemOperand(fp, parameter_count_offset));
if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ Add(x1, x1, 1);
@ -4972,7 +4945,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ Bind(&loop);
__ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
__ Bind(&loop_entry);
__ Ldr(x3, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
__ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
__ Cmp(x3, x1);
__ B(ne, &loop);
}
@ -4980,8 +4953,8 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// Check if we have rest parameters (only possible if we have an
// arguments adaptor frame below the function frame).
Label no_rest_parameters;
__ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
__ Ldr(x3, MemOperand(x2, StandardFrameConstants::kContextOffset));
__ Ldr(x2, MemOperand(x2, CommonFrameConstants::kCallerFPOffset));
__ Ldr(x3, MemOperand(x2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(x3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ B(ne, &no_rest_parameters);
@ -5137,8 +5110,9 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
Label runtime;
Label adaptor_frame, try_allocate;
__ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ Ldr(caller_ctx, MemOperand(caller_fp,
StandardFrameConstants::kContextOffset));
__ Ldr(
caller_ctx,
MemOperand(caller_fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ B(eq, &adaptor_frame);
@ -5401,7 +5375,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ Bind(&loop);
__ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
__ Bind(&loop_entry);
__ Ldr(x3, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
__ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
__ Cmp(x3, x1);
__ B(ne, &loop);
}
@ -5409,7 +5383,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ Ldr(x3, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
__ Ldr(x4, MemOperand(x3, StandardFrameConstants::kContextOffset));
__ Ldr(x4, MemOperand(x3, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(x4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ B(eq, &arguments_adaptor);
{
@ -5804,16 +5778,12 @@ static void CallApiFunctionAndReturn(
__ B(&leave_exit_frame);
}
static void CallApiFunctionStubHelper(MacroAssembler* masm,
const ParameterCount& argc,
bool return_first_arg,
bool call_data_undefined, bool is_lazy) {
void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : callee
// -- x4 : call_data
// -- x2 : holder
// -- x1 : api_function_address
// -- x3 : number of arguments if argc is a register
// -- cp : context
// --
// -- sp[0] : last argument
@ -5839,17 +5809,15 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
DCHECK(argc.is_immediate() || x3.is(argc.reg()));
// FunctionCallbackArguments: context, callee and call data.
__ Push(context, callee, call_data);
if (!is_lazy) {
if (!is_lazy()) {
// Load context from callee
__ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
}
if (!call_data_undefined) {
if (!call_data_undefined()) {
__ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
}
Register isolate_reg = x5;
@ -5878,26 +5846,13 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
// x0 = FunctionCallbackInfo&
// Arguments is after the return address.
__ Add(x0, masm->StackPointer(), 1 * kPointerSize);
if (argc.is_immediate()) {
// FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
__ Add(x10, args,
Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
__ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
// FunctionCallbackInfo::length_ = argc and
// FunctionCallbackInfo::is_construct_call = 0
__ Mov(x10, argc.immediate());
__ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
} else {
// FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
__ Add(x10, args, Operand(argc.reg(), LSL, kPointerSizeLog2));
__ Add(x10, x10, (FCA::kArgsLength - 1) * kPointerSize);
__ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
// FunctionCallbackInfo::length_ = argc and
// FunctionCallbackInfo::is_construct_call
__ Add(x10, argc.reg(), FCA::kArgsLength + 1);
__ Mov(x10, Operand(x10, LSL, kPointerSizeLog2));
__ Stp(argc.reg(), x10, MemOperand(x0, 2 * kPointerSize));
}
// FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
__ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
__ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
// FunctionCallbackInfo::length_ = argc and
// FunctionCallbackInfo::is_construct_call = 0
__ Mov(x10, argc());
__ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());
@ -5907,7 +5862,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
int return_value_offset = 0;
if (return_first_arg) {
if (is_store()) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
@ -5917,10 +5872,8 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
MemOperand is_construct_call_operand =
MemOperand(masm->StackPointer(), 4 * kPointerSize);
MemOperand* stack_space_operand = &is_construct_call_operand;
if (argc.is_immediate()) {
stack_space = argc.immediate() + FCA::kArgsLength + 1;
stack_space_operand = NULL;
}
stack_space = argc() + FCA::kArgsLength + 1;
stack_space_operand = NULL;
const int spill_offset = 1 + kApiStackSpace;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
@ -5929,23 +5882,6 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
}
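With the helper folded into CallApiCallbackStub, argc is always a compile-time property of the stub, so the register branch disappears. The four words written by the two Stp instructions correspond to the block the C++ callback glue reads; roughly (field names abbreviated and layout inferred from the comments above, so treat this as a sketch):

#include <cstdint>

// Sketch of the block at x0 that invoke_function_callback consumes.
struct CallbackInfoBlock {
  void** implicit_args;        // frame slots: holder ... return value, callee
  void** values;               // pointer to the last JS argument
  intptr_t length;             // argc, now an immediate
  intptr_t is_construct_call;  // always 0 here
};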
void CallApiFunctionStub::Generate(MacroAssembler* masm) {
bool call_data_undefined = this->call_data_undefined();
CallApiFunctionStubHelper(masm, ParameterCount(x3), false,
call_data_undefined, false);
}
void CallApiAccessorStub::Generate(MacroAssembler* masm) {
bool is_store = this->is_store();
int argc = this->argc();
bool call_data_undefined = this->call_data_undefined();
bool is_lazy = this->is_lazy();
CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
call_data_undefined, is_lazy);
}
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- sp[0] : name

2
deps/v8/src/arm64/cpu-arm64.cc

@ -19,7 +19,7 @@ class CacheLineSizes {
cache_type_register_ = 0;
#else
// Copy the content of the cache type register to a core register.
__asm__ __volatile__("mrs %[ctr], ctr_el0" // NOLINT
__asm__ __volatile__("mrs %x[ctr], ctr_el0" // NOLINT
: [ctr] "=r"(cache_type_register_));
#endif
}

16
deps/v8/src/arm64/deoptimizer-arm64.cc

@ -65,12 +65,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
// There is no dynamic alignment padding on ARM64 in the input frame.
return false;
}
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler());
@ -132,12 +126,17 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// address for lazy deoptimization.
__ Mov(code_object, lr);
// Compute the fp-to-sp delta, and correct one word for bailout id.
__ Add(fp_to_sp, masm()->StackPointer(),
__ Add(fp_to_sp, __ StackPointer(),
kSavedRegistersAreaSize + (1 * kPointerSize));
__ Sub(fp_to_sp, fp, fp_to_sp);
// Allocate a new deoptimizer object.
__ Mov(x0, 0);
Label context_check;
__ Ldr(x1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(x1, &context_check);
__ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ Mov(x1, type());
// Following arguments are already loaded:
// - x2: bailout id
@ -212,6 +211,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ Pop(x4); // Restore deoptimizer object (class Deoptimizer).
__ Ldr(__ StackPointer(),
MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;

15
deps/v8/src/arm64/frames-arm64.h

@ -34,16 +34,11 @@ class EntryFrameConstants : public AllStatic {
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
class ExitFrameConstants : public AllStatic {
class ExitFrameConstants : public TypedFrameConstants {
public:
static const int kFrameSize = 2 * kPointerSize;
static const int kCallerSPDisplacement = 2 * kPointerSize;
static const int kCallerPCOffset = 1 * kPointerSize;
static const int kCallerFPOffset = 0 * kPointerSize; // <- fp
static const int kSPOffset = -1 * kPointerSize;
static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
DEFINE_TYPED_FRAME_SIZES(2);
static const int kLastExitFrameField = kCodeOffset;
static const int kConstantPoolOffset = 0; // Not used
@ -59,7 +54,7 @@ class JavaScriptFrameConstants : public AllStatic {
// the arguments.
static const int kLastParameterOffset = 2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
};

80
deps/v8/src/arm64/interface-descriptors-arm64.cc

@ -114,37 +114,8 @@ void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
}
void ToNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0: value
Register registers[] = {x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
// static
const Register ToLengthDescriptor::ReceiverRegister() { return x0; }
// static
const Register ToStringDescriptor::ReceiverRegister() { return x0; }
// static
const Register ToNameDescriptor::ReceiverRegister() { return x0; }
// static
const Register ToObjectDescriptor::ReceiverRegister() { return x0; }
void NumberToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0: value
Register registers[] = {x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
const Register TypeConversionDescriptor::ArgumentRegister() { return x0; }
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@ -294,6 +265,13 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
void Allocate##Type##Descriptor::InitializePlatformSpecific( \
CallInterfaceDescriptorData* data) { \
data->InitializePlatformSpecific(0, nullptr, nullptr); \
}
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@ -336,28 +314,18 @@ void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: left operand
// x0: right operand
Register registers[] = {x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CompareNilDescriptor::InitializePlatformSpecific(
void FastArrayPushDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0: value to compare
// stack param count needs (arg count)
Register registers[] = {x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ToBooleanDescriptor::InitializePlatformSpecific(
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0: value
Register registers[] = {x0};
// x1: left operand
// x0: right operand
Register registers[] = {x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@ -444,25 +412,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
&default_descriptor);
}
void ApiFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
x0, // callee
x4, // call_data
x2, // holder
x1, // api_function_address
x3, // actual number of arguments
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void ApiAccessorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptorBase::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);

229
deps/v8/src/arm64/macro-assembler-arm64.cc

@ -1355,6 +1355,14 @@ void MacroAssembler::AssertStackConsistency() {
}
}
void MacroAssembler::AssertCspAligned() {
if (emit_debug_code() && use_real_aborts()) {
// TODO(titzer): use a real assert for alignment check?
UseScratchRegisterScope scope(this);
Register temp = scope.AcquireX();
ldr(temp, MemOperand(csp));
}
}
void MacroAssembler::AssertFPCRState(Register fpcr) {
if (emit_debug_code()) {
@ -1548,24 +1556,38 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
Register scratch1,
Register scratch2,
Label* no_memento_found) {
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
Label map_check;
Label top_check;
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
Add(scratch1, receiver,
JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag);
Cmp(scratch1, new_space_start);
B(lt, no_memento_found);
Mov(scratch2, new_space_allocation_top);
Ldr(scratch2, MemOperand(scratch2));
Cmp(scratch1, scratch2);
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
// Bail out if the object is not in new space.
JumpIfNotInNewSpace(receiver, no_memento_found);
Add(scratch1, receiver, kMementoEndOffset);
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
Eor(scratch2, scratch1, new_space_allocation_top);
Tst(scratch2, ~Page::kPageAlignmentMask);
B(eq, &top_check);
// The object is on a different page than allocation top. Bail out if the
// object sits on the page boundary as no memento can follow and we cannot
// touch the memory following it.
Eor(scratch2, scratch1, receiver);
Tst(scratch2, ~Page::kPageAlignmentMask);
B(ne, no_memento_found);
// Continue with the actual map check.
jmp(&map_check);
// If top is on the same page as the current object, we need to check whether
// we are below top.
bind(&top_check);
Cmp(scratch1, new_space_allocation_top);
B(gt, no_memento_found);
Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize));
Cmp(scratch1,
Operand(isolate()->factory()->allocation_memento_map()));
// Memento map check.
bind(&map_check);
Ldr(scratch1, MemOperand(receiver, kMementoMapOffset));
Cmp(scratch1, Operand(isolate()->factory()->allocation_memento_map()));
}
@ -1690,6 +1712,18 @@ void MacroAssembler::AssertPositiveOrZero(Register value) {
}
}
void MacroAssembler::AssertNotNumber(Register value) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
Tst(value, kSmiTagMask);
Check(ne, kOperandIsANumber);
Label done;
JumpIfNotHeapNumber(value, &done);
Abort(kOperandIsANumber);
Bind(&done);
}
}
void MacroAssembler::AssertNumber(Register value) {
if (emit_debug_code()) {
Label done;
@ -2330,6 +2364,66 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
B(ne, not_unique_name);
}
void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg,
Register scratch0, Register scratch1) {
#if DEBUG
if (callee_args_count.is_reg()) {
DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
scratch1));
} else {
DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
}
#endif
// Calculate the end of destination area where we will put the arguments
// after we drop current frame. We add kPointerSize to count the receiver
// argument which is not included into formal parameters count.
Register dst_reg = scratch0;
__ add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
__ add(dst_reg, dst_reg,
Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
Register src_reg = caller_args_count_reg;
// Calculate the end of source area. +kPointerSize is for the receiver.
if (callee_args_count.is_reg()) {
add(src_reg, jssp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
add(src_reg, src_reg, Operand(kPointerSize));
} else {
add(src_reg, jssp,
Operand((callee_args_count.immediate() + 1) * kPointerSize));
}
if (FLAG_debug_code) {
__ Cmp(src_reg, dst_reg);
__ Check(lo, kStackAccessBelowStackPointer);
}
// Restore caller's frame pointer and return address now as they will be
// overwritten by the copying loop.
__ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
__ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Now copy callee arguments to the caller frame going backwards to avoid
// callee arguments corruption (source and destination areas could overlap).
// Both src_reg and dst_reg are pointing to the word after the one to copy,
// so they must be pre-decremented in the loop.
Register tmp_reg = scratch1;
Label loop, entry;
__ B(&entry);
__ bind(&loop);
__ Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
__ Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
__ bind(&entry);
__ Cmp(jssp, src_reg);
__ B(ne, &loop);
// Leave current frame.
__ Mov(jssp, dst_reg);
__ SetStackPointer(jssp);
__ AssertStackConsistency();
}
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
@ -2651,18 +2745,17 @@ void MacroAssembler::TruncateHeapNumberToI(Register result,
Bind(&done);
}
void MacroAssembler::StubPrologue() {
void MacroAssembler::StubPrologue(StackFrame::Type type, int frame_slots) {
UseScratchRegisterScope temps(this);
frame_slots -= TypedFrameConstants::kFixedSlotCountAboveFp;
Register temp = temps.AcquireX();
__ Mov(temp, Smi::FromInt(StackFrame::STUB));
// Compiled stubs don't age, and so they don't need the predictable code
// ageing sequence.
__ Push(lr, fp, cp, temp);
__ Add(fp, StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
Mov(temp, Smi::FromInt(type));
Push(lr, fp);
Mov(fp, StackPointer());
Claim(frame_slots);
str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
}
void MacroAssembler::Prologue(bool code_pre_aging) {
if (code_pre_aging) {
Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
@ -2694,18 +2787,26 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
Register type_reg = temps.AcquireX();
Register code_reg = temps.AcquireX();
Push(lr, fp, cp);
Mov(type_reg, Smi::FromInt(type));
Mov(code_reg, Operand(CodeObject()));
Push(type_reg, code_reg);
// jssp[4] : lr
// jssp[3] : fp
// jssp[2] : cp
// jssp[1] : type
// jssp[0] : code object
// Adjust FP to point to saved FP.
Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
if (type == StackFrame::INTERNAL) {
Mov(type_reg, Smi::FromInt(type));
Push(lr, fp);
Push(type_reg);
Mov(code_reg, Operand(CodeObject()));
Push(code_reg);
Add(fp, jssp, InternalFrameConstants::kFixedFrameSizeFromFp);
// jssp[4] : lr
// jssp[3] : fp
// jssp[1] : type
// jssp[0] : [code object]
} else {
Mov(type_reg, Smi::FromInt(type));
Push(lr, fp);
Push(type_reg);
Add(fp, jssp, TypedFrameConstants::kFixedFrameSizeFromFp);
// jssp[2] : lr
// jssp[1] : fp
// jssp[0] : type
}
}
@ -2746,20 +2847,23 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
DCHECK(jssp.Is(StackPointer()));
// Set up the new stack frame.
Mov(scratch, Operand(CodeObject()));
Push(lr, fp);
Mov(fp, StackPointer());
Push(xzr, scratch);
Mov(scratch, Smi::FromInt(StackFrame::EXIT));
Push(scratch);
Push(xzr);
Mov(scratch, Operand(CodeObject()));
Push(scratch);
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
// fp[-8]: Space reserved for SPOffset.
// jssp -> fp[-16]: CodeObject()
STATIC_ASSERT((2 * kPointerSize) ==
ExitFrameConstants::kCallerSPDisplacement);
// fp[-8]: STUB marker
// fp[-16]: Space reserved for SPOffset.
// jssp -> fp[-24]: CodeObject()
STATIC_ASSERT((2 * kPointerSize) == ExitFrameConstants::kCallerSPOffset);
STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset);
STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset);
STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kSPOffset);
STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kCodeOffset);
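These STATIC_ASSERTs pin the frame constants to the layout drawn in the comments: one extra word (the STUB marker) now sits between the saved fp and the SPOffset slot, shifting kSPOffset and kCodeOffset down by a word. The same checks, mirrored standalone (offsets transcribed from the asserts above; kPointerSize is 8 on arm64, and kFrameTypeOffset is our local name for the marker slot, not necessarily V8's):

// Offsets relative to fp, as asserted in EnterExitFrame above.
constexpr int kPointerSize = 8;  // arm64
constexpr int kCallerSPOffset = 2 * kPointerSize;    // fp[16]
constexpr int kCallerPCOffset = 1 * kPointerSize;    // fp[8]:  lr
constexpr int kCallerFPOffset = 0 * kPointerSize;    // fp[0]:  old fp
constexpr int kFrameTypeOffset = -1 * kPointerSize;  // fp[-8]: STUB marker
constexpr int kSPOffset = -2 * kPointerSize;         // fp[-16]
constexpr int kCodeOffset = -3 * kPointerSize;       // fp[-24]

// Every slot is exactly one word from its neighbour, in this order.
static_assert(kCallerSPOffset == kCallerPCOffset + kPointerSize, "");
static_assert(kCallerPCOffset == kCallerFPOffset + kPointerSize, "");
static_assert(kFrameTypeOffset == kCallerFPOffset - kPointerSize, "");
static_assert(kSPOffset == kFrameTypeOffset - kPointerSize, "");
static_assert(kCodeOffset == kSPOffset - kPointerSize, "");

int main() {}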
// Save the frame pointer and context pointer in the top frame.
Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
@ -2769,8 +2873,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
isolate())));
Str(cp, MemOperand(scratch));
STATIC_ASSERT((-2 * kPointerSize) ==
ExitFrameConstants::kLastExitFrameField);
STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kLastExitFrameField);
if (save_doubles) {
ExitFramePreserveFPRegs();
}
@ -2781,9 +2884,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
Claim(extra_space + 1, kXRegSize);
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
// fp[-8]: Space reserved for SPOffset.
// fp[-16]: CodeObject()
// fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
// fp[-8]: STUB marker
// fp[-16]: Space reserved for SPOffset.
// fp[-24]: CodeObject()
// fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
// jssp[8]: Extra space reserved for caller (if extra_space != 0).
// jssp -> jssp[0]: Space reserved for the return address.
@ -2793,9 +2897,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
// fp[-8]: Space reserved for SPOffset.
// fp[-16]: CodeObject()
// fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
// fp[-8]: STUB marker
// fp[-16]: Space reserved for SPOffset.
// fp[-24]: CodeObject()
// fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
// csp[8]: Memory reserved for the caller if extra_space != 0.
// Alignment padding, if necessary.
// csp -> csp[0]: Space reserved for the return address.
@ -3678,8 +3783,19 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
Label same_contexts;
// Load current lexical context from the stack frame.
Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Load current lexical context from the active StandardFrame, which
// may require crawling past STUB frames.
Label load_context;
Label has_context;
Mov(scratch2, fp);
bind(&load_context);
Ldr(scratch1,
MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
JumpIfNotSmi(scratch1, &has_context);
Ldr(scratch2, MemOperand(scratch2, CommonFrameConstants::kCallerFPOffset));
B(&load_context);
bind(&has_context);
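The loop above walks the frame-pointer chain: a typed frame (such as a STUB frame) stores a Smi frame-type marker in the slot where a standard frame stores its context, and a Smi is recognizable by its clear low tag bit. A minimal standalone model of that walk (the struct layout and Smi encoding here are simplified stand-ins, not V8's actual frame layout):

#include <cassert>
#include <cstdint>

// V8 tags heap pointers with a low bit of 1 and Smis with a low bit of
// 0, so a frame whose context-or-type slot holds a Smi is a typed
// (e.g. STUB) frame that must be skipped.
struct Frame {
  Frame* caller_fp;                // kCallerFPOffset slot
  intptr_t context_or_frame_type;  // kContextOrFrameTypeOffset slot
};

bool IsSmi(intptr_t value) { return (value & 1) == 0; }

intptr_t LoadContextPastStubFrames(Frame* fp) {
  while (IsSmi(fp->context_or_frame_type)) {
    fp = fp->caller_fp;  // crawl to the caller's frame
  }
  return fp->context_or_frame_type;
}

int main() {
  intptr_t context = 0x1001;      // fake tagged HeapObject pointer
  Frame standard{nullptr, context};
  Frame stub{&standard, 4 << 1};  // Smi-encoded marker (low bit clear)
  assert(LoadContextPastStubFrames(&stub) == context);
}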
// In debug mode, make sure the lexical context is set.
#ifdef DEBUG
Cmp(scratch1, 0);
@ -3916,13 +4032,12 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Str(scratch1, MemOperand(scratch2));
// Call stub on end of buffer.
// Check for end of buffer.
DCHECK(StoreBuffer::kStoreBufferOverflowBit ==
(1 << (14 + kPointerSizeLog2)));
Tst(scratch1, StoreBuffer::kStoreBufferMask);
if (and_then == kFallThroughAtEnd) {
Tbz(scratch1, (14 + kPointerSizeLog2), &done);
B(ne, &done);
} else {
DCHECK(and_then == kReturnAtEnd);
Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow);
B(eq, &store_buffer_overflow);
Ret();
}
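The old code tested one dedicated overflow bit (Tbz/Tbnz); the new code ANDs the store-buffer top pointer against kStoreBufferMask and branches on eq/ne, i.e. the buffer is treated as full when the masked position bits are all zero (the pointer wrapped to an aligned boundary). A standalone illustration of that reading, with invented geometry -- these constants are ours, not V8's:

#include <cassert>
#include <cstdint>

int main() {
  // Illustrative: a store buffer of 8 slots of 8 bytes, aligned to its
  // size, so the low bits of `top` are zero exactly when it wraps.
  const uintptr_t kStoreBufferSize = 8 * 8;
  const uintptr_t kStoreBufferMask = kStoreBufferSize - 1;

  uintptr_t base = 0x10000;  // aligned to kStoreBufferSize
  uintptr_t top = base + 8;  // one entry written
  assert((top & kStoreBufferMask) != 0);  // B(ne, &done): not full yet

  top = base + kStoreBufferSize;  // advanced past the last slot
  assert((top & kStoreBufferMask) == 0);  // B(eq, ...): handle overflow
}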

16
deps/v8/src/arm64/macro-assembler-arm64.h

@ -788,6 +788,9 @@ class MacroAssembler : public Assembler {
// If emit_debug_code() is false, this emits no code.
void AssertStackConsistency();
// Emits a runtime assert that the CSP is aligned.
void AssertCspAligned();
// Preserve the callee-saved registers (as defined by AAPCS64).
//
// Higher-numbered registers are pushed before lower-numbered registers, and
@ -895,6 +898,7 @@ class MacroAssembler : public Assembler {
// This is required for compatibility with architecture-independent code.
// Remove if not needed.
inline void Move(Register dst, Register src) { Mov(dst, src); }
inline void Move(Register dst, Handle<Object> x) { LoadObject(dst, x); }
inline void Move(Register dst, Smi* src) { Mov(dst, src); }
void LoadInstanceDescriptors(Register map,
@ -986,6 +990,7 @@ class MacroAssembler : public Assembler {
// Abort execution if argument is not a number (heap number or smi).
void AssertNumber(Register value);
void AssertNotNumber(Register value);
void JumpIfHeapNumber(Register object, Label* on_heap_number,
SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
@ -1165,6 +1170,15 @@ class MacroAssembler : public Assembler {
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None());
// Removes the current frame and its arguments from the stack, preserving
// the arguments and a return address pushed to the stack for the next call.
// Neither |callee_args_count| nor |caller_args_count_reg| includes the
// receiver. |callee_args_count| is not modified; |caller_args_count_reg|
// is trashed.
void PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg, Register scratch0,
Register scratch1);
// Registers used through the invocation chain are hard-coded.
// We force passing the parameters to ensure the contracts are correctly
// honoured by the caller.
@ -1621,7 +1635,7 @@ class MacroAssembler : public Assembler {
void ExitFrameRestoreFPRegs();
// Generates function and stub prologue code.
void StubPrologue();
void StubPrologue(StackFrame::Type type, int frame_slots);
void Prologue(bool code_pre_aging);
// Enter exit frame. Exit frames are used when calling C code from generated

247
deps/v8/src/assembler.cc

@ -60,7 +60,8 @@
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/simulator.h" // For flushing instruction cache.
#include "src/snapshot/serialize.h"
#include "src/snapshot/serializer-common.h"
#include "src/wasm/wasm-external-refs.h"
#if V8_TARGET_ARCH_IA32
#include "src/ia32/assembler-ia32-inl.h" // NOLINT
@ -76,6 +77,8 @@
#include "src/mips/assembler-mips-inl.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/assembler-mips64-inl.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/s390/assembler-s390-inl.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/assembler-x87-inl.h" // NOLINT
#else
@ -98,6 +101,8 @@
#include "src/regexp/mips/regexp-macro-assembler-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/regexp/s390/regexp-macro-assembler-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/regexp/x87/regexp-macro-assembler-x87.h" // NOLINT
#else // Unknown architecture.
@ -833,10 +838,14 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "debug break slot at return";
case DEBUG_BREAK_SLOT_AT_CALL:
return "debug break slot at call";
case DEBUG_BREAK_SLOT_AT_TAIL_CALL:
return "debug break slot at tail call";
case CODE_AGE_SEQUENCE:
return "code age sequence";
case GENERATOR_CONTINUATION:
return "generator continuation";
case WASM_MEMORY_REFERENCE:
return "wasm memory reference";
case NUMBER_OF_MODES:
case PC_JUMP:
UNREACHABLE();
@ -929,7 +938,9 @@ void RelocInfo::Verify(Isolate* isolate) {
case DEBUG_BREAK_SLOT_AT_POSITION:
case DEBUG_BREAK_SLOT_AT_RETURN:
case DEBUG_BREAK_SLOT_AT_CALL:
case DEBUG_BREAK_SLOT_AT_TAIL_CALL:
case GENERATOR_CONTINUATION:
case WASM_MEMORY_REFERENCE:
case NONE32:
case NONE64:
break;
@ -1147,66 +1158,199 @@ ExternalReference ExternalReference::compute_output_frames_function(
Redirect(isolate, FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
}
static void f32_trunc_wrapper(float* param) { *param = truncf(*param); }
ExternalReference ExternalReference::wasm_f32_trunc(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(wasm::f32_trunc_wrapper)));
}
ExternalReference ExternalReference::wasm_f32_floor(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(wasm::f32_floor_wrapper)));
}
ExternalReference ExternalReference::wasm_f32_ceil(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(wasm::f32_ceil_wrapper)));
}
ExternalReference ExternalReference::wasm_f32_nearest_int(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(wasm::f32_nearest_int_wrapper)));
}
ExternalReference ExternalReference::wasm_f64_trunc(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(wasm::f64_trunc_wrapper)));
}
ExternalReference ExternalReference::wasm_f64_floor(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(wasm::f64_floor_wrapper)));
}
ExternalReference ExternalReference::wasm_f64_ceil(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(wasm::f64_ceil_wrapper)));
}
ExternalReference ExternalReference::wasm_f64_nearest_int(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(wasm::f64_nearest_int_wrapper)));
}
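All eight rounding entries redirect to C wrappers built on nearbyint/nearbyintf rather than round(): wasm's f32.nearest and f64.nearest round ties to even, which is what nearbyint does under the default FE_TONEAREST mode, while round() rounds ties away from zero. A quick standalone check of that distinction:

#include <cassert>
#include <cmath>

int main() {
  // Ties-to-even (wasm nearest semantics) vs. ties-away-from-zero.
  assert(nearbyintf(2.5f) == 2.0f);  // even neighbour wins
  assert(roundf(2.5f) == 3.0f);      // away from zero
  assert(nearbyint(0.5) == 0.0);
  assert(round(0.5) == 1.0);
}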
ExternalReference ExternalReference::wasm_int64_to_float32(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(wasm::int64_to_float32_wrapper)));
}
ExternalReference ExternalReference::wasm_uint64_to_float32(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(wasm::uint64_to_float32_wrapper)));
}
ExternalReference ExternalReference::wasm_int64_to_float64(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(wasm::int64_to_float64_wrapper)));
}
ExternalReference ExternalReference::wasm_uint64_to_float64(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(wasm::uint64_to_float64_wrapper)));
}
ExternalReference ExternalReference::wasm_float32_to_int64(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(wasm::float32_to_int64_wrapper)));
}
ExternalReference ExternalReference::wasm_float32_to_uint64(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(wasm::float32_to_uint64_wrapper)));
}
ExternalReference ExternalReference::wasm_float64_to_int64(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(wasm::float64_to_int64_wrapper)));
}
ExternalReference ExternalReference::wasm_float64_to_uint64(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(wasm::float64_to_uint64_wrapper)));
}
ExternalReference ExternalReference::f32_trunc_wrapper_function(
ExternalReference ExternalReference::wasm_int64_div(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(wasm::int64_div_wrapper)));
}
ExternalReference ExternalReference::wasm_int64_mod(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(wasm::int64_mod_wrapper)));
}
ExternalReference ExternalReference::wasm_uint64_div(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(wasm::uint64_div_wrapper)));
}
ExternalReference ExternalReference::wasm_uint64_mod(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(wasm::uint64_mod_wrapper)));
}
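64-bit integer division has no single-instruction form on 32-bit targets, so wasm redirects it to C. The wrapper bodies are not part of this diff; a plausible shape, by analogy with the in-place f64 wrappers below, would be the following -- the signatures are hypothetical, and the real declarations live in src/wasm/wasm-external-refs.h:

#include <cassert>
#include <cstdint>

// Hypothetical shapes, modeled on the in-place f64 wrappers in this
// file. The generated code is assumed to have already ruled out
// division by zero and INT64_MIN / -1, which are UB in C++.
static void int64_div_wrapper(int64_t* dst, int64_t* src) { *dst /= *src; }
static void int64_mod_wrapper(int64_t* dst, int64_t* src) { *dst %= *src; }

int main() {
  int64_t a = -7, b = 2;
  int64_div_wrapper(&a, &b);
  assert(a == -3);  // C++ integer division truncates toward zero
  a = -7;
  int64_mod_wrapper(&a, &b);
  assert(a == -1);  // remainder takes the sign of the dividend
}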
static void f64_acos_wrapper(double* param) { *param = std::acos(*param); }
ExternalReference ExternalReference::f64_acos_wrapper_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f32_trunc_wrapper)));
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_acos_wrapper)));
}
static void f32_floor_wrapper(float* param) { *param = floorf(*param); }
static void f64_asin_wrapper(double* param) { *param = std::asin(*param); }
ExternalReference ExternalReference::f32_floor_wrapper_function(
ExternalReference ExternalReference::f64_asin_wrapper_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f32_floor_wrapper)));
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_asin_wrapper)));
}
static void f32_ceil_wrapper(float* param) { *param = ceilf(*param); }
static void f64_atan_wrapper(double* param) { *param = std::atan(*param); }
ExternalReference ExternalReference::f32_ceil_wrapper_function(
ExternalReference ExternalReference::f64_atan_wrapper_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f32_ceil_wrapper)));
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_atan_wrapper)));
}
static void f32_nearest_int_wrapper(float* param) {
*param = nearbyintf(*param);
static void f64_cos_wrapper(double* param) { *param = std::cos(*param); }
ExternalReference ExternalReference::f64_cos_wrapper_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_cos_wrapper)));
}
ExternalReference ExternalReference::f32_nearest_int_wrapper_function(
static void f64_sin_wrapper(double* param) { *param = std::sin(*param); }
ExternalReference ExternalReference::f64_sin_wrapper_function(
Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(f32_nearest_int_wrapper)));
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_sin_wrapper)));
}
static void f64_trunc_wrapper(double* param) { *param = trunc(*param); }
static void f64_tan_wrapper(double* param) { *param = std::tan(*param); }
ExternalReference ExternalReference::f64_trunc_wrapper_function(
ExternalReference ExternalReference::f64_tan_wrapper_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_trunc_wrapper)));
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_tan_wrapper)));
}
static void f64_floor_wrapper(double* param) { *param = floor(*param); }
static void f64_exp_wrapper(double* param) { *param = std::exp(*param); }
ExternalReference ExternalReference::f64_floor_wrapper_function(
ExternalReference ExternalReference::f64_exp_wrapper_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_floor_wrapper)));
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_exp_wrapper)));
}
static void f64_ceil_wrapper(double* param) { *param = ceil(*param); }
static void f64_log_wrapper(double* param) { *param = std::log(*param); }
ExternalReference ExternalReference::f64_ceil_wrapper_function(
ExternalReference ExternalReference::f64_log_wrapper_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_ceil_wrapper)));
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_log_wrapper)));
}
static void f64_nearest_int_wrapper(double* param) {
*param = nearbyint(*param);
static void f64_pow_wrapper(double* param0, double* param1) {
*param0 = power_double_double(*param0, *param1);
}
ExternalReference ExternalReference::f64_nearest_int_wrapper_function(
ExternalReference ExternalReference::f64_pow_wrapper_function(
Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(f64_nearest_int_wrapper)));
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_pow_wrapper)));
}
static void f64_atan2_wrapper(double* param0, double* param1) {
double x = *param0;
double y = *param1;
// TODO(bradnelson): Find a good place to put this so it can be shared
// with the same code in src/runtime/runtime-math.cc.
static const double kPiDividedBy4 = 0.78539816339744830962;
if (std::isinf(x) && std::isinf(y)) {
// Make sure that the result in case of two infinite arguments
// is a multiple of Pi / 4. The sign of the result is determined
// by the first argument (x) and the sign of the second argument
// determines the multiplier: one or three.
int multiplier = (x < 0) ? -1 : 1;
if (y < 0) multiplier *= 3;
*param0 = multiplier * kPiDividedBy4;
} else {
*param0 = std::atan2(x, y);
}
}
ExternalReference ExternalReference::f64_atan2_wrapper_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_atan2_wrapper)));
}
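The manual special-casing reproduces what C99 annex F specifies for atan2 of two infinities, presumably to cover libm implementations that get it wrong. Spelled out against a conforming std::atan2 (note the wrapper passes param0 as the first argument, so x plays the role atan2's first operand normally does):

#include <cassert>
#include <cmath>
#include <limits>

int main() {
  const double inf = std::numeric_limits<double>::infinity();
  const double kPi4 = 0.78539816339744830962;  // same constant as above

  // What the special case computes: sign from the first argument,
  // multiplier 1 or 3 from the sign of the second.
  assert(std::fabs(std::atan2(inf, inf) - kPi4) < 1e-12);       //  pi/4
  assert(std::fabs(std::atan2(inf, -inf) - 3 * kPi4) < 1e-12);  //  3pi/4
  assert(std::fabs(std::atan2(-inf, -inf) + 3 * kPi4) < 1e-12); // -3pi/4
}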
static void f64_mod_wrapper(double* param0, double* param1) {
*param0 = modulo(*param0, *param1);
}
ExternalReference ExternalReference::f64_mod_wrapper_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_mod_wrapper)));
}
ExternalReference ExternalReference::log_enter_external_function(
@ -1262,12 +1406,6 @@ ExternalReference ExternalReference::address_of_regexp_stack_limit(
return ExternalReference(isolate->regexp_stack()->limit_address());
}
ExternalReference ExternalReference::new_space_start(Isolate* isolate) {
return ExternalReference(isolate->heap()->NewSpaceStart());
}
ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
return ExternalReference(isolate->heap()->store_buffer_top_address());
}
@ -1404,6 +1542,8 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS64
function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#elif V8_TARGET_ARCH_S390
function = FUNCTION_ADDR(RegExpMacroAssemblerS390::CheckStackGuardState);
#elif V8_TARGET_ARCH_X87
function = FUNCTION_ADDR(RegExpMacroAssemblerX87::CheckStackGuardState);
#else
@ -1489,6 +1629,10 @@ ExternalReference ExternalReference::cpu_features() {
return ExternalReference(&CpuFeatures::supported_);
}
ExternalReference ExternalReference::is_tail_call_elimination_enabled_address(
Isolate* isolate) {
return ExternalReference(isolate->is_tail_call_elimination_enabled_address());
}
ExternalReference ExternalReference::debug_is_active_address(
Isolate* isolate) {
@ -1559,34 +1703,12 @@ double power_double_int(double x, int y) {
double power_double_double(double x, double y) {
#if (defined(__MINGW64_VERSION_MAJOR) && \
(!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)) || \
defined(V8_OS_AIX)
// MinGW64 and AIX have a custom implementation of pow that handles certain
// special cases differently.
if ((x == 0.0 || std::isinf(x)) && y != 0.0 && std::isfinite(y)) {
double f;
double result = ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
/* retain sign if odd integer exponent */
return ((std::modf(y, &f) == 0.0) && (static_cast<int64_t>(y) & 1))
? copysign(result, x)
: result;
}
if (x == 2.0) {
int y_int = static_cast<int>(y);
if (y == y_int) {
return std::ldexp(1.0, y_int);
}
}
#endif
// The checks for special cases can be dropped on ia32 because they have
// already been done in generated code before bailing out here.
if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
return std::numeric_limits<double>::quiet_NaN();
}
return std::pow(x, y);
return Pow(x, y);
}
@ -1648,8 +1770,7 @@ std::ostream& operator<<(std::ostream& os, ExternalReference reference) {
return os;
}
void PositionsRecorder::RecordPosition(int pos) {
void AssemblerPositionsRecorder::RecordPosition(int pos) {
DCHECK(pos != RelocInfo::kNoPosition);
DCHECK(pos >= 0);
state_.current_position = pos;
@ -1659,8 +1780,7 @@ void PositionsRecorder::RecordPosition(int pos) {
pos));
}
void PositionsRecorder::RecordStatementPosition(int pos) {
void AssemblerPositionsRecorder::RecordStatementPosition(int pos) {
DCHECK(pos != RelocInfo::kNoPosition);
DCHECK(pos >= 0);
state_.current_statement_position = pos;
@ -1671,8 +1791,7 @@ void PositionsRecorder::RecordStatementPosition(int pos) {
pos));
}
bool PositionsRecorder::WriteRecordedPositions() {
bool AssemblerPositionsRecorder::WriteRecordedPositions() {
bool written = false;
// Write the statement position if it is different from what was written last

89
deps/v8/src/assembler.h

@ -38,6 +38,7 @@
#include "src/allocation.h"
#include "src/builtins.h"
#include "src/isolate.h"
#include "src/log.h"
#include "src/runtime/runtime.h"
namespace v8 {
@ -384,6 +385,8 @@ class RelocInfo {
DEBUGGER_STATEMENT, // Code target for the debugger statement.
EMBEDDED_OBJECT,
CELL,
// To relocate pointers into the wasm memory embedded in wasm code
WASM_MEMORY_REFERENCE,
// Everything after runtime_entry (inclusive) is not GC'ed.
RUNTIME_ENTRY,
@ -395,6 +398,7 @@ class RelocInfo {
DEBUG_BREAK_SLOT_AT_POSITION,
DEBUG_BREAK_SLOT_AT_RETURN,
DEBUG_BREAK_SLOT_AT_CALL,
DEBUG_BREAK_SLOT_AT_TAIL_CALL,
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
@ -426,7 +430,8 @@ class RelocInfo {
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
LAST_CODE_ENUM = DEBUGGER_STATEMENT,
LAST_GCED_ENUM = CELL,
LAST_GCED_ENUM = WASM_MEMORY_REFERENCE,
FIRST_SHAREABLE_RELOC_MODE = CELL,
};
STATIC_ASSERT(NUMBER_OF_MODES <= kBitsPerInt);
@ -487,7 +492,7 @@ class RelocInfo {
}
static inline bool IsDebugBreakSlot(Mode mode) {
return IsDebugBreakSlotAtPosition(mode) || IsDebugBreakSlotAtReturn(mode) ||
IsDebugBreakSlotAtCall(mode);
IsDebugBreakSlotAtCall(mode) || IsDebugBreakSlotAtTailCall(mode);
}
static inline bool IsDebugBreakSlotAtPosition(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_POSITION;
@ -498,6 +503,9 @@ class RelocInfo {
static inline bool IsDebugBreakSlotAtCall(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_CALL;
}
static inline bool IsDebugBreakSlotAtTailCall(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_TAIL_CALL;
}
static inline bool IsDebuggerStatement(Mode mode) {
return mode == DEBUGGER_STATEMENT;
}
@ -510,6 +518,9 @@ class RelocInfo {
static inline bool IsGeneratorContinuation(Mode mode) {
return mode == GENERATOR_CONTINUATION;
}
static inline bool IsWasmMemoryReference(Mode mode) {
return mode == WASM_MEMORY_REFERENCE;
}
static inline int ModeMask(Mode mode) { return 1 << mode; }
// Accessors
@ -570,6 +581,10 @@ class RelocInfo {
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED));
INLINE(Address wasm_memory_reference());
INLINE(void update_wasm_memory_reference(
Address old_base, Address new_base, size_t old_size, size_t new_size,
ICacheFlushMode icache_flush_mode = SKIP_ICACHE_FLUSH));
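Only the declaration appears here; the essential work of such an update is rebasing an embedded pointer while preserving its offset into the wasm memory, which is presumably why both the old and new base/size are passed. A sketch under that assumption (the bounds check is ours, not necessarily V8's):

#include <cassert>
#include <cstddef>
#include <cstdint>

using Address = uint8_t*;

// Rebase a pointer embedded in wasm code when the backing memory moves
// and/or is resized: keep the offset, switch the base.
Address UpdateWasmMemoryReference(Address ref, Address old_base,
                                  Address new_base, size_t old_size,
                                  size_t new_size) {
  size_t offset = static_cast<size_t>(ref - old_base);
  assert(offset <= old_size && offset <= new_size);  // assumed invariant
  return new_base + offset;
}

int main() {
  uint8_t old_mem[64], new_mem[128];
  Address moved = UpdateWasmMemoryReference(old_mem + 16, old_mem, new_mem,
                                            sizeof(old_mem), sizeof(new_mem));
  assert(moved == new_mem + 16);
}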
// Returns the address of the constant pool entry where the target address
// is held. This should only be called if IsInConstantPool returns true.
INLINE(Address constant_pool_entry_address());
@ -913,14 +928,38 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference new_deoptimizer_function(Isolate* isolate);
static ExternalReference compute_output_frames_function(Isolate* isolate);
static ExternalReference f32_trunc_wrapper_function(Isolate* isolate);
static ExternalReference f32_floor_wrapper_function(Isolate* isolate);
static ExternalReference f32_ceil_wrapper_function(Isolate* isolate);
static ExternalReference f32_nearest_int_wrapper_function(Isolate* isolate);
static ExternalReference f64_trunc_wrapper_function(Isolate* isolate);
static ExternalReference f64_floor_wrapper_function(Isolate* isolate);
static ExternalReference f64_ceil_wrapper_function(Isolate* isolate);
static ExternalReference f64_nearest_int_wrapper_function(Isolate* isolate);
static ExternalReference wasm_f32_trunc(Isolate* isolate);
static ExternalReference wasm_f32_floor(Isolate* isolate);
static ExternalReference wasm_f32_ceil(Isolate* isolate);
static ExternalReference wasm_f32_nearest_int(Isolate* isolate);
static ExternalReference wasm_f64_trunc(Isolate* isolate);
static ExternalReference wasm_f64_floor(Isolate* isolate);
static ExternalReference wasm_f64_ceil(Isolate* isolate);
static ExternalReference wasm_f64_nearest_int(Isolate* isolate);
static ExternalReference wasm_int64_to_float32(Isolate* isolate);
static ExternalReference wasm_uint64_to_float32(Isolate* isolate);
static ExternalReference wasm_int64_to_float64(Isolate* isolate);
static ExternalReference wasm_uint64_to_float64(Isolate* isolate);
static ExternalReference wasm_float32_to_int64(Isolate* isolate);
static ExternalReference wasm_float32_to_uint64(Isolate* isolate);
static ExternalReference wasm_float64_to_int64(Isolate* isolate);
static ExternalReference wasm_float64_to_uint64(Isolate* isolate);
static ExternalReference wasm_int64_div(Isolate* isolate);
static ExternalReference wasm_int64_mod(Isolate* isolate);
static ExternalReference wasm_uint64_div(Isolate* isolate);
static ExternalReference wasm_uint64_mod(Isolate* isolate);
static ExternalReference f64_acos_wrapper_function(Isolate* isolate);
static ExternalReference f64_asin_wrapper_function(Isolate* isolate);
static ExternalReference f64_atan_wrapper_function(Isolate* isolate);
static ExternalReference f64_cos_wrapper_function(Isolate* isolate);
static ExternalReference f64_sin_wrapper_function(Isolate* isolate);
static ExternalReference f64_tan_wrapper_function(Isolate* isolate);
static ExternalReference f64_exp_wrapper_function(Isolate* isolate);
static ExternalReference f64_log_wrapper_function(Isolate* isolate);
static ExternalReference f64_atan2_wrapper_function(Isolate* isolate);
static ExternalReference f64_pow_wrapper_function(Isolate* isolate);
static ExternalReference f64_mod_wrapper_function(Isolate* isolate);
// Log support.
static ExternalReference log_enter_external_function(Isolate* isolate);
@ -952,9 +991,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference address_of_regexp_stack_memory_size(
Isolate* isolate);
// Static variable Heap::NewSpaceStart()
static ExternalReference new_space_start(Isolate* isolate);
// Write barrier.
static ExternalReference store_buffer_top(Isolate* isolate);
@ -994,6 +1030,9 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference cpu_features();
static ExternalReference is_tail_call_elimination_enabled_address(
Isolate* isolate);
static ExternalReference debug_is_active_address(Isolate* isolate);
static ExternalReference debug_after_break_target_address(Isolate* isolate);
@ -1085,23 +1124,11 @@ struct PositionState {
int written_statement_position;
};
class PositionsRecorder BASE_EMBEDDED {
class AssemblerPositionsRecorder : public PositionsRecorder {
public:
explicit PositionsRecorder(Assembler* assembler)
: assembler_(assembler) {
jit_handler_data_ = NULL;
}
void AttachJITHandlerData(void* user_data) {
jit_handler_data_ = user_data;
}
explicit AssemblerPositionsRecorder(Assembler* assembler)
: assembler_(assembler) {}
void* DetachJITHandlerData() {
void* old_data = jit_handler_data_;
jit_handler_data_ = NULL;
return old_data;
}
// Set current position to pos.
void RecordPosition(int pos);
@ -1121,11 +1148,7 @@ class PositionsRecorder BASE_EMBEDDED {
Assembler* assembler_;
PositionState state_;
// Currently jit_handler_data_ is used to store JITHandler-specific data
// over the lifetime of a PositionsRecorder
void* jit_handler_data_;
DISALLOW_COPY_AND_ASSIGN(PositionsRecorder);
DISALLOW_COPY_AND_ASSIGN(AssemblerPositionsRecorder);
};

14
deps/v8/src/ast/ast-numbering.cc

@ -138,7 +138,6 @@ void AstNumberingVisitor::VisitNativeFunctionLiteral(
void AstNumberingVisitor::VisitDoExpression(DoExpression* node) {
IncrementNodeCount();
DisableCrankshaft(kDoExpression);
node->set_base_id(ReserveIdRange(DoExpression::num_ids()));
Visit(node->block());
Visit(node->result());
@ -267,10 +266,6 @@ void AstNumberingVisitor::VisitFunctionDeclaration(FunctionDeclaration* node) {
void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
IncrementNodeCount();
ReserveFeedbackSlots(node);
if (node->is_jsruntime()) {
// Don't try to optimize JS runtime calls because we bail out on them.
DisableOptimization(kCallToAJavaScriptRuntimeFunction);
}
node->set_base_id(ReserveIdRange(CallRuntime::num_ids()));
VisitArguments(node->arguments());
}
@ -504,9 +499,6 @@ void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
void AstNumberingVisitor::VisitCall(Call* node) {
IncrementNodeCount();
if (node->tail_call_mode() == TailCallMode::kAllow) {
DisableOptimization(kTailCall);
}
ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(Call::num_ids()));
Visit(node->expression());
@ -571,12 +563,6 @@ bool AstNumberingVisitor::Finish(FunctionLiteral* node) {
bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
Scope* scope = node->scope();
if (scope->HasIllegalRedeclaration()) {
Visit(scope->GetIllegalRedeclaration());
DisableOptimization(kFunctionWithIllegalRedeclaration);
return Finish(node);
}
if (scope->new_target_var()) DisableCrankshaft(kSuperReference);
if (scope->calls_eval()) DisableOptimization(kFunctionCallsEval);
if (scope->arguments() != NULL && !scope->arguments()->IsStackAllocated()) {

1
deps/v8/src/ast/ast-value-factory.h

@ -271,7 +271,6 @@ class AstValue : public ZoneObject {
F(throw, "throw") \
F(undefined, "undefined") \
F(use_asm, "use asm") \
F(use_strong, "use strong") \
F(use_strict, "use strict") \
F(value, "value")

55
deps/v8/src/ast/ast.cc

@ -36,17 +36,11 @@ AST_NODE_LIST(DECL_ACCEPT)
#ifdef DEBUG
void AstNode::Print() { Print(Isolate::Current()); }
void AstNode::Print(Isolate* isolate) {
AstPrinter::PrintOut(isolate, this);
}
void AstNode::PrettyPrint() { PrettyPrint(Isolate::Current()); }
void AstNode::PrettyPrint(Isolate* isolate) {
PrettyPrinter::PrintOut(isolate, this);
}
@ -68,8 +62,11 @@ bool Expression::IsNullLiteral() const {
return IsLiteral() && AsLiteral()->value()->IsNull();
}
bool Expression::IsUndefinedLiteral() const {
if (IsLiteral() && AsLiteral()->value()->IsUndefined()) {
return true;
}
bool Expression::IsUndefinedLiteral(Isolate* isolate) const {
const VariableProxy* var_proxy = AsVariableProxy();
if (var_proxy == NULL) return false;
Variable* var = var_proxy->var();
@ -154,15 +151,11 @@ static void AssignVectorSlots(Expression* expr, FeedbackVectorSpec* spec,
}
}
void ForEachStatement::AssignFeedbackVectorSlots(
Isolate* isolate, FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
// TODO(adamk): for-of statements do not make use of this feedback slot.
// The each_slot_ should be specific to ForInStatement, and this work moved
// there.
if (IsForOfStatement()) return;
void ForInStatement::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
AssignVectorSlots(each(), spec, &each_slot_);
for_in_feedback_slot_ = spec->AddGeneralSlot();
}
@ -475,18 +468,15 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
// much larger than the number of elements, creating an object
// literal with fast elements will be a waste of space.
uint32_t element_index = 0;
if (key->IsString()
&& Handle<String>::cast(key)->AsArrayIndex(&element_index)
&& element_index > max_element_index) {
max_element_index = element_index;
if (key->IsString() && String::cast(*key)->AsArrayIndex(&element_index)) {
max_element_index = Max(element_index, max_element_index);
elements++;
} else if (key->IsSmi()) {
int key_value = Smi::cast(*key)->value();
if (key_value > 0
&& static_cast<uint32_t>(key_value) > max_element_index) {
max_element_index = key_value;
}
key = isolate->factory()->NewNumberFromUint(element_index);
} else if (key->ToArrayIndex(&element_index)) {
max_element_index = Max(element_index, max_element_index);
elements++;
} else if (key->IsNumber()) {
key = isolate->factory()->NumberToString(key);
}
// Add name, value pair to the fixed array.
@ -513,7 +503,7 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
// Allocate a fixed array to hold all the object literals.
Handle<JSArray> array = isolate->factory()->NewJSArray(
FAST_HOLEY_SMI_ELEMENTS, constants_length, constants_length,
Strength::WEAK, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
// Fill in the literals.
bool is_simple = true;
@ -678,24 +668,21 @@ static bool IsVoidOfLiteral(Expression* expr) {
static bool MatchLiteralCompareUndefined(Expression* left,
Token::Value op,
Expression* right,
Expression** expr,
Isolate* isolate) {
Expression** expr) {
if (IsVoidOfLiteral(left) && Token::IsEqualityOp(op)) {
*expr = right;
return true;
}
if (left->IsUndefinedLiteral(isolate) && Token::IsEqualityOp(op)) {
if (left->IsUndefinedLiteral() && Token::IsEqualityOp(op)) {
*expr = right;
return true;
}
return false;
}
bool CompareOperation::IsLiteralCompareUndefined(
Expression** expr, Isolate* isolate) {
return MatchLiteralCompareUndefined(left_, op_, right_, expr, isolate) ||
MatchLiteralCompareUndefined(right_, op_, left_, expr, isolate);
bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
return MatchLiteralCompareUndefined(left_, op_, right_, expr) ||
MatchLiteralCompareUndefined(right_, op_, left_, expr);
}

136
deps/v8/src/ast/ast.h

@ -198,9 +198,7 @@ class AstNode: public ZoneObject {
#ifdef DEBUG
void PrettyPrint(Isolate* isolate);
void PrettyPrint();
void Print(Isolate* isolate);
void Print();
#endif // DEBUG
// Type testing & conversion functions overridden by concrete subclasses.
@ -332,8 +330,9 @@ class Expression : public AstNode {
// True iff the expression is the null literal.
bool IsNullLiteral() const;
// True if we can prove that the expression is the undefined literal.
bool IsUndefinedLiteral(Isolate* isolate) const;
// True if we can prove that the expression is the undefined literal. Note
// that this also checks for loads of the global "undefined" variable.
bool IsUndefinedLiteral() const;
// True iff the expression is a valid target for an assignment.
bool IsValidReferenceExpressionOrThis() const;
@ -792,10 +791,6 @@ class ForEachStatement : public IterationStatement {
void set_each(Expression* e) { each_ = e; }
void set_subject(Expression* e) { subject_ = e; }
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) override;
FeedbackVectorSlot EachFeedbackSlot() const { return each_slot_; }
static const char* VisitModeString(VisitMode mode) {
return mode == ITERATE ? "for-of" : "for-in";
}
@ -807,7 +802,6 @@ class ForEachStatement : public IterationStatement {
private:
Expression* each_;
Expression* subject_;
FeedbackVectorSlot each_slot_;
};
@ -821,11 +815,8 @@ class ForInStatement final : public ForEachStatement {
// Type feedback information.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) override {
ForEachStatement::AssignFeedbackVectorSlots(isolate, spec, cache);
for_in_feedback_slot_ = spec->AddGeneralSlot();
}
FeedbackVectorSlotCache* cache) override;
FeedbackVectorSlot EachFeedbackSlot() const { return each_slot_; }
FeedbackVectorSlot ForInFeedbackSlot() {
DCHECK(!for_in_feedback_slot_.IsInvalid());
return for_in_feedback_slot_;
@ -854,6 +845,7 @@ class ForInStatement final : public ForEachStatement {
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
ForInType for_in_type_;
FeedbackVectorSlot each_slot_;
FeedbackVectorSlot for_in_feedback_slot_;
};
@ -1191,18 +1183,33 @@ class TryCatchStatement final : public TryStatement {
Block* catch_block() const { return catch_block_; }
void set_catch_block(Block* b) { catch_block_ = b; }
// The clear_pending_message flag indicates whether or not to clear the
// isolate's pending exception message before executing the catch_block. In
// the normal use case, this flag is always on because the message object
// is not needed anymore when entering the catch block and should not be kept
// alive.
// The use case where the flag is off is when the catch block is guaranteed to
// rethrow the caught exception (using %ReThrow), which reuses the pending
// message instead of generating a new one.
// (When the catch block doesn't rethrow but is guaranteed to perform an
// ordinary throw, not clearing the old message is safe but not very useful.)
bool clear_pending_message() { return clear_pending_message_; }
protected:
TryCatchStatement(Zone* zone, Block* try_block, Scope* scope,
Variable* variable, Block* catch_block, int pos)
Variable* variable, Block* catch_block,
bool clear_pending_message, int pos)
: TryStatement(zone, try_block, pos),
scope_(scope),
variable_(variable),
catch_block_(catch_block) {}
catch_block_(catch_block),
clear_pending_message_(clear_pending_message) {}
private:
Scope* scope_;
Variable* variable_;
Block* catch_block_;
bool clear_pending_message_;
};
@ -1339,14 +1346,11 @@ class MaterializedLiteral : public Expression {
return depth_;
}
bool is_strong() const { return is_strong_; }
protected:
MaterializedLiteral(Zone* zone, int literal_index, bool is_strong, int pos)
MaterializedLiteral(Zone* zone, int literal_index, int pos)
: Expression(zone, pos),
literal_index_(literal_index),
is_simple_(false),
is_strong_(is_strong),
depth_(0) {}
// A materialized literal is simple if the values consist of only
@ -1375,7 +1379,6 @@ class MaterializedLiteral : public Expression {
private:
int literal_index_;
bool is_simple_;
bool is_strong_;
int depth_;
friend class AstLiteralReindexer;
@ -1463,7 +1466,6 @@ class ObjectLiteral final : public MaterializedLiteral {
ZoneList<Property*>* properties() const { return properties_; }
bool fast_elements() const { return fast_elements_; }
bool may_store_doubles() const { return may_store_doubles_; }
bool has_function() const { return has_function_; }
bool has_elements() const { return has_elements_; }
bool has_shallow_properties() const {
return depth() == 1 && !has_elements() && !may_store_doubles();
@ -1483,26 +1485,20 @@ class ObjectLiteral final : public MaterializedLiteral {
// Assemble bitfield of flags for the CreateObjectLiteral helper.
int ComputeFlags(bool disable_mementos = false) const {
int flags = fast_elements() ? kFastElements : kNoFlags;
flags |= has_function() ? kHasFunction : kNoFlags;
if (has_shallow_properties()) {
flags |= kShallowProperties;
}
if (disable_mementos) {
flags |= kDisableMementos;
}
if (is_strong()) {
flags |= kIsStrong;
}
return flags;
}
enum Flags {
kNoFlags = 0,
kFastElements = 1,
kHasFunction = 1 << 1,
kShallowProperties = 1 << 2,
kDisableMementos = 1 << 3,
kIsStrong = 1 << 4
kShallowProperties = 1 << 1,
kDisableMementos = 1 << 2
};
struct Accessors: public ZoneObject {
@ -1534,15 +1530,13 @@ class ObjectLiteral final : public MaterializedLiteral {
protected:
ObjectLiteral(Zone* zone, ZoneList<Property*>* properties, int literal_index,
int boilerplate_properties, bool has_function, bool is_strong,
int pos)
: MaterializedLiteral(zone, literal_index, is_strong, pos),
int boilerplate_properties, int pos)
: MaterializedLiteral(zone, literal_index, pos),
properties_(properties),
boilerplate_properties_(boilerplate_properties),
fast_elements_(false),
has_elements_(false),
may_store_doubles_(false),
has_function_(has_function) {}
may_store_doubles_(false) {}
static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
private:
@ -1553,7 +1547,6 @@ class ObjectLiteral final : public MaterializedLiteral {
bool fast_elements_;
bool has_elements_;
bool may_store_doubles_;
bool has_function_;
FeedbackVectorSlot slot_;
};
@ -1589,8 +1582,8 @@ class RegExpLiteral final : public MaterializedLiteral {
protected:
RegExpLiteral(Zone* zone, const AstRawString* pattern, int flags,
int literal_index, bool is_strong, int pos)
: MaterializedLiteral(zone, literal_index, is_strong, pos),
int literal_index, int pos)
: MaterializedLiteral(zone, literal_index, pos),
pattern_(pattern),
flags_(flags) {
set_depth(1);
@ -1635,9 +1628,6 @@ class ArrayLiteral final : public MaterializedLiteral {
if (disable_mementos) {
flags |= kDisableMementos;
}
if (is_strong()) {
flags |= kIsStrong;
}
return flags;
}
@ -1657,8 +1647,7 @@ class ArrayLiteral final : public MaterializedLiteral {
enum Flags {
kNoFlags = 0,
kShallowElements = 1,
kDisableMementos = 1 << 1,
kIsStrong = 1 << 2
kDisableMementos = 1 << 1
};
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
@ -1667,9 +1656,8 @@ class ArrayLiteral final : public MaterializedLiteral {
protected:
ArrayLiteral(Zone* zone, ZoneList<Expression*>* values,
int first_spread_index, int literal_index, bool is_strong,
int pos)
: MaterializedLiteral(zone, literal_index, is_strong, pos),
int first_spread_index, int literal_index, int pos)
: MaterializedLiteral(zone, literal_index, pos),
values_(values),
first_spread_index_(first_spread_index) {}
static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
@ -2313,7 +2301,7 @@ class CompareOperation final : public Expression {
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
bool IsLiteralCompareUndefined(Expression** expr, Isolate* isolate);
bool IsLiteralCompareUndefined(Expression** expr);
bool IsLiteralCompareNull(Expression** expr);
protected:
@ -2529,37 +2517,29 @@ class RewritableExpression : public Expression {
Expression* expr_;
};
// Our Yield is different from the JS yield in that it "returns" its argument as
// is, without wrapping it in an iterator result object. Such wrapping, if
// desired, must be done beforehand (see the parser).
class Yield final : public Expression {
public:
DECLARE_NODE_TYPE(Yield)
enum Kind {
kInitial, // The initial yield that returns the unboxed generator object.
kSuspend, // A normal yield: { value: EXPRESSION, done: false }
kDelegating, // A yield*.
kFinal // A return: { value: EXPRESSION, done: true }
};
Expression* generator_object() const { return generator_object_; }
Expression* expression() const { return expression_; }
Kind yield_kind() const { return yield_kind_; }
void set_generator_object(Expression* e) { generator_object_ = e; }
void set_expression(Expression* e) { expression_ = e; }
protected:
Yield(Zone* zone, Expression* generator_object, Expression* expression,
Kind yield_kind, int pos)
int pos)
: Expression(zone, pos),
generator_object_(generator_object),
expression_(expression),
yield_kind_(yield_kind) {}
expression_(expression) {}
private:
Expression* generator_object_;
Expression* expression_;
Kind yield_kind_;
};
@ -3169,8 +3149,17 @@ class AstNodeFactory final BASE_EMBEDDED {
TryCatchStatement* NewTryCatchStatement(Block* try_block, Scope* scope,
Variable* variable,
Block* catch_block, int pos) {
return new (local_zone_) TryCatchStatement(local_zone_, try_block, scope,
variable, catch_block, pos);
return new (local_zone_) TryCatchStatement(
local_zone_, try_block, scope, variable, catch_block, true, pos);
}
TryCatchStatement* NewTryCatchStatementForReThrow(Block* try_block,
Scope* scope,
Variable* variable,
Block* catch_block,
int pos) {
return new (local_zone_) TryCatchStatement(
local_zone_, try_block, scope, variable, catch_block, false, pos);
}
TryFinallyStatement* NewTryFinallyStatement(Block* try_block,
@ -3243,12 +3232,9 @@ class AstNodeFactory final BASE_EMBEDDED {
ZoneList<ObjectLiteral::Property*>* properties,
int literal_index,
int boilerplate_properties,
bool has_function,
bool is_strong,
int pos) {
return new (local_zone_)
ObjectLiteral(local_zone_, properties, literal_index,
boilerplate_properties, has_function, is_strong, pos);
return new (local_zone_) ObjectLiteral(
local_zone_, properties, literal_index, boilerplate_properties, pos);
}
ObjectLiteral::Property* NewObjectLiteralProperty(
@ -3267,24 +3253,23 @@ class AstNodeFactory final BASE_EMBEDDED {
}
RegExpLiteral* NewRegExpLiteral(const AstRawString* pattern, int flags,
int literal_index, bool is_strong, int pos) {
return new (local_zone_) RegExpLiteral(local_zone_, pattern, flags,
literal_index, is_strong, pos);
int literal_index, int pos) {
return new (local_zone_)
RegExpLiteral(local_zone_, pattern, flags, literal_index, pos);
}
ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
int literal_index,
bool is_strong,
int pos) {
return new (local_zone_)
ArrayLiteral(local_zone_, values, -1, literal_index, is_strong, pos);
ArrayLiteral(local_zone_, values, -1, literal_index, pos);
}
ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
int first_spread_index, int literal_index,
bool is_strong, int pos) {
int pos) {
return new (local_zone_) ArrayLiteral(
local_zone_, values, first_spread_index, literal_index, is_strong, pos);
local_zone_, values, first_spread_index, literal_index, pos);
}
VariableProxy* NewVariableProxy(Variable* var,
@ -3399,11 +3384,10 @@ class AstNodeFactory final BASE_EMBEDDED {
Yield* NewYield(Expression *generator_object,
Expression* expression,
Yield::Kind yield_kind,
int pos) {
if (!expression) expression = NewUndefinedLiteral(pos);
return new (local_zone_)
Yield(local_zone_, generator_object, expression, yield_kind, pos);
Yield(local_zone_, generator_object, expression, pos);
}
Throw* NewThrow(Expression* exception, int pos) {

7
deps/v8/src/ast/prettyprinter.cc

@ -471,7 +471,7 @@ static int FormatSlotNode(Vector<char>* buf, Expression* node,
const char* node_name, FeedbackVectorSlot slot) {
int pos = SNPrintF(*buf, "%s", node_name);
if (!slot.IsInvalid()) {
pos = SNPrintF(*buf + pos, " Slot(%d)", slot.ToInt());
pos += SNPrintF(*buf + pos, " Slot(%d)", slot.ToInt());
}
return pos;
}
@ -1563,6 +1563,7 @@ void AstPrinter::VisitVariableProxy(VariableProxy* node) {
Variable* var = node->var();
switch (var->location()) {
case VariableLocation::UNALLOCATED:
SNPrintF(buf + pos, " unallocated");
break;
case VariableLocation::PARAMETER:
SNPrintF(buf + pos, " parameter[%d]", var->index());
@ -1593,9 +1594,7 @@ void AstPrinter::VisitAssignment(Assignment* node) {
void AstPrinter::VisitYield(Yield* node) {
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "YIELD (kind %d)", node->yield_kind());
IndentedScope indent(this, buf.start(), node->position());
IndentedScope indent(this, "YIELD", node->position());
Visit(node->expression());
}

68
deps/v8/src/ast/scopes.cc

@ -100,7 +100,6 @@ Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type,
function_kind);
// The outermost scope must be a script scope.
DCHECK(scope_type == SCRIPT_SCOPE || outer_scope != NULL);
DCHECK(!HasIllegalRedeclaration());
}
Scope::Scope(Zone* zone, Scope* inner_scope, ScopeType scope_type,
@ -169,9 +168,7 @@ void Scope::SetDefaults(ScopeType scope_type, Scope* outer_scope,
function_ = nullptr;
arguments_ = nullptr;
this_function_ = nullptr;
illegal_redecl_ = nullptr;
scope_inside_with_ = false;
scope_contains_with_ = false;
scope_calls_eval_ = false;
scope_uses_arguments_ = false;
scope_uses_super_property_ = false;
@ -210,15 +207,14 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
// Reconstruct the outer scope chain from a closure's context chain.
Scope* current_scope = NULL;
Scope* innermost_scope = NULL;
bool contains_with = false;
while (!context->IsNativeContext()) {
if (context->IsWithContext()) {
if (context->IsWithContext() || context->IsDebugEvaluateContext()) {
// For scope analysis, debug-evaluate is equivalent to a with scope.
Scope* with_scope = new (zone)
Scope(zone, current_scope, WITH_SCOPE, Handle<ScopeInfo>::null(),
script_scope->ast_value_factory_);
current_scope = with_scope;
// All the inner scopes are inside a with.
contains_with = true;
for (Scope* s = innermost_scope; s != NULL; s = s->outer_scope()) {
s->scope_inside_with_ = true;
}
@ -252,13 +248,7 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
script_scope->ast_value_factory_->GetString(Handle<String>(name)),
script_scope->ast_value_factory_);
}
if (contains_with) current_scope->RecordWithStatement();
if (innermost_scope == NULL) innermost_scope = current_scope;
// Forget about a with when we move to a context for a different function.
if (context->previous()->closure() != context->closure()) {
contains_with = false;
}
context = context->previous();
}
@ -392,7 +382,6 @@ void Scope::PropagateUsageFlagsToScope(Scope* other) {
if (uses_arguments()) other->RecordArgumentsUsage();
if (uses_super_property()) other->RecordSuperPropertyUsage();
if (calls_eval()) other->RecordEvalCall();
if (scope_contains_with_) other->RecordWithStatement();
}
@ -583,21 +572,6 @@ void Scope::AddDeclaration(Declaration* declaration) {
}
void Scope::SetIllegalRedeclaration(Expression* expression) {
// Record only the first illegal redeclaration.
if (!HasIllegalRedeclaration()) {
illegal_redecl_ = expression;
}
DCHECK(HasIllegalRedeclaration());
}
Expression* Scope::GetIllegalRedeclaration() {
DCHECK(HasIllegalRedeclaration());
return illegal_redecl_;
}
Declaration* Scope::CheckConflictingVarDeclarations() {
int length = decls_.length();
for (int i = 0; i < length; i++) {
@ -817,25 +791,7 @@ Handle<ScopeInfo> Scope::GetScopeInfo(Isolate* isolate) {
return scope_info_;
}
void Scope::GetNestedScopeChain(Isolate* isolate,
List<Handle<ScopeInfo> >* chain, int position) {
if (!is_eval_scope()) chain->Add(Handle<ScopeInfo>(GetScopeInfo(isolate)));
for (int i = 0; i < inner_scopes_.length(); i++) {
Scope* scope = inner_scopes_[i];
int beg_pos = scope->start_position();
int end_pos = scope->end_position();
DCHECK(beg_pos >= 0 && end_pos >= 0);
if (beg_pos <= position && position < end_pos) {
scope->GetNestedScopeChain(isolate, chain, position);
return;
}
}
}
void Scope::CollectNonLocals(HashMap* non_locals) {
Handle<StringSet> Scope::CollectNonLocals(Handle<StringSet> non_locals) {
// Collect non-local variables referenced in the scope.
// TODO(yangguo): store non-local variables explicitly if we can no longer
// rely on unresolved_ to find them.
@ -843,13 +799,12 @@ void Scope::CollectNonLocals(HashMap* non_locals) {
VariableProxy* proxy = unresolved_[i];
if (proxy->is_resolved() && proxy->var()->IsStackAllocated()) continue;
Handle<String> name = proxy->name();
void* key = reinterpret_cast<void*>(name.location());
HashMap::Entry* entry = non_locals->LookupOrInsert(key, name->Hash());
entry->value = key;
non_locals = StringSet::Add(non_locals, name);
}
for (int i = 0; i < inner_scopes_.length(); i++) {
inner_scopes_[i]->CollectNonLocals(non_locals);
non_locals = inner_scopes_[i]->CollectNonLocals(non_locals);
}
return non_locals;
}
@ -999,7 +954,6 @@ void Scope::Print(int n) {
Indent(n1, "// strict mode scope\n");
}
if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
if (scope_contains_with_) Indent(n1, "// scope contains 'with'\n");
if (scope_calls_eval_) Indent(n1, "// scope calls 'eval'\n");
if (scope_uses_arguments_) Indent(n1, "// scope uses 'arguments'\n");
if (scope_uses_super_property_)
@ -1271,8 +1225,8 @@ bool Scope::MustAllocate(Variable* var) {
// visible name.
if ((var->is_this() || !var->raw_name()->IsEmpty()) &&
(var->has_forced_context_allocation() || scope_calls_eval_ ||
inner_scope_calls_eval_ || scope_contains_with_ || is_catch_scope() ||
is_block_scope() || is_module_scope() || is_script_scope())) {
inner_scope_calls_eval_ || is_catch_scope() || is_block_scope() ||
is_module_scope() || is_script_scope())) {
var->set_is_used();
if (scope_calls_eval_ || inner_scope_calls_eval_) var->set_maybe_assigned();
}
@ -1295,10 +1249,8 @@ bool Scope::MustAllocateInContext(Variable* var) {
if (var->mode() == TEMPORARY) return false;
if (is_catch_scope() || is_module_scope()) return true;
if (is_script_scope() && IsLexicalVariableMode(var->mode())) return true;
return var->has_forced_context_allocation() ||
scope_calls_eval_ ||
inner_scope_calls_eval_ ||
scope_contains_with_;
return var->has_forced_context_allocation() || scope_calls_eval_ ||
inner_scope_calls_eval_;
}

32
deps/v8/src/ast/scopes.h

@ -224,20 +224,7 @@ class Scope: public ZoneObject {
// ---------------------------------------------------------------------------
// Illegal redeclaration support.
// Set an expression node that will be executed when the scope is
// entered. We only keep track of one illegal redeclaration node per
// scope - the first one - so if you try to set it multiple times
// the additional requests will be silently ignored.
void SetIllegalRedeclaration(Expression* expression);
// Retrieve the illegal redeclaration expression. Do not call if the
// scope doesn't have an illegal redeclaration node.
Expression* GetIllegalRedeclaration();
// Check if the scope has (at least) one illegal redeclaration.
bool HasIllegalRedeclaration() const { return illegal_redecl_ != NULL; }
// For harmony block scoping mode: Check if the scope has conflicting var
// Check if the scope has conflicting var
// declarations, i.e. a var declaration that has been hoisted from a nested
// scope over a let binding of the same name.
Declaration* CheckConflictingVarDeclarations();
@ -245,9 +232,6 @@ class Scope: public ZoneObject {
// ---------------------------------------------------------------------------
// Scope-specific info.
// Inform the scope that the corresponding code contains a with statement.
void RecordWithStatement() { scope_contains_with_ = true; }
// Inform the scope that the corresponding code contains an eval call.
void RecordEvalCall() { scope_calls_eval_ = true; }
@ -556,14 +540,7 @@ class Scope: public ZoneObject {
Handle<ScopeInfo> GetScopeInfo(Isolate* isolate);
// Get the chain of nested scopes within this scope for the source statement
// position. The scopes will be added to the list from the outermost scope to
// the innermost scope. Only nested block, catch or with scopes are tracked
// and will be returned, but no inner function scopes.
void GetNestedScopeChain(Isolate* isolate, List<Handle<ScopeInfo> >* chain,
int statement_position);
void CollectNonLocals(HashMap* non_locals);
Handle<StringSet> CollectNonLocals(Handle<StringSet> non_locals);
// ---------------------------------------------------------------------------
// Strict mode support.
@ -646,15 +623,10 @@ class Scope: public ZoneObject {
// Map of function names to lists of functions defined in sloppy blocks
SloppyBlockFunctionMap sloppy_block_function_map_;
// Illegal redeclaration.
Expression* illegal_redecl_;
// Scope-specific information computed during parsing.
//
// This scope is inside a 'with' of some outer scope.
bool scope_inside_with_;
// This scope contains a 'with' statement.
bool scope_contains_with_;
// This scope or a nested catch scope or with scope contain an 'eval' call. At
// the 'eval' call site this scope is the declaration scope.
bool scope_calls_eval_;

5
deps/v8/src/background-parsing-task.cc

@ -21,7 +21,7 @@ BackgroundParsingTask::BackgroundParsingTask(
// Prepare the data for the internalization phase and compilation phase, which
// will happen in the main thread after parsing.
Zone* zone = new Zone();
Zone* zone = new Zone(isolate->allocator());
ParseInfo* info = new ParseInfo(zone);
source->zone.Reset(zone);
source->info.Reset(info);
@ -32,7 +32,8 @@ BackgroundParsingTask::BackgroundParsingTask(
info->set_global();
info->set_unicode_cache(&source_->unicode_cache);
info->set_compile_options(options);
info->set_allow_lazy_parsing(true);
// Parse eagerly with ignition since we will compile eagerly.
info->set_allow_lazy_parsing(!(i::FLAG_ignition && i::FLAG_ignition_eager));
}

16
deps/v8/src/bailout-reason.h

@ -14,7 +14,6 @@ namespace internal {
\
V(k32BitValueInRegisterIsNotZeroExtended, \
"32 bit value in register is not zero-extended") \
V(kAlignmentMarkerExpected, "Alignment marker expected") \
V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned") \
V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
V(kArgumentsObjectValueInATestContext, \
@ -37,8 +36,6 @@ namespace internal {
V(kBailoutWasNotPrepared, "Bailout was not prepared") \
V(kBothRegistersWereSmisInSelectNonSmi, \
"Both registers were smis in SelectNonSmi") \
V(kCallToAJavaScriptRuntimeFunction, \
"Call to a JavaScript runtime function") \
V(kClassLiteral, "Class literal") \
V(kCodeGenerationFailed, "Code generation failed") \
V(kCodeObjectNotProperlyPatched, "Code object not properly patched") \
@ -57,7 +54,8 @@ namespace internal {
V(kDestinationOfCopyNotAligned, "Destination of copy not aligned") \
V(kDontDeleteCellsCannotContainTheHole, \
"DontDelete cells can't contain the hole") \
V(kDoExpression, "Do expression encountered") \
V(kDoExpressionUnmodelable, \
"Encountered a do-expression with unmodelable control statements") \
V(kDoPushArgumentNotImplementedForDoubleType, \
"DoPushArgument not implemented for double type") \
V(kEliminatedBoundsCheckFailed, "Eliminated bounds check failed") \
@ -84,11 +82,11 @@ namespace internal {
V(kFrameIsExpectedToBeAligned, "Frame is expected to be aligned") \
V(kFunctionBeingDebugged, "Function is being debugged") \
V(kFunctionCallsEval, "Function calls eval") \
V(kFunctionWithIllegalRedeclaration, "Function with illegal redeclaration") \
V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \
"The function_data field should be a BytecodeArray on interpreter entry") \
V(kGeneratedCodeIsTooLarge, "Generated code is too large") \
V(kGeneratorFailedToResume, "Generator failed to resume") \
V(kGeneratorResumeMethod, "Generator resume method is being called") \
V(kGenerator, "Generator") \
V(kGlobalFunctionsMustHaveInitialMap, \
"Global functions must have initial map") \
@ -103,6 +101,7 @@ namespace internal {
V(kInputStringTooLong, "Input string too long") \
V(kInteger32ToSmiFieldWritingToNonSmiLocation, \
"Integer32ToSmiField writing to non-smi location") \
V(kInvalidBytecode, "Invalid bytecode") \
V(kInvalidCaptureReferenced, "Invalid capture referenced") \
V(kInvalidElementsKindForInternalArrayOrInternalPackedArray, \
"Invalid ElementsKind for InternalArray or InternalPackedArray") \
@ -140,6 +139,7 @@ namespace internal {
V(kObjectFoundInSmiOnlyArray, "Object found in smi-only array") \
V(kObjectLiteralWithComplexProperty, "Object literal with complex property") \
V(kOffsetOutOfRange, "Offset out of range") \
V(kOperandIsANumber, "Operand is a number") \
V(kOperandIsASmiAndNotABoundFunction, \
"Operand is a smi and not a bound function") \
V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
@ -230,6 +230,8 @@ namespace internal {
V(kUnexpectedNegativeValue, "Unexpected negative value") \
V(kUnexpectedNumberOfPreAllocatedPropertyFields, \
"Unexpected number of pre-allocated property fields") \
V(kUnexpectedFunctionIDForInvokeIntrinsic, \
"Unexpected runtime function id for the InvokeIntrinsic bytecode") \
V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
V(kUnexpectedSmi, "Unexpected smi value") \
V(kUnexpectedStackDepth, "Unexpected operand stack depth in full-codegen") \
@ -249,7 +251,7 @@ namespace internal {
V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare") \
V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments") \
V(kUnsupportedPhiUseOfConstVariable, \
"Unsupported phi use of const variable") \
"Unsupported phi use of const or let variable") \
V(kUnexpectedReturnFromBytecodeHandler, \
"Unexpectedly returned from a bytecode handler") \
V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw") \
@ -262,6 +264,8 @@ namespace internal {
V(kWrongFunctionContext, "Wrong context passed to function") \
V(kWrongAddressOrValuePassedToRecordWrite, \
"Wrong address or value passed to RecordWrite") \
V(kWrongArgumentCountForInvokeIntrinsic, \
"Wrong number of arguments for intrinsic") \
V(kShouldNotDirectlyEnterOsrFunction, \
"Should not directly enter OSR-compiled function") \
V(kYield, "Yield")

33
deps/v8/src/base/accounting-allocator.cc

@ -0,0 +1,33 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/base/accounting-allocator.h"
#include <cstdlib>
#if V8_LIBC_BIONIC
#include <malloc.h> // NOLINT
#endif
namespace v8 {
namespace base {
void* AccountingAllocator::Allocate(size_t bytes) {
void* memory = malloc(bytes);
if (memory) NoBarrier_AtomicIncrement(&current_memory_usage_, bytes);
return memory;
}
void AccountingAllocator::Free(void* memory, size_t bytes) {
free(memory);
NoBarrier_AtomicIncrement(&current_memory_usage_,
-static_cast<AtomicWord>(bytes));
}
size_t AccountingAllocator::GetCurrentMemoryUsage() const {
return NoBarrier_Load(&current_memory_usage_);
}
} // namespace base
} // namespace v8

34
deps/v8/src/base/accounting-allocator.h

@ -0,0 +1,34 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASE_ACCOUNTING_ALLOCATOR_H_
#define V8_BASE_ACCOUNTING_ALLOCATOR_H_
#include "src/base/atomicops.h"
#include "src/base/macros.h"
namespace v8 {
namespace base {
class AccountingAllocator final {
public:
AccountingAllocator() = default;
~AccountingAllocator() = default;
// Returns nullptr on failed allocation.
void* Allocate(size_t bytes);
void Free(void* memory, size_t bytes);
size_t GetCurrentMemoryUsage() const;
private:
AtomicWord current_memory_usage_ = 0;
DISALLOW_COPY_AND_ASSIGN(AccountingAllocator);
};
} // namespace base
} // namespace v8
#endif // V8_BASE_ACCOUNTING_ALLOCATOR_H_
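As a side illustration of the accounting technique above, here is a minimal standalone sketch using std::atomic in place of V8's NoBarrier_* atomics. CountingAllocator and its members are illustrative names, not V8 API; the point is the same malloc-plus-relaxed-counter shape.

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Sketch only: counts live allocation bytes with a relaxed atomic,
// mirroring AccountingAllocator::Allocate/Free above.
class CountingAllocator {
 public:
  void* Allocate(size_t bytes) {
    void* memory = std::malloc(bytes);
    // Only account for the bytes if the allocation succeeded.
    if (memory) usage_.fetch_add(bytes, std::memory_order_relaxed);
    return memory;
  }
  void Free(void* memory, size_t bytes) {
    std::free(memory);
    usage_.fetch_sub(bytes, std::memory_order_relaxed);
  }
  size_t CurrentUsage() const {
    return usage_.load(std::memory_order_relaxed);
  }

 private:
  std::atomic<size_t> usage_{0};
};

int main() {
  CountingAllocator allocator;
  void* block = allocator.Allocate(1024);
  std::printf("in use: %zu\n", allocator.CurrentUsage());  // 1024
  allocator.Free(block, 1024);
  std::printf("in use: %zu\n", allocator.CurrentUsage());  // 0
}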

5
deps/v8/src/base/atomicops_internals_arm_gcc.h

@ -44,14 +44,15 @@ namespace base {
//
inline void MemoryBarrier() {
#if defined(__linux__) || defined(__ANDROID__)
#if defined(__ANDROID__)
// Note: This is a function call, which is also an implicit compiler barrier.
typedef void (*KernelMemoryBarrierFunc)();
((KernelMemoryBarrierFunc)0xffff0fa0)();
#elif defined(__QNXNTO__)
__cpu_membarrier();
#else
#error MemoryBarrier() is not implemented on this platform.
// Fall back to the GCC built-in barrier.
__sync_synchronize();
#endif
}
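To see what the __sync_synchronize() fallback buys, here is a hedged two-thread sketch of publish/consume ordering. It is illustrative, not V8 code: the volatile flag stands in for what production code would make a std::atomic, and the builtin is the same full barrier the fallback above emits.

#include <cstdio>
#include <thread>

static int data = 0;
static volatile int ready = 0;

void Writer() {
  data = 42;
  __sync_synchronize();  // full barrier: publish `data` before `ready`
  ready = 1;
}

void Reader() {
  while (!ready) {
    // spin until the writer publishes
  }
  __sync_synchronize();  // matching barrier on the consuming side
  std::printf("%d\n", data);  // expected to print 42
}

int main() {
  std::thread writer(Writer);
  std::thread reader(Reader);
  writer.join();
  reader.join();
}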

7
deps/v8/src/base/cpu.cc

@ -468,7 +468,12 @@ CPU::CPU()
char* end;
architecture_ = strtol(architecture, &end, 10);
if (end == architecture) {
architecture_ = 0;
// Kernels older than 3.18 report "CPU architecture: AArch64" on ARMv8.
if (strcmp(architecture, "AArch64") == 0) {
architecture_ = 8;
} else {
architecture_ = 0;
}
}
delete[] architecture;
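The parse-then-fallback logic above also reads well in isolation; a compilable sketch with a hypothetical helper name:

#include <cstdlib>
#include <cstring>

// Hypothetical helper mirroring the hunk above: returns the ARM
// architecture version for a /proc/cpuinfo "CPU architecture" value.
int ParseArmArchitecture(const char* field) {
  char* end;
  long arch = std::strtol(field, &end, 10);
  if (end == field) {
    // Not numeric: kernels older than 3.18 report "AArch64" on ARMv8.
    return (std::strcmp(field, "AArch64") == 0) ? 8 : 0;
  }
  return static_cast<int>(arch);
}

// ParseArmArchitecture("8")       -> 8
// ParseArmArchitecture("AArch64") -> 8
// ParseArmArchitecture("bogus")   -> 0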

11
deps/v8/src/base/logging.cc

@ -115,3 +115,14 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
fflush(stderr);
v8::base::OS::Abort();
}
extern "C" void V8_RuntimeError(const char* file, int line,
const char* message) {
fflush(stdout);
fflush(stderr);
v8::base::OS::PrintError("\n\n#\n# Runtime error in %s, line %d\n# ", file,
line);
v8::base::OS::PrintError("\n# %s\n", message);
v8::base::DumpBacktrace();
fflush(stderr);
}

2
deps/v8/src/base/logging.h

@ -14,6 +14,8 @@
extern "C" V8_NORETURN void V8_Fatal(const char* file, int line,
const char* format, ...);
extern "C" void V8_RuntimeError(const char* file, int line,
const char* message);
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
// development, but they should not be relied on in the final product.
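Given the declaration above, a call site looks roughly like this; the message text is invented for illustration:

// Hypothetical call site: prints the error with file/line context and
// dumps a backtrace via the V8_RuntimeError defined in logging.cc above.
V8_RuntimeError(__FILE__, __LINE__, "illegal bytecode operand");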

11
deps/v8/src/base/macros.h

@ -278,6 +278,17 @@ inline void USE(T) { }
#if V8_OS_MACOSX
#undef V8PRIxPTR
#define V8PRIxPTR "lx"
#undef V8PRIuPTR
#define V8PRIuPTR "lu"
#endif
// GCC on S390 31-bit expands 'size_t' to 'long unsigned int'
// instead of 'int', resulting in compilation errors with %d.
// The printf format specifier needs to be %zd instead.
#if V8_HOST_ARCH_S390 && !V8_HOST_ARCH_64_BIT
#define V8_SIZET_PREFIX "z"
#else
#define V8_SIZET_PREFIX ""
#endif
// The following macro works on both 32 and 64-bit platforms.
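The prefix is meant to be token-pasted between the '%' and the conversion character. A small usage sketch, assuming macros.h is included; note that on targets where the prefix expands empty, this scheme assumes the value fits in an int, which is exactly the S390 31-bit mismatch described above:

#include <cstddef>
#include <cstdio>

void PrintDepth(std::size_t depth) {
  // Adjacent string literals concatenate at compile time, so this format
  // becomes "%zd" on 31-bit S390 and "%d" elsewhere.
  std::printf("stack depth: %" V8_SIZET_PREFIX "d\n", depth);
}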

6
deps/v8/src/base/platform/platform-linux.cc

@ -72,14 +72,14 @@ bool OS::ArmUsingHardFloat() {
#define GCC_VERSION (__GNUC__ * 10000 \
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
#if GCC_VERSION >= 40600
#if GCC_VERSION >= 40600 && !defined(__clang__)
#if defined(__ARM_PCS_VFP)
return true;
#else
return false;
#endif
#elif GCC_VERSION < 40500
#elif GCC_VERSION < 40500 && !defined(__clang__)
return false;
#else
@ -89,7 +89,7 @@ bool OS::ArmUsingHardFloat() {
!defined(__VFP_FP__)
return false;
#else
#error "Your version of GCC does not report the FP ABI compiled for." \
#error "Your version of compiler does not report the FP ABI compiled for." \
"Please report it on this issue" \
"http://code.google.com/p/v8/issues/detail?id=2140"

17
deps/v8/src/base/platform/platform-posix.cc

@ -81,6 +81,8 @@ int OS::ActivationFrameAlignment() {
return 8;
#elif V8_TARGET_ARCH_MIPS
return 8;
#elif V8_TARGET_ARCH_S390
return 8;
#else
// Otherwise we just assume 16 byte alignment, i.e.:
// - With gcc 4.4 the tree vectorization optimizer can generate code
@ -185,6 +187,15 @@ void* OS::GetRandomMmapAddr() {
// Little-endian Linux: 48 bits of virtual addressing.
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#endif
#elif V8_TARGET_ARCH_S390X
// Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
// of virtual addressing. Truncate to 40 bits to give the kernel a chance to
// fulfill the request.
raw_addr &= V8_UINT64_C(0xfffffff000);
#elif V8_TARGET_ARCH_S390
// 31 bits of virtual addressing. Truncate to 29 bits to give the kernel a
// chance to fulfill the request.
raw_addr &= 0x1ffff000;
#else
raw_addr &= 0x3ffff000;
@ -252,6 +263,9 @@ void OS::DebugBreak() {
#endif // V8_OS_NACL
#elif V8_HOST_ARCH_X64
asm("int $3");
#elif V8_HOST_ARCH_S390
// Software breakpoint instruction is 0x0001
asm volatile(".word 0x0001");
#else
#error Unsupported host architecture.
#endif
@ -415,9 +429,10 @@ bool OS::Remove(const char* path) {
return (remove(path) == 0);
}
char OS::DirectorySeparator() { return '/'; }
bool OS::isDirectorySeparator(const char ch) {
return ch == '/';
return ch == DirectorySeparator();
}
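The S390 address-hint masking added to GetRandomMmapAddr above is plain bit arithmetic; a standalone sketch with a hypothetical helper name:

#include <cstdint>

// Sketch of the masking above: keep the random hint inside the usable
// virtual range (40 bits for s390x here) and clear the low 12 bits so
// it stays page-aligned.
uint64_t MaskMmapHint(uint64_t raw_addr) {
  return raw_addr & UINT64_C(0xfffffff000);
}

// MaskMmapHint(UINT64_C(0xdeadbeefcafef00d)) == UINT64_C(0xefcafef000)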

1
deps/v8/src/base/platform/platform-win32.cc

@ -574,6 +574,7 @@ bool OS::Remove(const char* path) {
return (DeleteFileA(path) != 0);
}
char OS::DirectorySeparator() { return '\\'; }
bool OS::isDirectorySeparator(const char ch) {
return ch == '/' || ch == '\\';

5
deps/v8/src/base/platform/platform.h

@ -142,6 +142,7 @@ class OS {
static FILE* FOpen(const char* path, const char* mode);
static bool Remove(const char* path);
static char DirectorySeparator();
static bool isDirectorySeparator(const char ch);
// Opens a temporary file, the file is auto removed on close.
@ -290,6 +291,10 @@ class VirtualMemory {
// by address().
VirtualMemory(size_t size, size_t alignment);
// Construct a virtual memory by assigning it some already mapped address
// and size.
VirtualMemory(void* address, size_t size) : address_(address), size_(size) {}
// Releases the reserved memory, if any, controlled by this VirtualMemory
// object.
~VirtualMemory();

3
deps/v8/src/base/platform/semaphore.cc

@ -94,8 +94,7 @@ Semaphore::~Semaphore() {
void Semaphore::Signal() {
int result = sem_post(&native_handle_);
DCHECK_EQ(0, result);
USE(result);
CHECK_EQ(0, result);
}

84
deps/v8/src/base/platform/time.cc

@ -520,14 +520,6 @@ bool TimeTicks::IsHighResolutionClockWorking() {
return high_res_tick_clock.Pointer()->IsHighResolution();
}
// static
TimeTicks TimeTicks::KernelTimestampNow() { return TimeTicks(0); }
// static
bool TimeTicks::KernelTimestampAvailable() { return false; }
#else // V8_OS_WIN
TimeTicks TimeTicks::Now() {
@ -566,82 +558,6 @@ bool TimeTicks::IsHighResolutionClockWorking() {
return true;
}
#if V8_OS_LINUX
class KernelTimestampClock {
public:
KernelTimestampClock() : clock_fd_(-1), clock_id_(kClockInvalid) {
clock_fd_ = open(kTraceClockDevice, O_RDONLY);
if (clock_fd_ == -1) {
return;
}
clock_id_ = get_clockid(clock_fd_);
}
virtual ~KernelTimestampClock() {
if (clock_fd_ != -1) {
close(clock_fd_);
}
}
int64_t Now() {
if (clock_id_ == kClockInvalid) {
return 0;
}
struct timespec ts;
clock_gettime(clock_id_, &ts);
return ((int64_t)ts.tv_sec * kNsecPerSec) + ts.tv_nsec;
}
bool Available() { return clock_id_ != kClockInvalid; }
private:
static const clockid_t kClockInvalid = -1;
static const char kTraceClockDevice[];
static const uint64_t kNsecPerSec = 1000000000;
int clock_fd_;
clockid_t clock_id_;
static int get_clockid(int fd) { return ((~(clockid_t)(fd) << 3) | 3); }
};
// Timestamp module name
const char KernelTimestampClock::kTraceClockDevice[] = "/dev/trace_clock";
#else
class KernelTimestampClock {
public:
KernelTimestampClock() {}
int64_t Now() { return 0; }
bool Available() { return false; }
};
#endif // V8_OS_LINUX
static LazyStaticInstance<KernelTimestampClock,
DefaultConstructTrait<KernelTimestampClock>,
ThreadSafeInitOnceTrait>::type kernel_tick_clock =
LAZY_STATIC_INSTANCE_INITIALIZER;
// static
TimeTicks TimeTicks::KernelTimestampNow() {
return TimeTicks(kernel_tick_clock.Pointer()->Now());
}
// static
bool TimeTicks::KernelTimestampAvailable() {
return kernel_tick_clock.Pointer()->Available();
}
#endif // V8_OS_WIN
} // namespace base

7
deps/v8/src/base/platform/time.h

@ -318,13 +318,6 @@ class TimeTicks final {
// Returns true if the high-resolution clock is working on this system.
static bool IsHighResolutionClockWorking();
// Returns Linux kernel timestamp for generating profiler events. This method
// returns null TimeTicks if the kernel cannot provide the timestamps (e.g.,
// on non-Linux OS or if the kernel module for timestamps is not loaded).
static TimeTicks KernelTimestampNow();
static bool KernelTimestampAvailable();
// Returns true if this object has not been initialized.
bool IsNull() const { return ticks_ == 0; }

2
deps/v8/src/base/win32-headers.h

@ -76,6 +76,8 @@
#undef CreateSemaphore
#undef Yield
#undef RotateRight32
#undef RotateLeft32
#undef RotateRight64
#undef RotateLeft64
#endif // V8_BASE_WIN32_HEADERS_H_

420
deps/v8/src/bootstrapper.cc

@ -157,8 +157,8 @@ class Genesis BASE_EMBEDDED {
Handle<JSFunction> GetThrowTypeErrorIntrinsic(Builtins::Name builtin_name);
void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
void CreateStrongModeFunctionMaps(Handle<JSFunction> empty);
void CreateIteratorMaps();
void CreateJSProxyMaps();
// Make the "arguments" and "caller" properties throw a TypeError on access.
void AddRestrictedFunctionProperties(Handle<Map> map);
@ -218,7 +218,6 @@ class Genesis BASE_EMBEDDED {
void InstallBuiltinFunctionIds();
void InstallExperimentalBuiltinFunctionIds();
void InitializeNormalizedMapCaches();
void InstallJSProxyMaps();
enum ExtensionTraversalState {
UNVISITED, VISITED, INSTALLED
@ -284,13 +283,10 @@ class Genesis BASE_EMBEDDED {
Handle<Map> CreateStrictFunctionMap(FunctionMode function_mode,
Handle<JSFunction> empty_function);
Handle<Map> CreateStrongFunctionMap(Handle<JSFunction> empty_function,
bool is_constructor);
void SetStrictFunctionInstanceDescriptor(Handle<Map> map,
FunctionMode function_mode);
void SetStrongFunctionInstanceDescriptor(Handle<Map> map);
static bool CallUtilsFunction(Isolate* isolate, const char* name);
@ -547,12 +543,6 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
native_context()->set_initial_array_prototype(*object_function_prototype);
Accessors::FunctionSetPrototype(object_fun, object_function_prototype)
.Assert();
// Allocate initial strong object map.
Handle<Map> strong_object_map =
Map::Copy(Handle<Map>(object_fun->initial_map()), "EmptyStrongObject");
strong_object_map->set_is_strong();
native_context()->set_js_object_strong_map(*strong_object_map);
}
// Allocate the empty function as the prototype for function - ES6 19.2.3
@ -637,29 +627,6 @@ void Genesis::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
}
void Genesis::SetStrongFunctionInstanceDescriptor(Handle<Map> map) {
Map::EnsureDescriptorSlack(map, 2);
PropertyAttributes ro_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
Handle<AccessorInfo> length =
Accessors::FunctionLengthInfo(isolate(), ro_attribs);
{ // Add length.
AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
length, ro_attribs);
map->AppendDescriptor(&d);
}
Handle<AccessorInfo> name =
Accessors::FunctionNameInfo(isolate(), ro_attribs);
{ // Add name.
AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
ro_attribs);
map->AppendDescriptor(&d);
}
}
// Creates the %ThrowTypeError% function.
Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic(
Builtins::Name builtin_name) {
@ -722,19 +689,6 @@ Handle<Map> Genesis::CreateStrictFunctionMap(
}
Handle<Map> Genesis::CreateStrongFunctionMap(
Handle<JSFunction> empty_function, bool is_constructor) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
SetStrongFunctionInstanceDescriptor(map);
map->set_is_constructor(is_constructor);
Map::SetPrototype(map, empty_function);
map->set_is_callable();
map->set_is_extensible(is_constructor);
map->set_is_strong();
return map;
}
void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
// Allocate map for the prototype-less strict mode instances.
Handle<Map> strict_function_without_prototype_map =
@ -756,16 +710,6 @@ void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
}
void Genesis::CreateStrongModeFunctionMaps(Handle<JSFunction> empty) {
// Allocate map for strong mode instances, which never have prototypes.
Handle<Map> strong_function_map = CreateStrongFunctionMap(empty, false);
native_context()->set_strong_function_map(*strong_function_map);
// Constructors do, though.
Handle<Map> strong_constructor_map = CreateStrongFunctionMap(empty, true);
native_context()->set_strong_constructor_map(*strong_constructor_map);
}
void Genesis::CreateIteratorMaps() {
// Create iterator-related meta-objects.
Handle<JSObject> iterator_prototype =
@ -803,15 +747,6 @@ void Genesis::CreateIteratorMaps() {
native_context()->set_strict_generator_function_map(
*strict_generator_function_map);
Handle<Map> strong_function_map(native_context()->strong_function_map());
Handle<Map> strong_generator_function_map =
Map::Copy(strong_function_map, "StrongGeneratorFunction");
strong_generator_function_map->set_is_constructor(false);
Map::SetPrototype(strong_generator_function_map,
generator_function_prototype);
native_context()->set_strong_generator_function_map(
*strong_generator_function_map);
Handle<JSFunction> object_function(native_context()->object_function());
Handle<Map> generator_object_prototype_map = Map::Create(isolate(), 0);
Map::SetPrototype(generator_object_prototype_map, generator_object_prototype);
@ -819,6 +754,30 @@ void Genesis::CreateIteratorMaps() {
*generator_object_prototype_map);
}
void Genesis::CreateJSProxyMaps() {
// Allocate the different maps for all Proxy types.
// Next to the default proxy, we need maps indicating callable and
// constructable proxies.
Handle<Map> proxy_function_map =
Map::Copy(isolate()->sloppy_function_without_prototype_map(), "Proxy");
proxy_function_map->set_is_constructor(true);
native_context()->set_proxy_function_map(*proxy_function_map);
Handle<Map> proxy_map =
factory()->NewMap(JS_PROXY_TYPE, JSProxy::kSize, FAST_ELEMENTS);
proxy_map->set_dictionary_map(true);
native_context()->set_proxy_map(*proxy_map);
Handle<Map> proxy_callable_map = Map::Copy(proxy_map, "callable Proxy");
proxy_callable_map->set_is_callable();
native_context()->set_proxy_callable_map(*proxy_callable_map);
proxy_callable_map->SetConstructor(native_context()->function_function());
Handle<Map> proxy_constructor_map =
Map::Copy(proxy_callable_map, "constructor Proxy");
proxy_constructor_map->set_is_constructor(true);
native_context()->set_proxy_constructor_map(*proxy_constructor_map);
}
static void ReplaceAccessors(Handle<Map> map,
Handle<String> name,
@ -942,7 +901,7 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
#ifdef DEBUG
LookupIterator it(prototype, factory()->constructor_string(),
LookupIterator::OWN_SKIP_INTERCEPTOR);
Handle<Object> value = JSReceiver::GetProperty(&it).ToHandleChecked();
Handle<Object> value = Object::GetProperty(&it).ToHandleChecked();
DCHECK(it.IsFound());
DCHECK_EQ(*isolate()->object_function(), *value);
#endif
@ -1121,6 +1080,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kObjectPreventExtensions, 1, false);
SimpleInstallFunction(object_function, "seal", Builtins::kObjectSeal, 1,
false);
SimpleInstallFunction(isolate->initial_object_prototype(), "hasOwnProperty",
Builtins::kObjectHasOwnProperty, 1, true);
}
Handle<JSObject> global(native_context()->global_object());
@ -1171,7 +1133,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
sloppy_function_map_writable_prototype_->SetConstructor(*function_fun);
strict_function_map_writable_prototype_->SetConstructor(*function_fun);
native_context()->strong_function_map()->SetConstructor(*function_fun);
}
{ // --- A r r a y ---
@ -1180,7 +1141,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate->initial_object_prototype(),
Builtins::kArrayCode);
array_function->shared()->DontAdaptArguments();
array_function->shared()->set_function_data(Smi::FromInt(kArrayCode));
array_function->shared()->set_builtin_function_id(kArrayCode);
// This seems a bit hackish, but we need to make sure Array.length
// is 1.
@ -1214,11 +1175,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<Code> code = array_constructor_stub.GetCode();
array_function->shared()->set_construct_stub(*code);
Handle<Map> initial_strong_map =
Map::Copy(initial_map, "SetInstancePrototype");
initial_strong_map->set_is_strong();
CacheInitialJSArrayMaps(native_context(), initial_strong_map);
Handle<JSFunction> is_arraylike = SimpleInstallFunction(
array_function, isolate->factory()->InternalizeUtf8String("isArray"),
Builtins::kArrayIsArray, 1, true);
@ -1292,6 +1248,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
attribs);
string_map->AppendDescriptor(&d);
}
// Install the String.fromCharCode function.
SimpleInstallFunction(string_fun, "fromCharCode",
Builtins::kStringFromCharCode, 1, false);
}
{
@ -1303,7 +1263,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
prototype, Builtins::kSymbolConstructor);
symbol_fun->shared()->set_construct_stub(
*isolate->builtins()->SymbolConstructor_ConstructStub());
symbol_fun->shared()->set_length(1);
symbol_fun->shared()->set_length(0);
symbol_fun->shared()->DontAdaptArguments();
native_context()->set_symbol_function(*symbol_fun);
@ -1560,8 +1520,23 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSObject> math = factory->NewJSObject(cons, TENURED);
DCHECK(math->IsJSObject());
JSObject::AddProperty(global, name, math, DONT_ENUM);
SimpleInstallFunction(math, "acos", Builtins::kMathAcos, 1, true);
SimpleInstallFunction(math, "asin", Builtins::kMathAsin, 1, true);
SimpleInstallFunction(math, "atan", Builtins::kMathAtan, 1, true);
SimpleInstallFunction(math, "ceil", Builtins::kMathCeil, 1, true);
SimpleInstallFunction(math, "clz32", Builtins::kMathClz32, 1, true);
Handle<JSFunction> math_floor =
SimpleInstallFunction(math, "floor", Builtins::kMathFloor, 1, true);
native_context()->set_math_floor(*math_floor);
SimpleInstallFunction(math, "fround", Builtins::kMathFround, 1, true);
SimpleInstallFunction(math, "imul", Builtins::kMathImul, 2, true);
SimpleInstallFunction(math, "max", Builtins::kMathMax, 2, false);
SimpleInstallFunction(math, "min", Builtins::kMathMin, 2, false);
SimpleInstallFunction(math, "round", Builtins::kMathRound, 1, true);
Handle<JSFunction> math_sqrt =
SimpleInstallFunction(math, "sqrt", Builtins::kMathSqrt, 1, true);
native_context()->set_math_sqrt(*math_sqrt);
SimpleInstallFunction(math, "trunc", Builtins::kMathTrunc, 1, true);
}
{ // -- A r r a y B u f f e r
@ -1649,6 +1624,74 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Context::JS_WEAK_SET_FUN_INDEX);
}
{ // -- P r o x y
CreateJSProxyMaps();
Handle<String> name = factory->Proxy_string();
Handle<Code> code(isolate->builtins()->ProxyConstructor());
Handle<JSFunction> proxy_function =
factory->NewFunction(isolate->proxy_function_map(),
factory->Proxy_string(), MaybeHandle<Code>(code));
JSFunction::SetInitialMap(
proxy_function, Handle<Map>(native_context()->proxy_map(), isolate),
factory->null_value());
proxy_function->shared()->set_construct_stub(
*isolate->builtins()->ProxyConstructor_ConstructStub());
proxy_function->shared()->set_internal_formal_parameter_count(2);
proxy_function->shared()->set_length(2);
native_context()->set_proxy_function(*proxy_function);
InstallFunction(global, name, proxy_function, factory->Object_string());
}
{ // -- R e f l e c t
Handle<String> reflect_string = factory->InternalizeUtf8String("Reflect");
Handle<JSObject> reflect =
factory->NewJSObject(isolate->object_function(), TENURED);
JSObject::AddProperty(global, reflect_string, reflect, DONT_ENUM);
Handle<JSFunction> define_property =
SimpleInstallFunction(reflect, factory->defineProperty_string(),
Builtins::kReflectDefineProperty, 3, true);
native_context()->set_reflect_define_property(*define_property);
Handle<JSFunction> delete_property =
SimpleInstallFunction(reflect, factory->deleteProperty_string(),
Builtins::kReflectDeleteProperty, 2, true);
native_context()->set_reflect_delete_property(*delete_property);
Handle<JSFunction> apply = SimpleInstallFunction(
reflect, factory->apply_string(), Builtins::kReflectApply, 3, false);
native_context()->set_reflect_apply(*apply);
Handle<JSFunction> construct =
SimpleInstallFunction(reflect, factory->construct_string(),
Builtins::kReflectConstruct, 2, false);
native_context()->set_reflect_construct(*construct);
SimpleInstallFunction(reflect, factory->get_string(), Builtins::kReflectGet,
2, false);
SimpleInstallFunction(reflect, factory->getOwnPropertyDescriptor_string(),
Builtins::kReflectGetOwnPropertyDescriptor, 2, true);
SimpleInstallFunction(reflect, factory->getPrototypeOf_string(),
Builtins::kReflectGetPrototypeOf, 1, true);
SimpleInstallFunction(reflect, factory->has_string(), Builtins::kReflectHas,
2, true);
SimpleInstallFunction(reflect, factory->isExtensible_string(),
Builtins::kReflectIsExtensible, 1, true);
SimpleInstallFunction(reflect, factory->ownKeys_string(),
Builtins::kReflectOwnKeys, 1, true);
SimpleInstallFunction(reflect, factory->preventExtensions_string(),
Builtins::kReflectPreventExtensions, 1, true);
SimpleInstallFunction(reflect, factory->set_string(), Builtins::kReflectSet,
3, false);
SimpleInstallFunction(reflect, factory->setPrototypeOf_string(),
Builtins::kReflectSetPrototypeOf, 2, true);
}
{ // --- B o u n d F u n c t i o n
Handle<Map> map =
factory->NewMap(JS_BOUND_FUNCTION_TYPE, JSBoundFunction::kSize);
@ -1924,10 +1967,11 @@ bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
Handle<String> script_name =
isolate->factory()->NewStringFromUtf8(name).ToHandleChecked();
Handle<SharedFunctionInfo> function_info = Compiler::CompileScript(
source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
context, NULL, NULL, ScriptCompiler::kNoCompileOptions, natives_flag,
false);
Handle<SharedFunctionInfo> function_info =
Compiler::GetSharedFunctionInfoForScript(
source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
context, NULL, NULL, ScriptCompiler::kNoCompileOptions, natives_flag,
false);
if (function_info.is_null()) return false;
DCHECK(context->IsNativeContext());
@ -1981,7 +2025,7 @@ bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
if (!cache->Lookup(name, &function_info)) {
Handle<String> script_name =
factory->NewStringFromUtf8(name).ToHandleChecked();
function_info = Compiler::CompileScript(
function_info = Compiler::GetSharedFunctionInfoForScript(
source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
context, extension, NULL, ScriptCompiler::kNoCompileOptions,
EXTENSION_CODE, false);
@ -2021,7 +2065,7 @@ static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
Handle<String> property_string = factory->InternalizeUtf8String(property);
DCHECK(!property_string.is_null());
Handle<JSObject> object = Handle<JSObject>::cast(
Object::GetProperty(global, property_string).ToHandleChecked());
JSReceiver::GetProperty(global, property_string).ToHandleChecked());
if (strcmp("prototype", inner) == 0) {
Handle<JSFunction> function = Handle<JSFunction>::cast(object);
return Handle<JSObject>(JSObject::cast(function->prototype()));
@ -2029,7 +2073,7 @@ static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
Handle<String> inner_string = factory->InternalizeUtf8String(inner);
DCHECK(!inner_string.is_null());
Handle<Object> value =
Object::GetProperty(object, inner_string).ToHandleChecked();
JSReceiver::GetProperty(object, inner_string).ToHandleChecked();
return Handle<JSObject>::cast(value);
}
@ -2129,8 +2173,6 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
*generator_function_function);
native_context->strict_generator_function_map()->SetConstructor(
*generator_function_function);
native_context->strong_generator_function_map()->SetConstructor(
*generator_function_function);
}
{ // -- S e t I t e r a t o r
@ -2315,7 +2357,6 @@ void Bootstrapper::ExportExperimentalFromRuntime(Isolate* isolate,
isolate->factory()->ToBoolean(FLAG), NONE); \
}
INITIALIZE_FLAG(FLAG_harmony_tostring)
INITIALIZE_FLAG(FLAG_harmony_species)
#undef INITIALIZE_FLAG
@ -2325,18 +2366,14 @@ void Bootstrapper::ExportExperimentalFromRuntime(Isolate* isolate,
#define EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(id) \
void Genesis::InitializeGlobal_##id() {}
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_modules)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy_function)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy_let)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_default_parameters)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_destructuring_bind)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_destructuring_assignment)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_observe)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexps)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_unicode_regexps)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_do_expressions)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_iterator_close)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_exec)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_lookbehind)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_name)
@ -2344,6 +2381,9 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_sent)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(promise_extra)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tailcalls)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_instanceof)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrictive_declarations)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_exponentiation_operator)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_string_padding)
void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
const char* name, Handle<Symbol> value) {
@ -2359,13 +2399,6 @@ void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
}
void Genesis::InitializeGlobal_harmony_tostring() {
if (!FLAG_harmony_tostring) return;
InstallPublicSymbol(factory(), native_context(), "toStringTag",
factory()->to_string_tag_symbol());
}
void Genesis::InitializeGlobal_harmony_regexp_subclass() {
if (!FLAG_harmony_regexp_subclass) return;
InstallPublicSymbol(factory(), native_context(), "match",
@ -2379,66 +2412,6 @@ void Genesis::InitializeGlobal_harmony_regexp_subclass() {
}
void Genesis::InitializeGlobal_harmony_reflect() {
Factory* factory = isolate()->factory();
// We currently use some of the Reflect functions internally, even when
// the --harmony-reflect flag is not given.
Handle<JSFunction> define_property =
SimpleCreateFunction(isolate(), factory->defineProperty_string(),
Builtins::kReflectDefineProperty, 3, true);
native_context()->set_reflect_define_property(*define_property);
Handle<JSFunction> delete_property =
SimpleCreateFunction(isolate(), factory->deleteProperty_string(),
Builtins::kReflectDeleteProperty, 2, true);
native_context()->set_reflect_delete_property(*delete_property);
Handle<JSFunction> apply = SimpleCreateFunction(
isolate(), factory->apply_string(), Builtins::kReflectApply, 3, false);
native_context()->set_reflect_apply(*apply);
Handle<JSFunction> construct =
SimpleCreateFunction(isolate(), factory->construct_string(),
Builtins::kReflectConstruct, 2, false);
native_context()->set_reflect_construct(*construct);
if (!FLAG_harmony_reflect) return;
Handle<JSGlobalObject> global(JSGlobalObject::cast(
native_context()->global_object()));
Handle<String> reflect_string = factory->NewStringFromStaticChars("Reflect");
Handle<JSObject> reflect =
factory->NewJSObject(isolate()->object_function(), TENURED);
JSObject::AddProperty(global, reflect_string, reflect, DONT_ENUM);
InstallFunction(reflect, define_property, factory->defineProperty_string());
InstallFunction(reflect, delete_property, factory->deleteProperty_string());
InstallFunction(reflect, apply, factory->apply_string());
InstallFunction(reflect, construct, factory->construct_string());
SimpleInstallFunction(reflect, factory->get_string(),
Builtins::kReflectGet, 2, false);
SimpleInstallFunction(reflect, factory->getOwnPropertyDescriptor_string(),
Builtins::kReflectGetOwnPropertyDescriptor, 2, true);
SimpleInstallFunction(reflect, factory->getPrototypeOf_string(),
Builtins::kReflectGetPrototypeOf, 1, true);
SimpleInstallFunction(reflect, factory->has_string(),
Builtins::kReflectHas, 2, true);
SimpleInstallFunction(reflect, factory->isExtensible_string(),
Builtins::kReflectIsExtensible, 1, true);
SimpleInstallFunction(reflect, factory->ownKeys_string(),
Builtins::kReflectOwnKeys, 1, true);
SimpleInstallFunction(reflect, factory->preventExtensions_string(),
Builtins::kReflectPreventExtensions, 1, true);
SimpleInstallFunction(reflect, factory->set_string(),
Builtins::kReflectSet, 3, false);
SimpleInstallFunction(reflect, factory->setPrototypeOf_string(),
Builtins::kReflectSetPrototypeOf, 2, true);
}
void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
if (!FLAG_harmony_sharedarraybuffer) return;
@ -2509,64 +2482,27 @@ void Genesis::InitializeGlobal_harmony_object_own_property_descriptors() {
Builtins::kObjectGetOwnPropertyDescriptors, 1, false);
}
void Genesis::InstallJSProxyMaps() {
// Allocate the different maps for all Proxy types.
// Next to the default proxy, we need maps indicating callable and
// constructable proxies.
Handle<Map> proxy_function_map =
Map::Copy(isolate()->sloppy_function_without_prototype_map(), "Proxy");
proxy_function_map->set_is_constructor(true);
native_context()->set_proxy_function_map(*proxy_function_map);
Handle<Map> proxy_map =
factory()->NewMap(JS_PROXY_TYPE, JSProxy::kSize, FAST_ELEMENTS);
proxy_map->set_dictionary_map(true);
native_context()->set_proxy_map(*proxy_map);
Handle<Map> proxy_callable_map = Map::Copy(proxy_map, "callable Proxy");
proxy_callable_map->set_is_callable();
native_context()->set_proxy_callable_map(*proxy_callable_map);
proxy_callable_map->SetConstructor(native_context()->function_function());
Handle<Map> proxy_constructor_map =
Map::Copy(proxy_callable_map, "constructor Proxy");
proxy_constructor_map->set_is_constructor(true);
native_context()->set_proxy_constructor_map(*proxy_constructor_map);
}
void Genesis::InitializeGlobal_harmony_proxies() {
if (!FLAG_harmony_proxies) return;
Handle<JSGlobalObject> global(
JSGlobalObject::cast(native_context()->global_object()));
Isolate* isolate = global->GetIsolate();
Factory* factory = isolate->factory();
InstallJSProxyMaps();
// Create the Proxy object.
Handle<String> name = factory->Proxy_string();
Handle<Code> code(isolate->builtins()->ProxyConstructor());
Handle<JSFunction> proxy_function =
factory->NewFunction(isolate->proxy_function_map(),
factory->Proxy_string(), MaybeHandle<Code>(code));
JSFunction::SetInitialMap(proxy_function,
Handle<Map>(native_context()->proxy_map(), isolate),
factory->null_value());
proxy_function->shared()->set_construct_stub(
*isolate->builtins()->ProxyConstructor_ConstructStub());
proxy_function->shared()->set_internal_formal_parameter_count(2);
proxy_function->shared()->set_length(2);
void Genesis::InitializeGlobal_harmony_array_prototype_values() {
if (!FLAG_harmony_array_prototype_values) return;
Handle<JSFunction> array_constructor(native_context()->array_function());
Handle<JSObject> array_prototype(
JSObject::cast(array_constructor->instance_prototype()));
Handle<Object> values_iterator =
JSObject::GetProperty(array_prototype, factory()->iterator_symbol())
.ToHandleChecked();
DCHECK(values_iterator->IsJSFunction());
JSObject::AddProperty(array_prototype, factory()->values_string(),
values_iterator, DONT_ENUM);
native_context()->set_proxy_function(*proxy_function);
InstallFunction(global, name, proxy_function, factory->Object_string());
Handle<Object> unscopables =
JSObject::GetProperty(array_prototype, factory()->unscopables_symbol())
.ToHandleChecked();
DCHECK(unscopables->IsJSObject());
JSObject::AddProperty(Handle<JSObject>::cast(unscopables),
factory()->values_string(), factory()->true_value(),
NONE);
}
Handle<JSFunction> Genesis::InstallArrayBuffer(Handle<JSObject> target,
const char* name) {
// Setup the {prototype} with the given {name} for @@toStringTag.
@ -2708,9 +2644,8 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
if (!CallUtilsFunction(isolate(), "PostNatives")) return false;
auto template_instantiations_cache =
ObjectHashTable::New(isolate(), ApiNatives::kInitialFunctionCacheSize,
USE_CUSTOM_MINIMUM_CAPACITY);
auto template_instantiations_cache = UnseededNumberDictionary::New(
isolate(), ApiNatives::kInitialFunctionCacheSize);
native_context()->set_template_instantiations_cache(
*template_instantiations_cache);
@ -2777,7 +2712,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
{
Handle<String> key = factory()->Promise_string();
Handle<JSFunction> function = Handle<JSFunction>::cast(
Object::GetProperty(handle(native_context()->global_object()), key)
JSReceiver::GetProperty(handle(native_context()->global_object()), key)
.ToHandleChecked());
JSFunction::EnsureHasInitialMap(function);
function->initial_map()->set_instance_type(JS_PROMISE_TYPE);
@ -2789,6 +2724,37 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
InstallBuiltinFunctionIds();
// Also install builtin function ids on some generator object methods. These
// three methods use the three resume operations (Runtime_GeneratorNext,
// Runtime_GeneratorReturn, Runtime_GeneratorThrow), respectively. Those
// operations are not supported by Crankshaft, TurboFan, or Ignition.
{
Handle<JSObject> generator_object_prototype(JSObject::cast(
native_context()->generator_object_prototype_map()->prototype()));
{ // GeneratorObject.prototype.next
Handle<String> key = factory()->next_string();
Handle<JSFunction> function = Handle<JSFunction>::cast(
JSReceiver::GetProperty(generator_object_prototype, key)
.ToHandleChecked());
function->shared()->set_builtin_function_id(kGeneratorObjectNext);
}
{ // GeneratorObject.prototype.return
Handle<String> key = factory()->NewStringFromAsciiChecked("return");
Handle<JSFunction> function = Handle<JSFunction>::cast(
JSReceiver::GetProperty(generator_object_prototype, key)
.ToHandleChecked());
function->shared()->set_builtin_function_id(kGeneratorObjectReturn);
}
{ // GeneratorObject.prototype.throw
Handle<String> key = factory()->throw_string();
Handle<JSFunction> function = Handle<JSFunction>::cast(
JSReceiver::GetProperty(generator_object_prototype, key)
.ToHandleChecked());
function->shared()->set_builtin_function_id(kGeneratorObjectThrow);
}
}
// Create a map for accessor property descriptors (a variant of JSObject
// that predefines four properties get, set, configurable and enumerable).
{
@ -2969,11 +2935,6 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
bool Genesis::InstallExperimentalNatives() {
static const char* harmony_proxies_natives[] = {"native proxy.js", nullptr};
static const char* harmony_modules_natives[] = {nullptr};
static const char* harmony_regexps_natives[] = {"native harmony-regexp.js",
nullptr};
static const char* harmony_tostring_natives[] = {nullptr};
static const char* harmony_iterator_close_natives[] = {nullptr};
static const char* harmony_sloppy_natives[] = {nullptr};
static const char* harmony_sloppy_function_natives[] = {nullptr};
@ -2983,11 +2944,6 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_tailcalls_natives[] = {nullptr};
static const char* harmony_unicode_regexps_natives[] = {
"native harmony-unicode-regexps.js", nullptr};
static const char* harmony_default_parameters_natives[] = {nullptr};
static const char* harmony_reflect_natives[] = {"native harmony-reflect.js",
nullptr};
static const char* harmony_destructuring_bind_natives[] = {nullptr};
static const char* harmony_destructuring_assignment_natives[] = {nullptr};
static const char* harmony_object_observe_natives[] = {
"native harmony-object-observe.js", nullptr};
static const char* harmony_sharedarraybuffer_natives[] = {
@ -2995,9 +2951,12 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_simd_natives[] = {"native harmony-simd.js",
nullptr};
static const char* harmony_do_expressions_natives[] = {nullptr};
static const char* harmony_regexp_exec_natives[] = {
"native harmony-regexp-exec.js", nullptr};
static const char* harmony_regexp_subclass_natives[] = {nullptr};
static const char* harmony_regexp_lookbehind_natives[] = {nullptr};
static const char* harmony_instanceof_natives[] = {nullptr};
static const char* harmony_restrictive_declarations_natives[] = {nullptr};
static const char* harmony_regexp_property_natives[] = {nullptr};
static const char* harmony_function_name_natives[] = {nullptr};
static const char* harmony_function_sent_natives[] = {nullptr};
@ -3006,6 +2965,10 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_object_values_entries_natives[] = {nullptr};
static const char* harmony_object_own_property_descriptors_natives[] = {
nullptr};
static const char* harmony_array_prototype_values_natives[] = {nullptr};
static const char* harmony_exponentiation_operator_natives[] = {nullptr};
static const char* harmony_string_padding_natives[] = {
"native harmony-string-padding.js", nullptr};
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@ -3075,9 +3038,9 @@ static void InstallBuiltinFunctionId(Handle<JSObject> holder,
BuiltinFunctionId id) {
Isolate* isolate = holder->GetIsolate();
Handle<Object> function_object =
Object::GetProperty(isolate, holder, function_name).ToHandleChecked();
JSReceiver::GetProperty(isolate, holder, function_name).ToHandleChecked();
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
function->shared()->set_function_data(Smi::FromInt(id));
function->shared()->set_builtin_function_id(id);
}
@ -3596,7 +3559,6 @@ Genesis::Genesis(Isolate* isolate,
CreateRoots();
Handle<JSFunction> empty_function = CreateEmptyFunction(isolate);
CreateStrictModeFunctionMaps(empty_function);
CreateStrongModeFunctionMaps(empty_function);
CreateIteratorMaps();
Handle<JSGlobalObject> global_object =
CreateNewGlobals(global_proxy_template, global_proxy);

1155
deps/v8/src/builtins.cc

File diff suppressed because it is too large

62
deps/v8/src/builtins.h

@ -11,6 +11,13 @@
namespace v8 {
namespace internal {
namespace compiler {
// Forward declarations.
class CodeStubAssembler;
} // namespace compiler
// Specifies extra arguments required by a C++ builtin.
enum class BuiltinExtraArguments : uint8_t {
kNone = 0u,
@ -110,12 +117,17 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
V(FunctionConstructor, kTargetAndNewTarget) \
V(FunctionPrototypeBind, kNone) \
V(FunctionPrototypeToString, kNone) \
V(FunctionHasInstance, kNone) \
\
V(GeneratorFunctionConstructor, kTargetAndNewTarget) \
\
V(GlobalEval, kTarget) \
\
V(MathAcos, kNone) \
V(MathAsin, kNone) \
V(MathAtan, kNone) \
V(MathFround, kNone) \
V(MathImul, kNone) \
\
V(ObjectAssign, kNone) \
V(ObjectCreate, kNone) \
V(ObjectFreeze, kNone) \
@ -149,6 +161,8 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
V(ReflectSet, kNone) \
V(ReflectSetPrototypeOf, kNone) \
\
V(StringFromCharCode, kNone) \
\
V(SymbolConstructor, kNone) \
V(SymbolConstructor_ConstructStub, kTarget) \
\
@ -265,6 +279,7 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
V(DatePrototypeGetUTCMonth, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(DatePrototypeGetUTCSeconds, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
V(FunctionHasInstance, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(FunctionPrototypeApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(FunctionPrototypeCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
@ -285,7 +300,6 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
\
V(OnStackReplacement, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(InterruptCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(OsrAfterStackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(StackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
V(MarkCodeAsToBeExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState) \
@ -293,6 +307,16 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, kNoExtraICState) \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
// Define list of builtins implemented in TurboFan (with JS linkage).
#define BUILTIN_LIST_T(V) \
V(MathCeil, 2) \
V(MathClz32, 2) \
V(MathFloor, 2) \
V(MathRound, 2) \
V(MathSqrt, 2) \
V(MathTrunc, 2) \
V(ObjectHasOwnProperty, 2)
// Define list of builtin handlers implemented in assembly.
#define BUILTIN_LIST_H(V) \
V(LoadIC_Slow, LOAD_IC) \
@ -331,14 +355,16 @@ class Builtins {
enum Name {
#define DEF_ENUM_C(name, ignore) k##name,
#define DEF_ENUM_A(name, kind, state, extra) k##name,
#define DEF_ENUM_T(name, argc) k##name,
#define DEF_ENUM_H(name, kind) k##name,
BUILTIN_LIST_C(DEF_ENUM_C)
BUILTIN_LIST_A(DEF_ENUM_A)
BUILTIN_LIST_H(DEF_ENUM_H)
BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
BUILTIN_LIST_C(DEF_ENUM_C) BUILTIN_LIST_A(DEF_ENUM_A)
BUILTIN_LIST_T(DEF_ENUM_T) BUILTIN_LIST_H(DEF_ENUM_H)
BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
#undef DEF_ENUM_C
#undef DEF_ENUM_A
builtin_count
#undef DEF_ENUM_T
#undef DEF_ENUM_H
builtin_count
};
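The BUILTIN_LIST_* tables are X-macros: one list is expanded several times with different definitions of V (DEF_ENUM_*, the DECLARE_BUILTIN_ACCESSOR_* below, and so on), so the enum entries and accessors can never drift apart. A minimal self-contained sketch of the pattern, with hypothetical names:

#include <cstdio>

// Hypothetical list in the same V(name, argc) shape as BUILTIN_LIST_T.
#define MY_BUILTIN_LIST(V) \
  V(MathFloor, 2)          \
  V(MathCeil, 2)           \
  V(MathSqrt, 2)

// Expansion 1: one enum entry per row.
enum class MyBuiltin {
#define DEF_ENUM(name, argc) k##name,
  MY_BUILTIN_LIST(DEF_ENUM)
#undef DEF_ENUM
  kCount
};

// Expansion 2: a name table generated from the same rows.
static const char* kMyBuiltinNames[] = {
#define DEF_NAME(name, argc) #name,
    MY_BUILTIN_LIST(DEF_NAME)
#undef DEF_NAME
};

int main() {
  for (const char* name : kMyBuiltinNames) std::printf("%s\n", name);
}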
enum CFunctionId {
@ -351,13 +377,17 @@ class Builtins {
#define DECLARE_BUILTIN_ACCESSOR_C(name, ignore) Handle<Code> name();
#define DECLARE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
Handle<Code> name();
#define DECLARE_BUILTIN_ACCESSOR_T(name, argc) Handle<Code> name();
#define DECLARE_BUILTIN_ACCESSOR_H(name, kind) Handle<Code> name();
BUILTIN_LIST_C(DECLARE_BUILTIN_ACCESSOR_C)
BUILTIN_LIST_A(DECLARE_BUILTIN_ACCESSOR_A)
BUILTIN_LIST_T(DECLARE_BUILTIN_ACCESSOR_T)
BUILTIN_LIST_H(DECLARE_BUILTIN_ACCESSOR_H)
BUILTIN_LIST_DEBUG_A(DECLARE_BUILTIN_ACCESSOR_A)
#undef DECLARE_BUILTIN_ACCESSOR_C
#undef DECLARE_BUILTIN_ACCESSOR_A
#undef DECLARE_BUILTIN_ACCESSOR_T
#undef DECLARE_BUILTIN_ACCESSOR_H
// Convenience wrappers.
Handle<Code> CallFunction(
@ -548,6 +578,7 @@ class Builtins {
// ES6 section 20.3.4.19 Date.prototype.getUTCSeconds ( )
static void Generate_DatePrototypeGetUTCSeconds(MacroAssembler* masm);
static void Generate_FunctionHasInstance(MacroAssembler* masm);
static void Generate_FunctionPrototypeApply(MacroAssembler* masm);
static void Generate_FunctionPrototypeCall(MacroAssembler* masm);
@ -557,6 +588,12 @@ class Builtins {
static void Generate_InternalArrayCode(MacroAssembler* masm);
static void Generate_ArrayCode(MacroAssembler* masm);
// ES6 section 20.2.2.10 Math.ceil ( x )
static void Generate_MathCeil(compiler::CodeStubAssembler* assembler);
// ES6 section 20.2.2.11 Math.clz32 ( x )
static void Generate_MathClz32(compiler::CodeStubAssembler* assembler);
// ES6 section 20.2.2.16 Math.floor ( x )
static void Generate_MathFloor(compiler::CodeStubAssembler* assembler);
enum class MathMaxMinKind { kMax, kMin };
static void Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind);
// ES6 section 20.2.2.24 Math.max ( value1, value2 , ...values )
@ -567,16 +604,25 @@ class Builtins {
static void Generate_MathMin(MacroAssembler* masm) {
Generate_MathMaxMin(masm, MathMaxMinKind::kMin);
}
// ES6 section 20.2.2.28 Math.round ( x )
static void Generate_MathRound(compiler::CodeStubAssembler* assembler);
// ES6 section 20.2.2.32 Math.sqrt ( x )
static void Generate_MathSqrt(compiler::CodeStubAssembler* assembler);
// ES6 section 20.2.2.35 Math.trunc ( x )
static void Generate_MathTrunc(compiler::CodeStubAssembler* assembler);
// ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case.
static void Generate_NumberConstructor(MacroAssembler* masm);
// ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case.
static void Generate_NumberConstructor_ConstructStub(MacroAssembler* masm);
// ES6 section 19.1.3.2 Object.prototype.hasOwnProperty
static void Generate_ObjectHasOwnProperty(
compiler::CodeStubAssembler* assembler);
static void Generate_StringConstructor(MacroAssembler* masm);
static void Generate_StringConstructor_ConstructStub(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);
static void Generate_OsrAfterStackCheck(MacroAssembler* masm);
static void Generate_InterruptCheck(MacroAssembler* masm);
static void Generate_StackCheck(MacroAssembler* masm);

180
deps/v8/src/code-factory.cc

@ -118,13 +118,6 @@ Callable CodeFactory::CompareIC(Isolate* isolate, Token::Value op) {
}
// static
Callable CodeFactory::CompareNilIC(Isolate* isolate, NilValue nil_value) {
Handle<Code> code = CompareNilICStub::GetUninitialized(isolate, nil_value);
return Callable(code, CompareNilDescriptor(isolate));
}
// static
Callable CodeFactory::BinaryOpIC(Isolate* isolate, Token::Value op) {
BinaryOpICStub stub(isolate, op);
@ -141,8 +134,8 @@ Callable CodeFactory::InstanceOf(Isolate* isolate) {
// static
Callable CodeFactory::ToBoolean(Isolate* isolate) {
Handle<Code> code = ToBooleanStub::GetUninitialized(isolate);
return Callable(code, ToBooleanDescriptor(isolate));
ToBooleanStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
@ -153,6 +146,18 @@ Callable CodeFactory::ToNumber(Isolate* isolate) {
}
// static
Callable CodeFactory::NonNumberToNumber(Isolate* isolate) {
NonNumberToNumberStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::StringToNumber(Isolate* isolate) {
StringToNumberStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::ToString(Isolate* isolate) {
ToStringStub stub(isolate);
@ -167,6 +172,12 @@ Callable CodeFactory::ToName(Isolate* isolate) {
}
// static
Callable CodeFactory::ToInteger(Isolate* isolate) {
ToIntegerStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::ToLength(Isolate* isolate) {
ToLengthStub stub(isolate);
@ -201,6 +212,83 @@ Callable CodeFactory::RegExpExec(Isolate* isolate) {
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::Add(Isolate* isolate) {
AddStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::Subtract(Isolate* isolate) {
SubtractStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::BitwiseAnd(Isolate* isolate) {
BitwiseAndStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::BitwiseOr(Isolate* isolate) {
BitwiseOrStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::BitwiseXor(Isolate* isolate) {
BitwiseXorStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::LessThan(Isolate* isolate) {
LessThanStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::LessThanOrEqual(Isolate* isolate) {
LessThanOrEqualStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::GreaterThan(Isolate* isolate) {
GreaterThanStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::GreaterThanOrEqual(Isolate* isolate) {
GreaterThanOrEqualStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::Equal(Isolate* isolate) {
EqualStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::NotEqual(Isolate* isolate) {
NotEqualStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::StrictEqual(Isolate* isolate) {
StrictEqualStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::StrictNotEqual(Isolate* isolate) {
StrictNotEqualStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags,
@ -209,13 +297,65 @@ Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags,
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::StringCompare(Isolate* isolate, Token::Value token) {
switch (token) {
case Token::EQ:
case Token::EQ_STRICT:
return StringEqual(isolate);
case Token::NE:
case Token::NE_STRICT:
return StringNotEqual(isolate);
case Token::LT:
return StringLessThan(isolate);
case Token::GT:
return StringGreaterThan(isolate);
case Token::LTE:
return StringLessThanOrEqual(isolate);
case Token::GTE:
return StringGreaterThanOrEqual(isolate);
default:
break;
}
UNREACHABLE();
return StringEqual(isolate);
}
// static
Callable CodeFactory::StringCompare(Isolate* isolate) {
StringCompareStub stub(isolate);
Callable CodeFactory::StringEqual(Isolate* isolate) {
StringEqualStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::StringNotEqual(Isolate* isolate) {
StringNotEqualStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::StringLessThan(Isolate* isolate) {
StringLessThanStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::StringLessThanOrEqual(Isolate* isolate) {
StringLessThanOrEqualStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::StringGreaterThan(Isolate* isolate) {
StringGreaterThanStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::StringGreaterThanOrEqual(Isolate* isolate) {
StringGreaterThanOrEqualStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
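Every getter here repeats one two-line shape: construct the stub, then wrap its code and call descriptor in a Callable. A hedged sketch of that shape factored through a template, with stand-in types so it compiles on its own; MakeCallable is hypothetical, not V8 API:

#include <cstdio>
#include <string>

// Stand-ins so the sketch is self-contained; in V8 these would be
// Isolate, Code, CallInterfaceDescriptor, and the concrete *Stub types.
struct Isolate {};
struct Callable {
  std::string code;
  std::string descriptor;
};
struct StringEqualStub {
  explicit StringEqualStub(Isolate*) {}
  std::string GetCode() { return "StringEqual code"; }
  std::string GetCallInterfaceDescriptor() { return "CompareDescriptor"; }
};

// The repeated CodeFactory pattern, written once.
template <typename Stub>
Callable MakeCallable(Isolate* isolate) {
  Stub stub(isolate);
  return Callable{stub.GetCode(), stub.GetCallInterfaceDescriptor()};
}

int main() {
  Isolate isolate;
  Callable callable = MakeCallable<StringEqualStub>(&isolate);
  std::printf("%s / %s\n", callable.code.c_str(), callable.descriptor.c_str());
}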
// static
Callable CodeFactory::SubString(Isolate* isolate) {
@ -224,6 +364,12 @@ Callable CodeFactory::SubString(Isolate* isolate) {
}
// static
Callable CodeFactory::StoreInterceptor(Isolate* isolate) {
StoreInterceptorStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::Typeof(Isolate* isolate) {
TypeofStub stub(isolate);
@ -310,6 +456,13 @@ Callable CodeFactory::AllocateMutableHeapNumber(Isolate* isolate) {
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
#define SIMD128_ALLOC(TYPE, Type, type, lane_count, lane_type) \
Callable CodeFactory::Allocate##Type(Isolate* isolate) { \
Allocate##Type##Stub stub(isolate); \
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor()); \
}
SIMD128_TYPES(SIMD128_ALLOC)
#undef SIMD128_ALLOC
// static
Callable CodeFactory::AllocateInNewSpace(Isolate* isolate) {
@ -326,8 +479,9 @@ Callable CodeFactory::ArgumentAdaptor(Isolate* isolate) {
// static
Callable CodeFactory::Call(Isolate* isolate, ConvertReceiverMode mode) {
return Callable(isolate->builtins()->Call(mode),
Callable CodeFactory::Call(Isolate* isolate, ConvertReceiverMode mode,
TailCallMode tail_call_mode) {
return Callable(isolate->builtins()->Call(mode, tail_call_mode),
CallTrampolineDescriptor(isolate));
}

34
deps/v8/src/code-factory.h

@ -54,6 +54,8 @@ class CodeFactory final {
Isolate* isolate, LanguageMode mode,
InlineCacheState initialization_state);
static Callable StoreInterceptor(Isolate* isolate);
static Callable CompareIC(Isolate* isolate, Token::Value op);
static Callable CompareNilIC(Isolate* isolate, NilValue nil_value);
@ -66,8 +68,11 @@ class CodeFactory final {
static Callable ToBoolean(Isolate* isolate);
static Callable ToNumber(Isolate* isolate);
static Callable NonNumberToNumber(Isolate* isolate);
static Callable StringToNumber(Isolate* isolate);
static Callable ToString(Isolate* isolate);
static Callable ToName(Isolate* isolate);
static Callable ToInteger(Isolate* isolate);
static Callable ToLength(Isolate* isolate);
static Callable ToObject(Isolate* isolate);
static Callable NumberToString(Isolate* isolate);
@ -75,9 +80,29 @@ class CodeFactory final {
static Callable RegExpConstructResult(Isolate* isolate);
static Callable RegExpExec(Isolate* isolate);
static Callable Add(Isolate* isolate);
static Callable Subtract(Isolate* isolate);
static Callable BitwiseAnd(Isolate* isolate);
static Callable BitwiseOr(Isolate* isolate);
static Callable BitwiseXor(Isolate* isolate);
static Callable LessThan(Isolate* isolate);
static Callable LessThanOrEqual(Isolate* isolate);
static Callable GreaterThan(Isolate* isolate);
static Callable GreaterThanOrEqual(Isolate* isolate);
static Callable Equal(Isolate* isolate);
static Callable NotEqual(Isolate* isolate);
static Callable StrictEqual(Isolate* isolate);
static Callable StrictNotEqual(Isolate* isolate);
static Callable StringAdd(Isolate* isolate, StringAddFlags flags,
PretenureFlag pretenure_flag);
static Callable StringCompare(Isolate* isolate);
static Callable StringCompare(Isolate* isolate, Token::Value token);
static Callable StringEqual(Isolate* isolate);
static Callable StringNotEqual(Isolate* isolate);
static Callable StringLessThan(Isolate* isolate);
static Callable StringLessThanOrEqual(Isolate* isolate);
static Callable StringGreaterThan(Isolate* isolate);
static Callable StringGreaterThanOrEqual(Isolate* isolate);
static Callable SubString(Isolate* isolate);
static Callable Typeof(Isolate* isolate);
@@ -96,11 +121,16 @@ class CodeFactory final {
static Callable AllocateHeapNumber(Isolate* isolate);
static Callable AllocateMutableHeapNumber(Isolate* isolate);
#define SIMD128_ALLOC(TYPE, Type, type, lane_count, lane_type) \
static Callable Allocate##Type(Isolate* isolate);
SIMD128_TYPES(SIMD128_ALLOC)
#undef SIMD128_ALLOC
static Callable AllocateInNewSpace(Isolate* isolate);
static Callable ArgumentAdaptor(Isolate* isolate);
static Callable Call(Isolate* isolate,
ConvertReceiverMode mode = ConvertReceiverMode::kAny);
ConvertReceiverMode mode = ConvertReceiverMode::kAny,
TailCallMode tail_call_mode = TailCallMode::kDisallow);
static Callable CallFunction(
Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny);
static Callable Construct(Isolate* isolate);

284
deps/v8/src/code-stubs-hydrogen.cc

@@ -78,6 +78,9 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
Representation representation,
bool transition_to_field);
HValue* BuildPushElement(HValue* object, HValue* argc,
HValue* argument_elements, ElementsKind kind);
enum ArgumentClass {
NONE,
SINGLE,
@@ -294,7 +297,7 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
if (FLAG_profile_hydrogen_code_stub_compilation) {
timer.Start();
}
Zone zone;
Zone zone(isolate->allocator());
CompilationInfo info(CodeStub::MajorName(stub->MajorKey()), isolate, &zone,
stub->GetCodeFlags());
// Parameter count is number of stack parameters.
@@ -780,6 +783,214 @@ Handle<Code> StoreScriptContextFieldStub::GenerateCode() {
return DoGenerateCode(this);
}
HValue* CodeStubGraphBuilderBase::BuildPushElement(HValue* object, HValue* argc,
HValue* argument_elements,
ElementsKind kind) {
// Precheck whether all elements fit into the array.
if (!IsFastObjectElementsKind(kind)) {
LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
HValue* start = graph()->GetConstant0();
HValue* key = builder.BeginBody(start, argc, Token::LT);
{
HInstruction* argument =
Add<HAccessArgumentsAt>(argument_elements, argc, key);
IfBuilder can_store(this);
can_store.IfNot<HIsSmiAndBranch>(argument);
if (IsFastDoubleElementsKind(kind)) {
can_store.And();
can_store.IfNot<HCompareMap>(argument,
isolate()->factory()->heap_number_map());
}
can_store.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
can_store.End();
}
builder.EndBody();
}
HValue* length = Add<HLoadNamedField>(object, nullptr,
HObjectAccess::ForArrayLength(kind));
HValue* new_length = AddUncasted<HAdd>(length, argc);
HValue* max_key = AddUncasted<HSub>(new_length, graph()->GetConstant1());
HValue* elements = Add<HLoadNamedField>(object, nullptr,
HObjectAccess::ForElementsPointer());
elements = BuildCheckForCapacityGrow(object, elements, kind, length, max_key,
true, STORE);
LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
HValue* start = graph()->GetConstant0();
HValue* key = builder.BeginBody(start, argc, Token::LT);
{
HValue* argument = Add<HAccessArgumentsAt>(argument_elements, argc, key);
HValue* index = AddUncasted<HAdd>(key, length);
AddElementAccess(elements, index, argument, object, nullptr, kind, STORE);
}
builder.EndBody();
return new_length;
}
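// (Illustration, not in the original file.) In effect, BuildPushElement
// emits the fast path of Array.prototype.push for one elements kind:
//   for (i = 0; i < argc; i++) elements[length + i] = args[i];
//   return length + argc;  // the new JSArray length
// deopting if an incoming argument cannot be stored with that kind.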
template <>
HValue* CodeStubGraphBuilder<FastArrayPushStub>::BuildCodeStub() {
// TODO(verwaest): Fix deoptimizer messages.
HValue* argc = GetArgumentsLength();
HInstruction* argument_elements = Add<HArgumentsElements>(false, false);
HInstruction* object = Add<HAccessArgumentsAt>(argument_elements, argc,
graph()->GetConstantMinus1());
BuildCheckHeapObject(object);
HValue* map = Add<HLoadNamedField>(object, nullptr, HObjectAccess::ForMap());
Add<HCheckInstanceType>(object, HCheckInstanceType::IS_JS_ARRAY);
// Disallow pushing onto prototypes. It might be the JSArray prototype.
// Disallow pushing onto non-extensible objects.
{
HValue* bit_field2 =
Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField2());
HValue* mask =
Add<HConstant>(static_cast<int>(Map::IsPrototypeMapBits::kMask) |
(1 << Map::kIsExtensible));
HValue* bits = AddUncasted<HBitwise>(Token::BIT_AND, bit_field2, mask);
IfBuilder check(this);
check.If<HCompareNumericAndBranch>(
bits, Add<HConstant>(1 << Map::kIsExtensible), Token::NE);
check.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
check.End();
}
// Disallow pushing onto observed objects.
{
HValue* bit_field =
Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField());
HValue* mask = Add<HConstant>(1 << Map::kIsObserved);
HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, bit_field, mask);
IfBuilder check(this);
check.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
check.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
check.End();
}
// Disallow pushing onto arrays in dictionary named property mode. We need to
// figure out whether the length property is still writable.
{
HValue* bit_field3 =
Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField3());
HValue* mask = Add<HConstant>(static_cast<int>(Map::DictionaryMap::kMask));
HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, bit_field3, mask);
IfBuilder check(this);
check.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
check.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
check.End();
}
// Check whether the length property is writable. The length property is the
// only default named property on arrays. It's nonconfigurable, hence is
// guaranteed to stay the first property.
{
HValue* descriptors =
Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapDescriptors());
HValue* details = Add<HLoadKeyed>(
descriptors, Add<HConstant>(DescriptorArray::ToDetailsIndex(0)),
nullptr, nullptr, FAST_SMI_ELEMENTS);
HValue* mask =
Add<HConstant>(READ_ONLY << PropertyDetails::AttributesField::kShift);
HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, details, mask);
IfBuilder readonly(this);
readonly.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
readonly.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
readonly.End();
}
HValue* null = Add<HLoadRoot>(Heap::kNullValueRootIndex);
HValue* empty = Add<HLoadRoot>(Heap::kEmptyFixedArrayRootIndex);
environment()->Push(map);
LoopBuilder check_prototypes(this);
check_prototypes.BeginBody(1);
{
HValue* parent_map = environment()->Pop();
HValue* prototype = Add<HLoadNamedField>(parent_map, nullptr,
HObjectAccess::ForPrototype());
IfBuilder is_null(this);
is_null.If<HCompareObjectEqAndBranch>(prototype, null);
is_null.Then();
check_prototypes.Break();
is_null.End();
HValue* prototype_map =
Add<HLoadNamedField>(prototype, nullptr, HObjectAccess::ForMap());
HValue* instance_type = Add<HLoadNamedField>(
prototype_map, nullptr, HObjectAccess::ForMapInstanceType());
IfBuilder check_instance_type(this);
check_instance_type.If<HCompareNumericAndBranch>(
instance_type, Add<HConstant>(LAST_CUSTOM_ELEMENTS_RECEIVER),
Token::LTE);
check_instance_type.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
check_instance_type.End();
HValue* elements = Add<HLoadNamedField>(
prototype, nullptr, HObjectAccess::ForElementsPointer());
IfBuilder no_elements(this);
no_elements.IfNot<HCompareObjectEqAndBranch>(elements, empty);
no_elements.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
no_elements.End();
environment()->Push(prototype_map);
}
check_prototypes.EndBody();
HValue* bit_field2 =
Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField2());
HValue* kind = BuildDecodeField<Map::ElementsKindBits>(bit_field2);
// Below we only check the upper bound of the relevant ranges to include both
// holey and non-holey versions. We check them in order smi, object, double
// since smi < object < double.
STATIC_ASSERT(FAST_SMI_ELEMENTS < FAST_HOLEY_SMI_ELEMENTS);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS < FAST_HOLEY_ELEMENTS);
STATIC_ASSERT(FAST_ELEMENTS < FAST_HOLEY_ELEMENTS);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS < FAST_HOLEY_DOUBLE_ELEMENTS);
STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FAST_HOLEY_DOUBLE_ELEMENTS);
IfBuilder has_smi_elements(this);
has_smi_elements.If<HCompareNumericAndBranch>(
kind, Add<HConstant>(FAST_HOLEY_SMI_ELEMENTS), Token::LTE);
has_smi_elements.Then();
{
HValue* new_length = BuildPushElement(object, argc, argument_elements,
FAST_HOLEY_SMI_ELEMENTS);
environment()->Push(new_length);
}
has_smi_elements.Else();
{
IfBuilder has_object_elements(this);
has_object_elements.If<HCompareNumericAndBranch>(
kind, Add<HConstant>(FAST_HOLEY_ELEMENTS), Token::LTE);
has_object_elements.Then();
{
HValue* new_length = BuildPushElement(object, argc, argument_elements,
FAST_HOLEY_ELEMENTS);
environment()->Push(new_length);
}
has_object_elements.Else();
{
IfBuilder has_double_elements(this);
has_double_elements.If<HCompareNumericAndBranch>(
kind, Add<HConstant>(FAST_HOLEY_DOUBLE_ELEMENTS), Token::LTE);
has_double_elements.Then();
{
HValue* new_length = BuildPushElement(object, argc, argument_elements,
FAST_HOLEY_DOUBLE_ELEMENTS);
environment()->Push(new_length);
}
has_double_elements.ElseDeopt(Deoptimizer::kFastArrayPushFailed);
has_double_elements.End();
}
has_object_elements.End();
}
has_smi_elements.End();
return environment()->Pop();
}
Handle<Code> FastArrayPushStub::GenerateCode() { return DoGenerateCode(this); }
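For orientation (a restatement, not diff content), the nested IfBuilders above implement a three-way dispatch over ElementsKind; thanks to the STATIC_ASSERTed ordering, comparing against the holey upper bound of each range covers the packed variant as well:

// Equivalent pseudo-logic of the elements-kind dispatch above.
if (kind <= FAST_HOLEY_SMI_ELEMENTS) {
  // push as (holey) smi elements
} else if (kind <= FAST_HOLEY_ELEMENTS) {
  // push as (holey) object elements
} else if (kind <= FAST_HOLEY_DOUBLE_ELEMENTS) {
  // push as (holey) double elements
} else {
  // anything else deopts with Deoptimizer::kFastArrayPushFailed
}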
template <>
HValue* CodeStubGraphBuilder<GrowArrayElementsStub>::BuildCodeStub() {
@@ -1184,36 +1395,6 @@ Handle<Code> TransitionElementsKindStub::GenerateCode() {
}
template <>
HValue* CodeStubGraphBuilder<AllocateHeapNumberStub>::BuildCodeStub() {
HValue* result =
Add<HAllocate>(Add<HConstant>(HeapNumber::kSize), HType::HeapNumber(),
NOT_TENURED, HEAP_NUMBER_TYPE);
AddStoreMapConstant(result, isolate()->factory()->heap_number_map());
return result;
}
Handle<Code> AllocateHeapNumberStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<AllocateMutableHeapNumberStub>::BuildCodeStub() {
HValue* result =
Add<HAllocate>(Add<HConstant>(HeapNumber::kSize), HType::HeapObject(),
NOT_TENURED, MUTABLE_HEAP_NUMBER_TYPE);
AddStoreMapConstant(result, isolate()->factory()->mutable_heap_number_map());
return result;
}
Handle<Code> AllocateMutableHeapNumberStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<AllocateInNewSpaceStub>::BuildCodeStub() {
HValue* result = Add<HAllocate>(GetParameter(0), HType::Tagged(), NOT_TENURED,
@@ -1418,31 +1599,6 @@ Handle<Code> InternalArrayNArgumentsConstructorStub::GenerateCode() {
}
template <>
HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
Isolate* isolate = graph()->isolate();
CompareNilICStub* stub = casted_stub();
HIfContinuation continuation;
Handle<Map> sentinel_map(isolate->heap()->meta_map());
Type* type = stub->GetType(zone(), sentinel_map);
BuildCompareNil(GetParameter(0), type, &continuation, kEmbedMapsViaWeakCells);
IfBuilder if_nil(this, &continuation);
if_nil.Then();
if (continuation.IsFalseReachable()) {
if_nil.Else();
if_nil.Return(graph()->GetConstantFalse());
}
if_nil.End();
return continuation.IsTrueReachable() ? graph()->GetConstantTrue()
: graph()->GetConstantUndefined();
}
Handle<Code> CompareNilICStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
BinaryOpICState state = casted_stub()->state();
@@ -1588,11 +1744,10 @@ HValue* CodeStubGraphBuilderBase::BuildToString(HValue* input, bool convert) {
}
if_inputisprimitive.End();
// Convert the primitive to a string value.
ToStringDescriptor descriptor(isolate());
ToStringStub stub(isolate());
HValue* values[] = {context(), Pop()};
Push(AddUncasted<HCallWithDescriptor>(
Add<HConstant>(stub.GetCode()), 0, descriptor,
Add<HConstant>(stub.GetCode()), 0, stub.GetCallInterfaceDescriptor(),
Vector<HValue*>(values, arraysize(values))));
}
if_inputisstring.End();
@@ -1706,10 +1861,9 @@ Handle<Code> StringAddStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
ToBooleanStub* stub = casted_stub();
HValue* CodeStubGraphBuilder<ToBooleanICStub>::BuildCodeInitializedStub() {
ToBooleanICStub* stub = casted_stub();
IfBuilder if_true(this);
if_true.If<HBranch>(GetParameter(0), stub->types());
if_true.Then();
@@ -1719,11 +1873,7 @@ HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
return graph()->GetConstantFalse();
}
Handle<Code> ToBooleanStub::GenerateCode() {
return DoGenerateCode(this);
}
Handle<Code> ToBooleanICStub::GenerateCode() { return DoGenerateCode(this); }
template <>
HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
@@ -1855,7 +2005,7 @@ Handle<Code> ElementsTransitionAndStoreStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<ToObjectStub>::BuildCodeStub() {
HValue* receiver = GetParameter(ToObjectDescriptor::kReceiverIndex);
HValue* receiver = GetParameter(TypeConversionDescriptor::kArgumentIndex);
return BuildToObject(receiver);
}

2748
deps/v8/src/code-stubs.cc

File diff suppressed because it is too large

573
deps/v8/src/code-stubs.h

@@ -23,8 +23,7 @@ namespace internal {
/* PlatformCodeStubs */ \
V(ArrayConstructor) \
V(BinaryOpICWithAllocationSite) \
V(CallApiFunction) \
V(CallApiAccessor) \
V(CallApiCallback) \
V(CallApiGetter) \
V(CallConstruct) \
V(CallIC) \
@@ -38,7 +37,6 @@ namespace internal {
V(KeyedLoadICTrampoline) \
V(LoadICTrampoline) \
V(CallICTrampoline) \
V(LoadIndexedInterceptor) \
V(LoadIndexedString) \
V(MathPow) \
V(ProfileEntryHook) \
@@ -46,11 +44,11 @@ namespace internal {
V(RegExpExec) \
V(StoreBufferOverflow) \
V(StoreElement) \
V(StringCompare) \
V(StubFailureTrampoline) \
V(SubString) \
V(ToNumber) \
V(ToLength) \
V(NonNumberToNumber) \
V(StringToNumber) \
V(ToString) \
V(ToName) \
V(ToObject) \
@@ -59,18 +57,16 @@ namespace internal {
V(VectorStoreIC) \
V(VectorKeyedStoreIC) \
/* HydrogenCodeStubs */ \
V(AllocateHeapNumber) \
V(AllocateMutableHeapNumber) \
V(AllocateInNewSpace) \
V(ArrayNArgumentsConstructor) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
V(BinaryOpIC) \
V(BinaryOpWithAllocationSite) \
V(CompareNilIC) \
V(CreateAllocationSite) \
V(CreateWeakCell) \
V(ElementsTransitionAndStore) \
V(FastArrayPush) \
V(FastCloneRegExp) \
V(FastCloneShallowArray) \
V(FastCloneShallowObject) \
@@ -96,20 +92,56 @@ namespace internal {
V(StoreGlobalViaContext) \
V(StoreScriptContextField) \
V(StringAdd) \
V(ToBoolean) \
V(ToBooleanIC) \
V(TransitionElementsKind) \
V(KeyedLoadIC) \
V(LoadIC) \
/* TurboFanCodeStubs */ \
V(AllocateHeapNumber) \
V(AllocateMutableHeapNumber) \
V(AllocateFloat32x4) \
V(AllocateInt32x4) \
V(AllocateUint32x4) \
V(AllocateBool32x4) \
V(AllocateInt16x8) \
V(AllocateUint16x8) \
V(AllocateBool16x8) \
V(AllocateInt8x16) \
V(AllocateUint8x16) \
V(AllocateBool8x16) \
V(StringLength) \
V(Add) \
V(Subtract) \
V(BitwiseAnd) \
V(BitwiseOr) \
V(BitwiseXor) \
V(LessThan) \
V(LessThanOrEqual) \
V(GreaterThan) \
V(GreaterThanOrEqual) \
V(Equal) \
V(NotEqual) \
V(StrictEqual) \
V(StrictNotEqual) \
V(StringEqual) \
V(StringNotEqual) \
V(StringLessThan) \
V(StringLessThanOrEqual) \
V(StringGreaterThan) \
V(StringGreaterThanOrEqual) \
V(ToBoolean) \
V(ToInteger) \
V(ToLength) \
/* IC Handler stubs */ \
V(ArrayBufferViewLoadField) \
V(LoadConstant) \
V(LoadFastElement) \
V(LoadField) \
V(LoadIndexedInterceptor) \
V(KeyedLoadSloppyArguments) \
V(KeyedStoreSloppyArguments) \
V(StoreField) \
V(StoreInterceptor) \
V(StoreGlobal) \
V(StoreTransition)
@@ -157,13 +189,24 @@ namespace internal {
#define CODE_STUB_LIST_MIPS(V)
#endif
// List of code stubs only used on S390 platforms.
#ifdef V8_TARGET_ARCH_S390
#define CODE_STUB_LIST_S390(V) \
V(DirectCEntry) \
V(StoreRegistersState) \
V(RestoreRegistersState)
#else
#define CODE_STUB_LIST_S390(V)
#endif
// Combined list of code stubs.
#define CODE_STUB_LIST(V) \
CODE_STUB_LIST_ALL_PLATFORMS(V) \
CODE_STUB_LIST_ARM(V) \
CODE_STUB_LIST_ARM64(V) \
CODE_STUB_LIST_PPC(V) \
CODE_STUB_LIST_MIPS(V)
CODE_STUB_LIST_MIPS(V) \
CODE_STUB_LIST_S390(V)
static const int kHasReturnedMinusZeroSentinel = 1;
@@ -347,11 +390,10 @@ class CodeStub BASE_EMBEDDED {
Handle<Code> GenerateCode() override; \
DEFINE_CODE_STUB(NAME, SUPER)
#define DEFINE_TURBOFAN_CODE_STUB(NAME, SUPER) \
public: \
CallInterfaceDescriptor GetCallInterfaceDescriptor() const override { \
return DESC##Descriptor(isolate()); \
}; \
#define DEFINE_TURBOFAN_CODE_STUB(NAME, SUPER) \
public: \
void GenerateAssembly(compiler::CodeStubAssembler* assembler) \
const override; \
DEFINE_CODE_STUB(NAME, SUPER)
#define DEFINE_HANDLER_CODE_STUB(NAME, SUPER) \
@@ -584,6 +626,8 @@ class RuntimeCallHelper {
#include "src/mips/code-stubs-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/code-stubs-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/s390/code-stubs-s390.h"
#elif V8_TARGET_ARCH_X87
#include "src/x87/code-stubs-x87.h"
#else
@@ -625,12 +669,212 @@ class StringLengthStub : public TurboFanCodeStub {
InlineCacheState GetICState() const override { return MONOMORPHIC; }
ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;
DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
DEFINE_CODE_STUB(StringLength, TurboFanCodeStub);
DEFINE_TURBOFAN_CODE_STUB(StringLength, TurboFanCodeStub);
};
class AddStub final : public TurboFanCodeStub {
public:
explicit AddStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
DEFINE_TURBOFAN_CODE_STUB(Add, TurboFanCodeStub);
};
class SubtractStub final : public TurboFanCodeStub {
public:
explicit SubtractStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
DEFINE_TURBOFAN_CODE_STUB(Subtract, TurboFanCodeStub);
};
class BitwiseAndStub final : public TurboFanCodeStub {
public:
explicit BitwiseAndStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
DEFINE_TURBOFAN_CODE_STUB(BitwiseAnd, TurboFanCodeStub);
};
class BitwiseOrStub final : public TurboFanCodeStub {
public:
explicit BitwiseOrStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
DEFINE_TURBOFAN_CODE_STUB(BitwiseOr, TurboFanCodeStub);
};
class BitwiseXorStub final : public TurboFanCodeStub {
public:
explicit BitwiseXorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
DEFINE_TURBOFAN_CODE_STUB(BitwiseXor, TurboFanCodeStub);
};
class LessThanStub final : public TurboFanCodeStub {
public:
explicit LessThanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
DEFINE_TURBOFAN_CODE_STUB(LessThan, TurboFanCodeStub);
};
class LessThanOrEqualStub final : public TurboFanCodeStub {
public:
explicit LessThanOrEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
DEFINE_TURBOFAN_CODE_STUB(LessThanOrEqual, TurboFanCodeStub);
};
class GreaterThanStub final : public TurboFanCodeStub {
public:
explicit GreaterThanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
DEFINE_TURBOFAN_CODE_STUB(GreaterThan, TurboFanCodeStub);
};
class GreaterThanOrEqualStub final : public TurboFanCodeStub {
public:
explicit GreaterThanOrEqualStub(Isolate* isolate)
: TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
DEFINE_TURBOFAN_CODE_STUB(GreaterThanOrEqual, TurboFanCodeStub);
};
class EqualStub final : public TurboFanCodeStub {
public:
explicit EqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
DEFINE_TURBOFAN_CODE_STUB(Equal, TurboFanCodeStub);
};
class NotEqualStub final : public TurboFanCodeStub {
public:
explicit NotEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
DEFINE_TURBOFAN_CODE_STUB(NotEqual, TurboFanCodeStub);
};
class StrictEqualStub final : public TurboFanCodeStub {
public:
explicit StrictEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
DEFINE_TURBOFAN_CODE_STUB(StrictEqual, TurboFanCodeStub);
};
class StrictNotEqualStub final : public TurboFanCodeStub {
public:
explicit StrictNotEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
DEFINE_TURBOFAN_CODE_STUB(StrictNotEqual, TurboFanCodeStub);
};
class StringEqualStub final : public TurboFanCodeStub {
public:
explicit StringEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
DEFINE_TURBOFAN_CODE_STUB(StringEqual, TurboFanCodeStub);
};
class StringNotEqualStub final : public TurboFanCodeStub {
public:
explicit StringNotEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
DEFINE_TURBOFAN_CODE_STUB(StringNotEqual, TurboFanCodeStub);
};
class StringLessThanStub final : public TurboFanCodeStub {
public:
explicit StringLessThanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
DEFINE_TURBOFAN_CODE_STUB(StringLessThan, TurboFanCodeStub);
};
class StringLessThanOrEqualStub final : public TurboFanCodeStub {
public:
explicit StringLessThanOrEqualStub(Isolate* isolate)
: TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
DEFINE_TURBOFAN_CODE_STUB(StringLessThanOrEqual, TurboFanCodeStub);
};
class StringGreaterThanStub final : public TurboFanCodeStub {
public:
explicit StringGreaterThanStub(Isolate* isolate)
: TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
DEFINE_TURBOFAN_CODE_STUB(StringGreaterThan, TurboFanCodeStub);
};
class StringGreaterThanOrEqualStub final : public TurboFanCodeStub {
public:
explicit StringGreaterThanOrEqualStub(Isolate* isolate)
: TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
DEFINE_TURBOFAN_CODE_STUB(StringGreaterThanOrEqual, TurboFanCodeStub);
};
class ToBooleanStub final : public TurboFanCodeStub {
public:
explicit ToBooleanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
DEFINE_TURBOFAN_CODE_STUB(ToBoolean, TurboFanCodeStub);
};
class ToIntegerStub final : public TurboFanCodeStub {
public:
explicit ToIntegerStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
DEFINE_TURBOFAN_CODE_STUB(ToInteger, TurboFanCodeStub);
};
class ToLengthStub final : public TurboFanCodeStub {
public:
explicit ToLengthStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
DEFINE_TURBOFAN_CODE_STUB(ToLength, TurboFanCodeStub);
};
class StoreInterceptorStub : public TurboFanCodeStub {
public:
explicit StoreInterceptorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
DEFINE_CODE_STUB(StoreInterceptor, TurboFanCodeStub);
};
class LoadIndexedInterceptorStub : public TurboFanCodeStub {
public:
explicit LoadIndexedInterceptorStub(Isolate* isolate)
: TurboFanCodeStub(isolate) {}
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
DEFINE_TURBOFAN_CODE_STUB(LoadIndexedInterceptor, TurboFanCodeStub);
};
enum StringAddFlags {
// Omit both parameter checks.
@@ -658,7 +902,7 @@ class NumberToStringStub final : public HydrogenCodeStub {
// Parameters accessed via CodeStubGraphBuilder::GetParameter()
static const int kNumber = 0;
DEFINE_CALL_INTERFACE_DESCRIPTOR(NumberToString);
DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
DEFINE_HYDROGEN_CODE_STUB(NumberToString, HydrogenCodeStub);
};
@@ -873,12 +1117,29 @@ class GrowArrayElementsStub : public HydrogenCodeStub {
DEFINE_HYDROGEN_CODE_STUB(GrowArrayElements, HydrogenCodeStub);
};
class FastArrayPushStub : public HydrogenCodeStub {
public:
explicit FastArrayPushStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
private:
DEFINE_CALL_INTERFACE_DESCRIPTOR(FastArrayPush);
DEFINE_HYDROGEN_CODE_STUB(FastArrayPush, HydrogenCodeStub);
};
class InstanceOfStub final : public PlatformCodeStub {
public:
explicit InstanceOfStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
explicit InstanceOfStub(Isolate* isolate, bool es6_instanceof = false)
: PlatformCodeStub(isolate) {
minor_key_ = IsES6InstanceOfBits::encode(es6_instanceof);
}
bool is_es6_instanceof() const {
return IsES6InstanceOfBits::decode(minor_key_);
}
private:
class IsES6InstanceOfBits : public BitField<bool, 0, 1> {};
DEFINE_CALL_INTERFACE_DESCRIPTOR(InstanceOf);
DEFINE_PLATFORM_CODE_STUB(InstanceOf, PlatformCodeStub);
};
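A small sanity sketch of the new bit (illustrative only, assuming a live Isolate* isolate): the flag is packed into `minor_key_` at construction and decoded by the accessor.

// Hypothetical check, not from the diff: the flag round-trips.
InstanceOfStub stub(isolate, /* es6_instanceof */ true);
DCHECK(stub.is_es6_instanceof());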
@@ -1013,20 +1274,6 @@ class FunctionPrototypeStub : public PlatformCodeStub {
};
// TODO(mvstanton): Translate to hydrogen code stub.
class LoadIndexedInterceptorStub : public PlatformCodeStub {
public:
explicit LoadIndexedInterceptorStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
Code::StubType GetStubType() const override { return Code::FAST; }
DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
DEFINE_PLATFORM_CODE_STUB(LoadIndexedInterceptor, PlatformCodeStub);
};
class LoadIndexedStringStub : public PlatformCodeStub {
public:
explicit LoadIndexedStringStub(Isolate* isolate)
@@ -1418,48 +1665,36 @@ class StoreGlobalViaContextStub final : public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(StoreGlobalViaContext, PlatformCodeStub);
};
class CallApiFunctionStub : public PlatformCodeStub {
class CallApiCallbackStub : public PlatformCodeStub {
public:
explicit CallApiFunctionStub(Isolate* isolate, bool call_data_undefined)
: PlatformCodeStub(isolate) {
minor_key_ = CallDataUndefinedBits::encode(call_data_undefined);
}
private:
bool call_data_undefined() const {
return CallDataUndefinedBits::decode(minor_key_);
}
static const int kArgBits = 3;
static const int kArgMax = (1 << kArgBits) - 1;
class CallDataUndefinedBits : public BitField<bool, 0, 1> {};
// CallApiCallbackStub for regular setters and getters.
CallApiCallbackStub(Isolate* isolate, bool is_store, bool call_data_undefined,
bool is_lazy)
: CallApiCallbackStub(isolate, is_store ? 1 : 0, is_store,
call_data_undefined, is_lazy) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiFunction);
DEFINE_PLATFORM_CODE_STUB(CallApiFunction, PlatformCodeStub);
};
// CallApiCallbackStub for callback functions.
CallApiCallbackStub(Isolate* isolate, int argc, bool call_data_undefined)
: CallApiCallbackStub(isolate, argc, false, call_data_undefined, false) {}
CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
return ApiCallbackDescriptorBase::ForArgs(isolate(), argc());
}
class CallApiAccessorStub : public PlatformCodeStub {
public:
CallApiAccessorStub(Isolate* isolate, bool is_store, bool call_data_undefined,
bool is_lazy)
private:
CallApiCallbackStub(Isolate* isolate, int argc, bool is_store,
bool call_data_undefined, bool is_lazy)
: PlatformCodeStub(isolate) {
CHECK(0 <= argc && argc <= kArgMax);
minor_key_ = IsStoreBits::encode(is_store) |
CallDataUndefinedBits::encode(call_data_undefined) |
ArgumentBits::encode(is_store ? 1 : 0) |
ArgumentBits::encode(argc) |
IsLazyAccessorBits::encode(is_lazy);
}
protected:
// For CallApiFunctionWithFixedArgsStub, see below.
static const int kArgBits = 3;
CallApiAccessorStub(Isolate* isolate, int argc, bool call_data_undefined)
: PlatformCodeStub(isolate) {
minor_key_ = IsStoreBits::encode(false) |
CallDataUndefinedBits::encode(call_data_undefined) |
ArgumentBits::encode(argc);
}
private:
bool is_store() const { return IsStoreBits::decode(minor_key_); }
bool is_lazy() const { return IsLazyAccessorBits::decode(minor_key_); }
bool call_data_undefined() const {
@@ -1472,29 +1707,10 @@ class CallApiAccessorStub : public PlatformCodeStub {
class ArgumentBits : public BitField<int, 2, kArgBits> {};
class IsLazyAccessorBits : public BitField<bool, 3 + kArgBits, 1> {};
DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiAccessor);
DEFINE_PLATFORM_CODE_STUB(CallApiAccessor, PlatformCodeStub);
DEFINE_PLATFORM_CODE_STUB(CallApiCallback, PlatformCodeStub);
};
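For orientation (a sketch, not diff content; isolate is assumed in scope): the two public constructors cover the two call shapes, and both funnel into the private constructor that packs `minor_key_`.

// Accessor shape: argc is implied (1 for a store, 0 for a load).
CallApiCallbackStub accessor_stub(isolate, /* is_store */ true,
                                  /* call_data_undefined */ false,
                                  /* is_lazy */ false);
// Callback shape: argc is explicit and must satisfy 0 <= argc <= kArgMax (7).
CallApiCallbackStub callback_stub(isolate, /* argc */ 2,
                                  /* call_data_undefined */ true);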
// TODO(dcarney): see if it's possible to remove this later without performance
// degradation.
// This is not a real stub, but a way of generating the CallApiAccessorStub
// (which has the same abi) which makes it clear that it is not an accessor.
class CallApiFunctionWithFixedArgsStub : public CallApiAccessorStub {
public:
static const int kMaxFixedArgs = (1 << kArgBits) - 1;
CallApiFunctionWithFixedArgsStub(Isolate* isolate, int argc,
bool call_data_undefined)
: CallApiAccessorStub(isolate, argc, call_data_undefined) {
DCHECK(0 <= argc && argc <= kMaxFixedArgs);
}
};
typedef ApiAccessorDescriptor ApiFunctionWithFixedArgsDescriptor;
class CallApiGetterStub : public PlatformCodeStub {
public:
explicit CallApiGetterStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
@@ -1701,96 +1917,6 @@ class CompareICStub : public PlatformCodeStub {
};
class CompareNilICStub : public HydrogenCodeStub {
public:
Type* GetType(Zone* zone, Handle<Map> map = Handle<Map>());
Type* GetInputType(Zone* zone, Handle<Map> map);
CompareNilICStub(Isolate* isolate, NilValue nil) : HydrogenCodeStub(isolate) {
set_sub_minor_key(NilValueBits::encode(nil));
}
CompareNilICStub(Isolate* isolate, ExtraICState ic_state,
InitializationState init_state = INITIALIZED)
: HydrogenCodeStub(isolate, init_state) {
set_sub_minor_key(ic_state);
}
static Handle<Code> GetUninitialized(Isolate* isolate,
NilValue nil) {
return CompareNilICStub(isolate, nil, UNINITIALIZED).GetCode();
}
InlineCacheState GetICState() const override {
State state = this->state();
if (state.Contains(GENERIC)) {
return MEGAMORPHIC;
} else if (state.Contains(MONOMORPHIC_MAP)) {
return MONOMORPHIC;
} else {
return PREMONOMORPHIC;
}
}
Code::Kind GetCodeKind() const override { return Code::COMPARE_NIL_IC; }
ExtraICState GetExtraICState() const override { return sub_minor_key(); }
void UpdateStatus(Handle<Object> object);
bool IsMonomorphic() const { return state().Contains(MONOMORPHIC_MAP); }
NilValue nil_value() const { return NilValueBits::decode(sub_minor_key()); }
void ClearState() {
set_sub_minor_key(TypesBits::update(sub_minor_key(), 0));
}
void PrintState(std::ostream& os) const override; // NOLINT
void PrintBaseName(std::ostream& os) const override; // NOLINT
private:
CompareNilICStub(Isolate* isolate, NilValue nil,
InitializationState init_state)
: HydrogenCodeStub(isolate, init_state) {
set_sub_minor_key(NilValueBits::encode(nil));
}
enum CompareNilType {
UNDEFINED,
NULL_TYPE,
MONOMORPHIC_MAP,
GENERIC,
NUMBER_OF_TYPES
};
// At most 6 different types can be distinguished, because the Code object
// only has room for a single byte to hold a set and there are two more
// boolean flags we need to store. :-P
STATIC_ASSERT(NUMBER_OF_TYPES <= 6);
class State : public EnumSet<CompareNilType, byte> {
public:
State() : EnumSet<CompareNilType, byte>(0) { }
explicit State(byte bits) : EnumSet<CompareNilType, byte>(bits) { }
};
friend std::ostream& operator<<(std::ostream& os, const State& s);
State state() const { return State(TypesBits::decode(sub_minor_key())); }
class NilValueBits : public BitField<NilValue, 0, 1> {};
class TypesBits : public BitField<byte, 1, NUMBER_OF_TYPES> {};
friend class CompareNilIC;
DEFINE_CALL_INTERFACE_DESCRIPTOR(CompareNil);
DEFINE_HYDROGEN_CODE_STUB(CompareNilIC, HydrogenCodeStub);
};
std::ostream& operator<<(std::ostream& os, const CompareNilICStub::State& s);
class CEntryStub : public PlatformCodeStub {
public:
CEntryStub(Isolate* isolate, int result_size,
@@ -2499,28 +2625,45 @@ class TransitionElementsKindStub : public HydrogenCodeStub {
DEFINE_HYDROGEN_CODE_STUB(TransitionElementsKind, HydrogenCodeStub);
};
class AllocateHeapNumberStub final : public HydrogenCodeStub {
class AllocateHeapNumberStub : public TurboFanCodeStub {
public:
explicit AllocateHeapNumberStub(Isolate* isolate)
: HydrogenCodeStub(isolate) {}
: TurboFanCodeStub(isolate) {}
void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;
private:
DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateHeapNumber);
DEFINE_HYDROGEN_CODE_STUB(AllocateHeapNumber, HydrogenCodeStub);
DEFINE_CODE_STUB(AllocateHeapNumber, TurboFanCodeStub);
};
class AllocateMutableHeapNumberStub final : public HydrogenCodeStub {
class AllocateMutableHeapNumberStub : public TurboFanCodeStub {
public:
explicit AllocateMutableHeapNumberStub(Isolate* isolate)
: HydrogenCodeStub(isolate) {}
: TurboFanCodeStub(isolate) {}
private:
DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateMutableHeapNumber);
DEFINE_HYDROGEN_CODE_STUB(AllocateMutableHeapNumber, HydrogenCodeStub);
};
void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;
DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateMutableHeapNumber);
DEFINE_CODE_STUB(AllocateMutableHeapNumber, TurboFanCodeStub);
};
#define SIMD128_ALLOC_STUB(TYPE, Type, type, lane_count, lane_type) \
class Allocate##Type##Stub : public TurboFanCodeStub { \
public: \
explicit Allocate##Type##Stub(Isolate* isolate) \
: TurboFanCodeStub(isolate) {} \
\
void InitializeDescriptor(CodeStubDescriptor* descriptor) override; \
void GenerateAssembly( \
compiler::CodeStubAssembler* assembler) const override; \
\
DEFINE_CALL_INTERFACE_DESCRIPTOR(Allocate##Type); \
DEFINE_CODE_STUB(Allocate##Type, TurboFanCodeStub); \
};
SIMD128_TYPES(SIMD128_ALLOC_STUB)
#undef SIMD128_ALLOC_STUB
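Expanded for one entry, the macro above declares a TurboFan stub class per SIMD128 type; for Int32x4 it is equivalent to:

// Equivalent expansion of SIMD128_ALLOC_STUB for Int32x4.
class AllocateInt32x4Stub : public TurboFanCodeStub {
 public:
  explicit AllocateInt32x4Stub(Isolate* isolate) : TurboFanCodeStub(isolate) {}

  void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
  void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;

  DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateInt32x4);
  DEFINE_CODE_STUB(AllocateInt32x4, TurboFanCodeStub);
};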
class AllocateInNewSpaceStub final : public HydrogenCodeStub {
public:
@@ -2727,8 +2870,7 @@ class StoreElementStub : public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(StoreElement, PlatformCodeStub);
};
class ToBooleanStub: public HydrogenCodeStub {
class ToBooleanICStub : public HydrogenCodeStub {
public:
enum Type {
UNDEFINED,
@@ -2755,14 +2897,14 @@ class ToBooleanStub: public HydrogenCodeStub {
bool UpdateStatus(Handle<Object> object);
bool NeedsMap() const;
bool CanBeUndetectable() const {
return Contains(ToBooleanStub::SPEC_OBJECT);
return Contains(ToBooleanICStub::SPEC_OBJECT);
}
bool IsGeneric() const { return ToIntegral() == Generic().ToIntegral(); }
static Types Generic() { return Types((1 << NUMBER_OF_TYPES) - 1); }
};
ToBooleanStub(Isolate* isolate, ExtraICState state)
ToBooleanICStub(Isolate* isolate, ExtraICState state)
: HydrogenCodeStub(isolate) {
set_sub_minor_key(TypesBits::encode(static_cast<uint16_t>(state)));
}
@@ -2776,7 +2918,7 @@ class ToBooleanStub: public HydrogenCodeStub {
bool SometimesSetsUpAFrame() override { return false; }
static Handle<Code> GetUninitialized(Isolate* isolate) {
return ToBooleanStub(isolate, UNINITIALIZED).GetCode();
return ToBooleanICStub(isolate, UNINITIALIZED).GetCode();
}
ExtraICState GetExtraICState() const override { return types().ToIntegral(); }
@@ -2790,19 +2932,16 @@ class ToBooleanStub: public HydrogenCodeStub {
}
private:
ToBooleanStub(Isolate* isolate, InitializationState init_state)
: HydrogenCodeStub(isolate, init_state) {
}
ToBooleanICStub(Isolate* isolate, InitializationState init_state)
: HydrogenCodeStub(isolate, init_state) {}
class TypesBits : public BitField<uint16_t, 0, NUMBER_OF_TYPES> {};
DEFINE_CALL_INTERFACE_DESCRIPTOR(ToBoolean);
DEFINE_HYDROGEN_CODE_STUB(ToBoolean, HydrogenCodeStub);
DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
DEFINE_HYDROGEN_CODE_STUB(ToBooleanIC, HydrogenCodeStub);
};
std::ostream& operator<<(std::ostream& os, const ToBooleanStub::Types& t);
std::ostream& operator<<(std::ostream& os, const ToBooleanICStub::Types& t);
class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
public:
@@ -2910,17 +3049,25 @@ class ToNumberStub final : public PlatformCodeStub {
public:
explicit ToNumberStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(ToNumber);
DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
DEFINE_PLATFORM_CODE_STUB(ToNumber, PlatformCodeStub);
};
class NonNumberToNumberStub final : public PlatformCodeStub {
public:
explicit NonNumberToNumberStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
class ToLengthStub final : public PlatformCodeStub {
DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
DEFINE_PLATFORM_CODE_STUB(NonNumberToNumber, PlatformCodeStub);
};
class StringToNumberStub final : public PlatformCodeStub {
public:
explicit ToLengthStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
explicit StringToNumberStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(ToLength);
DEFINE_PLATFORM_CODE_STUB(ToLength, PlatformCodeStub);
DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
DEFINE_PLATFORM_CODE_STUB(StringToNumber, PlatformCodeStub);
};
@@ -2928,7 +3075,7 @@ class ToStringStub final : public PlatformCodeStub {
public:
explicit ToStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(ToString);
DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
DEFINE_PLATFORM_CODE_STUB(ToString, PlatformCodeStub);
};
@@ -2937,7 +3084,7 @@ class ToNameStub final : public PlatformCodeStub {
public:
explicit ToNameStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(ToName);
DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
DEFINE_PLATFORM_CODE_STUB(ToName, PlatformCodeStub);
};
@@ -2946,20 +3093,10 @@ class ToObjectStub final : public HydrogenCodeStub {
public:
explicit ToObjectStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(ToObject);
DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
DEFINE_HYDROGEN_CODE_STUB(ToObject, HydrogenCodeStub);
};
class StringCompareStub : public PlatformCodeStub {
public:
explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(StringCompare);
DEFINE_PLATFORM_CODE_STUB(StringCompare, PlatformCodeStub);
};
#undef DEFINE_CALL_INTERFACE_DESCRIPTOR
#undef DEFINE_PLATFORM_CODE_STUB
#undef DEFINE_HANDLER_CODE_STUB

2
deps/v8/src/codegen.h

@@ -56,6 +56,8 @@
#include "src/mips/codegen-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
#include "src/mips64/codegen-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/s390/codegen-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/codegen-x87.h" // NOLINT
#else

247
deps/v8/src/collector.h

@@ -0,0 +1,247 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COLLECTOR_H_
#define V8_COLLECTOR_H_
#include "src/checks.h"
#include "src/list.h"
#include "src/vector.h"
namespace v8 {
namespace internal {
/*
* A class that collects values into a backing store.
* Specialized versions of the class can allow access to the backing store
* in different ways.
* There is no guarantee that the backing store is contiguous (and, as a
* consequence, no guarantees that consecutively added elements are adjacent
* in memory). The collector may move elements unless it has guaranteed not
* to.
*/
template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
class Collector {
public:
explicit Collector(int initial_capacity = kMinCapacity)
: index_(0), size_(0) {
current_chunk_ = Vector<T>::New(initial_capacity);
}
virtual ~Collector() {
// Free backing store (in reverse allocation order).
current_chunk_.Dispose();
for (int i = chunks_.length() - 1; i >= 0; i--) {
chunks_.at(i).Dispose();
}
}
// Add a single element.
inline void Add(T value) {
if (index_ >= current_chunk_.length()) {
Grow(1);
}
current_chunk_[index_] = value;
index_++;
size_++;
}
// Add a block of contiguous elements and return a Vector backed by the
// memory area.
// A basic Collector will keep this vector valid as long as the Collector
// is alive.
inline Vector<T> AddBlock(int size, T initial_value) {
DCHECK(size > 0);
if (size > current_chunk_.length() - index_) {
Grow(size);
}
T* position = current_chunk_.start() + index_;
index_ += size;
size_ += size;
for (int i = 0; i < size; i++) {
position[i] = initial_value;
}
return Vector<T>(position, size);
}
// Add a contiguous block of elements and return a vector backed
// by the added block.
// A basic Collector will keep this vector valid as long as the Collector
// is alive.
inline Vector<T> AddBlock(Vector<const T> source) {
if (source.length() > current_chunk_.length() - index_) {
Grow(source.length());
}
T* position = current_chunk_.start() + index_;
index_ += source.length();
size_ += source.length();
for (int i = 0; i < source.length(); i++) {
position[i] = source[i];
}
return Vector<T>(position, source.length());
}
// Write the contents of the collector into the provided vector.
void WriteTo(Vector<T> destination) {
DCHECK(size_ <= destination.length());
int position = 0;
for (int i = 0; i < chunks_.length(); i++) {
Vector<T> chunk = chunks_.at(i);
for (int j = 0; j < chunk.length(); j++) {
destination[position] = chunk[j];
position++;
}
}
for (int i = 0; i < index_; i++) {
destination[position] = current_chunk_[i];
position++;
}
}
// Allocate a single contiguous vector, copy all the collected
// elements to the vector, and return it.
// The caller is responsible for freeing the memory of the returned
// vector (e.g., using Vector::Dispose).
Vector<T> ToVector() {
Vector<T> new_store = Vector<T>::New(size_);
WriteTo(new_store);
return new_store;
}
// Resets the collector to be empty.
virtual void Reset() {
for (int i = chunks_.length() - 1; i >= 0; i--) {
chunks_.at(i).Dispose();
}
chunks_.Rewind(0);
index_ = 0;
size_ = 0;
}
// Total number of elements added to collector so far.
inline int size() { return size_; }
protected:
static const int kMinCapacity = 16;
List<Vector<T> > chunks_;
Vector<T> current_chunk_; // Block of memory currently being written into.
int index_; // Current index in current chunk.
int size_; // Total number of elements in collector.
// Creates a new current chunk, and stores the old chunk in the chunks_ list.
void Grow(int min_capacity) {
DCHECK(growth_factor > 1);
int new_capacity;
int current_length = current_chunk_.length();
if (current_length < kMinCapacity) {
// The collector started out as empty.
new_capacity = min_capacity * growth_factor;
if (new_capacity < kMinCapacity) new_capacity = kMinCapacity;
} else {
int growth = current_length * (growth_factor - 1);
if (growth > max_growth) {
growth = max_growth;
}
new_capacity = current_length + growth;
if (new_capacity < min_capacity) {
new_capacity = min_capacity + growth;
}
}
NewChunk(new_capacity);
DCHECK(index_ + min_capacity <= current_chunk_.length());
}
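// (Illustration, not in the original file.) Example of the policy above:
// with growth_factor == 2 and a full 1024-element chunk, growth == 1024,
// so NewChunk(2048) is requested; growth is capped at max_growth
// (1M elements by default), keeping chunk growth bounded.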
// Before replacing the current chunk, give a subclass the option to move
// some of the current data into the new chunk. The function may update
// the current index_ value to represent data no longer in the current chunk.
// On return, index_ points at the first free slot of the new chunk (after
// any copied data).
virtual void NewChunk(int new_capacity) {
Vector<T> new_chunk = Vector<T>::New(new_capacity);
if (index_ > 0) {
chunks_.Add(current_chunk_.SubVector(0, index_));
} else {
current_chunk_.Dispose();
}
current_chunk_ = new_chunk;
index_ = 0;
}
};
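A minimal usage sketch, assuming a caller inside v8::internal (only the methods defined above are used):

// Collect values chunk by chunk, then flatten into one contiguous vector.
Collector<int> collector;
for (int i = 0; i < 1000; i++) {
  collector.Add(i);  // earlier elements may move between chunks as it grows
}
Vector<int> flat = collector.ToVector();  // contiguous copy; caller owns it
// ... use flat ...
flat.Dispose();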
/*
* A collector that allows sequences of values to be guaranteed to
* stay consecutive.
* If the backing store grows while a sequence is active, the current
* sequence might be moved, but after the sequence is ended, it will
* not move again.
* NOTICE: Blocks allocated using Collector::AddBlock(int) can move
* as well, if inside an active sequence where another element is added.
*/
template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
class SequenceCollector : public Collector<T, growth_factor, max_growth> {
public:
explicit SequenceCollector(int initial_capacity)
: Collector<T, growth_factor, max_growth>(initial_capacity),
sequence_start_(kNoSequence) {}
virtual ~SequenceCollector() {}
void StartSequence() {
DCHECK(sequence_start_ == kNoSequence);
sequence_start_ = this->index_;
}
Vector<T> EndSequence() {
DCHECK(sequence_start_ != kNoSequence);
int sequence_start = sequence_start_;
sequence_start_ = kNoSequence;
if (sequence_start == this->index_) return Vector<T>();
return this->current_chunk_.SubVector(sequence_start, this->index_);
}
// Drops the currently added sequence, and all collected elements in it.
void DropSequence() {
DCHECK(sequence_start_ != kNoSequence);
int sequence_length = this->index_ - sequence_start_;
this->index_ = sequence_start_;
this->size_ -= sequence_length;
sequence_start_ = kNoSequence;
}
virtual void Reset() {
sequence_start_ = kNoSequence;
this->Collector<T, growth_factor, max_growth>::Reset();
}
private:
static const int kNoSequence = -1;
int sequence_start_;
// Move the currently active sequence to the new chunk.
virtual void NewChunk(int new_capacity) {
if (sequence_start_ == kNoSequence) {
// Fall back on default behavior if no sequence has been started.
this->Collector<T, growth_factor, max_growth>::NewChunk(new_capacity);
return;
}
int sequence_length = this->index_ - sequence_start_;
Vector<T> new_chunk = Vector<T>::New(sequence_length + new_capacity);
DCHECK(sequence_length < new_chunk.length());
for (int i = 0; i < sequence_length; i++) {
new_chunk[i] = this->current_chunk_[sequence_start_ + i];
}
if (sequence_start_ > 0) {
this->chunks_.Add(this->current_chunk_.SubVector(0, sequence_start_));
} else {
this->current_chunk_.Dispose();
}
this->current_chunk_ = new_chunk;
this->index_ = sequence_length;
sequence_start_ = 0;
}
};
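And a corresponding sketch for the sequence variant: elements added between StartSequence() and EndSequence() are guaranteed to stay contiguous once the sequence is closed.

// Hypothetical usage; byte is the v8::internal typedef for uint8_t.
SequenceCollector<byte> seq(16);
seq.StartSequence();
seq.Add(0xDE);
seq.Add(0xAD);
Vector<byte> run = seq.EndSequence();  // stable, contiguous from here on
DCHECK_EQ(2, run.length());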
} // namespace internal
} // namespace v8
#endif // V8_COLLECTOR_H_

853
deps/v8/src/compiler.cc

File diff suppressed because it is too large

274
deps/v8/src/compiler.h

@@ -9,7 +9,6 @@
#include "src/ast/ast.h"
#include "src/bailout-reason.h"
#include "src/compilation-dependencies.h"
#include "src/signature.h"
#include "src/source-position.h"
#include "src/zone.h"
@@ -17,10 +16,107 @@ namespace v8 {
namespace internal {
// Forward declarations.
class CompilationInfo;
class JavaScriptFrame;
class OptimizedCompileJob;
class ParseInfo;
class ScriptData;
// The V8 compiler API.
//
// This is the central hub for dispatching to the various compilers within V8.
// Logic for which compiler to choose and how to wire compilation results into
// the object heap should be kept inside this class.
//
// General strategy: Scripts are translated into anonymous functions w/o
// parameters which then can be executed. If the source code contains other
// functions, they might be compiled and allocated as part of the compilation
// of the source code or deferred for lazy compilation at a later point.
class Compiler : public AllStatic {
public:
enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };
// ===========================================================================
// The following family of methods ensures a given function is compiled. The
// general contract is that failures will be reported by returning {false},
// whereas successful compilation ensures the {is_compiled} predicate on the
// given function holds (except for live-edit, which compiles the world).
static bool Compile(Handle<JSFunction> function, ClearExceptionFlag flag);
static bool CompileOptimized(Handle<JSFunction> function, ConcurrencyMode);
static bool CompileDebugCode(Handle<JSFunction> function);
static bool CompileDebugCode(Handle<SharedFunctionInfo> shared);
static void CompileForLiveEdit(Handle<Script> script);
// Generate and install code from previously queued optimization job.
static void FinalizeOptimizedCompileJob(OptimizedCompileJob* job);
// Give the compiler a chance to perform low-latency initialization tasks of
// the given {function} on its instantiation. Note that only the runtime will
// offer this chance, optimized closure instantiation will not call this.
static void PostInstantiation(Handle<JSFunction> function, PretenureFlag);
// Parser::Parse, then Compiler::Analyze.
static bool ParseAndAnalyze(ParseInfo* info);
// Rewrite, analyze scopes, and renumber.
static bool Analyze(ParseInfo* info);
// Adds deoptimization support, requires ParseAndAnalyze.
static bool EnsureDeoptimizationSupport(CompilationInfo* info);
// ===========================================================================
// The following family of methods instantiates new functions for scripts or
// function literals. The decision whether those functions will be compiled,
// is left to the discretion of the compiler.
//
// Please note this interface returns shared function infos. This means you
// need to call Factory::NewFunctionFromSharedFunctionInfo before you have a
// real function with a context.
// Create a (bound) function for a String source within a context for eval.
MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromEval(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
Handle<Context> context, LanguageMode language_mode,
ParseRestriction restriction, int line_offset, int column_offset = 0,
Handle<Object> script_name = Handle<Object>(),
ScriptOriginOptions options = ScriptOriginOptions());
// Create a shared function info object for a String source within a context.
static Handle<SharedFunctionInfo> GetSharedFunctionInfoForScript(
Handle<String> source, Handle<Object> script_name, int line_offset,
int column_offset, ScriptOriginOptions resource_options,
Handle<Object> source_map_url, Handle<Context> context,
v8::Extension* extension, ScriptData** cached_data,
ScriptCompiler::CompileOptions compile_options,
NativesFlag is_natives_code, bool is_module);
// Create a shared function info object for a Script that has already been
// parsed while the script was being loaded from a streamed source.
static Handle<SharedFunctionInfo> GetSharedFunctionInfoForStreamedScript(
Handle<Script> script, ParseInfo* info, int source_length);
// Create a shared function info object (the code may be lazily compiled).
static Handle<SharedFunctionInfo> GetSharedFunctionInfo(
FunctionLiteral* node, Handle<Script> script, CompilationInfo* outer);
// Create a shared function info object for a native function literal.
static Handle<SharedFunctionInfo> GetSharedFunctionInfoForNative(
v8::Extension* extension, Handle<String> name);
// ===========================================================================
// The following family of methods provides support for OSR. Code generated
// for entry via OSR might not be suitable for normal entry, hence will be
// returned directly to the caller.
//
// Please note this interface is the only part dealing with {Code} objects
// directly. Other methods are agnostic to {Code} and can use an interpreter
// instead of generating JIT code for a function at all.
// Generate and return optimized code for OSR, or empty handle on failure.
MUST_USE_RESULT static MaybeHandle<Code> GetOptimizedCodeForOSR(
Handle<JSFunction> function, BailoutId osr_ast_id,
JavaScriptFrame* osr_frame);
};
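To make the stated contract concrete, a hedged sketch (EnsureCompiled is a hypothetical helper, not part of the patch):

// Hypothetical helper: failure is reported by returning {false}; success
// establishes the is_compiled() predicate on the function.
bool EnsureCompiled(Handle<JSFunction> function) {
  if (function->is_compiled()) return true;
  if (!Compiler::Compile(function, Compiler::CLEAR_EXCEPTION)) {
    return false;  // with CLEAR_EXCEPTION, any pending exception is dropped
  }
  DCHECK(function->is_compiled());
  return true;
}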
struct InlinedFunctionInfo {
InlinedFunctionInfo(int parent_id, SourcePosition inline_position,
@@ -101,7 +197,6 @@ class CompilationInfo {
Handle<Code> code() const { return code_; }
Code::Flags code_flags() const { return code_flags_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
Handle<Code> unoptimized_code() const { return unoptimized_code_; }
int opt_count() const { return opt_count_; }
int num_parameters() const;
int num_parameters_including_this() const;
@@ -116,6 +211,11 @@ class CompilationInfo {
bool has_bytecode_array() const { return !bytecode_array_.is_null(); }
Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
Handle<AbstractCode> abstract_code() const {
return has_bytecode_array() ? Handle<AbstractCode>::cast(bytecode_array())
: Handle<AbstractCode>::cast(code());
}
bool is_tracking_positions() const { return track_positions_; }
bool is_calling() const {
@@ -218,14 +318,10 @@ class CompilationInfo {
// Generate a pre-aged prologue if we are optimizing for size, which
// will make code flushing more aggressive. Only apply to Code::FUNCTION,
// since StaticMarkingVisitor::IsFlushable only flushes proper functions.
return FLAG_optimize_for_size && FLAG_age_code && !will_serialize() &&
!is_debug() && output_code_kind() == Code::FUNCTION;
return FLAG_optimize_for_size && FLAG_age_code && !is_debug() &&
output_code_kind() == Code::FUNCTION;
}
void EnsureFeedbackVector();
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
void SetCode(Handle<Code> code) { code_ = code; }
void SetBytecodeArray(Handle<BytecodeArray> bytecode_array) {
@@ -261,10 +357,9 @@ class CompilationInfo {
code_flags_ =
Code::KindField::update(code_flags_, Code::OPTIMIZED_FUNCTION);
}
void SetOptimizingForOsr(BailoutId osr_ast_id, Handle<Code> unoptimized) {
void SetOptimizingForOsr(BailoutId osr_ast_id) {
SetOptimizing();
osr_ast_id_ = osr_ast_id;
unoptimized_code_ = unoptimized;
}
// Deoptimization support.
@@ -288,7 +383,7 @@ class CompilationInfo {
}
void ReopenHandlesInNewHandleScope() {
unoptimized_code_ = Handle<Code>(*unoptimized_code_);
// Empty for now but will be needed once fields move from ParseInfo.
}
void AbortOptimization(BailoutReason reason) {
@@ -377,12 +472,26 @@ class CompilationInfo {
return Code::ExtractKindFromFlags(code_flags_);
}
StackFrame::Type GetOutputStackFrameType() const;
protected:
ParseInfo* parse_info_;
void DisableFutureOptimization() {
if (GetFlag(kDisableFutureOptimization) && has_shared_info()) {
shared_info()->DisableOptimization(bailout_reason());
// If Crankshaft tried to optimize this function, bailed out, and
// doesn't want to try again, then use TurboFan next time.
if (!shared_info()->dont_crankshaft() &&
bailout_reason() != kOptimizedTooManyTimes) {
shared_info()->set_dont_crankshaft(true);
if (FLAG_trace_opt) {
PrintF("[disabled Crankshaft for ");
shared_info()->ShortPrint();
PrintF(", reason: %s]\n", GetBailoutReason(bailout_reason()));
}
} else {
shared_info()->DisableOptimization(bailout_reason());
}
}
}
@@ -421,16 +530,9 @@ class CompilationInfo {
// The compiled code.
Handle<Code> code_;
// Used by codegen, ultimately kept rooted by the SharedFunctionInfo.
Handle<TypeFeedbackVector> feedback_vector_;
// Compilation mode flag and whether deoptimization is allowed.
Mode mode_;
BailoutId osr_ast_id_;
// The unoptimized code we patched for OSR may not be the shared code
// afterwards, since we may need to compile it again to include deoptimization
// data. Keep track which code we patched.
Handle<Code> unoptimized_code_;
// Holds the bytecode array generated by the interpreter.
// TODO(rmcilroy/mstarzinger): Temporary work-around until compiler.cc is
@@ -475,25 +577,7 @@ class CompilationInfo {
};
// A wrapper around a CompilationInfo that detaches the Handles from
// the underlying DeferredHandleScope and stores them in info_ on
// destruction.
class CompilationHandleScope BASE_EMBEDDED {
public:
explicit CompilationHandleScope(CompilationInfo* info)
: deferred_(info->isolate()), info_(info) {}
~CompilationHandleScope() {
info_->set_deferred_handles(deferred_.Detach());
}
private:
DeferredHandleScope deferred_;
CompilationInfo* info_;
};
class HGraph;
class HOptimizedGraphBuilder;
class LChunk;
// A helper class that calls the three compilation phases in
@@ -505,12 +589,7 @@ class LChunk;
class OptimizedCompileJob: public ZoneObject {
public:
explicit OptimizedCompileJob(CompilationInfo* info)
: info_(info),
graph_builder_(NULL),
graph_(NULL),
chunk_(NULL),
last_status_(FAILED),
awaiting_install_(false) { }
: info_(info), graph_(NULL), chunk_(NULL), last_status_(FAILED) {}
enum Status {
FAILED, BAILED_OUT, SUCCEEDED
@@ -534,23 +613,14 @@ class OptimizedCompileJob: public ZoneObject {
return SetLastStatus(BAILED_OUT);
}
void WaitForInstall() {
DCHECK(info_->is_osr());
awaiting_install_ = true;
}
bool IsWaitingForInstall() { return awaiting_install_; }
private:
CompilationInfo* info_;
HOptimizedGraphBuilder* graph_builder_;
HGraph* graph_;
LChunk* chunk_;
base::TimeDelta time_taken_to_create_graph_;
base::TimeDelta time_taken_to_optimize_;
base::TimeDelta time_taken_to_codegen_;
Status last_status_;
bool awaiting_install_;
MUST_USE_RESULT Status SetLastStatus(Status status) {
last_status_ = status;
@@ -575,106 +645,6 @@ class OptimizedCompileJob: public ZoneObject {
};
};
// The V8 compiler
//
// General strategy: Source code is translated into an anonymous function w/o
// parameters which then can be executed. If the source code contains other
// functions, they will be compiled and allocated as part of the compilation
// of the source code.
// Please note this interface returns shared function infos. This means you
// need to call Factory::NewFunctionFromSharedFunctionInfo before you have a
// real function with a context.
class Compiler : public AllStatic {
public:
MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCode(
Handle<JSFunction> function);
MUST_USE_RESULT static MaybeHandle<Code> GetLazyCode(
Handle<JSFunction> function);
static bool Compile(Handle<JSFunction> function, ClearExceptionFlag flag);
static bool CompileDebugCode(Handle<JSFunction> function);
static bool CompileDebugCode(Handle<SharedFunctionInfo> shared);
static void CompileForLiveEdit(Handle<Script> script);
// Parser::Parse, then Compiler::Analyze.
static bool ParseAndAnalyze(ParseInfo* info);
// Rewrite, analyze scopes, and renumber.
static bool Analyze(ParseInfo* info);
// Adds deoptimization support, requires ParseAndAnalyze.
static bool EnsureDeoptimizationSupport(CompilationInfo* info);
// Compile a String source within a context for eval.
MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromEval(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
Handle<Context> context, LanguageMode language_mode,
ParseRestriction restriction, int line_offset, int column_offset = 0,
Handle<Object> script_name = Handle<Object>(),
ScriptOriginOptions options = ScriptOriginOptions());
// Compile a String source within a context.
static Handle<SharedFunctionInfo> CompileScript(
Handle<String> source, Handle<Object> script_name, int line_offset,
int column_offset, ScriptOriginOptions resource_options,
Handle<Object> source_map_url, Handle<Context> context,
v8::Extension* extension, ScriptData** cached_data,
ScriptCompiler::CompileOptions compile_options,
NativesFlag is_natives_code, bool is_module);
static Handle<SharedFunctionInfo> CompileStreamedScript(Handle<Script> script,
ParseInfo* info,
int source_length);
// Create a shared function info object (the code may be lazily compiled).
static Handle<SharedFunctionInfo> GetSharedFunctionInfo(
FunctionLiteral* node, Handle<Script> script, CompilationInfo* outer);
// Create a shared function info object for a native function literal.
static Handle<SharedFunctionInfo> GetSharedFunctionInfoForNative(
v8::Extension* extension, Handle<String> name);
enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };
// Generate and return optimized code or start a concurrent optimization job.
// In the latter case, return the InOptimizationQueue builtin. On failure,
// return the empty handle.
MUST_USE_RESULT static MaybeHandle<Code> GetOptimizedCode(
Handle<JSFunction> function, ConcurrencyMode mode,
BailoutId osr_ast_id = BailoutId::None(),
JavaScriptFrame* osr_frame = nullptr);
// Generate and return code from previously queued optimization job.
// On failure, return the empty handle.
MUST_USE_RESULT static MaybeHandle<Code> GetConcurrentlyOptimizedCode(
OptimizedCompileJob* job);
};
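As the interface comment above stresses, Compiler hands back SharedFunctionInfo objects that are not yet callable; Factory::NewFunctionFromSharedFunctionInfo derives a real function with a context. A self-contained model of that two-step contract (every type below is a mock; only the shape of the flow matches the V8-internal API):

    #include <cstdio>
    #include <memory>
    #include <string>

    // All types here are mocks standing in for V8-internal classes.
    struct SharedFunctionInfo {  // compiled code + metadata, context-free
      std::string name;
    };

    struct JSFunction {  // callable: shared info bound to a context
      std::shared_ptr<SharedFunctionInfo> shared;
      int context_id;
    };

    // Stands in for Compiler::CompileScript: yields only a SharedFunctionInfo.
    std::shared_ptr<SharedFunctionInfo> CompileScript(const std::string& source) {
      auto shared = std::make_shared<SharedFunctionInfo>();
      shared->name = source;
      return shared;
    }

    // Stands in for Factory::NewFunctionFromSharedFunctionInfo: binding the
    // shared info to a context is what produces a real, callable function.
    JSFunction NewFunctionFromSharedFunctionInfo(
        std::shared_ptr<SharedFunctionInfo> shared, int context_id) {
      return JSFunction{std::move(shared), context_id};
    }

    int main() {
      auto shared = CompileScript("(function f() { return 1; })");
      JSFunction f = NewFunctionFromSharedFunctionInfo(shared, /*context_id=*/1);
      std::printf("bound '%s' to context %d\n", f.shared->name.c_str(),
                  f.context_id);
      return 0;
    }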
class CompilationPhase BASE_EMBEDDED {
public:
CompilationPhase(const char* name, CompilationInfo* info);
~CompilationPhase();
protected:
bool ShouldProduceTraceOutput() const;
const char* name() const { return name_; }
CompilationInfo* info() const { return info_; }
Isolate* isolate() const { return info()->isolate(); }
Zone* zone() { return &zone_; }
private:
const char* name_;
CompilationInfo* info_;
Zone zone_;
size_t info_zone_start_allocation_size_;
base::ElapsedTimer timer_;
DISALLOW_COPY_AND_ASSIGN(CompilationPhase);
};
} // namespace internal
} // namespace v8

8  deps/v8/src/compiler/access-info.cc

@@ -192,12 +192,12 @@ bool AccessInfoFactory::ComputeElementAccessInfos(
MapTransitionList transitions(maps.length());
for (Handle<Map> map : maps) {
if (Map::TryUpdate(map).ToHandle(&map)) {
Handle<Map> transition_target =
Map::FindTransitionedMap(map, &possible_transition_targets);
if (transition_target.is_null()) {
Map* transition_target =
map->FindElementsKindTransitionedMap(&possible_transition_targets);
if (transition_target == nullptr) {
receiver_maps.Add(map);
} else {
transitions.push_back(std::make_pair(map, transition_target));
transitions.push_back(std::make_pair(map, handle(transition_target)));
}
}
}
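The hunk above has FindElementsKindTransitionedMap return a raw Map* instead of a Handle&lt;Map&gt;, so the caller re-wraps the pointer with handle() before storing it; raw object pointers must not be held across possible allocation. A toy model of that wrap-before-store discipline (Handle and Map are mocked here):

    #include <cstdio>
    #include <vector>

    // Handle and Map are mocks. A Handle stands for a GC-safe indirection;
    // keeping a raw Map* past an allocation point would be unsafe in V8 proper.
    struct Map { int elements_kind; };
    struct Handle { Map* location; };

    Handle handle(Map* raw) { return Handle{raw}; }

    int main() {
      Map m{7};
      Map* transition_target = &m;  // raw result of the lookup
      std::vector<Handle> transitions;
      if (transition_target != nullptr) {
        transitions.push_back(handle(transition_target));  // wrap before storing
      }
      std::printf("stored %zu transition(s), kind %d\n", transitions.size(),
                  transitions[0].location->elements_kind);
      return 0;
    }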

180  deps/v8/src/compiler/arm/code-generator-arm.cc

@@ -54,6 +54,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
SBit OutputSBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
case kFlags_deoptimize:
case kFlags_set:
return SetCC;
case kFlags_none:
@@ -149,8 +150,11 @@ class ArmOperandConverter final : public InstructionOperandConverter {
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
FrameOffset offset = frame_access_state()->GetFrameOffset(
AllocatedOperand::cast(op)->index());
return SlotToMemOperand(AllocatedOperand::cast(op)->index());
}
MemOperand SlotToMemOperand(int slot) const {
FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -164,7 +168,9 @@ class OutOfLineLoadFloat32 final : public OutOfLineCode {
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
__ vmov(result_, std::numeric_limits<float>::quiet_NaN());
// Compute sqrtf(-1.0f), which results in a quiet single-precision NaN.
__ vmov(result_, -1.0f);
__ vsqrt(result_, result_);
}
private:
@@ -178,7 +184,9 @@ class OutOfLineLoadFloat64 final : public OutOfLineCode {
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
__ vmov(result_, std::numeric_limits<double>::quiet_NaN(), kScratchReg);
// Compute sqrt(-1.0), which results in a quiet double-precision NaN.
__ vmov(result_, -1.0);
__ vsqrt(result_, result_);
}
private:
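Both out-of-line loads above replace a vmov of a NaN constant with vsqrt of -1.0: per IEEE 754, the square root of a negative operand yields the default quiet NaN, so no NaN bit pattern has to be materialized (the double case previously needed kScratchReg for that). A host-side sketch of the same identity, assuming an IEEE-754 host:

    #include <cmath>
    #include <cstdio>

    int main() {
      // Invalid operation: sqrt of a negative operand returns the default
      // quiet NaN, the same value the vsqrt sequences above produce.
      float f = std::sqrt(-1.0f);
      double d = std::sqrt(-1.0);
      std::printf("float isnan: %d, double isnan: %d\n",
                  std::isnan(f) ? 1 : 0, std::isnan(d) ? 1 : 0);  // 1, 1
      return 0;
    }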
@@ -222,7 +230,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode) {}
mode_(mode),
must_save_lr_(!gen->frame_access_state()->has_frame()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -236,7 +245,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
: OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
if (!frame()->needs_frame()) {
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
__ Push(lr);
}
@@ -249,7 +258,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ add(scratch1_, object_, Operand(index_));
}
__ CallStub(&stub);
if (!frame()->needs_frame()) {
if (must_save_lr_) {
__ Pop(lr);
}
}
@@ -262,6 +271,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
bool must_save_lr_;
};
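OutOfLineRecordWrite now latches must_save_lr_ in its constructor instead of asking the frame at emit time, capturing whether the call site had a frame when the stub was created. A toy illustration of latching a decision at construction (all names invented):

    #include <cstdio>

    struct FrameState { bool has_frame; };

    // Toy version of the pattern: capture the call-site answer in the
    // constructor so later changes to the frame state cannot affect the stub.
    class OutOfLineStub {
     public:
      explicit OutOfLineStub(const FrameState& s) : must_save_lr_(!s.has_frame) {}
      void Generate() const {
        if (must_save_lr_) {
          std::printf("push {lr} ... pop {lr}\n");  // lr not covered by a frame
        } else {
          std::printf("lr already preserved by the frame\n");
        }
      }
     private:
      const bool must_save_lr_;
    };

    int main() {
      FrameState state{false};  // frame elided at the call site
      OutOfLineStub stub(state);
      state.has_frame = true;   // a later change must not matter
      stub.Generate();          // still saves lr
      return 0;
    }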
@@ -378,6 +388,11 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
} while (0)
void CodeGenerator::AssembleDeconstructFrame() {
__ LeaveFrame(StackFrame::MANUAL);
}
void CodeGenerator::AssembleSetupStackPointer() {}
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
@@ -394,7 +409,7 @@ void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
__ sub(sp, sp, Operand(-sp_slot_delta * kPointerSize));
frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
}
if (frame()->needs_frame()) {
if (frame_access_state()->has_frame()) {
if (FLAG_enable_embedded_constant_pool) {
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
}
@@ -404,14 +419,39 @@ void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
frame_access_state()->SetFrameAccessToSP();
}
void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
Register scratch1,
Register scratch2,
Register scratch3) {
DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
Label done;
// Check if current frame is an arguments adaptor frame.
__ ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &done);
// Load the arguments count from the current arguments adaptor frame (note
// that it does not include the receiver).
Register caller_args_count_reg = scratch1;
__ ldr(caller_args_count_reg,
MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(caller_args_count_reg);
ParameterCount callee_args_count(args_reg);
__ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
scratch3);
__ bind(&done);
}
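AssemblePopArgumentsAdaptorFrame supports the new ...FromJSFunction tail calls: if the caller reached this function through an arguments adaptor frame (actual and formal argument counts differ), the tail call must drop the caller's actual count, which only the adaptor frame records. A hedged sketch of that policy with invented names:

    #include <cstdio>

    // Sketch only; names invented. Without an adaptor frame, the formal
    // parameter count is what sits on the stack; with one, the actual count
    // recorded in the adaptor frame must be used instead.
    int ArgsSlotsToPop(bool has_adaptor_frame, int formal_count, int adaptor_count) {
      return has_adaptor_frame ? adaptor_count : formal_count;
    }

    int main() {
      std::printf("%d\n", ArgsSlotsToPop(false, 2, 0));  // 2: plain call f(a, b)
      std::printf("%d\n", ArgsSlotsToPop(true, 2, 5));   // 5: e.g. f.apply(null, args5)
      return 0;
    }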
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ArmOperandConverter i(this, instr);
masm()->MaybeCheckConstPool();
switch (ArchOpcodeField::decode(instr->opcode())) {
__ MaybeCheckConstPool();
InstructionCode opcode = instr->opcode();
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
switch (arch_opcode) {
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
if (instr->InputAt(0)->IsImmediate()) {
@@ -427,9 +467,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
i.TempRegister(2));
}
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
@@ -458,6 +504,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallJSFunctionFromJSFunction:
case kArchTailCallJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
@@ -468,6 +515,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
AssembleDeconstructActivationRecord(stack_param_delta);
if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
i.TempRegister(2));
}
__ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(ip);
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -535,7 +587,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArchParentFramePointer:
if (frame_access_state()->frame()->needs_frame()) {
if (frame_access_state()->has_frame()) {
__ ldr(i.OutputRegister(), MemOperand(fp, 0));
} else {
__ mov(i.OutputRegister(), fp);
@@ -742,6 +794,67 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ teq(i.InputRegister(0), i.InputOperand2(1));
DCHECK_EQ(SetCC, i.OutputSBit());
break;
case kArmAddPair:
// i.InputRegister(0) ... left low word.
// i.InputRegister(1) ... left high word.
// i.InputRegister(2) ... right low word.
// i.InputRegister(3) ... right high word.
__ add(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2),
SBit::SetCC);
__ adc(i.OutputRegister(1), i.InputRegister(1),
Operand(i.InputRegister(3)));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmSubPair:
// i.InputRegister(0) ... left low word.
// i.InputRegister(1) ... left high word.
// i.InputRegister(2) ... right low word.
// i.InputRegister(3) ... right high word.
__ sub(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2),
SBit::SetCC);
__ sbc(i.OutputRegister(1), i.InputRegister(1),
Operand(i.InputRegister(3)));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmMulPair:
// i.InputRegister(0) ... left low word.
// i.InputRegister(1) ... left high word.
// i.InputRegister(2) ... right low word.
// i.InputRegister(3) ... right high word.
__ umull(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
i.InputRegister(2));
__ mla(i.OutputRegister(1), i.InputRegister(0), i.InputRegister(3),
i.OutputRegister(1));
__ mla(i.OutputRegister(1), i.InputRegister(2), i.InputRegister(1),
i.OutputRegister(1));
break;
case kArmLslPair:
if (instr->InputAt(2)->IsImmediate()) {
__ LslPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
i.InputRegister(1), i.InputInt32(2));
} else {
__ LslPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
i.InputRegister(1), kScratchReg, i.InputRegister(2));
}
break;
case kArmLsrPair:
if (instr->InputAt(2)->IsImmediate()) {
__ LsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
i.InputRegister(1), i.InputInt32(2));
} else {
__ LsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
i.InputRegister(1), kScratchReg, i.InputRegister(2));
}
break;
case kArmAsrPair:
if (instr->InputAt(2)->IsImmediate()) {
__ AsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
i.InputRegister(1), i.InputInt32(2));
} else {
__ AsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
i.InputRegister(1), kScratchReg, i.InputRegister(2));
}
break;
case kArmVcmpF32:
if (instr->InputAt(1)->IsDoubleRegister()) {
__ VFPCompareAndSetFlags(i.InputFloat32Register(0),
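The new pair opcodes above lower 64-bit arithmetic onto 32-bit register pairs: kArmAddPair chains adds/adc through the carry flag, and kArmMulPair builds the low 64 bits of a 64x64 product from umull plus two mla cross terms. A portable sketch of the same lowering (function names invented for illustration):

    #include <cstdint>
    #include <cstdio>

    // 64-bit add from 32-bit halves: the low add sets the carry (ARM: adds),
    // the high add consumes it (ARM: adc).
    void AddPair(uint32_t ll, uint32_t lh, uint32_t rl, uint32_t rh,
                 uint32_t* ol, uint32_t* oh) {
      *ol = ll + rl;
      uint32_t carry = (*ol < ll) ? 1 : 0;  // unsigned overflow of the low word
      *oh = lh + rh + carry;
    }

    // Low 64 bits of a 64x64 product: umull supplies the full 32x32 product
    // of the low words; the two mla steps fold the cross terms into the high
    // word (the lh*rh term only affects bits >= 64 and is dropped).
    void MulPair(uint32_t ll, uint32_t lh, uint32_t rl, uint32_t rh,
                 uint32_t* ol, uint32_t* oh) {
      uint64_t p = static_cast<uint64_t>(ll) * rl;  // umull
      uint32_t hi = static_cast<uint32_t>(p >> 32);
      hi += ll * rh;  // mla #1
      hi += lh * rl;  // mla #2
      *ol = static_cast<uint32_t>(p);
      *oh = hi;
    }

    int main() {
      uint32_t lo = 0, hi = 0;
      AddPair(0xFFFFFFFFu, 0, 1, 0, &lo, &hi);
      std::printf("add: hi=%08x lo=%08x\n", static_cast<unsigned>(hi),
                  static_cast<unsigned>(lo));     // hi=00000001 lo=00000000
      MulPair(0x10000u, 0, 0x10000u, 0, &lo, &hi);  // 2^16 * 2^16 = 2^32
      std::printf("mul: hi=%08x lo=%08x\n", static_cast<unsigned>(hi),
                  static_cast<unsigned>(lo));     // hi=00000001 lo=00000000
      return 0;
    }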
@@ -1155,29 +1268,32 @@ void CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
__ CheckConstPool(false, false);
}
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (descriptor->IsCFunctionCall()) {
if (FLAG_enable_embedded_constant_pool) {
__ Push(lr, fp, pp);
// Adjust FP to point to saved FP.
__ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
if (frame_access_state()->has_frame()) {
if (descriptor->IsCFunctionCall()) {
if (FLAG_enable_embedded_constant_pool) {
__ Push(lr, fp, pp);
// Adjust FP to point to saved FP.
__ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
} else {
__ Push(lr, fp);
__ mov(fp, sp);
}
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue(this->info()->GeneratePreagedPrologue());
} else {
__ Push(lr, fp);
__ mov(fp, sp);
__ StubPrologue(info()->GetOutputStackFrameType());
}
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue(this->info()->GeneratePreagedPrologue());
} else if (frame()->needs_frame()) {
__ StubPrologue();
} else {
frame()->SetElidedFrameSizeInSlots(0);
}
frame_access_state()->SetFrameAccessToDefault();
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
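The rewritten prologue moves the frame-elision test to the outside: code is emitted only when frame_access_state()->has_frame(), and the frame flavor then follows the call descriptor. A compact sketch of the selection (enum and function names invented):

    #include <cstdio>

    // Sketch only; Prologue/SelectPrologue are illustrative, not V8 API.
    enum class Prologue { kNone, kCFrame, kJSFrame, kStubFrame };

    Prologue SelectPrologue(bool has_frame, bool is_c_call, bool is_js_call) {
      if (!has_frame) return Prologue::kNone;     // frame fully elided
      if (is_c_call) return Prologue::kCFrame;    // push lr/fp (and pp)
      if (is_js_call) return Prologue::kJSFrame;  // __ Prologue(...)
      return Prologue::kStubFrame;                // __ StubPrologue(frame type)
    }

    int main() {
      std::printf("%d\n", static_cast<int>(SelectPrologue(false, false, false)));  // 0: elided
      std::printf("%d\n", static_cast<int>(SelectPrologue(true, false, true)));    // 2: JS frame
      return 0;
    }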
@@ -1247,15 +1363,15 @@ void CodeGenerator::AssembleReturn() {
}
if (descriptor->IsCFunctionCall()) {
__ LeaveFrame(StackFrame::MANUAL);
} else if (frame()->needs_frame()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ b(&return_label_);
return;
} else {
__ bind(&return_label_);
__ LeaveFrame(StackFrame::MANUAL);
AssembleDeconstructFrame();
}
}
__ Ret(pop_count);
@@ -1311,9 +1427,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
case Constant::kHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
Heap::RootListIndex index;
int offset;
if (IsMaterializableFromFrame(src_object, &offset)) {
__ ldr(dst, MemOperand(fp, offset));
int slot;
if (IsMaterializableFromFrame(src_object, &slot)) {
__ ldr(dst, g.SlotToMemOperand(slot));
} else if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {

6  deps/v8/src/compiler/arm/instruction-codes-arm.h

@@ -46,6 +46,12 @@ namespace compiler {
V(ArmUxtab) \
V(ArmRbit) \
V(ArmUxtah) \
V(ArmAddPair) \
V(ArmSubPair) \
V(ArmMulPair) \
V(ArmLslPair) \
V(ArmLsrPair) \
V(ArmAsrPair) \
V(ArmVcmpF32) \
V(ArmVaddF32) \
V(ArmVsubF32) \

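The V(...) entries above feed V8's X-macro opcode list: one list macro is expanded to declare the ArchOpcode enumerators and again wherever a per-opcode table is needed. A self-contained sketch of the pattern (DEMO_OPCODE_LIST is invented; the real list is TARGET_ARCH_OPCODE_LIST):

    #include <cstdio>

    // DEMO_OPCODE_LIST is invented for illustration; the real list carries
    // many more entries.
    #define DEMO_OPCODE_LIST(V) \
      V(ArmAddPair)             \
      V(ArmSubPair)             \
      V(ArmMulPair)

    enum DemoOpcode {
    #define DECLARE(Name) k##Name,
      DEMO_OPCODE_LIST(DECLARE)
    #undef DECLARE
    };

    const char* DemoOpcodeName(DemoOpcode op) {
      switch (op) {
    #define CASE(Name) \
      case k##Name:    \
        return #Name;
        DEMO_OPCODE_LIST(CASE)
    #undef CASE
      }
      return "unknown";
    }

    int main() {
      std::printf("%s\n", DemoOpcodeName(kArmMulPair));  // prints "ArmMulPair"
      return 0;
    }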
6  deps/v8/src/compiler/arm/instruction-scheduler-arm.cc

@@ -48,6 +48,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmUxtab:
case kArmUxtah:
case kArmRbit:
case kArmAddPair:
case kArmSubPair:
case kArmMulPair:
case kArmLslPair:
case kArmLsrPair:
case kArmAsrPair:
case kArmVcmpF32:
case kArmVaddF32:
case kArmVsubF32:

Some files were not shown because too many files changed in this diff
