
deps: update v8 to 4.3.61.21

* @indutny's SealHandleScope patch (484bebc38319fc7c622478037922ad73b2edcbf9)
  has been cherry-picked onto the top of V8 to make it compile.
* There's some test breakage in contextify.
* This was merged at the request of the TC.

PR-URL: https://github.com/iojs/io.js/pull/1632
v4.0.0-rc
Chris Dickinson, 10 years ago; committed by Rod Vagg
commit d58e780504
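For context on the SealHandleScope bullet above, here is a minimal, illustrative sketch of how an embedder such as io.js typically uses v8::SealHandleScope, the API the cherry-picked patch touches; the surrounding function and the libuv wiring are assumptions for illustration and are not part of the patch.

    #include <uv.h>
    #include "v8.h"

    // Illustrative only: seal the handle scope around the event loop so that any
    // code path creating a v8::Local without first opening its own HandleScope
    // aborts in debug builds instead of silently leaking handles.
    void SpinEventLoop(v8::Isolate* isolate, uv_loop_t* loop) {
      v8::SealHandleScope seal(isolate);  // no Locals may be created at this level
      uv_run(loop, UV_RUN_DEFAULT);       // JS callbacks open their own HandleScopes
    }
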
Changed files (changed line count in parentheses):
  1. deps/v8/.gitignore (1)
  2. deps/v8/AUTHORS (1)
  3. deps/v8/BUILD.gn (116)
  4. deps/v8/ChangeLog (446)
  5. deps/v8/DEPS (19)
  6. deps/v8/Makefile (3)
  7. deps/v8/Makefile.android (8)
  8. deps/v8/PRESUBMIT.py (1)
  9. deps/v8/README.md (6)
  10. deps/v8/build/android.gypi (18)
  11. deps/v8/build/detect_v8_host_arch.py (8)
  12. deps/v8/build/features.gypi (4)
  13. deps/v8/build/get_landmines.py (1)
  14. deps/v8/build/gyp_environment.py (52)
  15. deps/v8/build/gyp_v8 (41)
  16. deps/v8/build/landmine_utils.py (9)
  17. deps/v8/build/landmines.py (178)
  18. deps/v8/build/standalone.gypi (89)
  19. deps/v8/build/toolchain.gypi (13)
  20. deps/v8/include/v8-debug.h (15)
  21. deps/v8/include/v8-profiler.h (33)
  22. deps/v8/include/v8-util.h (138)
  23. deps/v8/include/v8-version.h (4)
  24. deps/v8/include/v8.h (1162)
  25. deps/v8/include/v8config.h (17)
  26. deps/v8/src/DEPS (2)
  27. deps/v8/src/accessors.cc (97)
  28. deps/v8/src/api-natives.cc (6)
  29. deps/v8/src/api.cc (3154)
  30. deps/v8/src/api.h (12)
  31. deps/v8/src/arm/assembler-arm-inl.h (39)
  32. deps/v8/src/arm/assembler-arm.cc (26)
  33. deps/v8/src/arm/assembler-arm.h (11)
  34. deps/v8/src/arm/builtins-arm.cc (226)
  35. deps/v8/src/arm/code-stubs-arm.cc (392)
  36. deps/v8/src/arm/cpu-arm.cc (13)
  37. deps/v8/src/arm/debug-arm.cc (42)
  38. deps/v8/src/arm/deoptimizer-arm.cc (2)
  39. deps/v8/src/arm/disasm-arm.cc (3)
  40. deps/v8/src/arm/frames-arm.h (5)
  41. deps/v8/src/arm/full-codegen-arm.cc (215)
  42. deps/v8/src/arm/interface-descriptors-arm.cc (6)
  43. deps/v8/src/arm/lithium-arm.cc (18)
  44. deps/v8/src/arm/lithium-arm.h (24)
  45. deps/v8/src/arm/lithium-codegen-arm.cc (145)
  46. deps/v8/src/arm/macro-assembler-arm.cc (139)
  47. deps/v8/src/arm/macro-assembler-arm.h (24)
  48. deps/v8/src/arm/simulator-arm.cc (3)
  49. deps/v8/src/arm64/assembler-arm64-inl.h (46)
  50. deps/v8/src/arm64/assembler-arm64.cc (84)
  51. deps/v8/src/arm64/assembler-arm64.h (23)
  52. deps/v8/src/arm64/builtins-arm64.cc (237)
  53. deps/v8/src/arm64/code-stubs-arm64.cc (395)
  54. deps/v8/src/arm64/debug-arm64.cc (41)
  55. deps/v8/src/arm64/deoptimizer-arm64.cc (2)
  56. deps/v8/src/arm64/frames-arm64.h (5)
  57. deps/v8/src/arm64/full-codegen-arm64.cc (226)
  58. deps/v8/src/arm64/instructions-arm64.cc (22)
  59. deps/v8/src/arm64/instructions-arm64.h (41)
  60. deps/v8/src/arm64/interface-descriptors-arm64.cc (9)
  61. deps/v8/src/arm64/lithium-arm64.cc (20)
  62. deps/v8/src/arm64/lithium-arm64.h (26)
  63. deps/v8/src/arm64/lithium-codegen-arm64.cc (201)
  64. deps/v8/src/arm64/lithium-codegen-arm64.h (21)
  65. deps/v8/src/arm64/macro-assembler-arm64.cc (155)
  66. deps/v8/src/arm64/macro-assembler-arm64.h (37)
  67. deps/v8/src/array.js (2)
  68. deps/v8/src/arraybuffer.js (8)
  69. deps/v8/src/assembler.cc (62)
  70. deps/v8/src/assembler.h (31)
  71. deps/v8/src/ast-numbering.cc (12)
  72. deps/v8/src/ast-value-factory.h (66)
  73. deps/v8/src/ast.cc (95)
  74. deps/v8/src/ast.h (172)
  75. deps/v8/src/background-parsing-task.cc (45)
  76. deps/v8/src/background-parsing-task.h (13)
  77. deps/v8/src/bailout-reason.h (15)
  78. deps/v8/src/base/bits.h (13)
  79. deps/v8/src/base/cpu.cc (2)
  80. deps/v8/src/base/logging.cc (25)
  81. deps/v8/src/base/platform/platform-freebsd.cc (6)
  82. deps/v8/src/base/platform/platform-posix.cc (5)
  83. deps/v8/src/base/platform/platform-win32.cc (5)
  84. deps/v8/src/base/platform/platform.h (2)
  85. deps/v8/src/bootstrapper.cc (268)
  86. deps/v8/src/builtins.cc (13)
  87. deps/v8/src/builtins.h (6)
  88. deps/v8/src/char-predicates-inl.h (7)
  89. deps/v8/src/char-predicates.h (2)
  90. deps/v8/src/code-factory.cc (56)
  91. deps/v8/src/code-factory.h (11)
  92. deps/v8/src/code-stubs-hydrogen.cc (237)
  93. deps/v8/src/code-stubs.cc (24)
  94. deps/v8/src/code-stubs.h (66)
  95. deps/v8/src/codegen.cc (38)
  96. deps/v8/src/collection.js (41)
  97. deps/v8/src/compilation-cache.cc (1)
  98. deps/v8/src/compiler.cc (579)
  99. deps/v8/src/compiler.h (390)
  100. deps/v8/src/compiler/access-builder.cc (24)

deps/v8/.gitignore (1)

@ -24,6 +24,7 @@
.cproject
.d8_history
.gclient_entries
.landmines
.project
.pydevproject
.settings

deps/v8/AUTHORS (1)

@ -64,6 +64,7 @@ Jianghua Yang <jianghua.yjh@alibaba-inc.com>
Joel Stanley <joel@jms.id.au>
Johan Bergström <johan@bergstroem.nu>
Jonathan Liu <net147@gmail.com>
JunHo Seo <sejunho@gmail.com>
Kang-Hao (Kenny) Lu <kennyluck@csail.mit.edu>
Luis Reis <luis.m.reis@gmail.com>
Luke Zarko <lukezarko@gmail.com>

deps/v8/BUILD.gn (116)

@ -2,6 +2,10 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//build/config/android/config.gni")
import("//build/config/arm.gni")
import("//build/config/mips.gni")
# Because standalone V8 builds are not supported, assume this is part of a
# Chromium build.
import("//build/module_args/v8.gni")
@ -18,10 +22,23 @@ v8_interpreted_regexp = false
v8_object_print = false
v8_postmortem_support = false
v8_use_snapshot = true
v8_target_arch = cpu_arch
v8_target_arch = target_cpu
v8_random_seed = "314159265"
v8_toolset_for_d8 = "host"
# The snapshot needs to be compiled for the host, but compiled with
# a toolchain that matches the bit-width of the target.
#
# TODO(GYP): For now we only support 32-bit little-endian target builds from an
# x64 Linux host. Eventually we need to support all of the host/target
# configurations v8 runs on.
if (host_cpu == "x64" && host_os == "linux" &&
(target_cpu == "arm" || target_cpu == "mipsel" || target_cpu == "x86")) {
snapshot_toolchain = "//build/toolchain/linux:clang_x86"
} else {
snapshot_toolchain = default_toolchain
}
###############################################################################
# Configurations
#
@ -96,37 +113,49 @@ config("toolchain") {
defines = []
cflags = []
# TODO(jochen): Add support for arm subarchs, mips, mipsel.
# TODO(jochen): Add support for arm subarchs, mips, mipsel, mips64el.
if (v8_target_arch == "arm") {
defines += [ "V8_TARGET_ARCH_ARM" ]
if (arm_version == 7) {
defines += [ "CAN_USE_ARMV7_INSTRUCTIONS" ]
}
if (arm_fpu == "vfpv3-d16") {
defines += [ "CAN_USE_VFP3_INSTRUCTIONS" ]
}
if (arm_fpu == "vfpv3") {
defines += [
"CAN_USE_VFP3_INSTRUCTIONS",
"CAN_USE_VFP32DREGS",
]
}
if (arm_fpu == "neon") {
if (current_cpu == "arm") {
if (arm_version == 7) {
defines += [ "CAN_USE_ARMV7_INSTRUCTIONS" ]
}
if (arm_fpu == "vfpv3-d16") {
defines += [ "CAN_USE_VFP3_INSTRUCTIONS" ]
} else if (arm_fpu == "vfpv3") {
defines += [
"CAN_USE_VFP3_INSTRUCTIONS",
"CAN_USE_VFP32DREGS",
]
} else if (arm_fpu == "neon") {
defines += [
"CAN_USE_VFP3_INSTRUCTIONS",
"CAN_USE_VFP32DREGS",
"CAN_USE_NEON",
]
}
} else {
# These defines are used for the ARM simulator.
defines += [
"CAN_USE_ARMV7_INSTRUCTIONS",
"CAN_USE_VFP3_INSTRUCTIONS",
"CAN_USE_VFP32DREGS",
"CAN_USE_NEON",
"USE_EABI_HARDFLOAT=0",
]
}
# TODO(jochen): Add support for arm_test_noprobe.
# TODO(jochen): Add support for cpu_arch != v8_target_arch/
}
if (v8_target_arch == "arm64") {
defines += [ "V8_TARGET_ARCH_ARM64" ]
}
if (v8_target_arch == "mipsel") {
defines += [ "V8_TARGET_ARCH_MIPS" ]
}
if (v8_target_arch == "mips64el") {
defines += [ "V8_TARGET_ARCH_MIPS64" ]
}
if (v8_target_arch == "x86") {
defines += [ "V8_TARGET_ARCH_IA32" ]
}
@ -173,8 +202,8 @@ action("js2c") {
"src/array.js",
"src/string.js",
"src/uri.js",
"src/third_party/fdlibm/fdlibm.js",
"src/math.js",
"src/third_party/fdlibm/fdlibm.js",
"src/date.js",
"src/regexp.js",
"src/arraybuffer.js",
@ -192,6 +221,7 @@ action("js2c") {
"src/debug-debugger.js",
"src/mirror-debugger.js",
"src/liveedit-debugger.js",
"src/templates.js",
"src/macros.py",
]
@ -230,13 +260,12 @@ action("js2c_experimental") {
"src/macros.py",
"src/proxy.js",
"src/generator.js",
"src/harmony-string.js",
"src/harmony-array.js",
"src/harmony-array-includes.js",
"src/harmony-typedarray.js",
"src/harmony-tostring.js",
"src/harmony-templates.js",
"src/harmony-regexp.js",
"src/harmony-reflect.js"
]
outputs = [
@ -322,7 +351,7 @@ action("run_mksnapshot") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
":mksnapshot($host_toolchain)",
":mksnapshot($snapshot_toolchain)",
]
script = "tools/run.py"
@ -332,7 +361,7 @@ action("run_mksnapshot") {
]
args = [
"./" + rebase_path(get_label_info(":mksnapshot($host_toolchain)",
"./" + rebase_path(get_label_info(":mksnapshot($snapshot_toolchain)",
"root_out_dir") + "/mksnapshot",
root_build_dir),
"--log-snapshot-positions",
@ -373,7 +402,7 @@ source_set("v8_nosnapshot") {
sources = [
"$target_gen_dir/libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"src/snapshot-empty.cc",
"src/snapshot/snapshot-empty.cc",
]
configs -= [ "//build/config/compiler:chromium_code" ]
@ -423,8 +452,8 @@ if (v8_use_external_startup_data) {
]
sources = [
"src/natives-external.cc",
"src/snapshot-external.cc",
"src/snapshot/natives-external.cc",
"src/snapshot/snapshot-external.cc",
]
configs -= [ "//build/config/compiler:chromium_code" ]
@ -535,9 +564,7 @@ source_set("v8_base") {
"src/compiler/frame.h",
"src/compiler/gap-resolver.cc",
"src/compiler/gap-resolver.h",
"src/compiler/generic-algorithm.h",
"src/compiler/graph-builder.h",
"src/compiler/graph-inl.h",
"src/compiler/graph-reducer.cc",
"src/compiler/graph-reducer.h",
"src/compiler/graph-replay.cc",
@ -566,6 +593,8 @@ source_set("v8_base") {
"src/compiler/js-intrinsic-lowering.h",
"src/compiler/js-operator.cc",
"src/compiler/js-operator.h",
"src/compiler/js-type-feedback.cc",
"src/compiler/js-type-feedback.h",
"src/compiler/js-typed-lowering.cc",
"src/compiler/js-typed-lowering.h",
"src/compiler/jump-threading.cc",
@ -573,6 +602,8 @@ source_set("v8_base") {
"src/compiler/linkage-impl.h",
"src/compiler/linkage.cc",
"src/compiler/linkage.h",
"src/compiler/liveness-analyzer.cc",
"src/compiler/liveness-analyzer.h",
"src/compiler/load-elimination.cc",
"src/compiler/load-elimination.h",
"src/compiler/loop-peeling.cc",
@ -591,6 +622,7 @@ source_set("v8_base") {
"src/compiler/node-cache.h",
"src/compiler/node-marker.cc",
"src/compiler/node-marker.h",
"src/compiler/node-matchers.cc",
"src/compiler/node-matchers.h",
"src/compiler/node-properties.cc",
"src/compiler/node-properties.h",
@ -631,6 +663,8 @@ source_set("v8_base") {
"src/compiler/simplified-operator.h",
"src/compiler/source-position.cc",
"src/compiler/source-position.h",
"src/compiler/state-values-utils.cc",
"src/compiler/state-values-utils.h",
"src/compiler/typer.cc",
"src/compiler/typer.h",
"src/compiler/value-numbering-reducer.cc",
@ -848,7 +882,6 @@ source_set("v8_base") {
"src/modules.cc",
"src/modules.h",
"src/msan.h",
"src/natives.h",
"src/objects-debug.cc",
"src/objects-inl.h",
"src/objects-printer.cc",
@ -860,6 +893,8 @@ source_set("v8_base") {
"src/ostreams.h",
"src/parser.cc",
"src/parser.h",
"src/pending-compilation-error-handler.cc",
"src/pending-compilation-error-handler.h",
"src/perf-jit.cc",
"src/perf-jit.h",
"src/preparse-data-format.h",
@ -929,20 +964,23 @@ source_set("v8_base") {
"src/scopeinfo.h",
"src/scopes.cc",
"src/scopes.h",
"src/serialize.cc",
"src/serialize.h",
"src/small-pointer-list.h",
"src/smart-pointers.h",
"src/snapshot-common.cc",
"src/snapshot-source-sink.cc",
"src/snapshot-source-sink.h",
"src/snapshot.h",
"src/snapshot/natives.h",
"src/snapshot/serialize.cc",
"src/snapshot/serialize.h",
"src/snapshot/snapshot-common.cc",
"src/snapshot/snapshot-source-sink.cc",
"src/snapshot/snapshot-source-sink.h",
"src/snapshot/snapshot.h",
"src/string-builder.cc",
"src/string-builder.h",
"src/string-search.cc",
"src/string-search.h",
"src/string-stream.cc",
"src/string-stream.h",
"src/strings-storage.cc",
"src/strings-storage.h",
"src/strtod.cc",
"src/strtod.h",
"src/token.cc",
@ -1356,11 +1394,11 @@ source_set("v8_libbase") {
if (is_linux) {
sources += [ "src/base/platform/platform-linux.cc" ]
libs = [ "rt" ]
libs = [ "dl", "rt" ]
} else if (is_android) {
defines += [ "CAN_USE_VFP_INSTRUCTIONS" ]
if (build_os == "mac") {
if (host_os == "mac") {
if (current_toolchain == host_toolchain) {
sources += [ "src/base/platform/platform-macos.cc" ]
} else {
@ -1425,12 +1463,12 @@ source_set("v8_libplatform") {
# Executables
#
if (current_toolchain == host_toolchain) {
if (current_toolchain == snapshot_toolchain) {
executable("mksnapshot") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
"src/mksnapshot.cc",
"src/snapshot/mksnapshot.cc",
]
configs -= [ "//build/config/compiler:chromium_code" ]

deps/v8/ChangeLog (446)

@ -1,3 +1,449 @@
2015-03-30: Version 4.3.61
Performance and stability improvements on all platforms.
2015-03-28: Version 4.3.60
Reland^2 "Filter invalid slots out from the SlotsBuffer after marking."
(Chromium issues 454297, 470801).
This fixes missing incremental write barrier issue when double fields
unboxing is enabled (Chromium issue 469146).
Performance and stability improvements on all platforms.
2015-03-27: Version 4.3.59
Use a slot that is located on a heap page when removing invalid entries
from the SlotsBuffer (Chromium issue 470801).
Performance and stability improvements on all platforms.
2015-03-26: Version 4.3.58
Return timestamp of the last recorded interval to the caller of
HeapProfiler::GetHeapStats (Chromium issue 467222).
Performance and stability improvements on all platforms.
2015-03-26: Version 4.3.57
Reland [V8] Removed SourceLocationRestrict (Chromium issue 468781).
Performance and stability improvements on all platforms.
2015-03-25: Version 4.3.56
Remove v8::Isolate::ClearInterrupt.
Performance and stability improvements on all platforms.
2015-03-25: Version 4.3.55
Performance and stability improvements on all platforms.
2015-03-24: Version 4.3.54
Do not assign positions to parser-generated desugarings (Chromium issue
468661).
Performance and stability improvements on all platforms.
2015-03-24: Version 4.3.53
Filter invalid slots out from the SlotsBuffer after marking (Chromium
issue 454297).
Fix OOM bug 3976 (issue 3976).
Performance and stability improvements on all platforms.
2015-03-24: Version 4.3.52
Remove calls to IdleNotification().
Save heap object tracking data in heap snapshot (Chromium issue 467222).
Performance and stability improvements on all platforms.
2015-03-24: Version 4.3.51
[V8] Removed SourceLocationRestrict (Chromium issue 468781).
[turbofan] Fix control reducer bug with walking non-control edges during
ConnectNTL phase (Chromium issue 469605).
Performance and stability improvements on all platforms.
2015-03-23: Version 4.3.50
Performance and stability improvements on all platforms.
2015-03-23: Version 4.3.49
Ensure we don't overflow in BCE (Chromium issue 469148).
[turbofan] Fix lowering of Math.max for integral inputs (Chromium issue
468162).
Use libdl to get symbols for backtraces.
Performance and stability improvements on all platforms.
2015-03-19: Version 4.3.48
Clarify what APIs return Maybe and MaybeLocal values (issue 3929).
Introduce explicit constant for per Context debug data set by embedder
(Chromium issue 466631).
Adjust key behaviour for weak collections (issues 3970, 3971, Chromium
issue 460083).
Turn on overapproximation of the weak closure (issue 3862).
Performance and stability improvements on all platforms.
2015-03-18: Version 4.3.47
Performance and stability improvements on all platforms.
2015-03-17: Version 4.3.46
Performance and stability improvements on all platforms.
2015-03-17: Version 4.3.45
Performance and stability improvements on all platforms.
2015-03-17: Version 4.3.44
Performance and stability improvements on all platforms.
2015-03-16: Version 4.3.43
Bugfix in hydrogen GVN (Chromium issue 467481).
Remove obsolete TakeHeapSnapshot method from API (Chromium issue
465651).
Beautify syntax error for unterminated argument list (Chromium issue
339474).
Performance and stability improvements on all platforms.
2015-03-16: Version 4.3.42
Performance and stability improvements on all platforms.
2015-03-15: Version 4.3.41
Performance and stability improvements on all platforms.
2015-03-14: Version 4.3.40
Performance and stability improvements on all platforms.
2015-03-14: Version 4.3.39
Performance and stability improvements on all platforms.
2015-03-14: Version 4.3.38
Remove --harmony-scoping flag.
Performance and stability improvements on all platforms.
2015-03-13: Version 4.3.37
Implement TDZ in StoreIC for top-level lexicals (issue 3941).
Turn on job-based optimizing compiler (issue 3608).
Performance and stability improvements on all platforms.
2015-03-13: Version 4.3.36
Performance and stability improvements on all platforms.
2015-03-12: Version 4.3.35
Add Cast() for Int32 and Uint32 (Chromium issue 462402).
Incorrect handling of HTransitionElementsKind in hydrogen check
elimination phase fixed (Chromium issue 460917).
Performance and stability improvements on all platforms.
2015-03-12: Version 4.3.34
Performance and stability improvements on all platforms.
2015-03-12: Version 4.3.33
Fix the toolchain used to build the snapshots in GN (Chromium issues
395249, 465456).
Performance and stability improvements on all platforms.
2015-03-11: Version 4.3.32
Reland of Remove slots that point to unboxed doubles from the
StoreBuffer/SlotsBuffer (Chromium issues 454297, 465273).
Performance and stability improvements on all platforms.
2015-03-11: Version 4.3.31
Performance and stability improvements on all platforms.
2015-03-11: Version 4.3.30
Remove uid and title from HeapSnapshot (Chromium issue 465651).
Remove deprecated CpuProfiler methods.
[turbofan] Fix --turbo-osr for OSRing into inner loop inside for-in
(Chromium issue 462775).
Performance and stability improvements on all platforms.
2015-03-10: Version 4.3.29
Performance and stability improvements on all platforms.
2015-03-10: Version 4.3.28
Performance and stability improvements on all platforms.
2015-03-10: Version 4.3.27
Performance and stability improvements on all platforms.
2015-03-07: Version 4.3.26
Remove slots that point to unboxed doubles from the
StoreBuffer/SlotsBuffer (Chromium issue 454297).
Performance and stability improvements on all platforms.
2015-03-06: Version 4.3.25
Performance and stability improvements on all platforms.
2015-03-06: Version 4.3.24
convert more things to maybe (issue 3929).
Performance and stability improvements on all platforms.
2015-03-05: Version 4.3.23
[V8] Use Function.name for stack frames in v8::StackTrace (Chromium
issue 17356).
Allow passing sourceMapUrl when compiling scripts (Chromium issue
462572).
convert compile functions to use maybe (issue 3929).
Performance and stability improvements on all platforms.
2015-03-05: Version 4.3.22
give UniquePersistent full move semantics (issue 3669).
Performance and stability improvements on all platforms.
2015-03-05: Version 4.3.21
Performance and stability improvements on all platforms.
2015-03-04: Version 4.3.20
convert remaining object functions to maybes (issue 3929).
Performance and stability improvements on all platforms.
2015-03-04: Version 4.3.19
ARM assembler: fix undefined behaviour in fits_shifter (Chromium issues
444089, 463436).
Implement subclassing Arrays (issue 3930).
[es6] Fix for-const loops (issue 3983).
Performance and stability improvements on all platforms.
2015-03-04: Version 4.3.18
Implement subclassing Arrays (issue 3930).
Performance and stability improvements on all platforms.
2015-03-04: Version 4.3.17
Implement subclassing Arrays (issue 3930).
convert more object functions to return maybes (issue 3929).
Performance and stability improvements on all platforms.
2015-03-03: Version 4.3.16
check for null context on execution entry (issue 3929).
convert object::* to return maybe values (issue 3929).
Removed funky Maybe constructor and made fields private (issue 3929).
Polish Maybe API a bit, removing useless creativity and fixing some
signatures (issue 3929).
Performance and stability improvements on all platforms.
2015-03-02: Version 4.3.15
Performance and stability improvements on all platforms.
2015-03-02: Version 4.3.14
Performance and stability improvements on all platforms.
2015-02-28: Version 4.3.13
Disallow subclassing Arrays (issue 3930).
Performance and stability improvements on all platforms.
2015-02-28: Version 4.3.12
Performance and stability improvements on all platforms.
2015-02-27: Version 4.3.11
Disallow subclassing Arrays (issue 3930).
convert Value::*Value() function to return Maybe results (issue 3929).
Performance and stability improvements on all platforms.
2015-02-27: Version 4.3.10
Convert v8::Value::To* to use MaybeLocal (issue 3929).
Performance and stability improvements on all platforms.
2015-02-26: Version 4.3.9
Add public version macros (issue 3075).
Performance and stability improvements on all platforms.
2015-02-26: Version 4.3.8
Performance and stability improvements on all platforms.
2015-02-25: Version 4.3.7
Performance and stability improvements on all platforms.
2015-02-25: Version 4.3.6
Performance and stability improvements on all platforms.
2015-02-25: Version 4.3.5
Turn on job based recompilation (issue 3608).
Performance and stability improvements on all platforms.
2015-02-24: Version 4.3.4
Reland "Correctly propagate terminate exception in TryCall." (issue
3892).
Performance and stability improvements on all platforms.
2015-02-24: Version 4.3.3
Performance and stability improvements on all platforms.
2015-02-24: Version 4.3.2
Update GN build files with the cpu_arch -> current_cpu change.
Performance and stability improvements on all platforms.
2015-02-23: Version 4.3.1
Limit size of first page based on serialized data (Chromium issue
453111).
Performance and stability improvements on all platforms.
2015-02-19: Version 4.2.77
Make generator constructors configurable (issue 3902).

deps/v8/DEPS (19)

@ -8,17 +8,17 @@ vars = {
deps = {
"v8/build/gyp":
Var("git_url") + "/external/gyp.git" + "@" + "34640080d08ab2a37665512e52142947def3056d",
Var("git_url") + "/external/gyp.git" + "@" + "d174d75bf69c682cb62af9187879e01513b35e52",
"v8/third_party/icu":
Var("git_url") + "/chromium/deps/icu.git" + "@" + "4e3266f32c62d30a3f9e2232a753c60129d1e670",
Var("git_url") + "/chromium/deps/icu.git" + "@" + "7c81740601355556e630da515b74d889ba2f8d08",
"v8/buildtools":
Var("git_url") + "/chromium/buildtools.git" + "@" + "5c5e924788fe40f7d6e0a3841ac572de2475e689",
Var("git_url") + "/chromium/buildtools.git" + "@" + "3b302fef93f7cc58d9b8168466905237484b2772",
"v8/testing/gtest":
Var("git_url") + "/external/googletest.git" + "@" + "be1868139ffe0ccd0e8e3b37292b84c821d9c8ad",
"v8/testing/gmock":
Var("git_url") + "/external/googlemock.git" + "@" + "29763965ab52f24565299976b936d1265cb6a271", # from svn revision 501
"v8/tools/clang":
Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "f6daa55d03995e82201a3278203e7c0421a59546",
Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "ea2f0a2d96ffc6f5a51c034db704ccc1a6543156",
}
deps_os = {
@ -46,6 +46,17 @@ skip_child_includes = [
]
hooks = [
{
# This clobbers when necessary (based on get_landmines.py). It must be the
# first hook so that other things that get/generate into the output
# directory will not subsequently be clobbered.
'name': 'landmines',
'pattern': '.',
'action': [
'python',
'v8/build/landmines.py',
],
},
# Pull clang-format binaries using checked-in hashes.
{
"name": "clang_format_win",

deps/v8/Makefile (3)

@ -234,7 +234,8 @@ ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64el x87 ppc ppc64
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel android_x87
ANDROID_ARCHES = android_ia32 android_x64 android_arm android_arm64 \
android_mipsel android_x87
NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:

deps/v8/Makefile.android (8)

@ -26,7 +26,8 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Those definitions should be consistent with the main Makefile
ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel android_x87
ANDROID_ARCHES = android_ia32 android_x64 android_arm android_arm64 \
android_mipsel android_x87
MODES = release debug
# Generates all combinations of ANDROID ARCHES and MODES,
@ -66,6 +67,11 @@ else ifeq ($(ARCH), android_ia32)
TOOLCHAIN_ARCH = x86
TOOLCHAIN_PREFIX = i686-linux-android
TOOLCHAIN_VER = 4.8
else ifeq ($(ARCH), android_x64)
DEFINES = target_arch=x64 v8_target_arch=x64 android_target_arch=x86_64 android_target_platform=21
TOOLCHAIN_ARCH = x86_64
TOOLCHAIN_PREFIX = x86_64-linux-android
TOOLCHAIN_VER = 4.9
else ifeq ($(ARCH), android_x87)
DEFINES = target_arch=x87 v8_target_arch=x87 android_target_arch=x86 android_target_platform=14
TOOLCHAIN_ARCH = x86

deps/v8/PRESUBMIT.py (1)

@ -244,6 +244,7 @@ def GetPreferredTryMasters(project, change):
return {
'tryserver.v8': {
'v8_linux_rel': set(['defaulttests']),
'v8_linux_dbg': set(['defaulttests']),
'v8_linux_nodcheck_rel': set(['defaulttests']),
'v8_linux_gcc_compile_rel': set(['defaulttests']),
'v8_linux64_rel': set(['defaulttests']),

deps/v8/README.md (6)

@ -18,13 +18,13 @@ Getting the Code
Checkout [depot tools](http://www.chromium.org/developers/how-tos/install-depot-tools), and run
> `fetch v8`
fetch v8
This will checkout V8 into the directory `v8` and fetch all of its dependencies.
To stay up to date, run
> `git pull origin`
> `gclient sync`
git pull origin
gclient sync
For fetching all branches, add the following into your remote
configuration in `.git/config`:

deps/v8/build/android.gypi (18)

@ -43,7 +43,13 @@
'android_stlport': '<(android_toolchain)/sources/cxx-stl/stlport/',
},
'android_include': '<(android_sysroot)/usr/include',
'android_lib': '<(android_sysroot)/usr/lib',
'conditions': [
['target_arch=="x64"', {
'android_lib': '<(android_sysroot)/usr/lib64',
}, {
'android_lib': '<(android_sysroot)/usr/lib',
}],
],
'android_stlport_include': '<(android_stlport)/stlport',
'android_stlport_libs': '<(android_stlport)/libs',
}, {
@ -52,7 +58,13 @@
'android_stlport': '<(android_ndk_root)/sources/cxx-stl/stlport/',
},
'android_include': '<(android_sysroot)/usr/include',
'android_lib': '<(android_sysroot)/usr/lib',
'conditions': [
['target_arch=="x64"', {
'android_lib': '<(android_sysroot)/usr/lib64',
}, {
'android_lib': '<(android_sysroot)/usr/lib',
}],
],
'android_stlport_include': '<(android_stlport)/stlport',
'android_stlport_libs': '<(android_stlport)/libs',
}],
@ -227,7 +239,7 @@
'target_conditions': [
['_type=="executable"', {
'conditions': [
['target_arch=="arm64"', {
['target_arch=="arm64" or target_arch=="x64"', {
'ldflags': [
'-Wl,-dynamic-linker,/system/bin/linker64',
],

deps/v8/build/detect_v8_host_arch.py (8)

@ -41,6 +41,7 @@ def DoMain(_):
"""Hook to be called from gyp without starting a separate python
interpreter."""
host_arch = platform.machine()
host_system = platform.system();
# Convert machine type to format recognized by gyp.
if re.match(r'i.86', host_arch) or host_arch == 'i86pc':
@ -56,6 +57,13 @@ def DoMain(_):
elif host_arch.startswith('mips'):
host_arch = 'mipsel'
# Under AIX the value returned by platform.machine is not
# the best indicator of the host architecture
# AIX 6.1 which is the lowest level supported only provides
# a 64 bit kernel
if host_system == 'AIX':
host_arch = 'ppc64'
# platform.machine is based on running kernel. It's possible to use 64-bit
# kernel with 32-bit userland, e.g. to give linker slightly more memory.
# Distinguish between different userland bitness by querying

deps/v8/build/features.gypi (4)

@ -102,7 +102,7 @@
'DebugBaseCommon': {
'abstract': 1,
'variables': {
'v8_enable_handle_zapping%': 1,
'v8_enable_handle_zapping%': 0,
},
'conditions': [
['v8_enable_handle_zapping==1', {
@ -112,7 +112,7 @@
}, # Debug
'Release': {
'variables': {
'v8_enable_handle_zapping%': 0,
'v8_enable_handle_zapping%': 1,
},
'conditions': [
['v8_enable_handle_zapping==1', {

deps/v8/build/get_landmines.py (1)

@ -20,6 +20,7 @@ def main():
print 'Activating MSVS 2013.'
print 'Revert activation of MSVS 2013.'
print 'Activating MSVS 2013 again.'
print 'Clobber after ICU roll.'
return 0

deps/v8/build/gyp_environment.py (52)

@ -0,0 +1,52 @@
# Copyright 2015 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Sets up various automatic gyp environment variables. These are used by
gyp_v8 and landmines.py which run at different stages of runhooks. To
make sure settings are consistent between them, all setup should happen here.
"""
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
V8_ROOT = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir))
def apply_gyp_environment(file_path=None):
"""
Reads in a *.gyp_env file and applies the valid keys to os.environ.
"""
if not file_path or not os.path.exists(file_path):
return
file_contents = open(file_path).read()
try:
file_data = eval(file_contents, {'__builtins__': None}, None)
except SyntaxError, e:
e.filename = os.path.abspath(file_path)
raise
supported_vars = ( 'V8_GYP_FILE',
'V8_GYP_SYNTAX_CHECK',
'GYP_DEFINES',
'GYP_GENERATOR_FLAGS',
'GYP_GENERATOR_OUTPUT', )
for var in supported_vars:
val = file_data.get(var)
if val:
if var in os.environ:
print 'INFO: Environment value for "%s" overrides value in %s.' % (
var, os.path.abspath(file_path)
)
else:
os.environ[var] = val
def set_environment():
"""Sets defaults for GYP_* variables."""
if 'SKIP_V8_GYP_ENV' not in os.environ:
# Update the environment based on v8.gyp_env
gyp_env_path = os.path.join(os.path.dirname(V8_ROOT), 'v8.gyp_env')
apply_gyp_environment(gyp_env_path)

deps/v8/build/gyp_v8 (41)

@ -31,6 +31,7 @@
# is invoked by V8 beyond what can be done in the gclient hooks.
import glob
import gyp_environment
import os
import platform
import shlex
@ -48,34 +49,6 @@ sys.path.insert(
1, os.path.abspath(os.path.join(v8_root, 'tools', 'generate_shim_headers')))
def apply_gyp_environment(file_path=None):
"""
Reads in a *.gyp_env file and applies the valid keys to os.environ.
"""
if not file_path or not os.path.exists(file_path):
return
file_contents = open(file_path).read()
try:
file_data = eval(file_contents, {'__builtins__': None}, None)
except SyntaxError, e:
e.filename = os.path.abspath(file_path)
raise
supported_vars = ( 'V8_GYP_FILE',
'V8_GYP_SYNTAX_CHECK',
'GYP_DEFINES',
'GYP_GENERATOR_FLAGS',
'GYP_GENERATOR_OUTPUT', )
for var in supported_vars:
val = file_data.get(var)
if val:
if var in os.environ:
print 'INFO: Environment value for "%s" overrides value in %s.' % (
var, os.path.abspath(file_path)
)
else:
os.environ[var] = val
def additional_include_files(args=[]):
"""
Returns a list of additional (.gypi) files to include, without
@ -109,13 +82,6 @@ def additional_include_files(args=[]):
def run_gyp(args):
rc = gyp.main(args)
# Check for landmines (reasons to clobber the build). This must be run here,
# rather than a separate runhooks step so that any environment modifications
# from above are picked up.
print 'Running build/landmines.py...'
subprocess.check_call(
[sys.executable, os.path.join(script_dir, 'landmines.py')])
if rc != 0:
print 'Error running GYP'
sys.exit(rc)
@ -124,10 +90,7 @@ def run_gyp(args):
if __name__ == '__main__':
args = sys.argv[1:]
if 'SKIP_V8_GYP_ENV' not in os.environ:
# Update the environment based on v8.gyp_env
gyp_env_path = os.path.join(os.path.dirname(v8_root), 'v8.gyp_env')
apply_gyp_environment(gyp_env_path)
gyp_environment.set_environment()
# This could give false positives since it doesn't actually do real option
# parsing. Oh well.

deps/v8/build/landmine_utils.py (9)

@ -47,10 +47,19 @@ def gyp_defines():
return dict(arg.split('=', 1)
for arg in shlex.split(os.environ.get('GYP_DEFINES', '')))
@memoize()
def gyp_generator_flags():
"""Parses and returns GYP_GENERATOR_FLAGS env var as a dictionary."""
return dict(arg.split('=', 1)
for arg in shlex.split(os.environ.get('GYP_GENERATOR_FLAGS', '')))
@memoize()
def gyp_msvs_version():
return os.environ.get('GYP_MSVS_VERSION', '')
@memoize()
def distributor():
"""

deps/v8/build/landmines.py (178)

@ -4,10 +4,9 @@
# found in the LICENSE file.
"""
This script runs every build as a hook. If it detects that the build should
be clobbered, it will touch the file <build_dir>/.landmine_triggered. The
various build scripts will then check for the presence of this file and clobber
accordingly. The script will also emit the reasons for the clobber to stdout.
This script runs every build as the first hook (See DEPS). If it detects that
the build should be clobbered, it will delete the contents of the build
directory.
A landmine is tripped when a builder checks out a different revision, and the
diff between the new landmines and the old ones is non-null. At this point, the
@ -15,9 +14,13 @@ build is clobbered.
"""
import difflib
import errno
import gyp_environment
import logging
import optparse
import os
import re
import shutil
import sys
import subprocess
import time
@ -28,46 +31,109 @@ import landmine_utils
SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def get_target_build_dir(build_tool, target):
def get_build_dir(build_tool, is_iphone=False):
"""
Returns output directory absolute path dependent on build and targets.
Examples:
r'c:\b\build\slave\win\build\src\out\Release'
'/mnt/data/b/build/slave/linux/build/src/out/Debug'
'/b/build/slave/ios_rel_device/build/src/xcodebuild/Release-iphoneos'
r'c:\b\build\slave\win\build\src\out'
'/mnt/data/b/build/slave/linux/build/src/out'
'/b/build/slave/ios_rel_device/build/src/xcodebuild'
Keep this function in sync with tools/build/scripts/slave/compile.py
"""
ret = None
if build_tool == 'xcode':
ret = os.path.join(SRC_DIR, 'xcodebuild', target)
ret = os.path.join(SRC_DIR, 'xcodebuild')
elif build_tool in ['make', 'ninja', 'ninja-ios']: # TODO: Remove ninja-ios.
ret = os.path.join(SRC_DIR, 'out', target)
if 'CHROMIUM_OUT_DIR' in os.environ:
output_dir = os.environ.get('CHROMIUM_OUT_DIR').strip()
if not output_dir:
raise Error('CHROMIUM_OUT_DIR environment variable is set but blank!')
else:
output_dir = landmine_utils.gyp_generator_flags().get('output_dir', 'out')
ret = os.path.join(SRC_DIR, output_dir)
elif build_tool in ['msvs', 'vs', 'ib']:
ret = os.path.join(SRC_DIR, 'build', target)
ret = os.path.join(SRC_DIR, 'build')
else:
raise NotImplementedError('Unexpected GYP_GENERATORS (%s)' % build_tool)
return os.path.abspath(ret)
def set_up_landmines(target, new_landmines):
"""Does the work of setting, planting, and triggering landmines."""
out_dir = get_target_build_dir(landmine_utils.builder(), target)
landmines_path = os.path.join(out_dir, '.landmines')
if not os.path.exists(out_dir):
def extract_gn_build_commands(build_ninja_file):
"""Extracts from a build.ninja the commands to run GN.
The commands to run GN are the gn rule and build.ninja build step at the
top of the build.ninja file. We want to keep these when deleting GN builds
since we want to preserve the command-line flags to GN.
On error, returns the empty string."""
result = ""
with open(build_ninja_file, 'r') as f:
# Read until the second blank line. The first thing GN writes to the file
# is the "rule gn" and the second is the section for "build build.ninja",
# separated by blank lines.
num_blank_lines = 0
while num_blank_lines < 2:
line = f.readline()
if len(line) == 0:
return '' # Unexpected EOF.
result += line
if line[0] == '\n':
num_blank_lines = num_blank_lines + 1
return result
def delete_build_dir(build_dir):
# GN writes a build.ninja.d file. Note that not all GN builds have args.gn.
build_ninja_d_file = os.path.join(build_dir, 'build.ninja.d')
if not os.path.exists(build_ninja_d_file):
shutil.rmtree(build_dir)
return
if not os.path.exists(landmines_path):
print "Landmines tracker didn't exists."
# FIXME(machenbach): Clobber deletes the .landmines tracker. Difficult
# to know if we are right after a clobber or if it is first-time landmines
# deployment. Also, a landmine-triggered clobber right after a clobber is
# not possible. Different clobber methods for msvs, xcode and make all
# have different blacklists of files that are not deleted.
# GN builds aren't automatically regenerated when you sync. To avoid
# messing with the GN workflow, erase everything but the args file, and
# write a dummy build.ninja file that will automatically rerun GN the next
# time Ninja is run.
build_ninja_file = os.path.join(build_dir, 'build.ninja')
build_commands = extract_gn_build_commands(build_ninja_file)
try:
gn_args_file = os.path.join(build_dir, 'args.gn')
with open(gn_args_file, 'r') as f:
args_contents = f.read()
except IOError:
args_contents = ''
shutil.rmtree(build_dir)
# Put back the args file (if any).
os.mkdir(build_dir)
if args_contents != '':
with open(gn_args_file, 'w') as f:
f.write(args_contents)
# Write the build.ninja file sufficiently to regenerate itself.
with open(os.path.join(build_dir, 'build.ninja'), 'w') as f:
if build_commands != '':
f.write(build_commands)
else:
# Couldn't parse the build.ninja file, write a default thing.
f.write('''rule gn
command = gn -q gen //out/%s/
description = Regenerating ninja files
build build.ninja: gn
generator = 1
depfile = build.ninja.d
''' % (os.path.split(build_dir)[1]))
# Write a .d file for the build which references a nonexistant file. This
# will make Ninja always mark the build as dirty.
with open(build_ninja_d_file, 'w') as f:
f.write('build.ninja: nonexistant_file.gn\n')
def needs_clobber(landmines_path, new_landmines):
if os.path.exists(landmines_path):
triggered = os.path.join(out_dir, '.landmines_triggered')
with open(landmines_path, 'r') as f:
old_landmines = f.readlines()
if old_landmines != new_landmines:
@ -75,14 +141,54 @@ def set_up_landmines(target, new_landmines):
diff = difflib.unified_diff(old_landmines, new_landmines,
fromfile='old_landmines', tofile='new_landmines',
fromfiledate=old_date, tofiledate=time.ctime(), n=0)
sys.stdout.write('Clobbering due to:\n')
sys.stdout.writelines(diff)
return True
else:
sys.stdout.write('Clobbering due to missing landmines file.\n')
return True
return False
with open(triggered, 'w') as f:
f.writelines(diff)
print "Setting landmine: %s" % triggered
elif os.path.exists(triggered):
# Remove false triggered landmines.
os.remove(triggered)
print "Removing landmine: %s" % triggered
def clobber_if_necessary(new_landmines):
"""Does the work of setting, planting, and triggering landmines."""
out_dir = get_build_dir(landmine_utils.builder())
landmines_path = os.path.normpath(os.path.join(out_dir, '..', '.landmines'))
try:
os.makedirs(out_dir)
except OSError as e:
if e.errno == errno.EEXIST:
pass
if needs_clobber(landmines_path, new_landmines):
# Clobber contents of build directory but not directory itself: some
# checkouts have the build directory mounted.
for f in os.listdir(out_dir):
path = os.path.join(out_dir, f)
if os.path.basename(out_dir) == 'build':
# Only delete build directories and files for MSVS builds as the folder
# shares some checked out files and directories.
if (os.path.isdir(path) and
re.search(r'(?:[Rr]elease)|(?:[Dd]ebug)', f)):
delete_build_dir(path)
elif (os.path.isfile(path) and
(path.endswith('.sln') or
path.endswith('.vcxproj') or
path.endswith('.vcxproj.user'))):
os.unlink(path)
else:
if os.path.isfile(path):
os.unlink(path)
elif os.path.isdir(path):
delete_build_dir(path)
if os.path.basename(out_dir) == 'xcodebuild':
# Xcodebuild puts an additional project file structure into build,
# while the output folder is xcodebuild.
project_dir = os.path.join(SRC_DIR, 'build', 'all.xcodeproj')
if os.path.exists(project_dir) and os.path.isdir(project_dir):
delete_build_dir(project_dir)
# Save current set of landmines for next time.
with open(landmines_path, 'w') as f:
f.writelines(new_landmines)
@ -123,14 +229,14 @@ def main():
if landmine_utils.builder() in ('dump_dependency_json', 'eclipse'):
return 0
gyp_environment.set_environment()
landmines = []
for s in landmine_scripts:
proc = subprocess.Popen([sys.executable, s], stdout=subprocess.PIPE)
output, _ = proc.communicate()
landmines.extend([('%s\n' % l.strip()) for l in output.splitlines()])
for target in ('Debug', 'Release'):
set_up_landmines(target, landmines)
clobber_if_necessary(landmines)
return 0

deps/v8/build/standalone.gypi (89)

@ -146,11 +146,17 @@
}, {
'v8_enable_gdbjit%': 0,
}],
['(OS=="linux" or OS=="mac") and (target_arch=="ia32" or target_arch=="x64")', {
['(OS=="linux" or OS=="mac") and (target_arch=="ia32" or target_arch=="x64") and \
(v8_target_arch!="x87")', {
'clang%': 1,
}, {
'clang%': 0,
}],
['host_arch!="ppc" and host_arch!="ppc64" and host_arch!="ppc64le"', {
'host_clang%': '1',
}, {
'host_clang%': '0',
}],
],
# Default ARM variable settings.
'arm_version%': 'default',
@ -175,16 +181,11 @@
'default_configuration': 'Debug',
'configurations': {
'DebugBaseCommon': {
'cflags': [ '-g', '-O0' ],
'conditions': [
['(v8_target_arch=="ia32" or v8_target_arch=="x87") and \
OS=="linux"', {
'defines': [
'_GLIBCXX_DEBUG'
],
}],
[ 'OS=="aix"', {
'cflags': [ '-gxcoff' ],
['OS=="aix"', {
'cflags': [ '-g', '-Og', '-gxcoff' ],
}, {
'cflags': [ '-g', '-O0' ],
}],
],
},
@ -198,6 +199,19 @@
# Xcode insists on this empty entry.
},
},
'conditions':[
['(clang==1 or host_clang==1) and OS!="win"', {
# This is here so that all files get recompiled after a clang roll and
# when turning clang on or off.
# (defines are passed via the command line, and build systems rebuild
# things when their commandline changes). Nothing should ever read this
# define.
'defines': ['CR_CLANG_REVISION=<!(<(DEPTH)/tools/clang/scripts/update.sh --print-revision)'],
'cflags+': [
'-Wno-format-pedantic',
],
}],
],
'target_conditions': [
['v8_code == 0', {
'defines!': [
@ -205,8 +219,33 @@
],
'conditions': [
['os_posix == 1 and OS != "mac"', {
# We don't want to get warnings from third-party code,
# so remove any existing warning-enabling flags like -Wall.
'cflags!': [
'-pedantic',
'-Wall',
'-Werror',
'-Wextra',
],
'cflags+': [
# Clang considers the `register` keyword as deprecated, but
# ICU uses it all over the place.
'-Wno-deprecated-register',
# ICU uses its own deprecated functions.
'-Wno-deprecated-declarations',
# ICU prefers `a && b || c` over `(a && b) || c`.
'-Wno-logical-op-parentheses',
# ICU has some `unsigned < 0` checks.
'-Wno-tautological-compare',
# uresdata.c has switch(RES_GET_TYPE(x)) code. The
# RES_GET_TYPE macro returns an UResType enum, but some switch
# statement contains case values that aren't part of that
# enum (e.g. URES_TABLE32 which is in UResInternalType). This
# is on purpose.
'-Wno-switch',
],
'cflags_cc!': [
'-Wnon-virtual-dtor',
],
}],
['OS == "mac"', {
@ -292,7 +331,6 @@
'cflags': [
'-Wall',
'<(werror)',
'-W',
'-Wno-unused-parameter',
'-Wno-long-long',
'-pthread',
@ -304,7 +342,7 @@
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ],
'ldflags': [ '-pthread', ],
'conditions': [
[ 'host_arch=="ppc64"', {
[ 'host_arch=="ppc64" and OS!="aix"', {
'cflags': [ '-mminimal-toc' ],
}],
[ 'visibility=="hidden" and v8_enable_backtrace==0', {
@ -323,7 +361,6 @@
'cflags': [
'-Wall',
'<(werror)',
'-W',
'-Wno-unused-parameter',
'-fno-exceptions',
# Don't warn about the "struct foo f = {0};" initialization pattern.
@ -466,7 +503,6 @@
'WARNING_CFLAGS': [
'-Wall',
'-Wendif-labels',
'-W',
'-Wno-unused-parameter',
# Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers',
@ -492,6 +528,31 @@
], # target_conditions
}, # target_defaults
}], # OS=="mac"
['clang!=1 and host_clang==1 and target_arch!="ia32" and target_arch!="x64"', {
'make_global_settings': [
['CC.host', '../<(clang_dir)/bin/clang'],
['CXX.host', '../<(clang_dir)/bin/clang++'],
],
}],
['clang==0 and host_clang==1 and target_arch!="ia32" and target_arch!="x64"', {
'target_conditions': [
['_toolset=="host"', {
'cflags_cc': [ '-std=gnu++11', ],
}],
],
'target_defaults': {
'target_conditions': [
['_toolset=="host"', { 'cflags!': [ '-Wno-unused-local-typedefs' ]}],
],
},
}],
['clang==1 and "<(GENERATOR)"=="ninja"', {
# See http://crbug.com/110262
'target_defaults': {
'cflags': [ '-fcolor-diagnostics' ],
'xcode_settings': { 'OTHER_CFLAGS': [ '-fcolor-diagnostics' ] },
},
}],
['clang==1 and ((OS!="mac" and OS!="ios") or clang_xcode==0) '
'and OS!="win" and "<(GENERATOR)"=="make"', {
'make_global_settings': [

deps/v8/build/toolchain.gypi (13)

@ -61,6 +61,9 @@
# Similar to the ARM hard float ABI but on MIPS.
'v8_use_mips_abi_hardfloat%': 'true',
# Force disable libstdc++ debug mode.
'disable_glibcxx_debug%': 0,
'v8_enable_backtrace%': 0,
# Enable profiling support. Only required on Windows.
@ -1134,8 +1137,18 @@
# Support for backtrace_symbols.
'ldflags': [ '-rdynamic' ],
}],
['OS=="linux" and disable_glibcxx_debug==0', {
# Enable libstdc++ debugging facilities to help catch problems
# early, see http://crbug.com/65151 .
'defines': ['_GLIBCXX_DEBUG=1',],
}],
['OS=="aix"', {
'ldflags': [ '-Wl,-bbigtoc' ],
'conditions': [
['v8_target_arch=="ppc64"', {
'cflags': [ '-maix64 -mcmodel=large' ],
}],
],
}],
['OS=="android"', {
'variables': {

deps/v8/include/v8-debug.h (15)

@ -202,13 +202,22 @@ class V8_EXPORT Debug {
* }
* \endcode
*/
static Local<Value> Call(v8::Handle<v8::Function> fun,
Handle<Value> data = Handle<Value>());
static V8_DEPRECATE_SOON(
"Use maybe version",
Local<Value> Call(v8::Handle<v8::Function> fun,
Handle<Value> data = Handle<Value>()));
// TODO(dcarney): data arg should be a MaybeLocal
static MaybeLocal<Value> Call(Local<Context> context,
v8::Handle<v8::Function> fun,
Handle<Value> data = Handle<Value>());
/**
* Returns a mirror object for the given object.
*/
static Local<Value> GetMirror(v8::Handle<v8::Value> obj);
static V8_DEPRECATE_SOON("Use maybe version",
Local<Value> GetMirror(v8::Handle<v8::Value> obj));
static MaybeLocal<Value> GetMirror(Local<Context> context,
v8::Handle<v8::Value> obj);
/**
* Makes V8 process all pending debug messages.
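
The v8-debug.h hunk above deprecates the context-free Debug::Call and Debug::GetMirror overloads in favour of MaybeLocal-returning versions that take an explicit context. A hedged sketch of calling the new overload from embedder code follows; the wrapper function itself is hypothetical.

    #include "v8.h"
    #include "v8-debug.h"

    // Hypothetical helper: call a function through the debugger using the new
    // overload and unwrap the MaybeLocal result explicitly.
    v8::Local<v8::Value> CallInDebugger(v8::Local<v8::Context> context,
                                        v8::Local<v8::Function> fun) {
      v8::MaybeLocal<v8::Value> result = v8::Debug::Call(context, fun);
      v8::Local<v8::Value> value;
      if (!result.ToLocal(&value)) {
        // The call threw or execution was terminated; no value is available.
        return v8::Undefined(context->GetIsolate());
      }
      return value;
    }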

deps/v8/include/v8-profiler.h (33)

@ -168,21 +168,12 @@ class V8_EXPORT CpuProfiler {
*/
void StartProfiling(Handle<String> title, bool record_samples = false);
/** Deprecated. Use StartProfiling instead. */
V8_DEPRECATED("Use StartProfiling",
void StartCpuProfiling(Handle<String> title,
bool record_samples = false));
/**
* Stops collecting CPU profile with a given title and returns it.
* If the title given is empty, finishes the last profile started.
*/
CpuProfile* StopProfiling(Handle<String> title);
/** Deprecated. Use StopProfiling instead. */
V8_DEPRECATED("Use StopProfiling",
const CpuProfile* StopCpuProfiling(Handle<String> title));
/**
* Tells the profiler whether the embedder is idle.
*/
@ -271,10 +262,6 @@ class V8_EXPORT HeapGraphNode {
*/
SnapshotObjectId GetId() const;
/** Returns node's own size, in bytes. */
V8_DEPRECATED("Use GetShallowSize instead",
int GetSelfSize() const);
/** Returns node's own size, in bytes. */
size_t GetShallowSize() const;
@ -326,12 +313,6 @@ class V8_EXPORT HeapSnapshot {
kJSON = 0 // See format description near 'Serialize' method.
};
/** Returns heap snapshot UID (assigned by the profiler.) */
unsigned GetUid() const;
/** Returns heap snapshot title. */
Handle<String> GetTitle() const;
/** Returns the root node of the heap graph. */
const HeapGraphNode* GetRoot() const;
@ -380,7 +361,8 @@ class V8_EXPORT HeapSnapshot {
* Nodes reference strings, other nodes, and edges by their indexes
* in corresponding arrays.
*/
void Serialize(OutputStream* stream, SerializationFormat format) const;
void Serialize(OutputStream* stream,
SerializationFormat format = kJSON) const;
};
@ -465,10 +447,9 @@ class V8_EXPORT HeapProfiler {
};
/**
* Takes a heap snapshot and returns it. Title may be an empty string.
* Takes a heap snapshot and returns it.
*/
const HeapSnapshot* TakeHeapSnapshot(
Handle<String> title,
ActivityControl* control = NULL,
ObjectNameResolver* global_object_name_resolver = NULL);
@ -490,17 +471,19 @@ class V8_EXPORT HeapProfiler {
* reports updates for all previous time intervals via the OutputStream
* object. Updates on each time interval are provided as a stream of the
* HeapStatsUpdate structure instances.
* The return value of the function is the last seen heap object Id.
* If |timestamp_us| is supplied, timestamp of the new entry will be written
* into it. The return value of the function is the last seen heap object Id.
*
* StartTrackingHeapObjects must be called before the first call to this
* method.
*/
SnapshotObjectId GetHeapStats(OutputStream* stream);
SnapshotObjectId GetHeapStats(OutputStream* stream,
int64_t* timestamp_us = NULL);
/**
* Stops tracking of heap objects population statistics, cleans up all
* collected data. StartHeapObjectsTracking must be called again prior to
* calling PushHeapObjectsStats next time.
* calling GetHeapStats next time.
*/
void StopTrackingHeapObjects();
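
The comment above documents the new optional |timestamp_us| out-parameter on HeapProfiler::GetHeapStats. A minimal sketch of calling it from embedder code follows; the NullStream class and the polling helper are assumptions for illustration, not part of the V8 API.

    #include "v8.h"
    #include "v8-profiler.h"

    // Hypothetical OutputStream that discards the streamed HeapStatsUpdate
    // records; only the returned last-seen object id and the timestamp matter here.
    class NullStream : public v8::OutputStream {
     public:
      void EndOfStream() {}
      WriteResult WriteAsciiChunk(char* data, int size) { return kContinue; }
      WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* data, int count) {
        return kContinue;  // drop the per-interval stats
      }
    };

    v8::SnapshotObjectId PollHeapStats(v8::Isolate* isolate, int64_t* timestamp_us) {
      // StartTrackingHeapObjects() must have been called on this profiler earlier.
      NullStream stream;
      return isolate->GetHeapProfiler()->GetHeapStats(&stream, timestamp_us);
    }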

deps/v8/include/v8-util.h (138)

@ -12,7 +12,7 @@
/**
* Support for Persistent containers.
*
* C++11 embedders can use STL containers with UniquePersistent values,
* C++11 embedders can use STL containers with Global values,
* but pre-C++11 does not support the required move semantic and hence
* may want these container classes.
*/
@ -22,7 +22,10 @@ typedef uintptr_t PersistentContainerValue;
static const uintptr_t kPersistentContainerNotFound = 0;
enum PersistentContainerCallbackType {
kNotWeak,
kWeak
// These correspond to v8::WeakCallbackType
kWeakWithParameter,
kWeakWithInternalFields,
kWeak = kWeakWithParameter // For backwards compatibility. Deprecate.
};
@ -101,12 +104,12 @@ class DefaultPersistentValueMapTraits : public StdMapTraits<K, V> {
return K();
}
static void DisposeCallbackData(WeakCallbackDataType* data) { }
static void Dispose(Isolate* isolate, UniquePersistent<V> value, K key) { }
static void Dispose(Isolate* isolate, Global<V> value, K key) {}
};
template <typename K, typename V>
class DefaultPhantomPersistentValueMapTraits : public StdMapTraits<K, V> {
class DefaultGlobalMapTraits : public StdMapTraits<K, V> {
private:
template <typename T>
struct RemovePointer;
@ -114,25 +117,26 @@ class DefaultPhantomPersistentValueMapTraits : public StdMapTraits<K, V> {
public:
// Weak callback & friends:
static const PersistentContainerCallbackType kCallbackType = kNotWeak;
typedef PersistentValueMap<
K, V, DefaultPhantomPersistentValueMapTraits<K, V> > MapType;
typedef void PhantomCallbackDataType;
typedef PersistentValueMap<K, V, DefaultGlobalMapTraits<K, V> > MapType;
typedef void WeakCallbackInfoType;
static PhantomCallbackDataType* PhantomCallbackParameter(MapType* map,
const K& key,
Local<V> value) {
return NULL;
static WeakCallbackInfoType* WeakCallbackParameter(MapType* map, const K& key,
Local<V> value) {
return nullptr;
}
static MapType* MapFromPhantomCallbackData(
const PhantomCallbackData<PhantomCallbackDataType>& data) {
return NULL;
static MapType* MapFromWeakCallbackInfo(
const WeakCallbackInfo<WeakCallbackInfoType>& data) {
return nullptr;
}
static K KeyFromPhantomCallbackData(
const PhantomCallbackData<PhantomCallbackDataType>& data) {
static K KeyFromWeakCallbackInfo(
const WeakCallbackInfo<WeakCallbackInfoType>& data) {
return K();
}
static void DisposeCallbackData(PhantomCallbackDataType* data) {}
static void Dispose(Isolate* isolate, UniquePersistent<V> value, K key) {}
static void DisposeCallbackData(WeakCallbackInfoType* data) {}
static void Dispose(Isolate* isolate, Global<V> value, K key) {}
static void DisposeWeak(Isolate* isolate,
const WeakCallbackInfo<WeakCallbackInfoType>& data,
K key) {}
private:
template <typename T>
@ -143,8 +147,8 @@ class DefaultPhantomPersistentValueMapTraits : public StdMapTraits<K, V> {
/**
* A map wrapper that allows using UniquePersistent as a mapped value.
* C++11 embedders don't need this class, as they can use UniquePersistent
* A map wrapper that allows using Global as a mapped value.
* C++11 embedders don't need this class, as they can use Global
* directly in std containers.
*
* The map relies on a backing map, whose type and accessors are described
@ -203,7 +207,7 @@ class PersistentValueMapBase {
/**
* Return value for key and remove it from the map.
*/
UniquePersistent<V> Remove(const K& key) {
Global<V> Remove(const K& key) {
return Release(Traits::Remove(&impl_, key)).Pass();
}
@ -255,7 +259,7 @@ class PersistentValueMapBase {
private:
friend class PersistentValueMapBase;
friend class PersistentValueMap<K, V, Traits>;
friend class PhantomPersistentValueMap<K, V, Traits>;
friend class GlobalValueMap<K, V, Traits>;
explicit PersistentValueReference(PersistentContainerValue value)
: value_(value) { }
@ -293,24 +297,23 @@ class PersistentValueMapBase {
return reinterpret_cast<V*>(v);
}
static PersistentContainerValue ClearAndLeak(
UniquePersistent<V>* persistent) {
static PersistentContainerValue ClearAndLeak(Global<V>* persistent) {
V* v = persistent->val_;
persistent->val_ = 0;
return reinterpret_cast<PersistentContainerValue>(v);
}
static PersistentContainerValue Leak(UniquePersistent<V>* persistent) {
static PersistentContainerValue Leak(Global<V>* persistent) {
return reinterpret_cast<PersistentContainerValue>(persistent->val_);
}
/**
* Return a container value as UniquePersistent and make sure the weak
* Return a container value as Global and make sure the weak
* callback is properly disposed of. All remove functionality should go
* through this.
*/
static UniquePersistent<V> Release(PersistentContainerValue v) {
UniquePersistent<V> p;
static Global<V> Release(PersistentContainerValue v) {
Global<V> p;
p.val_ = FromVal(v);
if (Traits::kCallbackType != kNotWeak && p.IsWeak()) {
Traits::DisposeCallbackData(
@ -319,6 +322,12 @@ class PersistentValueMapBase {
return p.Pass();
}
void RemoveWeak(const K& key) {
Global<V> p;
p.val_ = FromVal(Traits::Remove(&impl_, key));
p.Reset();
}
private:
PersistentValueMapBase(PersistentValueMapBase&);
void operator=(PersistentValueMapBase&);
@ -351,17 +360,17 @@ class PersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
/**
* Put value into map. Depending on Traits::kIsWeak, the value will be held
* by the map strongly or weakly.
* Returns old value as UniquePersistent.
* Returns old value as Global.
*/
UniquePersistent<V> Set(const K& key, Local<V> value) {
UniquePersistent<V> persistent(this->isolate(), value);
Global<V> Set(const K& key, Local<V> value) {
Global<V> persistent(this->isolate(), value);
return SetUnique(key, &persistent);
}
/**
* Put value into map, like Set(const K&, Local<V>).
*/
UniquePersistent<V> Set(const K& key, UniquePersistent<V> value) {
Global<V> Set(const K& key, Global<V> value) {
return SetUnique(key, &value);
}
@ -369,7 +378,7 @@ class PersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
* Put the value into the map, and set the 'weak' callback when demanded
* by the Traits class.
*/
UniquePersistent<V> SetUnique(const K& key, UniquePersistent<V>* persistent) {
Global<V> SetUnique(const K& key, Global<V>* persistent) {
if (Traits::kCallbackType != kNotWeak) {
Local<V> value(Local<V>::New(this->isolate(), *persistent));
persistent->template SetWeak<typename Traits::WeakCallbackDataType>(
@ -384,8 +393,8 @@ class PersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
* Put a value into the map and update the reference.
* Restrictions of GetReference apply here as well.
*/
UniquePersistent<V> Set(const K& key, UniquePersistent<V> value,
PersistentValueReference* reference) {
Global<V> Set(const K& key, Global<V> value,
PersistentValueReference* reference) {
*reference = this->Leak(&value);
return SetUnique(key, &value);
}
@ -406,9 +415,9 @@ class PersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
template <typename K, typename V, typename Traits>
class PhantomPersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
class GlobalValueMap : public PersistentValueMapBase<K, V, Traits> {
public:
explicit PhantomPersistentValueMap(Isolate* isolate)
explicit GlobalValueMap(Isolate* isolate)
: PersistentValueMapBase<K, V, Traits>(isolate) {}
typedef
@ -418,17 +427,17 @@ class PhantomPersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
/**
* Put value into map. Depending on Traits::kIsWeak, the value will be held
* by the map strongly or weakly.
* Returns old value as UniquePersistent.
* Returns old value as Global.
*/
UniquePersistent<V> Set(const K& key, Local<V> value) {
UniquePersistent<V> persistent(this->isolate(), value);
Global<V> Set(const K& key, Local<V> value) {
Global<V> persistent(this->isolate(), value);
return SetUnique(key, &persistent);
}
/**
* Put value into map, like Set(const K&, Local<V>).
*/
UniquePersistent<V> Set(const K& key, UniquePersistent<V> value) {
Global<V> Set(const K& key, Global<V> value) {
return SetUnique(key, &value);
}
@ -436,11 +445,16 @@ class PhantomPersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
* Put the value into the map, and set the 'weak' callback when demanded
* by the Traits class.
*/
UniquePersistent<V> SetUnique(const K& key, UniquePersistent<V>* persistent) {
Global<V> SetUnique(const K& key, Global<V>* persistent) {
if (Traits::kCallbackType != kNotWeak) {
WeakCallbackType callback_type =
Traits::kCallbackType == kWeakWithInternalFields
? WeakCallbackType::kInternalFields
: WeakCallbackType::kParameter;
Local<V> value(Local<V>::New(this->isolate(), *persistent));
persistent->template SetPhantom<typename Traits::WeakCallbackDataType>(
Traits::WeakCallbackParameter(this, key, value), WeakCallback, 0, 1);
persistent->template SetWeak<typename Traits::WeakCallbackDataType>(
Traits::WeakCallbackParameter(this, key, value), WeakCallback,
callback_type);
}
PersistentContainerValue old_value =
Traits::Set(this->impl(), key, this->ClearAndLeak(persistent));
@ -451,33 +465,32 @@ class PhantomPersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
* Put a value into the map and update the reference.
* Restrictions of GetReference apply here as well.
*/
UniquePersistent<V> Set(const K& key, UniquePersistent<V> value,
PersistentValueReference* reference) {
Global<V> Set(const K& key, Global<V> value,
PersistentValueReference* reference) {
*reference = this->Leak(&value);
return SetUnique(key, &value);
}
private:
static void WeakCallback(
const PhantomCallbackData<typename Traits::WeakCallbackDataType>& data) {
const WeakCallbackInfo<typename Traits::WeakCallbackDataType>& data) {
if (Traits::kCallbackType != kNotWeak) {
PhantomPersistentValueMap<K, V, Traits>* persistentValueMap =
Traits::MapFromPhantomCallbackData(data);
K key = Traits::KeyFromPhantomCallbackData(data);
Traits::Dispose(data.GetIsolate(), persistentValueMap->Remove(key).Pass(),
key);
Traits::DisposeCallbackData(data.GetParameter());
GlobalValueMap<K, V, Traits>* persistentValueMap =
Traits::MapFromWeakCallbackInfo(data);
K key = Traits::KeyFromWeakCallbackInfo(data);
persistentValueMap->RemoveWeak(key);
Traits::DisposeWeak(data.GetIsolate(), data, key);
}
}
};
/**
* A map that uses UniquePersistent as value and std::map as the backing
* A map that uses Global as value and std::map as the backing
* implementation. Persistents are held non-weak.
*
* C++11 embedders don't need this class, as they can use
* UniquePersistent directly in std containers.
* Global directly in std containers.
*/
template<typename K, typename V,
typename Traits = DefaultPersistentValueMapTraits<K, V> >
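A minimal usage sketch of the std::map-backed map described above (not part of the diff). It assumes the class declared here is StdPersistentValueMap, as in v8-util.h; CacheTemplate, the isolate and the template passed in are illustration only.

#include <string>
#include "v8.h"
#include "v8-util.h"

void CacheTemplate(v8::Isolate* isolate, v8::Local<v8::ObjectTemplate> tmpl) {
  // Values are stored as Global handles and held strongly by the map.
  v8::StdPersistentValueMap<std::string, v8::ObjectTemplate> cache(isolate);
  cache.Set("point", tmpl);
  if (cache.Contains("point")) {
    // Get() rehydrates the stored Global into a Local for normal use.
    v8::Local<v8::ObjectTemplate> again = cache.Get("point");
    (void)again;
  }
}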
@ -514,8 +527,8 @@ class DefaultPersistentValueVectorTraits {
/**
* A vector wrapper that safely stores UniquePersistent values.
* C++11 embedders don't need this class, as they can use UniquePersistent
* A vector wrapper that safely stores Global values.
* C++11 embedders don't need this class, as they can use Global
* directly in std containers.
*
* This class relies on a backing vector implementation, whose type and methods
@ -536,14 +549,14 @@ class PersistentValueVector {
* Append a value to the vector.
*/
void Append(Local<V> value) {
UniquePersistent<V> persistent(isolate_, value);
Global<V> persistent(isolate_, value);
Traits::Append(&impl_, ClearAndLeak(&persistent));
}
/**
* Append a persistent's value to the vector.
*/
void Append(UniquePersistent<V> persistent) {
void Append(Global<V> persistent) {
Traits::Append(&impl_, ClearAndLeak(&persistent));
}
@ -574,7 +587,7 @@ class PersistentValueVector {
void Clear() {
size_t length = Traits::Size(&impl_);
for (size_t i = 0; i < length; i++) {
UniquePersistent<V> p;
Global<V> p;
p.val_ = FromVal(Traits::Get(&impl_, i));
}
Traits::Clear(&impl_);
@ -589,8 +602,7 @@ class PersistentValueVector {
}
private:
static PersistentContainerValue ClearAndLeak(
UniquePersistent<V>* persistent) {
static PersistentContainerValue ClearAndLeak(Global<V>* persistent) {
V* v = persistent->val_;
persistent->val_ = 0;
return reinterpret_cast<PersistentContainerValue>(v);
@ -606,4 +618,4 @@ class PersistentValueVector {
} // namespace v8
#endif // V8_UTIL_H_
#endif // V8_UTIL_H
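Before moving on to the next file, a matching sketch for the PersistentValueVector shown above (not part of the diff; KeepAlive, the isolate and the two objects are illustration only). Append stores each value as a Global, Get hands back a Local, and Clear disposes every stored handle, mirroring the Clear() loop above.

#include "v8.h"
#include "v8-util.h"

void KeepAlive(v8::Isolate* isolate, v8::Local<v8::Object> a,
               v8::Local<v8::Object> b) {
  v8::PersistentValueVector<v8::Object> vec(isolate);
  vec.Append(a);  // stored internally as a Global<Object>
  vec.Append(b);
  for (size_t i = 0; i < vec.Size(); ++i) {
    // An active HandleScope is assumed, as it is for any Local creation.
    v8::Local<v8::Object> obj = vec.Get(i);
    (void)obj;
  }
  vec.Clear();  // releases every stored handle
}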

4
deps/v8/include/v8-version.h

@ -9,8 +9,8 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 4
#define V8_MINOR_VERSION 2
#define V8_BUILD_NUMBER 77
#define V8_MINOR_VERSION 3
#define V8_BUILD_NUMBER 61
#define V8_PATCH_LEVEL 21
// Use 1 for candidates and 0 otherwise.
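The same numbers are available to embedders at compile time and at run time; a minimal sketch (not part of the diff) that gates the build on the 4.3 line this commit pulls in and prints both views of the version:

#include <cstdio>
#include "v8.h"
#include "v8-version.h"

#if (V8_MAJOR_VERSION * 100 + V8_MINOR_VERSION) < 403
#error "V8 4.3 or newer required"
#endif

void PrintV8Version() {
  // Compile-time macros vs. the runtime string reported by the library.
  std::printf("compiled against %d.%d.%d.%d, running %s\n",
              V8_MAJOR_VERSION, V8_MINOR_VERSION, V8_BUILD_NUMBER,
              V8_PATCH_LEVEL, v8::V8::GetVersion());
}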

1162
deps/v8/include/v8.h

File diff suppressed because it is too large

17
deps/v8/include/v8config.h

@ -42,8 +42,8 @@
((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) >= \
((major) * 10000 + (minor) * 100 + (patchlevel)))
#elif defined(__GNUC__) && defined(__GNUC_MINOR__)
# define V8_GNUC_PREREQ(major, minor, patchlevel) \
((__GNUC__ * 10000 + __GNUC_MINOR__) >= \
# define V8_GNUC_PREREQ(major, minor, patchlevel) \
((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= \
((major) * 10000 + (minor) * 100 + (patchlevel)))
#else
# define V8_GNUC_PREREQ(major, minor, patchlevel) 0
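The v8config.h change fixes the fallback used when __GNUC_PATCHLEVEL__ is not defined: the old form never scaled the minor version, so GCC 4.8 evaluated to 4 * 10000 + 8 = 40008 and wrongly failed a (4, 7, 0) check against 40700; with the added "* 100" it yields 40800 and passes. A minimal usage sketch (MY_ASSUME_ALIGNED is hypothetical, not from the diff):

#include "v8config.h"

// Only use __builtin_assume_aligned where GCC is new enough (4.7+) to provide it.
#if V8_GNUC_PREREQ(4, 7, 0)
# define MY_ASSUME_ALIGNED(ptr, n) __builtin_assume_aligned((ptr), (n))
#else
# define MY_ASSUME_ALIGNED(ptr, n) (ptr)
#endif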
@ -343,6 +343,10 @@ declarator __attribute__((deprecated))
#endif
// a macro to make it easier to see what will be deprecated.
#define V8_DEPRECATE_SOON(message, declarator) declarator
// A macro to provide the compiler with branch prediction information.
#if V8_HAS_BUILTIN_EXPECT
# define V8_UNLIKELY(condition) (__builtin_expect(!!(condition), 0))
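V8_UNLIKELY simply wraps __builtin_expect so the compiler lays the annotated branch out off the hot path; the companion #else branch (not shown in this hunk) reduces it to the bare condition. A one-line usage sketch (the function is hypothetical, not from the diff):

#include "v8config.h"

int FirstByteOrError(const char* s) {
  if (V8_UNLIKELY(s == nullptr)) return -1;  // expected to be the rare case
  return s[0];
}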
@ -425,4 +429,13 @@ namespace v8 { template <typename T> class AlignOfHelper { char c; T t; }; }
# define V8_ALIGNOF(type) (sizeof(::v8::AlignOfHelper<type>) - sizeof(type))
#endif
// Annotate a function indicating the caller must examine the return value.
// Use like:
// int foo() V8_WARN_UNUSED_RESULT;
#if V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT
#define V8_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
#else
#define V8_WARN_UNUSED_RESULT /* NOT SUPPORTED */
#endif
#endif // V8CONFIG_H_

2
deps/v8/src/DEPS

@ -7,7 +7,7 @@ include_rules = [
]
specific_include_rules = {
"(mksnapshot|d8)\.cc": [
"d8\.cc": [
"+include/libplatform/libplatform.h",
],
}

97
deps/v8/src/accessors.cc

@ -242,14 +242,8 @@ void Accessors::ArrayLengthSetter(
return;
}
Handle<Object> exception;
maybe = isolate->factory()->NewRangeError("invalid_array_length",
HandleVector<Object>(NULL, 0));
if (!maybe.ToHandle(&exception)) {
isolate->OptionalRescheduleException(false);
return;
}
Handle<Object> exception = isolate->factory()->NewRangeError(
"invalid_array_length", HandleVector<Object>(NULL, 0));
isolate->ScheduleThrow(*exception);
}
@ -1101,12 +1095,52 @@ void Accessors::FunctionLengthGetter(
}
MUST_USE_RESULT static MaybeHandle<Object> ReplaceAccessorWithDataProperty(
Isolate* isolate, Handle<JSObject> object, Handle<Name> name,
Handle<Object> value, bool is_observed, Handle<Object> old_value) {
LookupIterator it(object, name);
CHECK_EQ(LookupIterator::ACCESSOR, it.state());
DCHECK(it.HolderIsReceiverOrHiddenPrototype());
it.ReconfigureDataProperty(value, it.property_details().attributes());
value = it.WriteDataValue(value);
if (is_observed && !old_value->SameValue(*value)) {
return JSObject::EnqueueChangeRecord(object, "update", name, old_value);
}
return value;
}
MUST_USE_RESULT static MaybeHandle<Object> SetFunctionLength(
Isolate* isolate, Handle<JSFunction> function, Handle<Object> value) {
Handle<Object> old_value;
bool is_observed = function->map()->is_observed();
if (is_observed) {
old_value = handle(Smi::FromInt(function->shared()->length()), isolate);
}
return ReplaceAccessorWithDataProperty(isolate, function,
isolate->factory()->length_string(),
value, is_observed, old_value);
}
void Accessors::FunctionLengthSetter(
v8::Local<v8::Name> name,
v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
// Function length is non writable, non configurable.
UNREACHABLE();
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> value = Utils::OpenHandle(*val);
if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) return;
Handle<JSFunction> object =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
if (SetFunctionLength(isolate, object, value).is_null()) {
isolate->OptionalRescheduleException(false);
}
}
@ -1137,12 +1171,35 @@ void Accessors::FunctionNameGetter(
}
MUST_USE_RESULT static MaybeHandle<Object> SetFunctionName(
Isolate* isolate, Handle<JSFunction> function, Handle<Object> value) {
Handle<Object> old_value;
bool is_observed = function->map()->is_observed();
if (is_observed) {
old_value = handle(function->shared()->name(), isolate);
}
return ReplaceAccessorWithDataProperty(isolate, function,
isolate->factory()->name_string(),
value, is_observed, old_value);
}
void Accessors::FunctionNameSetter(
v8::Local<v8::Name> name,
v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
// Function name is non writable, non configurable.
UNREACHABLE();
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> value = Utils::OpenHandle(*val);
if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) return;
Handle<JSFunction> object =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
if (SetFunctionName(isolate, object, value).is_null()) {
isolate->OptionalRescheduleException(false);
}
}
@ -1459,14 +1516,8 @@ static void ModuleGetExport(
if (value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
Handle<Object> exception;
MaybeHandle<Object> maybe = isolate->factory()->NewReferenceError(
Handle<Object> exception = isolate->factory()->NewReferenceError(
"not_defined", HandleVector(&name, 1));
if (!maybe.ToHandle(&exception)) {
isolate->OptionalRescheduleException(false);
return;
}
isolate->ScheduleThrow(*exception);
return;
}
@ -1486,14 +1537,8 @@ static void ModuleSetExport(
Isolate* isolate = context->GetIsolate();
if (old_value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
Handle<Object> exception;
MaybeHandle<Object> maybe = isolate->factory()->NewReferenceError(
Handle<Object> exception = isolate->factory()->NewReferenceError(
"not_defined", HandleVector(&name, 1));
if (!maybe.ToHandle(&exception)) {
isolate->OptionalRescheduleException(false);
return;
}
isolate->ScheduleThrow(*exception);
return;
}

6
deps/v8/src/api-natives.cc

@ -81,14 +81,14 @@ MaybeHandle<Object> DefineDataProperty(Isolate* isolate,
LookupIterator it(object, Handle<Name>::cast(key),
LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
DCHECK(maybe.has_value);
DCHECK(maybe.IsJust());
duplicate = it.IsFound();
} else {
uint32_t index = 0;
key->ToArrayIndex(&index);
Maybe<bool> maybe = JSReceiver::HasOwnElement(object, index);
if (!maybe.has_value) return MaybeHandle<Object>();
duplicate = maybe.value;
if (!maybe.IsJust()) return MaybeHandle<Object>();
duplicate = maybe.FromJust();
}
if (duplicate) {
Handle<Object> args[1] = {key};

3154
deps/v8/src/api.cc

File diff suppressed because it is too large

12
deps/v8/src/api.h

@ -319,6 +319,18 @@ inline v8::Local<T> ToApiHandle(
}
template <class T>
inline bool ToLocal(v8::internal::MaybeHandle<v8::internal::Object> maybe,
Local<T>* local) {
v8::internal::Handle<v8::internal::Object> handle;
if (maybe.ToHandle(&handle)) {
*local = Utils::Convert<v8::internal::Object, T>(handle);
return true;
}
return false;
}
// Implementations of ToLocal
#define MAKE_TO_LOCAL(Name, From, To) \

39
deps/v8/src/arm/assembler-arm-inl.h

@ -121,7 +121,7 @@ Address RelocInfo::target_address_address() {
if (FLAG_enable_ool_constant_pool ||
Assembler::IsMovW(Memory::int32_at(pc_))) {
// We return the PC for ool constant pool since this function is used by the
// serializerer and expects the address to reside within the code object.
// serializer and expects the address to reside within the code object.
return reinterpret_cast<Address>(pc_);
} else {
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
@ -184,12 +184,24 @@ void RelocInfo::set_target_object(Object* target,
}
Address RelocInfo::target_reference() {
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_internal_reference() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
return Memory::Address_at(pc_);
}
Address RelocInfo::target_internal_reference_address() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
return reinterpret_cast<Address>(pc_);
}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
@ -298,11 +310,14 @@ Object** RelocInfo::call_object_address() {
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) ||
IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) ||
IsExternalReference(rmode_));
Assembler::set_target_address_at(pc_, host_, NULL);
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_));
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = NULL;
} else {
Assembler::set_target_address_at(pc_, host_, NULL);
}
}
@ -333,6 +348,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
visitor->VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
} else if (((RelocInfo::IsJSReturn(mode) &&
@ -358,6 +375,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (heap->isolate()->debug()->has_break_points() &&
@ -534,6 +553,12 @@ void Assembler::deserialization_set_special_target_at(
}
void Assembler::deserialization_set_target_internal_reference_at(
Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
bool Assembler::is_constant_pool_load(Address pc) {
if (CpuFeatures::IsSupported(ARMv7)) {
return !Assembler::IsMovW(Memory::int32_at(pc)) ||

26
deps/v8/src/arm/assembler-arm.cc

@ -42,7 +42,6 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/macro-assembler.h"
#include "src/serialize.h"
namespace v8 {
namespace internal {
@ -246,27 +245,6 @@ bool RelocInfo::IsInConstantPool() {
}
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// Patch the code at the current address with the supplied instructions.
Instr* pc = reinterpret_cast<Instr*>(pc_);
Instr* instr = reinterpret_cast<Instr*>(instructions);
for (int i = 0; i < instruction_count; i++) {
*(pc + i) = *(instr + i);
}
// Indicate that code has changed.
CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}
// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
// Patch the code at the current address with a call to the target.
UNIMPLEMENTED();
}
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-arm-inl.h for inlined constructors
@ -1011,7 +989,7 @@ static bool fits_shifter(uint32_t imm32,
Instr* instr) {
// imm32 must be unsigned.
for (int rot = 0; rot < 16; rot++) {
uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
uint32_t imm8 = base::bits::RotateLeft32(imm32, 2 * rot);
if ((imm8 <= 0xff)) {
*rotate_imm = rot;
*immed_8 = imm8;
@ -3324,7 +3302,7 @@ Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) {
int Assembler::DecodeShiftImm(Instr instr) {
int rotate = Instruction::RotateValue(instr) * 2;
int immed8 = Instruction::Immed8Value(instr);
return (immed8 >> rotate) | (immed8 << (32 - rotate));
return base::bits::RotateRight32(immed8, rotate);
}
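Both rewrites in this file (fits_shifter and DecodeShiftImm) are the two directions of ARM's rotated-immediate operand encoding: an 8-bit value plus a 4-bit rotation field, with the value rotated right by twice the field. The open-coded shifts they replace also technically invoke undefined behaviour for a rotation of 0 (a shift by 32), which the base::bits helpers avoid. A standalone round-trip illustration, not V8 code (RotL32/RotR32 stand in for base::bits::RotateLeft32/RotateRight32):

#include <cstdint>

static uint32_t RotL32(uint32_t v, int s) {
  s &= 31;
  return (v << s) | (v >> ((32 - s) & 31));
}
static uint32_t RotR32(uint32_t v, int s) {
  s &= 31;
  return (v >> s) | (v << ((32 - s) & 31));
}

// Encode: find rot in [0, 15] so that imm32 rotated left by 2*rot fits in 8 bits.
bool EncodeArmImmediate(uint32_t imm32, int* rotate_imm, uint32_t* immed_8) {
  for (int rot = 0; rot < 16; rot++) {
    uint32_t imm8 = RotL32(imm32, 2 * rot);
    if (imm8 <= 0xff) {
      *rotate_imm = rot;
      *immed_8 = imm8;
      return true;
    }
  }
  return false;  // not expressible as a single data-processing immediate
}

// Decode: rotate the stored 8-bit value right by 2*rotate_imm, as DecodeShiftImm does.
uint32_t DecodeArmImmediate(int rotate_imm, uint32_t immed_8) {
  return RotR32(immed_8, 2 * rotate_imm);
}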

11
deps/v8/src/arm/assembler-arm.h

@ -45,7 +45,7 @@
#include "src/arm/constants-arm.h"
#include "src/assembler.h"
#include "src/serialize.h"
#include "src/compiler.h"
namespace v8 {
namespace internal {
@ -794,6 +794,11 @@ class Assembler : public AssemblerBase {
inline static void deserialization_set_special_target_at(
Address constant_pool_entry, Code* code, Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Here we are patching the address in the constant pool, not the actual call
// instruction. The address in the constant pool is the same size as a
// pointer.
@ -823,6 +828,8 @@ class Assembler : public AssemblerBase {
static const int kPcLoadDelta = 8;
static const int kJSReturnSequenceInstructions = 4;
static const int kJSReturnSequenceLength =
kJSReturnSequenceInstructions * kInstrSize;
static const int kDebugBreakSlotInstructions = 3;
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
@ -1400,7 +1407,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(const int reason, const int raw_position);
void RecordDeoptReason(const int reason, const SourcePosition position);
// Record the emission of a constant pool.
//

226
deps/v8/src/arm/builtins-arm.cc

@ -929,7 +929,9 @@ static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
// Push function as parameter to the runtime call.
__ Push(r1);
// Whether to compile in a background thread.
__ Push(masm->isolate()->factory()->ToBoolean(concurrent));
__ LoadRoot(
ip, concurrent ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
__ push(ip);
__ CallRuntime(Runtime::kCompileOptimized, 2);
// Restore receiver.
@ -1334,50 +1336,99 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kIndexOffset =
StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
const int kLimitOffset =
StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
const int kArgsOffset = 2 * kPointerSize;
const int kRecvOffset = 3 * kPointerSize;
const int kFunctionOffset = 4 * kPointerSize;
static void Generate_CheckStackOverflow(MacroAssembler* masm,
const int calleeOffset) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
__ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
// Make r2 the space we have left. The stack might already be overflowed
// here which will cause r2 to become negative.
__ sub(r2, sp, r2);
// Check if the arguments will overflow the stack.
__ cmp(r2, Operand::PointerOffsetFromSmiKey(r0));
__ b(gt, &okay); // Signed comparison.
// Out of stack space.
__ ldr(r1, MemOperand(fp, calleeOffset));
__ Push(r1, r0);
__ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
__ bind(&okay);
}
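For reference, the check that Generate_CheckStackOverflow emits boils down to the following plain C++ (a standalone illustration, not V8 code; the names are made up): the call can proceed only if the gap between sp and the "real" stack limit exceeds arg_count * pointer_size bytes, and the comparison is signed because the stack may already be past the limit.

#include <cstdint>

bool ArgumentsFitOnStack(uintptr_t sp, uintptr_t real_stack_limit,
                         intptr_t arg_count, intptr_t pointer_size) {
  // r2 <- sp - limit; may be negative if the stack is already overflowed.
  intptr_t space_left = static_cast<intptr_t>(sp) -
                        static_cast<intptr_t>(real_stack_limit);
  // The b(gt, &okay) above is this signed comparison.
  return space_left > arg_count * pointer_size;
}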
static void Generate_PushAppliedArguments(MacroAssembler* masm,
const int argumentsOffset,
const int indexOffset,
const int limitOffset) {
Label entry, loop;
__ ldr(r0, MemOperand(fp, indexOffset));
__ b(&entry);
// Load the current argument from the arguments array and push it to the
// stack.
// r0: current argument index
__ bind(&loop);
__ ldr(r1, MemOperand(fp, argumentsOffset));
__ Push(r1, r0);
// Call the runtime to access the property in the arguments array.
__ CallRuntime(Runtime::kGetProperty, 2);
__ push(r0);
// Use inline caching to access the arguments.
__ ldr(r0, MemOperand(fp, indexOffset));
__ add(r0, r0, Operand(1 << kSmiTagSize));
__ str(r0, MemOperand(fp, indexOffset));
// Test if the copy loop has finished copying all the elements from the
// arguments object.
__ bind(&entry);
__ ldr(r1, MemOperand(fp, limitOffset));
__ cmp(r0, r1);
__ b(ne, &loop);
// On exit, the pushed arguments count is in r0, untagged
__ SmiUntag(r0);
}
// Used by FunctionApply and ReflectApply
static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
const int kFormalParameters = targetIsArgument ? 3 : 2;
const int kStackSize = kFormalParameters + 1;
{
FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
const int kReceiverOffset = kArgumentsOffset + kPointerSize;
const int kFunctionOffset = kReceiverOffset + kPointerSize;
__ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
__ push(r0);
__ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array
__ ldr(r0, MemOperand(fp, kArgumentsOffset)); // get the args array
__ push(r0);
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
__ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
// Make r2 the space we have left. The stack might already be overflowed
// here which will cause r2 to become negative.
__ sub(r2, sp, r2);
// Check if the arguments will overflow the stack.
__ cmp(r2, Operand::PointerOffsetFromSmiKey(r0));
__ b(gt, &okay); // Signed comparison.
// Out of stack space.
__ ldr(r1, MemOperand(fp, kFunctionOffset));
__ Push(r1, r0);
__ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
// End of stack check.
if (targetIsArgument) {
__ InvokeBuiltin(Builtins::REFLECT_APPLY_PREPARE, CALL_FUNCTION);
} else {
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
}
Generate_CheckStackOverflow(masm, kFunctionOffset);
// Push current limit and index.
__ bind(&okay);
const int kIndexOffset =
StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
const int kLimitOffset =
StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
__ push(r0); // limit
__ mov(r1, Operand::Zero()); // initial index
__ push(r1);
// Get the receiver.
__ ldr(r0, MemOperand(fp, kRecvOffset));
__ ldr(r0, MemOperand(fp, kReceiverOffset));
// Check that the function is a JS function (otherwise it must be a proxy).
Label push_receiver;
@ -1434,44 +1485,19 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ push(r0);
// Copy all arguments from the array to the stack.
Label entry, loop;
__ ldr(r0, MemOperand(fp, kIndexOffset));
__ b(&entry);
// Load the current argument from the arguments array and push it to the
// stack.
// r0: current argument index
__ bind(&loop);
__ ldr(r1, MemOperand(fp, kArgsOffset));
__ Push(r1, r0);
// Call the runtime to access the property in the arguments array.
__ CallRuntime(Runtime::kGetProperty, 2);
__ push(r0);
// Use inline caching to access the arguments.
__ ldr(r0, MemOperand(fp, kIndexOffset));
__ add(r0, r0, Operand(1 << kSmiTagSize));
__ str(r0, MemOperand(fp, kIndexOffset));
// Test if the copy loop has finished copying all the elements from the
// arguments object.
__ bind(&entry);
__ ldr(r1, MemOperand(fp, kLimitOffset));
__ cmp(r0, r1);
__ b(ne, &loop);
Generate_PushAppliedArguments(
masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
// Call the function.
Label call_proxy;
ParameterCount actual(r0);
__ SmiUntag(r0);
__ ldr(r1, MemOperand(fp, kFunctionOffset));
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &call_proxy);
__ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper());
frame_scope.GenerateLeaveFrame();
__ add(sp, sp, Operand(3 * kPointerSize));
__ add(sp, sp, Operand(kStackSize * kPointerSize));
__ Jump(lr);
// Call the function proxy.
@ -1485,11 +1511,91 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Tear down the internal frame and remove function, receiver and args.
}
__ add(sp, sp, Operand(3 * kPointerSize));
__ add(sp, sp, Operand(kStackSize * kPointerSize));
__ Jump(lr);
}
static void Generate_ConstructHelper(MacroAssembler* masm) {
const int kFormalParameters = 3;
const int kStackSize = kFormalParameters + 1;
{
FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
const int kFunctionOffset = kArgumentsOffset + kPointerSize;
// If newTarget is not supplied, set it to constructor
Label validate_arguments;
__ ldr(r0, MemOperand(fp, kNewTargetOffset));
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(ne, &validate_arguments);
__ ldr(r0, MemOperand(fp, kFunctionOffset));
__ str(r0, MemOperand(fp, kNewTargetOffset));
// Validate arguments
__ bind(&validate_arguments);
__ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
__ push(r0);
__ ldr(r0, MemOperand(fp, kArgumentsOffset)); // get the args array
__ push(r0);
__ ldr(r0, MemOperand(fp, kNewTargetOffset)); // get the new.target
__ push(r0);
__ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION);
Generate_CheckStackOverflow(masm, kFunctionOffset);
// Push current limit and index.
const int kIndexOffset =
StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
const int kLimitOffset =
StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
__ push(r0); // limit
__ mov(r1, Operand::Zero()); // initial index
__ push(r1);
// Push newTarget and callee functions
__ ldr(r0, MemOperand(fp, kNewTargetOffset));
__ push(r0);
__ ldr(r0, MemOperand(fp, kFunctionOffset));
__ push(r0);
// Copy all arguments from the array to the stack.
Generate_PushAppliedArguments(
masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
// Use undefined feedback vector
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ ldr(r1, MemOperand(fp, kFunctionOffset));
// Call the function.
CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
__ Drop(1);
// Leave internal frame.
}
__ add(sp, sp, Operand(kStackSize * kPointerSize));
__ Jump(lr);
}
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
Generate_ApplyHelper(masm, false);
}
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
Generate_ApplyHelper(masm, true);
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Generate_ConstructHelper(masm);
}
static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
Label* stack_overflow) {
// ----------- S t a t e -------------

392
deps/v8/src/arm/code-stubs-arm.cc

@ -12,6 +12,7 @@
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
@ -1018,13 +1019,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ CompareRoot(r0, Heap::kExceptionRootIndex);
__ b(eq, &exception_returned);
ExternalReference pending_exception_address(
Isolate::kPendingExceptionAddress, isolate());
// Check that there is no pending exception, otherwise we
// should have returned the exception sentinel.
if (FLAG_debug_code) {
Label okay;
ExternalReference pending_exception_address(
Isolate::kPendingExceptionAddress, isolate());
__ mov(r2, Operand(pending_exception_address));
__ ldr(r2, MemOperand(r2));
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
@ -1045,25 +1045,53 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Handling of exception.
__ bind(&exception_returned);
// Retrieve the pending exception.
__ mov(r2, Operand(pending_exception_address));
__ ldr(r0, MemOperand(r2));
// Clear the pending exception.
__ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
__ str(r3, MemOperand(r2));
// Special handling of termination exceptions which are uncatchable
// by javascript code.
Label throw_termination_exception;
__ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
__ b(eq, &throw_termination_exception);
// Handle normal exception.
__ Throw(r0);
ExternalReference pending_handler_context_address(
Isolate::kPendingHandlerContextAddress, isolate());
ExternalReference pending_handler_code_address(
Isolate::kPendingHandlerCodeAddress, isolate());
ExternalReference pending_handler_offset_address(
Isolate::kPendingHandlerOffsetAddress, isolate());
ExternalReference pending_handler_fp_address(
Isolate::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
Isolate::kPendingHandlerSPAddress, isolate());
// Ask the runtime for help to determine the handler. This will set r0 to
// contain the current pending exception, don't clobber it.
ExternalReference find_handler(Runtime::kFindExceptionHandler, isolate());
{
FrameScope scope(masm, StackFrame::MANUAL);
__ PrepareCallCFunction(3, 0, r0);
__ mov(r0, Operand(0));
__ mov(r1, Operand(0));
__ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
__ CallCFunction(find_handler, 3);
}
__ bind(&throw_termination_exception);
__ ThrowUncatchable(r0);
// Retrieve the handler context, SP and FP.
__ mov(cp, Operand(pending_handler_context_address));
__ ldr(cp, MemOperand(cp));
__ mov(sp, Operand(pending_handler_sp_address));
__ ldr(sp, MemOperand(sp));
__ mov(fp, Operand(pending_handler_fp_address));
__ ldr(fp, MemOperand(fp));
// If the handler is a JS frame, restore the context to the frame. Note that
// the context will be set to (cp == 0) for non-JS frames.
__ cmp(cp, Operand(0));
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
// Compute the handler entry address and jump to it.
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ mov(r1, Operand(pending_handler_code_address));
__ ldr(r1, MemOperand(r1));
__ mov(r2, Operand(pending_handler_offset_address));
__ ldr(r2, MemOperand(r2));
if (FLAG_enable_ool_constant_pool) {
__ ldr(pp, FieldMemOperand(r1, Code::kConstantPoolOffset));
}
__ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ add(pc, r1, r2);
}
@ -1152,7 +1180,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
handler_offset_ = handler_entry.pos();
// Caught exception: Store result (exception) in the pending exception
// field in the JSEnv and return a failure sentinel. Coming in here the
// fp will be invalid because the PushTryHandler below sets it to 0 to
// fp will be invalid because the PushStackHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate())));
@ -1161,11 +1189,10 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ LoadRoot(r0, Heap::kExceptionRootIndex);
__ b(&exit);
// Invoke: Link this frame into the handler chain. There's only one
// handler block in this code object, so its index is 0.
// Invoke: Link this frame into the handler chain.
__ bind(&invoke);
// Must preserve r0-r4, r5-r6 are available.
__ PushTryHandler(StackHandler::JS_ENTRY, 0);
__ PushStackHandler();
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the bl(&invoke) above, which
// restores all kCalleeSaved registers (including cp and fp) to their
@ -1202,7 +1229,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Call(ip);
// Unlink this frame from the handler chain.
__ PopTryHandler();
__ PopStackHandler();
__ bind(&exit); // r0 holds result
// Check if the current stack frame is marked as the outermost JS frame.
@ -1479,7 +1506,7 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
__ Ret();
StubRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm, call_helper);
char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
@ -1819,8 +1846,12 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ bind(&adaptor_frame);
__ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
if (has_new_target()) {
__ cmp(r1, Operand(Smi::FromInt(0)));
Label skip_decrement;
__ b(eq, &skip_decrement);
// Subtract 1 from smi-tagged arguments count.
__ sub(r1, r1, Operand(2));
__ bind(&skip_decrement);
}
__ str(r1, MemOperand(sp, 0));
__ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
@ -1930,7 +1961,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@ -2200,18 +2231,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ cmp(r0, r1);
__ b(eq, &runtime);
__ str(r1, MemOperand(r2, 0)); // Clear pending exception.
// Check if the exception is a termination. If so, throw as uncatchable.
__ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
Label termination_exception;
__ b(eq, &termination_exception);
__ Throw(r0);
__ bind(&termination_exception);
__ ThrowUncatchable(r0);
// For exception, throw the exception again.
__ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
__ bind(&failure);
// For failure and exception return null.
@ -2306,7 +2327,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@ -2883,7 +2904,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
MacroAssembler* masm,
MacroAssembler* masm, EmbedMode embed_mode,
const RuntimeCallHelper& call_helper) {
__ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
@ -2896,8 +2917,13 @@ void StringCharCodeAtGenerator::GenerateSlow(
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
__ push(object_);
__ push(index_); // Consumed by runtime conversion function.
if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
__ Push(VectorLoadICDescriptor::VectorRegister(),
VectorLoadICDescriptor::SlotRegister(), object_, index_);
} else {
// index_ is consumed by runtime conversion function.
__ Push(object_, index_);
}
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
@ -2908,7 +2934,12 @@ void StringCharCodeAtGenerator::GenerateSlow(
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ Move(index_, r0);
__ pop(object_);
if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
__ Pop(VectorLoadICDescriptor::VectorRegister(),
VectorLoadICDescriptor::SlotRegister(), object_);
} else {
__ pop(object_);
}
// Reload the instance type.
__ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
@ -3221,7 +3252,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
__ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
__ bind(&single_char);
// r0: original string
@ -3408,7 +3439,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
__ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
}
@ -3689,7 +3720,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
__ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
}
__ bind(&miss);
@ -4298,15 +4329,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
VectorLoadStub stub(isolate(), state());
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
VectorRawLoadStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
VectorKeyedLoadStub stub(isolate());
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
VectorRawKeyedLoadStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
@ -4324,6 +4355,237 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
}
void VectorRawLoadStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
static void HandleArrayCases(MacroAssembler* masm, Register receiver,
Register key, Register vector, Register slot,
Register feedback, Register scratch1,
Register scratch2, Register scratch3,
bool is_polymorphic, Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label load_smi_map, compare_map;
Label start_polymorphic;
Register receiver_map = scratch1;
Register cached_map = scratch2;
// Receiver might not be a heap object.
__ JumpIfSmi(receiver, &load_smi_map);
__ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ bind(&compare_map);
__ ldr(cached_map,
FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
__ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
__ cmp(receiver_map, cached_map);
__ b(ne, &start_polymorphic);
// found, now call handler.
Register handler = feedback;
__ ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
__ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
Register length = scratch3;
__ bind(&start_polymorphic);
__ ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
if (!is_polymorphic) {
// If the IC could be monomorphic we have to make sure we don't go past the
// end of the feedback array.
__ cmp(length, Operand(Smi::FromInt(2)));
__ b(eq, miss);
}
Register too_far = length;
Register pointer_reg = feedback;
// +-----+------+------+-----+-----+ ... ----+
// | map | len | wm0 | h0 | wm1 | hN |
// +-----+------+------+-----+-----+ ... ----+
// 0 1 2 len-1
// ^ ^
// | |
// pointer_reg too_far
// aka feedback scratch3
// also need receiver_map (aka scratch1)
// use cached_map (scratch2) to look in the weak map values.
__ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(length));
__ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(pointer_reg, feedback,
Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
__ bind(&next_loop);
__ ldr(cached_map, MemOperand(pointer_reg));
__ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
__ cmp(receiver_map, cached_map);
__ b(ne, &prepare_next);
__ ldr(handler, MemOperand(pointer_reg, kPointerSize));
__ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
__ bind(&prepare_next);
__ add(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
__ cmp(pointer_reg, too_far);
__ b(lt, &next_loop);
// We exhausted our array of map handler pairs.
__ jmp(miss);
__ bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&compare_map);
}
static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
Register key, Register vector, Register slot,
Register weak_cell, Register scratch,
Label* miss) {
// feedback initially contains the feedback array
Label compare_smi_map;
Register receiver_map = scratch;
Register cached_map = weak_cell;
// Move the weak map into the weak_cell register.
__ ldr(cached_map, FieldMemOperand(weak_cell, WeakCell::kValueOffset));
// Receiver might not be a heap object.
__ JumpIfSmi(receiver, &compare_smi_map);
__ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ cmp(cached_map, receiver_map);
__ b(ne, miss);
Register handler = weak_cell;
__ add(handler, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(handler,
FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
__ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
// In microbenchmarks, it made sense to unroll this code so that the call to
// the handler is duplicated for a HeapObject receiver and a Smi receiver.
__ bind(&compare_smi_map);
__ CompareRoot(weak_cell, Heap::kHeapNumberMapRootIndex);
__ b(ne, miss);
__ add(handler, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(handler,
FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
__ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
}
void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // r1
Register name = VectorLoadICDescriptor::NameRegister(); // r2
Register vector = VectorLoadICDescriptor::VectorRegister(); // r3
Register slot = VectorLoadICDescriptor::SlotRegister(); // r0
Register feedback = r4;
Register scratch1 = r5;
__ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Is it a weak cell?
Label try_array;
Label not_array, smi_key, key_okay, miss;
__ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(scratch1, Heap::kWeakCellMapRootIndex);
__ b(ne, &try_array);
HandleMonomorphicCase(masm, receiver, name, vector, slot, feedback, scratch1,
&miss);
// Is it a fixed array?
__ bind(&try_array);
__ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
__ b(ne, &not_array);
HandleArrayCases(masm, receiver, name, vector, slot, feedback, scratch1, r8,
r9, true, &miss);
__ bind(&not_array);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ b(ne, &miss);
Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
false, receiver, name, feedback,
scratch1, r8, r9);
__ bind(&miss);
LoadIC::GenerateMiss(masm);
}
void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // r1
Register key = VectorLoadICDescriptor::NameRegister(); // r2
Register vector = VectorLoadICDescriptor::VectorRegister(); // r3
Register slot = VectorLoadICDescriptor::SlotRegister(); // r0
Register feedback = r4;
Register scratch1 = r5;
__ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Is it a weak cell?
Label try_array;
Label not_array, smi_key, key_okay, miss;
__ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(scratch1, Heap::kWeakCellMapRootIndex);
__ b(ne, &try_array);
HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, scratch1,
&miss);
__ bind(&try_array);
// Is it a fixed array?
__ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
__ b(ne, &not_array);
// We have a polymorphic element handler.
Label polymorphic, try_poly_name;
__ bind(&polymorphic);
HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, r8,
r9, true, &miss);
__ bind(&not_array);
// Is it generic?
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ b(ne, &try_poly_name);
Handle<Code> megamorphic_stub =
KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
// We might have a name in feedback, and a fixed array in the next slot.
__ cmp(key, feedback);
__ b(ne, &miss);
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
__ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, r8,
r9, false, &miss);
__ bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@ -4788,7 +5050,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
}
Label promote_scheduled_exception;
Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label return_value_loaded;
@ -4810,15 +5071,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ cmp(r5, ip);
__ b(ne, &delete_allocated_handles);
// Check if the function scheduled an exception.
// Leave the API exit frame.
__ bind(&leave_exit_frame);
__ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
__ mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate)));
__ ldr(r5, MemOperand(ip));
__ cmp(r4, r5);
__ b(ne, &promote_scheduled_exception);
__ bind(&exception_handled);
bool restore_context = context_restore_operand != NULL;
if (restore_context) {
__ ldr(cp, *context_restore_operand);
@ -4830,15 +5084,19 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ mov(r4, Operand(stack_space));
}
__ LeaveExitFrame(false, r4, !restore_context, stack_space_operand != NULL);
// Check if the function scheduled an exception.
__ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
__ mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate)));
__ ldr(r5, MemOperand(ip));
__ cmp(r4, r5);
__ b(ne, &promote_scheduled_exception);
__ mov(pc, lr);
// Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
{
FrameScope frame(masm, StackFrame::INTERNAL);
__ CallExternalReference(
ExternalReference(Runtime::kPromoteScheduledException, isolate), 0);
}
__ jmp(&exception_handled);
__ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);

13
deps/v8/src/arm/cpu-arm.cc

@ -45,6 +45,18 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
register uint32_t end asm("r1") = beg + size;
register uint32_t flg asm("r2") = 0;
#ifdef __clang__
// This variant of the asm avoids a constant pool entry, which can be
// problematic when LTO'ing. It is also slightly shorter.
register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
asm volatile("svc 0\n"
:
: "r"(beg), "r"(end), "r"(flg), "r"(scno)
: "memory");
#else
// Use a different variant of the asm with GCC because some versions don't
// support r7 as an asm input.
asm volatile(
// This assembly works for both ARM and Thumb targets.
@ -62,6 +74,7 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
: "r" (beg), "r" (end), "r" (flg), [scno] "i" (__ARM_NR_cacheflush)
: "memory");
#endif
#endif
}
} } // namespace v8::internal
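Both asm variants issue the same ARM-private cacheflush system call with the start address, the end address and a flags value of 0; the clang path materialises the syscall number in r7 as an input register, while the GCC variant takes it as the [scno] immediate. A libc-level sketch of the same call (ARM Linux only, not part of the diff):

#include <cstddef>
#include <cstdint>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/unistd.h>  // __ARM_NR_cacheflush (ARM Linux only)

void FlushICacheSketch(void* start, size_t size) {
  uintptr_t beg = reinterpret_cast<uintptr_t>(start);
  uintptr_t end = beg + size;
  // Same three arguments the inline asm passes in r0-r2.
  syscall(__ARM_NR_cacheflush, beg, end, 0);
}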

42
deps/v8/src/arm/debug-arm.cc

@ -12,12 +12,7 @@
namespace v8 {
namespace internal {
bool BreakLocationIterator::IsDebugBreakAtReturn() {
return Debug::IsDebugBreakAtReturn(rinfo());
}
void BreakLocationIterator::SetDebugBreakAtReturn() {
void BreakLocation::SetDebugBreakAtReturn() {
// Patch the code changing the return from JS function sequence from
// mov sp, fp
// ldmia sp!, {fp, lr}
@ -28,7 +23,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
// blx ip
// <debug break return code entry point address>
// bkpt 0
CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
CodePatcher patcher(pc(), Assembler::kJSReturnSequenceInstructions);
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
patcher.Emit(
@ -37,29 +32,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
}
// Restore the JS frame exit code.
void BreakLocationIterator::ClearDebugBreakAtReturn() {
rinfo()->PatchCode(original_rinfo()->pc(),
Assembler::kJSReturnSequenceInstructions);
}
// A debug break in the frame exit code is identified by the JS frame exit code
// having been patched with a call instruction.
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
return rinfo->IsPatchedReturnSequence();
}
bool BreakLocationIterator::IsDebugBreakAtSlot() {
DCHECK(IsDebugBreakSlot());
// Check whether the debug break slot instructions have been patched.
return rinfo()->IsPatchedDebugBreakSlotSequence();
}
void BreakLocationIterator::SetDebugBreakAtSlot() {
void BreakLocation::SetDebugBreakAtSlot() {
DCHECK(IsDebugBreakSlot());
// Patch the code changing the debug break slot code from
// mov r2, r2
@ -69,7 +42,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
// ldr ip, [pc, #0]
// blx ip
// <debug break slot code entry point address>
CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
CodePatcher patcher(pc(), Assembler::kDebugBreakSlotInstructions);
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
patcher.Emit(
@ -77,13 +50,6 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
}
void BreakLocationIterator::ClearDebugBreakAtSlot() {
DCHECK(IsDebugBreakSlot());
rinfo()->PatchCode(original_rinfo()->pc(),
Assembler::kDebugBreakSlotInstructions);
}
#define __ ACCESS_MASM(masm)

2
deps/v8/src/arm/deoptimizer-arm.cc

@ -135,7 +135,7 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Deoptimizer::EntryGenerator::Generate() {
void Deoptimizer::TableEntryGenerator::Generate() {
GeneratePrologue();
// Save all general purpose registers before messing with them.

3
deps/v8/src/arm/disasm-arm.cc

@ -33,6 +33,7 @@
#if V8_TARGET_ARCH_ARM
#include "src/arm/constants-arm.h"
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
@ -226,7 +227,7 @@ void Decoder::PrintShiftRm(Instruction* instr) {
void Decoder::PrintShiftImm(Instruction* instr) {
int rotate = instr->RotateValue() * 2;
int immed8 = instr->Immed8Value();
int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
int imm = base::bits::RotateRight32(immed8, rotate);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "#%d", imm);
}

5
deps/v8/src/arm/frames-arm.h

@ -152,11 +152,6 @@ inline Object* JavaScriptFrame::function_slot_object() const {
}
inline void StackHandler::SetFp(Address slot, Address fp) {
Memory::Address_at(slot) = fp;
}
} } // namespace v8::internal
#endif // V8_ARM_FRAMES_ARM_H_

215
deps/v8/src/arm/full-codegen-arm.cc

@ -107,7 +107,8 @@ class JumpPatchSite BASE_EMBEDDED {
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
HandlerTable::LengthForRange(function()->handler_count()), TENURED));
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
@ -195,7 +196,7 @@ void FullCodeGenerator::Generate() {
// Argument to NewContext is the function, which is still in r1.
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
if (info->scope()->is_script_scope()) {
__ push(r1);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
@ -240,6 +241,11 @@ void FullCodeGenerator::Generate() {
}
}
ArgumentsAccessStub::HasNewTarget has_new_target =
IsSubclassConstructor(info->function()->kind())
? ArgumentsAccessStub::HAS_NEW_TARGET
: ArgumentsAccessStub::NO_NEW_TARGET;
// Possibly allocate RestParameters
int rest_index;
Variable* rest_param = scope()->rest_parameter(&rest_index);
@ -248,6 +254,11 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
if (has_new_target == ArgumentsAccessStub::HAS_NEW_TARGET) {
--num_parameters;
++rest_index;
}
__ add(r3, fp, Operand(StandardFrameConstants::kCallerSPOffset + offset));
__ mov(r2, Operand(Smi::FromInt(num_parameters)));
__ mov(r1, Operand(Smi::FromInt(rest_index)));
@ -281,10 +292,6 @@ void FullCodeGenerator::Generate() {
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::HasNewTarget has_new_target =
IsSubclassConstructor(info->function()->kind())
? ArgumentsAccessStub::HAS_NEW_TARGET
: ArgumentsAccessStub::NO_NEW_TARGET;
ArgumentsAccessStub::Type type;
if (is_strict(language_mode()) || !is_simple_parameter_list()) {
type = ArgumentsAccessStub::NEW_STRICT;
@ -1529,7 +1536,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
}
CallLoadIC(CONTEXTUAL);
CallGlobalLoadIC(var->name());
context()->Plug(r0);
break;
}
@ -2177,7 +2184,6 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
__ bind(&l_catch);
handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
__ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw"
__ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
__ Push(load_name, r3, r0); // "throw", iter, except
@ -2188,16 +2194,17 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// re-boxing.
__ bind(&l_try);
__ pop(r0); // result
__ PushTryHandler(StackHandler::CATCH, expr->index());
const int handler_size = StackHandlerConstants::kSize;
EnterTryBlock(expr->index(), &l_catch);
const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ push(r0); // result
__ jmp(&l_suspend);
__ bind(&l_continuation);
__ jmp(&l_resume);
__ bind(&l_suspend);
const int generator_object_depth = kPointerSize + handler_size;
const int generator_object_depth = kPointerSize + try_block_size;
__ ldr(r0, MemOperand(sp, generator_object_depth));
__ push(r0); // g
__ Push(Smi::FromInt(expr->index())); // handler-index
DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ mov(r1, Operand(Smi::FromInt(l_continuation.pos())));
__ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
@ -2205,12 +2212,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(r1, cp);
__ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
kLRHasBeenSaved, kDontSaveFPRegs);
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ pop(r0); // result
EmitReturnSequence();
__ bind(&l_resume); // received in r0
__ PopTryHandler();
ExitTryBlock(expr->index());
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
@ -2570,6 +2577,16 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
__ push(scratch);
EmitPropertyKey(property, lit->GetIdForProperty(i));
// The static prototype property is read only. We handle the non computed
// property name case in the parser. Since this is the only case where we
// need to check for an own read only property we special case this so we do
// not need to do this for every property.
if (property->is_static() && property->is_computed_name()) {
__ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
__ push(r0);
}
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
@ -2713,25 +2730,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
__ ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
CallStoreIC();
} else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ push(r0);
__ mov(r0, Operand(var->name()));
__ Push(cp, r0); // Context and name.
__ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
} else {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label skip;
MemOperand location = VarOperand(var, r1);
__ ldr(r2, location);
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
__ b(ne, &skip);
EmitStoreToStackLocalOrContextSlot(var, location);
__ bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@ -2748,6 +2746,21 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
} else if (var->mode() == CONST && op != Token::INIT_CONST) {
// Assignment to const variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label const_error;
MemOperand location = VarOperand(var, r1);
__ ldr(r3, location);
__ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
__ b(ne, &const_error);
__ mov(r3, Operand(var->name()));
__ push(r3);
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&const_error);
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
// Assignment to var.
@ -2769,8 +2782,33 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
} else if (IsSignallingAssignmentToConst(var, op, language_mode())) {
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
} else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
DCHECK(var->mode() == CONST_LEGACY);
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ push(r0);
__ mov(r0, Operand(var->name()));
__ Push(cp, r0); // Context and name.
__ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
} else {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label skip;
MemOperand location = VarOperand(var, r1);
__ ldr(r2, location);
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
__ b(ne, &skip);
EmitStoreToStackLocalOrContextSlot(var, location);
__ bind(&skip);
}
} else {
DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
if (is_strict(language_mode())) {
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
}
// Silently ignore store in sloppy mode.
}
}
@ -2900,7 +2938,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
__ Push(isolate()->factory()->undefined_value());
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ push(ip);
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@ -3260,7 +3299,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
if (!ValidateSuperCall(expr)) return;
Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
GetVar(result_register(), new_target_var);
__ Push(result_register());
@ -3764,8 +3802,9 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
__ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
__ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
Register instance_type = r2;
__ GetMapConstructor(r0, r0, r1, instance_type);
__ cmp(instance_type, Operand(JS_FUNCTION_TYPE));
__ b(ne, &non_function_constructor);
// r0 now contains the constructor function. Grab the
@ -4062,7 +4101,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
__ jmp(&done);
NopRuntimeCallHelper call_helper;
generator.GenerateSlow(masm_, call_helper);
generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
__ bind(&done);
context()->Plug(result);
@ -4109,7 +4148,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
__ jmp(&done);
NopRuntimeCallHelper call_helper;
generator.GenerateSlow(masm_, call_helper);
generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
__ bind(&done);
context()->Plug(result);
@ -4284,7 +4323,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
__ bind(&not_found);
// Call runtime to perform the lookup.
__ Push(cache, key);
__ CallRuntime(Runtime::kGetFromCache, 2);
__ CallRuntime(Runtime::kGetFromCacheRT, 2);
__ bind(&done);
context()->Plug(r0);
@ -4570,18 +4609,11 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->function() != NULL &&
expr->function()->intrinsic_type == Runtime::INLINE) {
Comment cmnt(masm_, "[ InlineRuntimeCall");
EmitInlineRuntimeCall(expr);
return;
}
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
if (expr->is_jsruntime()) {
Comment cmnt(masm_, "[ CallRuntime");
// Push the builtins object as the receiver.
Register receiver = LoadDescriptor::ReceiverRegister();
__ ldr(receiver, GlobalObjectOperand());
@ -4604,7 +4636,6 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ str(r0, MemOperand(sp, kPointerSize));
// Push the arguments ("left-to-right").
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
@ -4619,15 +4650,29 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, r0);
} else {
// Push the arguments ("left-to-right").
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
const Runtime::Function* function = expr->function();
switch (function->function_id) {
#define CALL_INTRINSIC_GENERATOR(Name) \
case Runtime::kInline##Name: { \
Comment cmnt(masm_, "[ Inline" #Name); \
return Emit##Name(expr); \
}
FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
#undef CALL_INTRINSIC_GENERATOR
default: {
Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
// Push the arguments ("left-to-right").
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
context()->Plug(r0);
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
context()->Plug(r0);
}
}
}
}
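For reference, a minimal, self-contained sketch of the X-macro dispatch pattern introduced in the hunk above; FOR_EACH_EXAMPLE_INTRINSIC, the ids, and the printf bodies are invented stand-ins for FOR_EACH_FULL_CODE_INTRINSIC and the Emit##Name generators, not the real V8 definitions.

#include <cstdio>

#define FOR_EACH_EXAMPLE_INTRINSIC(V) \
  V(IsSmi)                            \
  V(ClassOf)

enum FunctionId {
#define DEFINE_ID(Name) kInline##Name,
  FOR_EACH_EXAMPLE_INTRINSIC(DEFINE_ID)
#undef DEFINE_ID
  kOther
};

void Dispatch(FunctionId id) {
  switch (id) {
#define CALL_INTRINSIC_GENERATOR(Name) \
  case kInline##Name:                  \
    std::printf("Emit%s\n", #Name);    \
    return;
    FOR_EACH_EXAMPLE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
#undef CALL_INTRINSIC_GENERATOR
    default:
      std::printf("push args, then CallRuntime\n");  // unhandled intrinsics
  }
}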
@ -5273,40 +5318,12 @@ void FullCodeGenerator::EnterFinallyBlock() {
__ mov(ip, Operand(pending_message_obj));
__ ldr(r1, MemOperand(ip));
__ push(r1);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ mov(ip, Operand(has_pending_message));
STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof)
__ ldrb(r1, MemOperand(ip));
__ SmiTag(r1);
__ push(r1);
ExternalReference pending_message_script =
ExternalReference::address_of_pending_message_script(isolate());
__ mov(ip, Operand(pending_message_script));
__ ldr(r1, MemOperand(ip));
__ push(r1);
}
void FullCodeGenerator::ExitFinallyBlock() {
DCHECK(!result_register().is(r1));
// Restore pending message from stack.
__ pop(r1);
ExternalReference pending_message_script =
ExternalReference::address_of_pending_message_script(isolate());
__ mov(ip, Operand(pending_message_script));
__ str(r1, MemOperand(ip));
__ pop(r1);
__ SmiUntag(r1);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ mov(ip, Operand(has_pending_message));
STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof)
__ strb(r1, MemOperand(ip));
__ pop(r1);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
@ -5323,34 +5340,6 @@ void FullCodeGenerator::ExitFinallyBlock() {
}
#undef __
#define __ ACCESS_MASM(masm())
FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
int* stack_depth,
int* context_length) {
// The macros used here must preserve the result register.
// Because the handler block contains the context of the finally
// code, we can restore it directly from there for the finally code
// rather than iteratively unwinding contexts via their previous
// links.
__ Drop(*stack_depth); // Down to the handler block.
if (*context_length > 0) {
// Restore the context to its dedicated register and the stack.
__ ldr(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
__ PopTryHandler();
__ bl(finally_entry_);
*stack_depth = 0;
*context_length = 0;
return previous_;
}
#undef __

6
deps/v8/src/arm/interface-descriptors-arm.cc

@ -227,6 +227,12 @@ void InternalArrayConstructorDescriptor::Initialize(
}
void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r1, r0};
data->Initialize(arraysize(registers), registers, NULL);
}
void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r0};
data->Initialize(arraysize(registers), registers, NULL);

18
deps/v8/src/arm/lithium-arm.cc

@ -2140,14 +2140,6 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
}
LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
return instr->RequiresHoleCheck()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object =
@ -2162,16 +2154,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LOperand* value = UseRegister(instr->value());
// Use a temp to check the value in the cell in the case where we perform
// a hole check.
return instr->RequiresHoleCheck()
? AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister()))
: new(zone()) LStoreGlobalCell(value, NULL);
}
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =

24
deps/v8/src/arm/lithium-arm.h

@ -100,7 +100,6 @@ class LCodeGen;
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
@ -142,7 +141,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
V(StoreGlobalCell) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@ -1704,13 +1702,6 @@ class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
};
class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
@ -1732,21 +1723,6 @@ class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
};
class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 1> {
public:
LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
};
class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {

145
deps/v8/src/arm/lithium-codegen-arm.cc

@ -9,6 +9,7 @@
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
@ -119,7 +120,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
if (graph()->this_has_uses() && is_sloppy(info_->language_mode()) &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
@ -345,54 +346,40 @@ bool LCodeGen::GenerateJumpTable() {
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
if (needs_frame.is_bound()) {
__ b(&needs_frame);
} else {
__ bind(&needs_frame);
Comment(";;; call deopt with frame");
__ PushFixedFrame();
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
DCHECK(info()->IsStub());
__ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
__ push(ip);
__ add(fp, sp,
Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
__ bind(&call_deopt_entry);
// Add the base address to the offset previously loaded in
// entry_offset.
__ add(entry_offset, entry_offset,
Operand(ExternalReference::ForDeoptEntry(base)));
__ blx(entry_offset);
}
masm()->CheckConstPool(false, false);
Comment(";;; call deopt with frame");
__ PushFixedFrame();
__ bl(&needs_frame);
} else {
// The last entry can fall through into `call_deopt_entry`, avoiding a
// branch.
bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound();
if (need_branch) __ b(&call_deopt_entry);
masm()->CheckConstPool(false, !need_branch);
__ bl(&call_deopt_entry);
}
info()->LogDeoptCallPosition(masm()->pc_offset(),
table_entry->deopt_info.inlining_id);
masm()->CheckConstPool(false, false);
}
if (!call_deopt_entry.is_bound()) {
Comment(";;; call deopt");
__ bind(&call_deopt_entry);
if (needs_frame.is_linked()) {
__ bind(&needs_frame);
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
DCHECK(info()->IsStub());
__ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
__ push(ip);
__ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
if (info()->saves_caller_doubles()) {
DCHECK(info()->IsStub());
RestoreCallerDoubles();
}
Comment(";;; call deopt");
__ bind(&call_deopt_entry);
// Add the base address to the offset previously loaded in entry_offset.
__ add(entry_offset, entry_offset,
Operand(ExternalReference::ForDeoptEntry(base)));
__ blx(entry_offset);
if (info()->saves_caller_doubles()) {
DCHECK(info()->IsStub());
RestoreCallerDoubles();
}
// Add the base address to the offset previously loaded in entry_offset.
__ add(entry_offset, entry_offset,
Operand(ExternalReference::ForDeoptEntry(base)));
__ bx(entry_offset);
}
// Force constant pool emission at the end of the deopt jump table to make
@ -893,8 +880,8 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
__ stop("trap_on_deopt", condition);
}
Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
instr->Mnemonic(), deopt_reason);
Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
@ -902,6 +889,7 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
!info()->saves_caller_doubles()) {
DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
@ -2729,10 +2717,11 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
__ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
Register instance_type = ip;
__ GetMapConstructor(temp, temp, temp2, instance_type);
// Objects with a non-function constructor have class 'Object'.
__ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
__ cmp(instance_type, Operand(JS_FUNCTION_TYPE));
if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
__ b(ne, is_true);
} else {
@ -2838,8 +2827,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// root array to force relocation to be able to later patch with
// the cached map.
Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
__ mov(ip, Operand(Handle<Object>(cell)));
__ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
__ mov(ip, Operand(cell));
__ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
__ cmp(map, Operand(ip));
__ b(ne, &cache_miss);
__ bind(deferred->load_bool()); // Label for calculating code patching.
@ -2993,18 +2982,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
__ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
__ ldr(result, FieldMemOperand(ip, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
DeoptimizeIf(eq, instr, Deoptimizer::kHole);
}
}
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
@ -3034,36 +3011,12 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode).code();
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->value());
Register cell = scratch0();
// Load the cell.
__ mov(cell, Operand(instr->hydrogen()->cell().handle()));
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted.
if (instr->hydrogen()->RequiresHoleCheck()) {
// We use a temp to check the payload (CompareRoot might clobber ip).
Register payload = ToRegister(instr->temp());
__ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(eq, instr, Deoptimizer::kHole);
}
// Store the value.
__ str(value, FieldMemOperand(cell, Cell::kValueOffset));
// Cells are always rescanned, so no write barrier here.
}
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@ -3156,8 +3109,9 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
if (FLAG_vector_ics) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
}
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(isolate(), NOT_CONTEXTUAL).code();
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
isolate(), NOT_CONTEXTUAL,
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@ -3448,7 +3402,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
Handle<Code> ic =
CodeFactory::KeyedLoadICInOptimizedCode(
isolate(), instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@ -4309,7 +4265,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
__ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
Handle<Code> ic =
StoreIC::initialize_stub(isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state());
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@ -4530,8 +4488,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@ -5239,7 +5198,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
if (isolate()->heap()->InNewSpace(*object)) {
Register reg = ToRegister(instr->value());
Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ mov(ip, Operand(Handle<Object>(cell)));
__ mov(ip, Operand(cell));
__ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
__ cmp(reg, ip);
} else {

139
deps/v8/src/arm/macro-assembler-arm.cc

@ -439,6 +439,7 @@ void MacroAssembler::LoadRoot(Register destination,
void MacroAssembler::StoreRoot(Register source,
Heap::RootListIndex index,
Condition cond) {
DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}
@ -1395,44 +1396,22 @@ void MacroAssembler::DebugBreak() {
}
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
int handler_index) {
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// For the JSEntry handler, we must preserve r0-r4, r5-r6 are available.
// We will build up the handler from the bottom by pushing on the stack.
// Set up the code object (r5) and the state (r6) for pushing.
unsigned state =
StackHandler::IndexField::encode(handler_index) |
StackHandler::KindField::encode(kind);
mov(r5, Operand(CodeObject()));
mov(r6, Operand(state));
// Push the frame pointer, context, state, and code object.
if (kind == StackHandler::JS_ENTRY) {
mov(cp, Operand(Smi::FromInt(0))); // Indicates no context.
mov(ip, Operand::Zero()); // NULL frame pointer.
stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | ip.bit());
} else {
stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
}
// Link the current handler as the next handler.
mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
ldr(r5, MemOperand(r6));
push(r5);
// Set this new handler as the current one.
str(sp, MemOperand(r6));
}
void MacroAssembler::PopTryHandler() {
void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(r1);
mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
@ -1441,98 +1420,6 @@ void MacroAssembler::PopTryHandler() {
}
void MacroAssembler::JumpToHandlerEntry() {
// Compute the handler entry address and jump to it. The handler table is
// a fixed array of (smi-tagged) code offsets.
// r0 = exception, r1 = code object, r2 = state.
ConstantPoolUnavailableScope constant_pool_unavailable(this);
if (FLAG_enable_ool_constant_pool) {
ldr(pp, FieldMemOperand(r1, Code::kConstantPoolOffset)); // Constant pool.
}
ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table.
add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index.
ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); // Smi-tagged offset.
add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
add(pc, r1, Operand::SmiUntag(r2)); // Jump
}
void MacroAssembler::Throw(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// The exception is expected in r0.
if (!value.is(r0)) {
mov(r0, value);
}
// Drop the stack pointer to the top of the top handler.
mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
ldr(sp, MemOperand(r3));
// Restore the next handler.
pop(r2);
str(r2, MemOperand(r3));
// Get the code object (r1) and state (r2). Restore the context and frame
// pointer.
ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
// If the handler is a JS frame, restore the context to the frame.
// (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
// or cp.
tst(cp, cp);
str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
JumpToHandlerEntry();
}
void MacroAssembler::ThrowUncatchable(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// The exception is expected in r0.
if (!value.is(r0)) {
mov(r0, value);
}
// Drop the stack pointer to the top of the top stack handler.
mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
ldr(sp, MemOperand(r3));
// Unwind the handlers until the ENTRY handler is found.
Label fetch_next, check_kind;
jmp(&check_kind);
bind(&fetch_next);
ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
bind(&check_kind);
STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
tst(r2, Operand(StackHandler::KindField::kMask));
b(ne, &fetch_next);
// Set the top handler address to next handler past the top ENTRY handler.
pop(r2);
str(r2, MemOperand(r3));
// Get the code object (r1) and state (r2). Clear the context and frame
// pointer (0 was saved in the handler).
ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
JumpToHandlerEntry();
}
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss) {
@ -2292,6 +2179,20 @@ void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
}
void MacroAssembler::GetMapConstructor(Register result, Register map,
Register temp, Register temp2) {
Label done, loop;
ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
bind(&loop);
JumpIfSmi(result, &done);
CompareObjectType(result, temp, temp2, MAP_TYPE);
b(ne, &done);
ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
b(&loop);
bind(&done);
}
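Illustrative only: the loop above restated on a toy object model (the real V8 Map/Object types and field accessors are not reproduced here). A map's constructor-or-back-pointer slot holds either the constructor or another map; back pointers are followed until something that is not a map is found.

struct ToyHeapObject {
  bool is_map;                                  // stands in for the MAP_TYPE check
  ToyHeapObject* constructor_or_back_pointer;   // Smi sentinel case elided
};

ToyHeapObject* GetMapConstructorSketch(ToyHeapObject* map) {
  ToyHeapObject* result = map->constructor_or_back_pointer;
  while (result != nullptr && result->is_map) {
    result = result->constructor_or_back_pointer;
  }
  return result;
}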
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
@ -2345,7 +2246,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// Non-instance prototype: Fetch prototype from constructor field
// in initial map.
bind(&non_instance);
ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
GetMapConstructor(result, result, scratch, ip);
}
// All done.

24
deps/v8/src/arm/macro-assembler-arm.h

@ -643,19 +643,12 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Exception handling
// Push a new try handler and link into try handler chain.
void PushTryHandler(StackHandler::Kind kind, int handler_index);
// Push a new stack handler and link into stack handler chain.
void PushStackHandler();
// Unlink the stack handler on top of the stack from the try handler chain.
// Unlink the stack handler on top of the stack from the stack handler chain.
// Must preserve the result register.
void PopTryHandler();
// Passes thrown value to the handler of top of the try handler chain.
void Throw(Register value);
// Propagates an uncatchable exception to the top of the current JS stack's
// handler chain.
void ThrowUncatchable(Register value);
void PopStackHandler();
// ---------------------------------------------------------------------------
// Inline caching support
@ -811,6 +804,11 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Support functions.
// Machine code version of Map::GetConstructor().
// |temp| holds |result|'s map when done, and |temp2| its instance type.
void GetMapConstructor(Register result, Register map, Register temp,
Register temp2);
// Try to get the function prototype of a function and put the value in
// the result register. Checks that the function really is a
// function and jumps to the miss label if the fast checks fail. The
@ -1464,10 +1462,6 @@ class MacroAssembler: public Assembler {
Register bitmap_reg,
Register mask_reg);
// Helper for throwing exceptions. Compute a handler address and jump to
// it. See the implementation for register usage.
void JumpToHandlerEntry();
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg);

3
deps/v8/src/arm/simulator-arm.cc

@ -13,6 +13,7 @@
#include "src/arm/constants-arm.h"
#include "src/arm/simulator-arm.h"
#include "src/assembler.h"
#include "src/base/bits.h"
#include "src/codegen.h"
#include "src/disasm.h"
@ -1506,7 +1507,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
int32_t Simulator::GetImm(Instruction* instr, bool* carry_out) {
int rotate = instr->RotateValue() * 2;
int immed8 = instr->Immed8Value();
int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
int imm = base::bits::RotateRight32(immed8, rotate);
*carry_out = (rotate == 0) ? c_flag_ : (imm < 0);
return imm;
}
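For context, a minimal sketch of the rotate-right the new call relies on; the helper itself comes from the src/base/bits.h include added above, and its exact signature is assumed here rather than quoted.

#include <cstdint>

inline uint32_t RotateRight32(uint32_t value, uint32_t shift) {
  if (shift == 0) return value;  // avoids the undefined shift by 32
  return (value >> shift) | (value << (32 - shift));
}

// e.g. RotateRight32(0xFFu, 8) == 0xFF000000u, mirroring the old expression
// (immed8 >> rotate) | (immed8 << (32 - rotate)) whenever rotate != 0.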

46
deps/v8/src/arm64/assembler-arm64-inl.h

@ -18,7 +18,12 @@ bool CpuFeatures::SupportsCrankshaft() { return true; }
void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
UNIMPLEMENTED();
// On arm64 only internal references need extra work.
DCHECK(RelocInfo::IsInternalReference(rmode_));
// Absolute code pointer inside code object moves with the code object.
intptr_t* p = reinterpret_cast<intptr_t*>(pc_);
*p += delta; // Relocate entry.
}
@ -654,6 +659,12 @@ void Assembler::deserialization_set_special_target_at(
}
void Assembler::deserialization_set_target_internal_reference_at(
Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
void Assembler::set_target_address_at(Address pc,
ConstantPoolArray* constant_pool,
Address target,
@ -733,12 +744,24 @@ void RelocInfo::set_target_object(Object* target,
}
Address RelocInfo::target_reference() {
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_internal_reference() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
return Memory::Address_at(pc_);
}
Address RelocInfo::target_internal_reference_address() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
return reinterpret_cast<Address>(pc_);
}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
@ -826,11 +849,14 @@ void RelocInfo::set_call_address(Address target) {
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) ||
IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) ||
IsExternalReference(rmode_));
Assembler::set_target_address_at(pc_, host_, NULL);
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_));
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = NULL;
} else {
Assembler::set_target_address_at(pc_, host_, NULL);
}
}
@ -838,7 +864,7 @@ bool RelocInfo::IsPatchedReturnSequence() {
// The sequence must be:
// ldr ip0, [pc, #offset]
// blr ip0
// See arm64/debug-arm64.cc BreakLocationIterator::SetDebugBreakAtReturn().
// See arm64/debug-arm64.cc BreakLocation::SetDebugBreakAtReturn().
Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
Instruction* i2 = i1->following();
return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
@ -862,6 +888,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
visitor->VisitInternalReference(this);
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
@ -885,6 +913,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
StaticVisitor::VisitInternalReference(this);
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||

84
deps/v8/src/arm64/assembler-arm64.cc

@ -171,7 +171,7 @@ CPURegList CPURegList::GetSafepointSavedRegisters() {
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
const int RelocInfo::kApplyMask = 0;
const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
bool RelocInfo::IsCodedSpecially() {
@ -188,26 +188,6 @@ bool RelocInfo::IsInConstantPool() {
}
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// Patch the code at the current address with the supplied instructions.
Instr* pc = reinterpret_cast<Instr*>(pc_);
Instr* instr = reinterpret_cast<Instr*>(instructions);
for (int i = 0; i < instruction_count; i++) {
*(pc + i) = *(instr + i);
}
// Indicate that code has changed.
CpuFeatures::FlushICache(pc_, instruction_count * kInstructionSize);
}
// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
UNIMPLEMENTED();
}
Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
Register reg3, Register reg4) {
CPURegList regs(reg1, reg2, reg3, reg4);
@ -752,7 +732,15 @@ void Assembler::bind(Label* label) {
DCHECK(prevlinkoffset >= 0);
// Update the link to point to the label.
link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
if (link->IsUnresolvedInternalReference()) {
// Internal references do not get patched to an instruction but directly
// to an address.
internal_reference_positions_.push_back(linkoffset);
PatchingAssembler patcher(link, 2);
patcher.dc64(reinterpret_cast<uintptr_t>(pc_));
} else {
link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
}
// Link the label to the previous link in the chain.
if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
@ -2080,6 +2068,50 @@ void Assembler::ucvtf(const FPRegister& fd,
}
void Assembler::dcptr(Label* label) {
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
if (label->is_bound()) {
// The label is bound, so it does not need to be updated and the internal
// reference should be emitted.
//
// In this case, label->pos() returns the offset of the label from the
// start of the buffer.
internal_reference_positions_.push_back(pc_offset());
dc64(reinterpret_cast<uintptr_t>(buffer_ + label->pos()));
} else {
int32_t offset;
if (label->is_linked()) {
// The label is linked, so the internal reference should be added
// onto the end of the label's link chain.
//
// In this case, label->pos() returns the offset of the last linked
// instruction from the start of the buffer.
offset = label->pos() - pc_offset();
DCHECK(offset != kStartOfLabelLinkChain);
} else {
// The label is unused, so it now becomes linked and the internal
// reference is at the start of the new link chain.
offset = kStartOfLabelLinkChain;
}
// The instruction at pc is now the last link in the label's chain.
label->link_to(pc_offset());
// Traditionally the offset to the previous instruction in the chain is
// encoded in the instruction payload (e.g. branch range), but internal
// references are not instructions, so while unbound they are encoded as
// two consecutive brk instructions. The two 16-bit immediates are used
// to encode the offset.
offset >>= kInstructionSizeLog2;
DCHECK(is_int32(offset));
uint32_t high16 = unsigned_bitextract_32(31, 16, offset);
uint32_t low16 = unsigned_bitextract_32(15, 0, offset);
brk(high16);
brk(low16);
}
}
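Self-contained sketch of the 16-bit split described in the comments above: an (instruction-count) offset is broken into the two halves that the paired brk immediates would carry, and can be reassembled without loss. The values are made up; V8's unsigned_bitextract_32 helper is not reproduced.

#include <cassert>
#include <cstdint>

int main() {
  int32_t offset = -24;  // hypothetical backwards link offset, in instructions
  uint32_t bits = static_cast<uint32_t>(offset);
  uint32_t high16 = (bits >> 16) & 0xFFFF;  // immediate of the first brk
  uint32_t low16 = bits & 0xFFFF;           // immediate of the second brk
  int32_t restored = static_cast<int32_t>((high16 << 16) | low16);
  assert(restored == offset);
  return 0;
}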
// Note:
// Below, a difference in case for the same letter indicates a
// negated bit.
@ -2839,6 +2871,12 @@ void Assembler::GrowBuffer() {
// buffer nor pc absolute pointing inside the code buffer, so there is no need
// to relocate any emitted relocation entries.
// Relocate internal references.
for (auto pos : internal_reference_positions_) {
intptr_t* p = reinterpret_cast<intptr_t*>(buffer_ + pos);
*p += pc_delta;
}
// Pending relocation entries are also relative, no need to relocate.
}
@ -2848,6 +2886,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
if (((rmode >= RelocInfo::JS_RETURN) &&
(rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
(rmode == RelocInfo::INTERNAL_REFERENCE) ||
(rmode == RelocInfo::CONST_POOL) ||
(rmode == RelocInfo::VENEER_POOL) ||
(rmode == RelocInfo::DEOPT_REASON)) {
@ -2857,6 +2896,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
|| RelocInfo::IsComment(rmode)
|| RelocInfo::IsDeoptReason(rmode)
|| RelocInfo::IsPosition(rmode)
|| RelocInfo::IsInternalReference(rmode)
|| RelocInfo::IsConstPool(rmode)
|| RelocInfo::IsVeneerPool(rmode));
// These modes do not need an entry in the constant pool.

23
deps/v8/src/arm64/assembler-arm64.h

@ -5,14 +5,15 @@
#ifndef V8_ARM64_ASSEMBLER_ARM64_H_
#define V8_ARM64_ASSEMBLER_ARM64_H_
#include <deque>
#include <list>
#include <map>
#include <vector>
#include "src/arm64/instructions-arm64.h"
#include "src/assembler.h"
#include "src/compiler.h"
#include "src/globals.h"
#include "src/serialize.h"
#include "src/utils.h"
@ -900,6 +901,11 @@ class Assembler : public AssemblerBase {
inline static void deserialization_set_special_target_at(
Address constant_pool_entry, Code* code, Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// All addresses in the constant pool are the same size as pointers.
static const int kSpecialTargetSize = kPointerSize;
@ -951,7 +957,9 @@ class Assembler : public AssemblerBase {
// Number of instructions generated for the return sequence in
// FullCodeGenerator::EmitReturnSequence.
static const int kJSRetSequenceInstructions = 7;
static const int kJSReturnSequenceInstructions = 7;
static const int kJSReturnSequenceLength =
kJSReturnSequenceInstructions * kInstructionSize;
// Distance between start of patched return sequence and the emitted address
// to jump to.
static const int kPatchReturnSequenceAddressOffset = 0;
@ -959,7 +967,7 @@ class Assembler : public AssemblerBase {
// Number of instructions necessary to be able to later patch it to a call.
// See DebugCodegen::GenerateSlot() and
// BreakLocationIterator::SetDebugBreakAtSlot().
// BreakLocation::SetDebugBreakAtSlot().
static const int kDebugBreakSlotInstructions = 4;
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstructionSize;
@ -1010,7 +1018,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(const int reason, const int raw_position);
void RecordDeoptReason(const int reason, const SourcePosition position);
int buffer_space() const;
@ -1743,6 +1751,9 @@ class Assembler : public AssemblerBase {
// Emit 64 bits of data in the instruction stream.
void dc64(uint64_t data) { EmitData(&data, sizeof(data)); }
// Emit an address in the instruction stream.
void dcptr(Label* label);
// Copy a string into the instruction stream, including the terminating NULL
// character. The instruction pointer (pc_) is then aligned correctly for
// subsequent instructions.
@ -2159,6 +2170,10 @@ class Assembler : public AssemblerBase {
// Each relocation is encoded as a variable size value
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer;
// Internal reference positions, required for (potential) patching in
// GrowBuffer(); contains only those internal references whose labels
// are already bound.
std::deque<int> internal_reference_positions_;
// Relocation info records are also used during code generation as temporary
// containers for constants and code target addresses until they are emitted

237
deps/v8/src/arm64/builtins-arm64.cc

@ -1324,53 +1324,106 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_FunctionApply");
const int kIndexOffset =
StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
const int kLimitOffset =
StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
const int kArgsOffset = 2 * kPointerSize;
const int kReceiverOffset = 3 * kPointerSize;
const int kFunctionOffset = 4 * kPointerSize;
static void Generate_CheckStackOverflow(MacroAssembler* masm,
const int calleeOffset) {
Register argc = x0;
Register function = x15;
// Check the stack for overflow.
// We are not trying to catch interruptions (e.g. debug break and
// preemption) here, so the "real stack limit" is checked.
Label enough_stack_space;
__ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
__ Ldr(function, MemOperand(fp, calleeOffset));
// Make x10 the space we have left. The stack might already be overflowed
// here which will cause x10 to become negative.
// TODO(jbramley): Check that the stack usage here is safe.
__ Sub(x10, jssp, x10);
// Check if the arguments will overflow the stack.
__ Cmp(x10, Operand::UntagSmiAndScale(argc, kPointerSizeLog2));
__ B(gt, &enough_stack_space);
// There is not enough stack space, so use a builtin to throw an appropriate
// error.
__ Push(function, argc);
__ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
// We should never return from the APPLY_OVERFLOW builtin.
if (__ emit_debug_code()) {
__ Unreachable();
}
__ Bind(&enough_stack_space);
}
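Rough scalar restatement of the check above, assuming only that the arguments need argc * kPointerSize bytes below the current stack pointer; the names and the 8-byte pointer size are illustrative, not quoted from V8.

#include <cstdint>

bool EnoughStackSpace(uintptr_t stack_pointer, uintptr_t real_stack_limit,
                      int64_t argc) {
  const int64_t kPointerSize = 8;  // arm64
  // The subtraction may wrap if the stack has already overflowed; the signed
  // comparison then fails, mirroring the signed "gt" branch in the stub.
  int64_t space_left = static_cast<int64_t>(stack_pointer - real_stack_limit);
  return space_left > argc * kPointerSize;
}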
static void Generate_PushAppliedArguments(MacroAssembler* masm,
const int argumentsOffset,
const int indexOffset,
const int limitOffset) {
Label entry, loop;
Register current = x0;
__ Ldr(current, MemOperand(fp, indexOffset));
__ B(&entry);
__ Bind(&loop);
// Load the current argument from the arguments array and push it.
// TODO(all): Couldn't we optimize this for JS arrays?
__ Ldr(x1, MemOperand(fp, argumentsOffset));
__ Push(x1, current);
// Call the runtime to access the property in the arguments array.
__ CallRuntime(Runtime::kGetProperty, 2);
__ Push(x0);
// Use inline caching to access the arguments.
__ Ldr(current, MemOperand(fp, indexOffset));
__ Add(current, current, Smi::FromInt(1));
__ Str(current, MemOperand(fp, indexOffset));
// Test if the copy loop has finished copying all the elements from the
// arguments object.
__ Bind(&entry);
__ Ldr(x1, MemOperand(fp, limitOffset));
__ Cmp(current, x1);
__ B(ne, &loop);
// On exit, the pushed arguments count is in x0, untagged
__ SmiUntag(current);
}
static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
const int kFormalParameters = targetIsArgument ? 3 : 2;
const int kStackSize = kFormalParameters + 1;
{
FrameScope frame_scope(masm, StackFrame::INTERNAL);
const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
const int kReceiverOffset = kArgumentsOffset + kPointerSize;
const int kFunctionOffset = kReceiverOffset + kPointerSize;
const int kIndexOffset =
StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
const int kLimitOffset =
StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
Register args = x12;
Register receiver = x14;
Register function = x15;
// Get the length of the arguments via a builtin call.
__ Ldr(function, MemOperand(fp, kFunctionOffset));
__ Ldr(args, MemOperand(fp, kArgsOffset));
__ Ldr(args, MemOperand(fp, kArgumentsOffset));
__ Push(function, args);
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
if (targetIsArgument) {
__ InvokeBuiltin(Builtins::REFLECT_APPLY_PREPARE, CALL_FUNCTION);
} else {
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
}
Register argc = x0;
// Check the stack for overflow.
// We are not trying to catch interruptions (e.g. debug break and
// preemption) here, so the "real stack limit" is checked.
Label enough_stack_space;
__ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
__ Ldr(function, MemOperand(fp, kFunctionOffset));
// Make x10 the space we have left. The stack might already be overflowed
// here which will cause x10 to become negative.
// TODO(jbramley): Check that the stack usage here is safe.
__ Sub(x10, jssp, x10);
// Check if the arguments will overflow the stack.
__ Cmp(x10, Operand::UntagSmiAndScale(argc, kPointerSizeLog2));
__ B(gt, &enough_stack_space);
// There is not enough stack space, so use a builtin to throw an appropriate
// error.
__ Push(function, argc);
__ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
// We should never return from the APPLY_OVERFLOW builtin.
if (__ emit_debug_code()) {
__ Unreachable();
}
Generate_CheckStackOverflow(masm, kFunctionOffset);
__ Bind(&enough_stack_space);
// Push current limit and index.
__ Mov(x1, 0); // Initial index.
__ Push(argc, x1);
@ -1424,33 +1477,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ Push(receiver);
// Copy all arguments from the array to the stack.
Label entry, loop;
Register current = x0;
__ Ldr(current, MemOperand(fp, kIndexOffset));
__ B(&entry);
__ Bind(&loop);
// Load the current argument from the arguments array and push it.
// TODO(all): Couldn't we optimize this for JS arrays?
__ Ldr(x1, MemOperand(fp, kArgsOffset));
__ Push(x1, current);
// Call the runtime to access the property in the arguments array.
__ CallRuntime(Runtime::kGetProperty, 2);
__ Push(x0);
// Use inline caching to access the arguments.
__ Ldr(current, MemOperand(fp, kIndexOffset));
__ Add(current, current, Smi::FromInt(1));
__ Str(current, MemOperand(fp, kIndexOffset));
// Test if the copy loop has finished copying all the elements from the
// arguments object.
__ Bind(&entry);
__ Ldr(x1, MemOperand(fp, kLimitOffset));
__ Cmp(current, x1);
__ B(ne, &loop);
Generate_PushAppliedArguments(
masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
// At the end of the loop, the number of arguments is stored in 'current',
// represented as a smi.
@ -1460,12 +1488,11 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Call the function.
Label call_proxy;
ParameterCount actual(current);
__ SmiUntag(current);
ParameterCount actual(x0);
__ JumpIfNotObjectType(function, x10, x11, JS_FUNCTION_TYPE, &call_proxy);
__ InvokeFunction(function, actual, CALL_FUNCTION, NullCallWrapper());
frame_scope.GenerateLeaveFrame();
__ Drop(3);
__ Drop(kStackSize);
__ Ret();
// Call the function proxy.
@ -1479,11 +1506,93 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
__ Drop(3);
__ Drop(kStackSize);
__ Ret();
}
static void Generate_ConstructHelper(MacroAssembler* masm) {
const int kFormalParameters = 3;
const int kStackSize = kFormalParameters + 1;
{
FrameScope frame_scope(masm, StackFrame::INTERNAL);
const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
const int kFunctionOffset = kArgumentsOffset + kPointerSize;
const int kIndexOffset =
StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
const int kLimitOffset =
StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
// Is x11 safe to use?
Register newTarget = x11;
Register args = x12;
Register function = x15;
// If newTarget is not supplied, set it to constructor
Label validate_arguments;
__ Ldr(x0, MemOperand(fp, kNewTargetOffset));
__ CompareRoot(x0, Heap::kUndefinedValueRootIndex);
__ B(ne, &validate_arguments);
__ Ldr(x0, MemOperand(fp, kFunctionOffset));
__ Str(x0, MemOperand(fp, kNewTargetOffset));
// Validate arguments
__ Bind(&validate_arguments);
__ Ldr(function, MemOperand(fp, kFunctionOffset));
__ Ldr(args, MemOperand(fp, kArgumentsOffset));
__ Ldr(newTarget, MemOperand(fp, kNewTargetOffset));
__ Push(function, args, newTarget);
__ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION);
Register argc = x0;
Generate_CheckStackOverflow(masm, kFunctionOffset);
// Push current limit and index, constructor & newTarget
__ Mov(x1, 0); // Initial index.
__ Ldr(newTarget, MemOperand(fp, kNewTargetOffset));
__ Push(argc, x1, newTarget, function);
// Copy all arguments from the array to the stack.
Generate_PushAppliedArguments(
masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
__ Ldr(x1, MemOperand(fp, kFunctionOffset));
// Use undefined feedback vector
__ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
// Call the function.
CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
__ Drop(1);
}
__ Drop(kStackSize);
__ Ret();
}
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_FunctionApply");
Generate_ApplyHelper(masm, false);
}
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_ReflectApply");
Generate_ApplyHelper(masm, true);
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_ReflectConstruct");
Generate_ConstructHelper(masm);
}
static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
Label* stack_overflow) {
// ----------- S t a t e -------------

395
deps/v8/src/arm64/code-stubs-arm64.cc

@ -11,6 +11,7 @@
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
@ -1188,28 +1189,28 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Handling of exception.
__ Bind(&exception_returned);
// Retrieve the pending exception.
ExternalReference pending_exception_address(
Isolate::kPendingExceptionAddress, isolate());
const Register& exception = result;
const Register& exception_address = x11;
__ Mov(exception_address, Operand(pending_exception_address));
__ Ldr(exception, MemOperand(exception_address));
// Clear the pending exception.
__ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
__ Str(x10, MemOperand(exception_address));
// x0 exception The exception descriptor.
// x21 argv
// x22 argc
// x23 target
// Special handling of termination exceptions, which are uncatchable by
// JavaScript code.
Label throw_termination_exception;
__ Cmp(exception, Operand(isolate()->factory()->termination_exception()));
__ B(eq, &throw_termination_exception);
ExternalReference pending_handler_context_address(
Isolate::kPendingHandlerContextAddress, isolate());
ExternalReference pending_handler_code_address(
Isolate::kPendingHandlerCodeAddress, isolate());
ExternalReference pending_handler_offset_address(
Isolate::kPendingHandlerOffsetAddress, isolate());
ExternalReference pending_handler_fp_address(
Isolate::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
Isolate::kPendingHandlerSPAddress, isolate());
// Ask the runtime for help to determine the handler. This will set x0 to
// contain the current pending exception, don't clobber it.
ExternalReference find_handler(Runtime::kFindExceptionHandler, isolate());
DCHECK(csp.Is(masm->StackPointer()));
{
FrameScope scope(masm, StackFrame::MANUAL);
__ Mov(x0, 0); // argc.
__ Mov(x1, 0); // argv.
__ Mov(x2, ExternalReference::isolate_address(isolate()));
__ CallCFunction(find_handler, 3);
}
// We didn't execute a return case, so the stack frame hasn't been updated
// (except for the return address slot). However, we don't need to initialize
@ -1217,18 +1218,29 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// unwinds the stack.
__ SetStackPointer(jssp);
ASM_LOCATION("Throw normal");
__ Mov(argv, 0);
__ Mov(argc, 0);
__ Mov(target, 0);
__ Throw(x0, x10, x11, x12, x13);
__ Bind(&throw_termination_exception);
ASM_LOCATION("Throw termination");
__ Mov(argv, 0);
__ Mov(argc, 0);
__ Mov(target, 0);
__ ThrowUncatchable(x0, x10, x11, x12, x13);
// Retrieve the handler context, SP and FP.
__ Mov(cp, Operand(pending_handler_context_address));
__ Ldr(cp, MemOperand(cp));
__ Mov(jssp, Operand(pending_handler_sp_address));
__ Ldr(jssp, MemOperand(jssp));
__ Mov(fp, Operand(pending_handler_fp_address));
__ Ldr(fp, MemOperand(fp));
// If the handler is a JS frame, restore the context to the frame. Note that
// the context will be set to (cp == 0) for non-JS frames.
Label not_js_frame;
__ Cbz(cp, &not_js_frame);
__ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Bind(&not_js_frame);
// Compute the handler entry address and jump to it.
__ Mov(x10, Operand(pending_handler_code_address));
__ Ldr(x10, MemOperand(x10));
__ Mov(x11, Operand(pending_handler_offset_address));
__ Ldr(x11, MemOperand(x11));
__ Add(x10, x10, Code::kHeaderSize - kHeapObjectTag);
__ Add(x10, x10, x11);
__ Br(x10);
}
@ -1333,10 +1345,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ LoadRoot(x0, Heap::kExceptionRootIndex);
__ B(&exit);
// Invoke: Link this frame into the handler chain. There's only one
// handler block in this code object, so its index is 0.
// Invoke: Link this frame into the handler chain.
__ Bind(&invoke);
__ PushTryHandler(StackHandler::JS_ENTRY, 0);
__ PushStackHandler();
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the B(&invoke) above, which
// restores all callee-saved registers (including cp and fp) to their
@ -1370,7 +1381,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Blr(x12);
// Unlink this frame from the handler chain.
__ PopTryHandler();
__ PopStackHandler();
__ Bind(&exit);
@ -1454,7 +1465,7 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
__ Ret();
StubRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm, call_helper);
char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
__ Bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
@ -2062,9 +2073,13 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(param_count, param_count_smi);
if (has_new_target()) {
__ Cmp(param_count, Operand(0));
Label skip_decrement;
__ B(eq, &skip_decrement);
// Skip new.target: it is not a part of arguments.
__ Sub(param_count, param_count, Operand(1));
__ SmiTag(param_count_smi, param_count);
__ Bind(&skip_decrement);
}
__ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
__ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
@ -2209,7 +2224,7 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@ -2661,18 +2676,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Cmp(x10, exception_value);
__ B(eq, &runtime);
__ Str(x10, MemOperand(x11)); // Clear pending exception.
// Check if the exception is a termination. If so, throw as uncatchable.
Label termination_exception;
__ JumpIfRoot(exception_value,
Heap::kTerminationExceptionRootIndex,
&termination_exception);
__ Throw(exception_value, x10, x11, x12, x13);
__ Bind(&termination_exception);
__ ThrowUncatchable(exception_value, x10, x11, x12, x13);
// For exception, throw the exception again.
__ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
__ Bind(&failure);
__ Mov(x0, Operand(isolate()->factory()->null_value()));
@ -2683,7 +2688,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Bind(&runtime);
__ PopCPURegList(used_callee_saved_registers);
__ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@ -3299,7 +3304,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
MacroAssembler* masm,
MacroAssembler* masm, EmbedMode embed_mode,
const RuntimeCallHelper& call_helper) {
__ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
@ -3307,8 +3312,13 @@ void StringCharCodeAtGenerator::GenerateSlow(
// If index is a heap number, try converting it to an integer.
__ JumpIfNotHeapNumber(index_, index_not_number_);
call_helper.BeforeCall(masm);
// Save object_ on the stack and pass index_ as argument for runtime call.
__ Push(object_, index_);
if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
__ Push(VectorLoadICDescriptor::VectorRegister(),
VectorLoadICDescriptor::SlotRegister(), object_, index_);
} else {
// Save object_ on the stack and pass index_ as argument for runtime call.
__ Push(object_, index_);
}
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
@ -3319,7 +3329,12 @@ void StringCharCodeAtGenerator::GenerateSlow(
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ Mov(index_, x0);
__ Pop(object_);
if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
__ Pop(object_, VectorLoadICDescriptor::SlotRegister(),
VectorLoadICDescriptor::VectorRegister());
} else {
__ Pop(object_);
}
// Reload the instance type.
__ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
@ -3616,7 +3631,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
__ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
}
__ Bind(&miss);
@ -3948,7 +3963,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Ret();
__ Bind(&runtime);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
__ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
__ bind(&single_char);
// x1: result_length
@ -4156,7 +4171,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime.
// Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
__ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
}
@ -4441,15 +4456,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
VectorLoadStub stub(isolate(), state());
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
VectorRawLoadStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
VectorKeyedLoadStub stub(isolate());
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
VectorRawKeyedLoadStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
@ -4467,6 +4482,234 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
}
void VectorRawLoadStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
static void HandleArrayCases(MacroAssembler* masm, Register receiver,
Register key, Register vector, Register slot,
Register feedback, Register scratch1,
Register scratch2, Register scratch3,
bool is_polymorphic, Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label load_smi_map, compare_map;
Label start_polymorphic;
Register receiver_map = scratch1;
Register cached_map = scratch2;
// Receiver might not be a heap object.
__ JumpIfSmi(receiver, &load_smi_map);
__ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Bind(&compare_map);
__ Ldr(cached_map,
FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
__ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
__ Cmp(receiver_map, cached_map);
__ B(ne, &start_polymorphic);
// found, now call handler.
Register handler = feedback;
__ Ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
__ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
__ Jump(feedback);
Register length = scratch3;
__ Bind(&start_polymorphic);
__ Ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
if (!is_polymorphic) {
__ Cmp(length, Operand(Smi::FromInt(2)));
__ B(eq, miss);
}
Register too_far = length;
Register pointer_reg = feedback;
// +-----+------+------+-----+-----+ ... ----+
// | map | len | wm0 | h0 | wm1 | hN |
// +-----+------+------+-----+-----+ ... ----+
// 0 1 2 len-1
// ^ ^
// | |
// pointer_reg too_far
// aka feedback scratch3
// also need receiver_map (aka scratch1)
// use cached_map (scratch2) to look in the weak map values.
__ Add(too_far, feedback,
Operand::UntagSmiAndScale(length, kPointerSizeLog2));
__ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
__ Add(pointer_reg, feedback,
FixedArray::OffsetOfElementAt(2) - kHeapObjectTag);
__ Bind(&next_loop);
__ Ldr(cached_map, MemOperand(pointer_reg));
__ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
__ Cmp(receiver_map, cached_map);
__ B(ne, &prepare_next);
__ Ldr(handler, MemOperand(pointer_reg, kPointerSize));
__ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
__ Jump(handler);
__ Bind(&prepare_next);
__ Add(pointer_reg, pointer_reg, kPointerSize * 2);
__ Cmp(pointer_reg, too_far);
__ B(lt, &next_loop);
// We exhausted our array of map handler pairs.
__ jmp(miss);
__ Bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&compare_map);
}
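HandleArrayCases walks the feedback FixedArray laid out as in the ASCII diagram above: a map/length header followed by alternating weak-cell map and handler slots, with pointer_reg advancing two slots per iteration until it reaches too_far. A rough standalone model of that lookup, using plain pointers where V8 uses WeakCells and tagged Code handlers (illustrative only):
#include <cstddef>
// Stand-ins for the weak-cell map / handler pairs stored from element 2 on.
struct MapHandlerPair {
  const void* map;
  const void* handler;
};
// Returns the handler whose cached map matches receiver_map, or nullptr for
// the miss path once the pairs are exhausted (the polymorphic loop above).
const void* LookupPolymorphicHandler(const MapHandlerPair* pairs, size_t count,
                                     const void* receiver_map) {
  for (size_t i = 0; i < count; ++i) {
    if (pairs[i].map == receiver_map) return pairs[i].handler;
  }
  return nullptr;
}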
static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
Register key, Register vector, Register slot,
Register weak_cell, Register scratch,
Label* miss) {
// feedback initially contains the feedback array
Label compare_smi_map;
Register receiver_map = scratch;
Register cached_map = weak_cell;
// Move the weak map into the weak_cell register.
__ Ldr(cached_map, FieldMemOperand(weak_cell, WeakCell::kValueOffset));
// Receiver might not be a heap object.
__ JumpIfSmi(receiver, &compare_smi_map);
__ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Cmp(cached_map, receiver_map);
__ B(ne, miss);
Register handler = weak_cell;
__ Add(handler, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
__ Ldr(handler,
FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
__ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
__ Jump(weak_cell);
// In microbenchmarks, it made sense to unroll this code so that the call to
// the handler is duplicated for a HeapObject receiver and a Smi receiver.
// TODO(mvstanton): does this hold on ARM?
__ Bind(&compare_smi_map);
__ JumpIfNotRoot(weak_cell, Heap::kHeapNumberMapRootIndex, miss);
__ Add(handler, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
__ Ldr(handler,
FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
__ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
__ Jump(handler);
}
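HandleMonomorphicCase is the single-map fast path of the same scheme: the feedback slot holds one weak cell whose value is the expected map, and the following slot holds the handler. A simplified sketch under the same plain-pointer assumptions as above:
// The feedback entry (a WeakCell value in real V8) plus the handler stored
// one element further along in the vector.
struct MonomorphicSlot {
  const void* cached_map;
  const void* handler;
};
const void* LookupMonomorphicHandler(const MonomorphicSlot& slot,
                                     const void* receiver_map) {
  return slot.cached_map == receiver_map ? slot.handler : nullptr;  // miss
}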
void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // x1
Register name = VectorLoadICDescriptor::NameRegister(); // x2
Register vector = VectorLoadICDescriptor::VectorRegister(); // x3
Register slot = VectorLoadICDescriptor::SlotRegister(); // x0
Register feedback = x4;
Register scratch1 = x5;
__ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
__ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Is it a weak cell?
Label try_array;
Label not_array, smi_key, key_okay, miss;
__ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ JumpIfNotRoot(scratch1, Heap::kWeakCellMapRootIndex, &try_array);
HandleMonomorphicCase(masm, receiver, name, vector, slot, feedback, scratch1,
&miss);
// Is it a fixed array?
__ Bind(&try_array);
__ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
HandleArrayCases(masm, receiver, name, vector, slot, feedback, scratch1, x6,
x7, true, &miss);
__ Bind(&not_array);
__ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss);
Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
false, receiver, name, feedback,
scratch1, x6, x7);
__ Bind(&miss);
LoadIC::GenerateMiss(masm);
}
void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // x1
Register key = VectorLoadICDescriptor::NameRegister(); // x2
Register vector = VectorLoadICDescriptor::VectorRegister(); // x3
Register slot = VectorLoadICDescriptor::SlotRegister(); // x0
Register feedback = x4;
Register scratch1 = x5;
__ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
__ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Is it a weak cell?
Label try_array;
Label not_array, smi_key, key_okay, miss;
__ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ JumpIfNotRoot(scratch1, Heap::kWeakCellMapRootIndex, &try_array);
HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, scratch1,
&miss);
__ Bind(&try_array);
// Is it a fixed array?
__ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
// We have a polymorphic element handler.
Label polymorphic, try_poly_name;
__ Bind(&polymorphic);
HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, x6,
x7, true, &miss);
__ Bind(&not_array);
// Is it generic?
__ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
&try_poly_name);
Handle<Code> megamorphic_stub =
KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ Bind(&try_poly_name);
// We might have a name in feedback, and a fixed array in the next slot.
__ Cmp(key, feedback);
__ B(ne, &miss);
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
__ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
__ Ldr(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, x6,
x7, false, &miss);
__ Bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
}
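GenerateImpl for the keyed load classifies the feedback slot and dispatches in a fixed order: a weak cell means monomorphic, a fixed array means polymorphic, the megamorphic symbol tail-calls the megamorphic stub, and a feedback entry equal to the key means a name-keyed map/handler array sits in the next slot; anything else is a miss. A rough outline of that decision tree (illustrative names, not V8's):
enum class FeedbackKind { kWeakCell, kFixedArray, kMegamorphicSymbol, kName };
enum class LoadPath {
  kMonomorphicHandler,
  kPolymorphicHandlers,
  kMegamorphicStub,
  kNameKeyedHandlers,  // handlers live one slot further along
  kMiss
};
LoadPath Dispatch(FeedbackKind kind, bool key_matches_feedback_name) {
  switch (kind) {
    case FeedbackKind::kWeakCell:          return LoadPath::kMonomorphicHandler;
    case FeedbackKind::kFixedArray:        return LoadPath::kPolymorphicHandlers;
    case FeedbackKind::kMegamorphicSymbol: return LoadPath::kMegamorphicStub;
    case FeedbackKind::kName:
      return key_matches_feedback_name ? LoadPath::kNameKeyedHandlers
                                       : LoadPath::kMiss;
  }
  return LoadPath::kMiss;
}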
// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
// a "Push lr" instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
@ -5256,7 +5499,6 @@ static void CallApiFunctionAndReturn(
}
Label promote_scheduled_exception;
Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label return_value_loaded;
@ -5278,6 +5520,7 @@ static void CallApiFunctionAndReturn(
__ Cmp(limit_reg, x1);
__ B(ne, &delete_allocated_handles);
// Leave the API exit frame.
__ Bind(&leave_exit_frame);
// Restore callee-saved registers.
__ Peek(x19, (spill_offset + 0) * kXRegSize);
@ -5285,13 +5528,6 @@ static void CallApiFunctionAndReturn(
__ Peek(x21, (spill_offset + 2) * kXRegSize);
__ Peek(x22, (spill_offset + 3) * kXRegSize);
// Check if the function scheduled an exception.
__ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
__ Ldr(x5, MemOperand(x5));
__ JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex,
&promote_scheduled_exception);
__ Bind(&exception_handled);
bool restore_context = context_restore_operand != NULL;
if (restore_context) {
__ Ldr(cp, *context_restore_operand);
@ -5302,6 +5538,13 @@ static void CallApiFunctionAndReturn(
}
__ LeaveExitFrame(false, x1, !restore_context);
// Check if the function scheduled an exception.
__ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
__ Ldr(x5, MemOperand(x5));
__ JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex,
&promote_scheduled_exception);
if (stack_space_operand != NULL) {
__ Drop(x2, 1);
} else {
@ -5309,13 +5552,9 @@ static void CallApiFunctionAndReturn(
}
__ Ret();
// Re-throw by promoting a scheduled exception.
__ Bind(&promote_scheduled_exception);
{
FrameScope frame(masm, StackFrame::INTERNAL);
__ CallExternalReference(
ExternalReference(Runtime::kPromoteScheduledException, isolate), 0);
}
__ B(&exception_handled);
__ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
// HandleScope limit has changed. Delete allocated extensions.
__ Bind(&delete_allocated_handles);

41
deps/v8/src/arm64/debug-arm64.cc

@ -15,12 +15,8 @@ namespace internal {
#define __ ACCESS_MASM(masm)
bool BreakLocationIterator::IsDebugBreakAtReturn() {
return Debug::IsDebugBreakAtReturn(rinfo());
}
void BreakLocationIterator::SetDebugBreakAtReturn() {
void BreakLocation::SetDebugBreakAtReturn() {
// Patch the code emitted by FullCodeGenerator::EmitReturnSequence, changing
// the return from JS function sequence from
// mov sp, fp
@ -39,8 +35,8 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
// The patching code must not overflow the space occupied by the return
// sequence.
STATIC_ASSERT(Assembler::kJSRetSequenceInstructions >= 5);
PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 5);
STATIC_ASSERT(Assembler::kJSReturnSequenceInstructions >= 5);
PatchingAssembler patcher(reinterpret_cast<Instruction*>(pc()), 5);
byte* entry =
debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry();
@ -59,27 +55,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
}
void BreakLocationIterator::ClearDebugBreakAtReturn() {
// Reset the code emitted by EmitReturnSequence to its original state.
rinfo()->PatchCode(original_rinfo()->pc(),
Assembler::kJSRetSequenceInstructions);
}
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
return rinfo->IsPatchedReturnSequence();
}
bool BreakLocationIterator::IsDebugBreakAtSlot() {
DCHECK(IsDebugBreakSlot());
// Check whether the debug break slot instructions have been patched.
return rinfo()->IsPatchedDebugBreakSlotSequence();
}
void BreakLocationIterator::SetDebugBreakAtSlot() {
void BreakLocation::SetDebugBreakAtSlot() {
// Patch the code emitted by DebugCodegen::GenerateSlots, changing the debug
// break slot code from
// mov x0, x0 @ nop DEBUG_BREAK_NOP
@ -99,7 +75,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
// The patching code must not overflow the space occupied by the return
// sequence.
STATIC_ASSERT(Assembler::kDebugBreakSlotInstructions >= 4);
PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 4);
PatchingAssembler patcher(reinterpret_cast<Instruction*>(pc()), 4);
byte* entry =
debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry();
@ -117,13 +93,6 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
}
void BreakLocationIterator::ClearDebugBreakAtSlot() {
DCHECK(IsDebugBreakSlot());
rinfo()->PatchCode(original_rinfo()->pc(),
Assembler::kDebugBreakSlotInstructions);
}
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs,

2
deps/v8/src/arm64/deoptimizer-arm64.cc

@ -115,7 +115,7 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
void Deoptimizer::TableEntryGenerator::Generate() {
GeneratePrologue();
// TODO(all): This code needs to be revisited. We probably only need to save

5
deps/v8/src/arm64/frames-arm64.h

@ -99,11 +99,6 @@ inline Object* JavaScriptFrame::function_slot_object() const {
}
inline void StackHandler::SetFp(Address slot, Address fp) {
Memory::Address_at(slot) = fp;
}
} } // namespace v8::internal
#endif // V8_ARM64_FRAMES_ARM64_H_

226
deps/v8/src/arm64/full-codegen-arm64.cc

@ -105,7 +105,8 @@ class JumpPatchSite BASE_EMBEDDED {
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
HandlerTable::LengthForRange(function()->handler_count()), TENURED));
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
@ -196,7 +197,7 @@ void FullCodeGenerator::Generate() {
// Argument to NewContext is the function, which is still in x1.
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
if (info->scope()->is_script_scope()) {
__ Mov(x10, Operand(info->scope()->GetScopeInfo(info->isolate())));
__ Push(x1, x10);
__ CallRuntime(Runtime::kNewScriptContext, 2);
@ -241,6 +242,11 @@ void FullCodeGenerator::Generate() {
}
}
ArgumentsAccessStub::HasNewTarget has_new_target =
IsSubclassConstructor(info->function()->kind())
? ArgumentsAccessStub::HAS_NEW_TARGET
: ArgumentsAccessStub::NO_NEW_TARGET;
// Possibly allocate RestParameters
int rest_index;
Variable* rest_param = scope()->rest_parameter(&rest_index);
@ -249,6 +255,11 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
if (has_new_target == ArgumentsAccessStub::HAS_NEW_TARGET) {
--num_parameters;
++rest_index;
}
__ Add(x3, fp, StandardFrameConstants::kCallerSPOffset + offset);
__ Mov(x2, Smi::FromInt(num_parameters));
__ Mov(x1, Smi::FromInt(rest_index));
@ -281,10 +292,6 @@ void FullCodeGenerator::Generate() {
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::HasNewTarget has_new_target =
IsSubclassConstructor(info->function()->kind())
? ArgumentsAccessStub::HAS_NEW_TARGET
: ArgumentsAccessStub::NO_NEW_TARGET;
ArgumentsAccessStub::Type type;
if (is_strict(language_mode()) || !is_simple_parameter_list()) {
type = ArgumentsAccessStub::NEW_STRICT;
@ -456,10 +463,10 @@ void FullCodeGenerator::EmitReturnSequence() {
// Make sure that the constant pool is not emitted inside of the return
// sequence. This sequence can get patched when the debugger is used. See
// debug-arm64.cc:BreakLocationIterator::SetDebugBreakAtReturn().
// debug-arm64.cc:BreakLocation::SetDebugBreakAtReturn().
{
InstructionAccurateScope scope(masm_,
Assembler::kJSRetSequenceInstructions);
Assembler::kJSReturnSequenceInstructions);
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
__ RecordJSReturn();
// This code is generated using Assembler methods rather than Macro
@ -1508,7 +1515,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
SmiFromSlot(proxy->VariableFeedbackSlot()));
}
CallLoadIC(CONTEXTUAL);
CallGlobalLoadIC(var->name());
context()->Plug(x0);
break;
}
@ -2261,6 +2268,16 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
__ Push(scratch);
EmitPropertyKey(property, lit->GetIdForProperty(i));
// The static prototype property is read only. We handle the non computed
// property name case in the parser. Since this is the only case where we
// need to check for an own read only property we special case this so we do
// not need to do this for every property.
if (property->is_static() && property->is_computed_name()) {
__ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
__ Push(x0);
}
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
@ -2398,23 +2415,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ Ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
CallStoreIC();
} else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ Mov(x1, Operand(var->name()));
__ Push(x0, cp, x1);
__ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
} else {
DCHECK(var->IsStackLocal() || var->IsContextSlot());
Label skip;
MemOperand location = VarOperand(var, x1);
__ Ldr(x10, location);
__ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &skip);
EmitStoreToStackLocalOrContextSlot(var, location);
__ Bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@ -2430,6 +2430,20 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ Bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
} else if (var->mode() == CONST && op != Token::INIT_CONST) {
// Assignment to const variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label const_error;
MemOperand location = VarOperand(var, x1);
__ Ldr(x10, location);
__ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &const_error);
__ Mov(x10, Operand(var->name()));
__ Push(x10);
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ Bind(&const_error);
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
// Assignment to var.
@ -2453,8 +2467,31 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
} else if (IsSignallingAssignmentToConst(var, op, language_mode())) {
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
} else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
DCHECK(var->mode() == CONST_LEGACY);
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ Mov(x1, Operand(var->name()));
__ Push(x0, cp, x1);
__ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
} else {
DCHECK(var->IsStackLocal() || var->IsContextSlot());
Label skip;
MemOperand location = VarOperand(var, x1);
__ Ldr(x10, location);
__ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &skip);
EmitStoreToStackLocalOrContextSlot(var, location);
__ Bind(&skip);
}
} else {
DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
if (is_strict(language_mode())) {
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
}
// Silently ignore store in sloppy mode.
}
}
@ -2586,7 +2623,12 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
__ Push(isolate()->factory()->undefined_value());
{
UseScratchRegisterScope temps(masm_);
Register temp = temps.AcquireX();
__ LoadRoot(temp, Heap::kUndefinedValueRootIndex);
__ Push(temp);
}
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@ -2945,8 +2987,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
if (!ValidateSuperCall(expr)) return;
Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
GetVar(result_register(), new_target_var);
__ Push(result_register());
@ -3467,9 +3507,10 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
__ Ldr(x12, FieldMemOperand(x10, Map::kConstructorOffset));
__ JumpIfNotObjectType(x12, x13, x14, JS_FUNCTION_TYPE,
&non_function_constructor);
Register instance_type = x14;
__ GetMapConstructor(x12, x10, x13, instance_type);
__ Cmp(instance_type, JS_FUNCTION_TYPE);
__ B(ne, &non_function_constructor);
// x12 now contains the constructor function. Grab the
// instance class name from there.
@ -3764,7 +3805,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
__ B(&done);
NopRuntimeCallHelper call_helper;
generator.GenerateSlow(masm_, call_helper);
generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
__ Bind(&done);
context()->Plug(result);
@ -3810,7 +3851,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
__ B(&done);
NopRuntimeCallHelper call_helper;
generator.GenerateSlow(masm_, call_helper);
generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
__ Bind(&done);
context()->Plug(result);
@ -3985,7 +4026,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
// Call runtime to perform the lookup.
__ Push(cache, key);
__ CallRuntime(Runtime::kGetFromCache, 2);
__ CallRuntime(Runtime::kGetFromCacheRT, 2);
__ Bind(&done);
context()->Plug(x0);
@ -4254,18 +4295,11 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->function() != NULL &&
expr->function()->intrinsic_type == Runtime::INLINE) {
Comment cmnt(masm_, "[ InlineRuntimeCall");
EmitInlineRuntimeCall(expr);
return;
}
Comment cmnt(masm_, "[ CallRunTime");
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
if (expr->is_jsruntime()) {
Comment cmnt(masm_, "[ CallRunTime");
// Push the builtins object as the receiver.
__ Ldr(x10, GlobalObjectMemOperand());
__ Ldr(LoadDescriptor::ReceiverRegister(),
@ -4287,7 +4321,6 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ Pop(x10);
__ Push(x0, x10);
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
@ -4302,15 +4335,29 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, x0);
} else {
// Push the arguments ("left-to-right").
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
const Runtime::Function* function = expr->function();
switch (function->function_id) {
#define CALL_INTRINSIC_GENERATOR(Name) \
case Runtime::kInline##Name: { \
Comment cmnt(masm_, "[ Inline" #Name); \
return Emit##Name(expr); \
}
FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
#undef CALL_INTRINSIC_GENERATOR
default: {
Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
// Push the arguments ("left-to-right").
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
context()->Plug(x0);
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
context()->Plug(x0);
}
}
}
}
@ -4980,7 +5027,6 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
__ Bind(&l_catch);
handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
__ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw"
__ Peek(x3, 1 * kPointerSize); // iter
__ Push(load_name, x3, x0); // "throw", iter, except
@ -4991,8 +5037,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// re-boxing.
__ Bind(&l_try);
__ Pop(x0); // result
__ PushTryHandler(StackHandler::CATCH, expr->index());
const int handler_size = StackHandlerConstants::kSize;
EnterTryBlock(expr->index(), &l_catch);
const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ Push(x0); // result
__ B(&l_suspend);
@ -5003,9 +5049,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ B(&l_resume);
__ Bind(&l_suspend);
const int generator_object_depth = kPointerSize + handler_size;
const int generator_object_depth = kPointerSize + try_block_size;
__ Peek(x0, generator_object_depth);
__ Push(x0); // g
__ Push(Smi::FromInt(expr->index())); // handler-index
DCHECK((l_continuation.pos() > 0) && Smi::IsValid(l_continuation.pos()));
__ Mov(x1, Smi::FromInt(l_continuation.pos()));
__ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
@ -5013,12 +5060,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Mov(x1, cp);
__ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
kLRHasBeenSaved, kDontSaveFPRegs);
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Pop(x0); // result
EmitReturnSequence();
__ Bind(&l_resume); // received in x0
__ PopTryHandler();
ExitTryBlock(expr->index());
// receiver = iter; f = 'next'; arg = received;
__ Bind(&l_next);
@ -5288,20 +5335,6 @@ void FullCodeGenerator::EnterFinallyBlock() {
ExternalReference::address_of_pending_message_obj(isolate());
__ Mov(x10, pending_message_obj);
__ Ldr(x10, MemOperand(x10));
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof)
__ Mov(x11, has_pending_message);
__ Ldrb(x11, MemOperand(x11));
__ SmiTag(x11);
__ Push(x10, x11);
ExternalReference pending_message_script =
ExternalReference::address_of_pending_message_script(isolate());
__ Mov(x10, pending_message_script);
__ Ldr(x10, MemOperand(x10));
__ Push(x10);
}
@ -5311,23 +5344,11 @@ void FullCodeGenerator::ExitFinallyBlock() {
DCHECK(!result_register().is(x10));
// Restore pending message from stack.
__ Pop(x10, x11, x12);
ExternalReference pending_message_script =
ExternalReference::address_of_pending_message_script(isolate());
__ Mov(x13, pending_message_script);
__ Str(x10, MemOperand(x13));
__ SmiUntag(x11);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ Mov(x13, has_pending_message);
STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof)
__ Strb(x11, MemOperand(x13));
__ Pop(x10);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ Mov(x13, pending_message_obj);
__ Str(x12, MemOperand(x13));
__ Str(x10, MemOperand(x13));
// Restore result register and cooked return address from the stack.
__ Pop(x10, result_register());
@ -5437,37 +5458,6 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
}
#define __ ACCESS_MASM(masm())
FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
int* stack_depth,
int* context_length) {
ASM_LOCATION("FullCodeGenerator::TryFinally::Exit");
// The macros used here must preserve the result register.
// Because the handler block contains the context of the finally
// code, we can restore it directly from there for the finally code
// rather than iteratively unwinding contexts via their previous
// links.
__ Drop(*stack_depth); // Down to the handler block.
if (*context_length > 0) {
// Restore the context to its dedicated register and the stack.
__ Peek(cp, StackHandlerConstants::kContextOffset);
__ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
__ PopTryHandler();
__ Bl(finally_entry_);
*stack_depth = 0;
*context_length = 0;
return previous_;
}
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM64

22
deps/v8/src/arm64/instructions-arm64.cc

@ -191,6 +191,9 @@ int64_t Instruction::ImmPCOffset() {
// All PC-relative branches.
// Relative branch offsets are instruction-size-aligned.
offset = ImmBranch() << kInstructionSizeLog2;
} else if (IsUnresolvedInternalReference()) {
// Internal references are always word-aligned.
offset = ImmUnresolvedInternalReference() << kInstructionSizeLog2;
} else {
// Load literal (offset from PC).
DCHECK(IsLdrLiteral());
@ -223,7 +226,10 @@ void Instruction::SetImmPCOffsetTarget(Instruction* target) {
SetPCRelImmTarget(target);
} else if (BranchType() != UnknownBranchType) {
SetBranchImmTarget(target);
} else if (IsUnresolvedInternalReference()) {
SetUnresolvedInternalReferenceImmTarget(target);
} else {
// Load literal (offset from PC).
SetImmLLiteral(target);
}
}
@ -278,7 +284,23 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
}
void Instruction::SetUnresolvedInternalReferenceImmTarget(Instruction* target) {
DCHECK(IsUnresolvedInternalReference());
DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
ptrdiff_t target_offset = DistanceTo(target) >> kInstructionSizeLog2;
DCHECK(is_int32(target_offset));
uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);
PatchingAssembler patcher(this, 2);
patcher.brk(high16);
patcher.brk(low16);
}
void Instruction::SetImmLLiteral(Instruction* source) {
DCHECK(IsLdrLiteral());
DCHECK(IsAligned(DistanceTo(source), kInstructionSize));
ptrdiff_t offset = DistanceTo(source) >> kLoadLiteralScaleLog2;
Instr imm = Assembler::ImmLLiteral(offset);

41
deps/v8/src/arm64/instructions-arm64.h

@ -121,10 +121,18 @@ class Instruction {
return InstructionBits() & mask;
}
V8_INLINE const Instruction* following(int count = 1) const {
return InstructionAtOffset(count * static_cast<int>(kInstructionSize));
}
V8_INLINE Instruction* following(int count = 1) {
return InstructionAtOffset(count * static_cast<int>(kInstructionSize));
}
V8_INLINE const Instruction* preceding(int count = 1) const {
return following(-count);
}
V8_INLINE Instruction* preceding(int count = 1) {
return following(-count);
}
@ -189,6 +197,14 @@ class Instruction {
return Mask(PCRelAddressingMask) == ADR;
}
bool IsBrk() const { return Mask(ExceptionMask) == BRK; }
bool IsUnresolvedInternalReference() const {
// Unresolved internal references are encoded as two consecutive brk
// instructions.
return IsBrk() && following()->IsBrk();
}
bool IsLogicalImmediate() const {
return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
}
@ -306,6 +322,15 @@ class Instruction {
return 0;
}
int ImmUnresolvedInternalReference() const {
DCHECK(IsUnresolvedInternalReference());
// Unresolved references are encoded as two consecutive brk instructions.
// The associated immediate is made of the two 16-bit payloads.
int32_t high16 = ImmException();
int32_t low16 = following()->ImmException();
return (high16 << 16) | low16;
}
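Per the comments above, an unresolved internal reference is encoded as two consecutive brk instructions whose 16-bit payloads carry the halves of a 32-bit instruction offset: SetUnresolvedInternalReferenceImmTarget splits the offset, ImmUnresolvedInternalReference reassembles it. A standalone round-trip sketch of just that packing, with plain integers instead of an instruction stream:
#include <cassert>
#include <cstdint>
struct BrkPayloads {
  uint16_t high16;  // payload of the first brk
  uint16_t low16;   // payload of the second brk
};
BrkPayloads Encode(int32_t offset_in_instructions) {
  uint32_t bits = static_cast<uint32_t>(offset_in_instructions);
  return {static_cast<uint16_t>(bits >> 16),
          static_cast<uint16_t>(bits & 0xFFFF)};
}
int32_t Decode(const BrkPayloads& p) {
  uint32_t bits = (static_cast<uint32_t>(p.high16) << 16) | p.low16;
  return static_cast<int32_t>(bits);  // (high16 << 16) | low16, as above
}
int main() {
  int32_t offset = 0x00012344;  // offset counted in instructions
  assert(Decode(Encode(offset)) == offset);
  return 0;
}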
bool IsBranchAndLinkToRegister() const {
return Mask(UnconditionalBranchToRegisterMask) == BLR;
}
@ -349,6 +374,7 @@ class Instruction {
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
void SetImmPCOffsetTarget(Instruction* target);
void SetUnresolvedInternalReferenceImmTarget(Instruction* target);
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(Instruction* source);
@ -359,13 +385,18 @@ class Instruction {
enum CheckAlignment { NO_CHECK, CHECK_ALIGNMENT };
V8_INLINE const Instruction* InstructionAtOffset(
int64_t offset, CheckAlignment check = CHECK_ALIGNMENT) const {
// The FUZZ_disasm test relies on no check being done.
DCHECK(check == NO_CHECK || IsAligned(offset, kInstructionSize));
return this + offset;
}
V8_INLINE Instruction* InstructionAtOffset(
int64_t offset,
CheckAlignment check = CHECK_ALIGNMENT) {
Address addr = reinterpret_cast<Address>(this) + offset;
int64_t offset, CheckAlignment check = CHECK_ALIGNMENT) {
// The FUZZ_disasm test relies on no check being done.
DCHECK(check == NO_CHECK || IsAddressAligned(addr, kInstructionSize));
return Cast(addr);
DCHECK(check == NO_CHECK || IsAligned(offset, kInstructionSize));
return this + offset;
}
template<typename T> V8_INLINE static Instruction* Cast(T src) {

9
deps/v8/src/arm64/interface-descriptors-arm64.cc

@ -261,6 +261,15 @@ void InternalArrayConstructorDescriptor::Initialize(
}
void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
// x1: left operand
// x0: right operand
Register registers[] = {cp, x1, x0};
data->Initialize(arraysize(registers), registers, NULL);
}
void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
// x0: value to compare

20
deps/v8/src/arm64/lithium-arm64.cc

@ -1692,14 +1692,6 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LLoadGlobalCell* result = new(zone()) LLoadGlobalCell();
return instr->RequiresHoleCheck()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object =
@ -2351,18 +2343,6 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
}
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LOperand* value = UseRegister(instr->value());
if (instr->RequiresHoleCheck()) {
return AssignEnvironment(new(zone()) LStoreGlobalCell(value,
TempRegister(),
TempRegister()));
} else {
return new(zone()) LStoreGlobalCell(value, TempRegister(), NULL);
}
}
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LOperand* key = UseRegisterOrConstant(instr->key());
LOperand* temp = NULL;

26
deps/v8/src/arm64/lithium-arm64.h

@ -102,7 +102,6 @@ class LCodeGen;
V(LoadContextSlot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
V(LoadKeyedExternal) \
V(LoadKeyedFixed) \
@ -151,7 +150,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
V(StoreGlobalCell) \
V(StoreKeyedExternal) \
V(StoreKeyedFixed) \
V(StoreKeyedFixedDouble) \
@ -1731,13 +1729,6 @@ class LLoadFunctionPrototype FINAL : public LTemplateInstruction<1, 1, 1> {
};
class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
@ -2809,23 +2800,6 @@ class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 1> {
};
class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 2> {
public:
LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
temps_[0] = temp1;
temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
LOperand* temp1() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
};
class LSubI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right)

201
deps/v8/src/arm64/lithium-codegen-arm64.cc

@ -9,6 +9,7 @@
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
@ -434,7 +435,6 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
CallFunctionStub stub(isolate(), arity, flags);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
after_push_argument_ = false;
}
@ -449,7 +449,6 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
after_push_argument_ = false;
DCHECK(ToRegister(instr->result()).is(x0));
}
@ -497,7 +496,6 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
after_push_argument_ = false;
DCHECK(ToRegister(instr->result()).is(x0));
}
@ -519,7 +517,7 @@ void LCodeGen::LoadContextFromDeferred(LOperand* context) {
if (context->IsRegister()) {
__ Mov(cp, ToRegister(context));
} else if (context->IsStackSlot()) {
__ Ldr(cp, ToMemOperand(context, kMustUseFramePointer));
__ Ldr(cp, ToMemOperand(context));
} else if (context->IsConstantOperand()) {
HConstant* constant =
chunk_->LookupConstant(LConstantOperand::cast(context));
@ -662,7 +660,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
if (graph()->this_has_uses() && is_sloppy(info_->language_mode()) &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
@ -841,7 +839,7 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateJumpTable() {
Label needs_frame, restore_caller_doubles, call_deopt_entry;
Label needs_frame, call_deopt_entry;
if (jump_table_.length() > 0) {
Comment(";;; -------------------- Jump table --------------------");
@ -863,55 +861,52 @@ bool LCodeGen::GenerateJumpTable() {
// address and add an immediate offset.
__ Mov(entry_offset, entry - base);
// The last entry can fall through into `call_deopt_entry`, avoiding a
// branch.
bool last_entry = (i + 1) == length;
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
if (!needs_frame.is_bound()) {
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
DCHECK(info()->IsStub());
UseScratchRegisterScope temps(masm());
Register stub_marker = temps.AcquireX();
__ Bind(&needs_frame);
__ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
__ Push(lr, fp, cp, stub_marker);
__ Add(fp, __ StackPointer(), 2 * kPointerSize);
if (!last_entry) __ B(&call_deopt_entry);
} else {
// Reuse the existing needs_frame code.
__ B(&needs_frame);
}
} else if (info()->saves_caller_doubles()) {
DCHECK(info()->IsStub());
if (!restore_caller_doubles.is_bound()) {
__ Bind(&restore_caller_doubles);
RestoreCallerDoubles();
if (!last_entry) __ B(&call_deopt_entry);
} else {
// Reuse the existing restore_caller_doubles code.
__ B(&restore_caller_doubles);
}
Comment(";;; call deopt with frame");
// Save lr before Bl, fp will be adjusted in the needs_frame code.
__ Push(lr, fp);
// Reuse the existing needs_frame code.
__ Bl(&needs_frame);
} else {
// There is nothing special to do, so just continue to the second-level
// table.
if (!last_entry) __ B(&call_deopt_entry);
__ Bl(&call_deopt_entry);
}
info()->LogDeoptCallPosition(masm()->pc_offset(),
table_entry->deopt_info.inlining_id);
masm()->CheckConstPool(false, last_entry);
masm()->CheckConstPool(false, false);
}
if (needs_frame.is_linked()) {
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
DCHECK(info()->IsStub());
Comment(";;; needs_frame common code");
UseScratchRegisterScope temps(masm());
Register stub_marker = temps.AcquireX();
__ Bind(&needs_frame);
__ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
__ Push(cp, stub_marker);
__ Add(fp, __ StackPointer(), 2 * kPointerSize);
}
// Generate common code for calling the second-level deopt table.
Register deopt_entry = temps.AcquireX();
__ Bind(&call_deopt_entry);
if (info()->saves_caller_doubles()) {
DCHECK(info()->IsStub());
RestoreCallerDoubles();
}
Register deopt_entry = temps.AcquireX();
__ Mov(deopt_entry, Operand(reinterpret_cast<uint64_t>(base),
RelocInfo::RUNTIME_ENTRY));
__ Add(deopt_entry, deopt_entry, entry_offset);
__ Call(deopt_entry);
__ Br(deopt_entry);
}
// Force constant pool emission at the end of the deopt jump table to make
@ -1057,14 +1052,15 @@ void LCodeGen::DeoptimizeBranch(
__ Bind(&dont_trap);
}
Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
instr->Mnemonic(), deopt_reason);
Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to build frame, or restore caller doubles.
if (branch_type == always &&
frame_is_built_ && !info()->saves_caller_doubles()) {
DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
} else {
Deoptimizer::JumpTableEntry* table_entry =
new (zone()) Deoptimizer::JumpTableEntry(
@ -1151,7 +1147,7 @@ void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
__ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(ne, instr, Deoptimizer::kNotHeapNumber);
DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
}
@ -1276,38 +1272,13 @@ static int64_t ArgumentsOffsetWithoutFrame(int index) {
}
MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
DCHECK(op != NULL);
DCHECK(!op->IsRegister());
DCHECK(!op->IsDoubleRegister());
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
int fp_offset = StackSlotOffset(op->index());
if (op->index() >= 0) {
// Loads and stores have a bigger reach in positive offset than negative.
// When the load or the store can't be done in one instruction via fp
// (too big negative offset), we try to access via jssp (positive offset).
// We can reference a stack slot from jssp only if jssp references the end
// of the stack slots. It's not the case when:
// - stack_mode != kCanUseStackPointer: this is the case when a deferred
// code saved the registers.
// - after_push_argument_: arguments has been pushed for a call.
// - inlined_arguments_: inlined arguments have been pushed once. All the
// remainder of the function cannot trust jssp any longer.
// - saves_caller_doubles: some double registers have been pushed, jssp
// references the end of the double registers and not the end of the
// stack slots.
// Also, if the offset from fp is small enough to make a load/store in
// one instruction, we use a fp access.
if ((stack_mode == kCanUseStackPointer) && !after_push_argument_ &&
!inlined_arguments_ && !is_int9(fp_offset) &&
!info()->saves_caller_doubles()) {
int jssp_offset =
(GetStackSlotCount() - op->index() - 1) * kPointerSize;
return MemOperand(masm()->StackPointer(), jssp_offset);
}
}
return MemOperand(fp, fp_offset);
return MemOperand(fp, StackSlotOffset(op->index()));
} else {
// Retrieve parameter without eager stack-frame relative to the
// stack-pointer.
@ -1711,10 +1682,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
// We push some arguments and they will be pop in an other block. We can't
// trust that jssp references the end of the stack slots until the end of
// the function.
inlined_arguments_ = true;
Register result = ToRegister(instr->result());
if (instr->hydrogen()->from_inlined()) {
@ -2131,8 +2098,6 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
}
generator.AfterCall();
}
after_push_argument_ = false;
}
@ -2152,13 +2117,11 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
__ Call(x10);
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
after_push_argument_ = false;
}
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
CallRuntime(instr->function(), instr->arity(), instr);
after_push_argument_ = false;
}
@ -2184,7 +2147,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
default:
UNREACHABLE();
}
after_push_argument_ = false;
}
@ -2437,15 +2399,17 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
__ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset));
{
UseScratchRegisterScope temps(masm());
Register instance_type = temps.AcquireX();
__ GetMapConstructor(scratch1, map, scratch2, instance_type);
__ Cmp(instance_type, JS_FUNCTION_TYPE);
}
// Objects with a non-function constructor have class 'Object'.
if (String::Equals(class_name, isolate()->factory()->Object_string())) {
__ JumpIfNotObjectType(
scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label);
__ B(ne, true_label);
} else {
__ JumpIfNotObjectType(
scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, false_label);
__ B(ne, false_label);
}
// The constructor function is in scratch1. Get its instance class name.
@ -2664,7 +2628,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
UseScratchRegisterScope temps(masm());
Register temp = temps.AcquireX();
Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ Mov(temp, Operand(Handle<Object>(cell)));
__ Mov(temp, Operand(cell));
__ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
__ Cmp(reg, temp);
} else {
@ -3139,8 +3103,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
__ bind(&map_check);
// Will be patched with the cached map.
Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
__ ldr(scratch, Immediate(Handle<Object>(cell)));
__ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ ldr(scratch, Immediate(cell));
__ ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
__ cmp(map, scratch);
__ b(&cache_miss, ne);
// The address of this instruction is computed relative to the map check
@ -3238,7 +3202,6 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
instr->hydrogen()->formal_parameter_count(),
instr->arity(), instr);
}
after_push_argument_ = false;
}
@ -3402,17 +3365,6 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
__ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
__ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
Deoptimizer::kHole);
}
}
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
@ -3441,7 +3393,8 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode).code();
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@ -3693,7 +3646,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
Handle<Code> ic =
CodeFactory::KeyedLoadICInOptimizedCode(
isolate(), instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).Is(x0));
@ -3750,8 +3705,9 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
}
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(isolate(), NOT_CONTEXTUAL).code();
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
isolate(), NOT_CONTEXTUAL,
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).is(x0));
@ -4787,8 +4743,6 @@ void LCodeGen::DoPushArguments(LPushArguments* instr) {
// The preamble was done by LPreparePushArguments.
args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE);
after_push_argument_ = true;
}
@ -5185,30 +5139,6 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
}
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->value());
Register cell = ToRegister(instr->temp1());
// Load the cell.
__ Mov(cell, Operand(instr->hydrogen()->cell().handle()));
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->RequiresHoleCheck()) {
Register payload = ToRegister(instr->temp2());
__ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr,
Deoptimizer::kHole);
}
// Store the value.
__ Str(value, FieldMemOperand(cell, Cell::kValueOffset));
// Cells are always rescanned, so no write barrier here.
}
void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
Register ext_ptr = ToRegister(instr->elements());
Register key = no_reg;
@ -5381,8 +5311,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@ -5492,7 +5423,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
__ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
Handle<Code> ic =
StoreIC::initialize_stub(isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

21
deps/v8/src/arm64/lithium-codegen-arm64.h

@ -37,16 +37,10 @@ class LCodeGen: public LCodeGenBase {
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple),
after_push_argument_(false),
inlined_arguments_(false) {
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
~LCodeGen() {
DCHECK(!after_push_argument_ || inlined_arguments_);
}
// Simple accessors.
Scope* scope() const { return scope_; }
@ -87,9 +81,7 @@ class LCodeGen: public LCodeGenBase {
Register ToRegister32(LOperand* op) const;
Operand ToOperand(LOperand* op);
Operand ToOperand32(LOperand* op);
enum StackMode { kMustUseFramePointer, kCanUseStackPointer };
MemOperand ToMemOperand(LOperand* op,
StackMode stack_mode = kCanUseStackPointer) const;
MemOperand ToMemOperand(LOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
template <class LI>
@ -366,15 +358,6 @@ class LCodeGen: public LCodeGenBase {
Safepoint::Kind expected_safepoint_kind_;
// This flag is true when we are after a push (but before a call).
// In this situation, jssp no longer references the end of the stack slots so,
// we can only reference a stack slot via fp.
bool after_push_argument_;
// If we have inlined arguments, we are no longer able to use jssp because
// jssp is modified and we never know if we are in a block after or before
// the pop of the arguments (which restores jssp).
bool inlined_arguments_;
int old_position_;
class PushSafepointRegistersScope BASE_EMBEDDED {

155
deps/v8/src/arm64/macro-assembler-arm64.cc

@ -1403,6 +1403,7 @@ void MacroAssembler::LoadRoot(CPURegister destination,
void MacroAssembler::StoreRoot(Register source,
Heap::RootListIndex index) {
DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
Str(source, MemOperand(root, index << kPointerSizeLog2));
}
@ -1549,27 +1550,6 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
}
void MacroAssembler::JumpToHandlerEntry(Register exception,
Register object,
Register state,
Register scratch1,
Register scratch2) {
// Handler expects argument in x0.
DCHECK(exception.Is(x0));
// Compute the handler entry address and jump to it. The handler table is
// a fixed array of (smi-tagged) code offsets.
Ldr(scratch1, FieldMemOperand(object, Code::kHandlerTableOffset));
Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
STATIC_ASSERT(StackHandler::kKindWidth < kPointerSizeLog2);
Lsr(scratch2, state, StackHandler::kKindWidth);
Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag);
Add(scratch1, scratch1, Operand::UntagSmi(scratch2));
Br(scratch1);
}
void MacroAssembler::InNewSpace(Register object,
Condition cond,
Label* branch) {
@ -1582,95 +1562,6 @@ void MacroAssembler::InNewSpace(Register object,
}
void MacroAssembler::Throw(Register value,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// The handler expects the exception in x0.
DCHECK(value.Is(x0));
// Drop the stack pointer to the top of the top handler.
DCHECK(jssp.Is(StackPointer()));
Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
isolate())));
Ldr(jssp, MemOperand(scratch1));
// Restore the next handler.
Pop(scratch2);
Str(scratch2, MemOperand(scratch1));
// Get the code object and state. Restore the context and frame pointer.
Register object = scratch1;
Register state = scratch2;
Pop(object, state, cp, fp);
// If the handler is a JS frame, restore the context to the frame.
// (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
// or cp.
Label not_js_frame;
Cbz(cp, &not_js_frame);
Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
Bind(&not_js_frame);
JumpToHandlerEntry(value, object, state, scratch3, scratch4);
}
void MacroAssembler::ThrowUncatchable(Register value,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// The handler expects the exception in x0.
DCHECK(value.Is(x0));
// Drop the stack pointer to the top of the top stack handler.
DCHECK(jssp.Is(StackPointer()));
Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
isolate())));
Ldr(jssp, MemOperand(scratch1));
// Unwind the handlers until the ENTRY handler is found.
Label fetch_next, check_kind;
B(&check_kind);
Bind(&fetch_next);
Peek(jssp, StackHandlerConstants::kNextOffset);
Bind(&check_kind);
STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
Peek(scratch2, StackHandlerConstants::kStateOffset);
TestAndBranchIfAnySet(scratch2, StackHandler::KindField::kMask, &fetch_next);
// Set the top handler address to next handler past the top ENTRY handler.
Pop(scratch2);
Str(scratch2, MemOperand(scratch1));
// Get the code object and state. Clear the context and frame pointer (0 was
// saved in the handler).
Register object = scratch1;
Register state = scratch2;
Pop(object, state, cp, fp);
JumpToHandlerEntry(value, object, state, scratch3, scratch4);
}
void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@ -3147,46 +3038,26 @@ void MacroAssembler::DebugBreak() {
}
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
int handler_index) {
void MacroAssembler::PushStackHandler() {
DCHECK(jssp.Is(StackPointer()));
// Adjust this code if the asserts don't hold.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// For the JSEntry handler, we must preserve the live registers x0-x4.
// (See JSEntryStub::GenerateBody().)
unsigned state =
StackHandler::IndexField::encode(handler_index) |
StackHandler::KindField::encode(kind);
// Set up the code object and the state for pushing.
Mov(x10, Operand(CodeObject()));
Mov(x11, state);
// Push the frame pointer, context, state, and code object.
if (kind == StackHandler::JS_ENTRY) {
DCHECK(Smi::FromInt(0) == 0);
Push(xzr, xzr, x11, x10);
} else {
Push(fp, cp, x11, x10);
}
// Link the current handler as the next handler.
Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
Ldr(x10, MemOperand(x11));
Push(x10);
// Set this new handler as the current one.
Str(jssp, MemOperand(x11));
}
void MacroAssembler::PopTryHandler() {
void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
Pop(x10);
Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
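Read without the +/- markers, the point of this hunk is easy to miss: the old PushTryHandler pushed a five-word record (next, code, state, context, fp), while the new PushStackHandler asserts kSize == 1 * kPointerSize and only links one word, the previous handler address, which PopStackHandler unlinks again. A minimal standalone sketch of that singly-linked handler chain in plain C++ (the Handler type and top_handler slot are made up for illustration, standing in for Isolate::kHandlerAddress):

#include <cstdio>

// Hypothetical stand-in for the isolate slot that always points at the
// innermost stack handler.
struct Handler { Handler* next; };
static Handler* top_handler = nullptr;

// Push: remember the current top as our "next" link, then become the top.
// Only the link is stored -- no code, state, context or fp words.
void PushStackHandler(Handler* h) {
  h->next = top_handler;
  top_handler = h;
}

// Pop: unlink the innermost record again.
void PopStackHandler() {
  top_handler = top_handler->next;
}

int main() {
  Handler outer, inner;
  PushStackHandler(&outer);
  PushStackHandler(&inner);
  PopStackHandler();  // leaves &outer on top
  std::printf("%s\n", top_handler == &outer ? "outer" : "inner");
}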
@ -3705,6 +3576,20 @@ void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
}
void MacroAssembler::GetMapConstructor(Register result, Register map,
Register temp, Register temp2) {
Label done, loop;
Ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
Bind(&loop);
JumpIfSmi(result, &done);
CompareObjectType(result, temp, temp2, MAP_TYPE);
B(ne, &done);
Ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
B(&loop);
Bind(&done);
}
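GetMapConstructor loops because the constructor-or-back-pointer slot holds either the constructor or, for maps created by transitions, a back pointer to another map; the code keeps following that slot until the loaded value is no longer a map. A rough plain-C++ equivalent of the loop, with an invented Node type standing in for the heap objects and the smi early-exit left out for brevity:

#include <cstdio>

// Hypothetical object model: a Node is either a "map" whose slot points at
// another object, or a terminal object (the constructor).
struct Node {
  bool is_map;
  Node* constructor_or_back_pointer;  // only meaningful when is_map is true
};

// Mirrors the assembly loop: load the slot, and while the loaded value is
// still a map, keep loading its slot; stop at the first non-map.
Node* GetMapConstructor(Node* map) {
  Node* result = map->constructor_or_back_pointer;
  while (result->is_map) {
    result = result->constructor_or_back_pointer;
  }
  return result;
}

int main() {
  Node ctor{false, nullptr};
  Node base_map{true, &ctor};
  Node transitioned_map{true, &base_map};  // back pointer to base_map
  std::printf("%d\n", GetMapConstructor(&transitioned_map) == &ctor);  // prints 1
}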
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
@ -3756,7 +3641,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// Non-instance prototype: fetch prototype from constructor field in initial
// map.
Bind(&non_instance);
Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
GetMapConstructor(result, result, scratch, scratch);
}
// All done.

37
deps/v8/src/arm64/macro-assembler-arm64.h

@ -1078,22 +1078,6 @@ class MacroAssembler : public Assembler {
// This is required for compatibility in architecture-independent code.
inline void jmp(Label* L) { B(L); }
// Passes thrown value to the handler of top of the try handler chain.
// Register value must be x0.
void Throw(Register value,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4);
// Propagates an uncatchable exception to the top of the current JS stack's
// handler chain. Register value must be x0.
void ThrowUncatchable(Register value,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4);
void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
void TailCallStub(CodeStub* stub);
@ -1289,12 +1273,12 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// Exception handling
// Push a new try handler and link into try handler chain.
void PushTryHandler(StackHandler::Kind kind, int handler_index);
// Push a new stack handler and link into stack handler chain.
void PushStackHandler();
// Unlink the stack handler on top of the stack from the try handler chain.
// Unlink the stack handler on top of the stack from the stack handler chain.
// Must preserve the result register.
void PopTryHandler();
void PopStackHandler();
// ---------------------------------------------------------------------------
@ -1378,6 +1362,11 @@ class MacroAssembler : public Assembler {
kDontMissOnBoundFunction
};
// Machine code version of Map::GetConstructor().
// |temp| holds |result|'s map when done, and |temp2| its instance type.
void GetMapConstructor(Register result, Register map, Register temp,
Register temp2);
void TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
@ -2070,14 +2059,6 @@ class MacroAssembler : public Assembler {
// have mixed types. The format string (x0) should not be included.
void CallPrintf(int arg_count = 0, const CPURegister * args = NULL);
// Helper for throwing exceptions. Compute a handler address and jump to
// it. See the implementation for register usage.
void JumpToHandlerEntry(Register exception,
Register object,
Register state,
Register scratch1,
Register scratch2);
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object,
Condition cond, // eq for new space, ne otherwise.

2
deps/v8/src/array.js

@ -240,7 +240,7 @@ function SparseMove(array, start_i, del_count, len, num_additional_args) {
// Move data to new array.
var new_array = new InternalArray(
// Clamp array length to 2^32-1 to avoid early RangeError.
MathMin(len - del_count + num_additional_args, 0xffffffff));
$min(len - del_count + num_additional_args, 0xffffffff));
var big_indices;
var indices = %GetArrayKeys(array, len);
if (IS_NUMBER(indices)) {

8
deps/v8/src/arraybuffer.js

@ -39,16 +39,16 @@ function ArrayBufferSlice(start, end) {
var first;
var byte_length = %_ArrayBufferGetByteLength(this);
if (relativeStart < 0) {
first = MathMax(byte_length + relativeStart, 0);
first = $max(byte_length + relativeStart, 0);
} else {
first = MathMin(relativeStart, byte_length);
first = $min(relativeStart, byte_length);
}
var relativeEnd = IS_UNDEFINED(end) ? byte_length : end;
var fin;
if (relativeEnd < 0) {
fin = MathMax(byte_length + relativeEnd, 0);
fin = $max(byte_length + relativeEnd, 0);
} else {
fin = MathMin(relativeEnd, byte_length);
fin = $min(relativeEnd, byte_length);
}
if (fin < first) {

62
deps/v8/src/assembler.cc

@ -54,7 +54,7 @@
#include "src/regexp-macro-assembler.h"
#include "src/regexp-stack.h"
#include "src/runtime/runtime.h"
#include "src/serialize.h"
#include "src/snapshot/serialize.h"
#include "src/token.h"
#if V8_TARGET_ARCH_IA32
@ -292,11 +292,6 @@ int Label::pos() const {
// (Bits 6..31 of pc delta, with leading zeroes
// dropped, and last non-zero chunk tagged with 1.)
#ifdef DEBUG
const int kMaxStandardNonCompactModes = 14;
#endif
const int kTagBits = 2;
const int kTagMask = (1 << kTagBits) - 1;
const int kExtraTagBits = 4;
@ -452,8 +447,6 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
#endif
DCHECK(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
DCHECK(rinfo->pc() - last_pc_ >= 0);
DCHECK(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM
<= kMaxStandardNonCompactModes);
// Use unsigned delta-encoding for pc.
uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
@ -465,7 +458,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
DCHECK(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
} else if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
// Use signed delta-encoding for id.
DCHECK(static_cast<int>(rinfo->data()) == rinfo->data());
DCHECK_EQ(static_cast<int>(rinfo->data()), rinfo->data());
int id_delta = static_cast<int>(rinfo->data()) - last_id_;
// Check if delta is small enough to fit in a tagged byte.
if (is_intn(id_delta, kSmallDataBits)) {
@ -483,12 +476,12 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteTaggedData(rinfo->data(), kDeoptReasonTag);
} else if (RelocInfo::IsPosition(rmode)) {
// Use signed delta-encoding for position.
DCHECK(static_cast<int>(rinfo->data()) == rinfo->data());
DCHECK_EQ(static_cast<int>(rinfo->data()), rinfo->data());
int pos_delta = static_cast<int>(rinfo->data()) - last_position_;
if (rmode == RelocInfo::STATEMENT_POSITION) {
WritePosition(pc_delta, pos_delta, rmode);
} else {
DCHECK(rmode == RelocInfo::POSITION);
DCHECK_EQ(rmode, RelocInfo::POSITION);
if (pc_delta != 0 || last_mode_ != RelocInfo::POSITION) {
FlushPosition();
next_position_candidate_pc_delta_ = pc_delta;
@ -511,10 +504,14 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
: kVeneerPoolTag);
} else {
DCHECK(rmode > RelocInfo::LAST_COMPACT_ENUM);
int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
DCHECK(rmode <= RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM);
STATIC_ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM -
RelocInfo::LAST_COMPACT_ENUM <=
kPoolExtraTag);
int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM - 1;
// For all other modes we simply use the mode as the extra tag.
// None of these modes need a data component.
DCHECK(saved_mode < kPoolExtraTag);
DCHECK(0 <= saved_mode && saved_mode < kPoolExtraTag);
WriteExtraTaggedPC(pc_delta, saved_mode);
}
last_pc_ = rinfo->pc();
@ -721,7 +718,7 @@ void RelocIterator::next() {
Advance(kIntSize);
} else {
AdvanceReadPC();
int rmode = extra_tag + RelocInfo::LAST_COMPACT_ENUM;
int rmode = extra_tag + RelocInfo::LAST_COMPACT_ENUM + 1;
if (SetMode(static_cast<RelocInfo::Mode>(rmode))) return;
}
}
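The writer/reader pair above shifts the extra-tag mapping by one: on the write side saved_mode becomes rmode - LAST_COMPACT_ENUM - 1, and the reader adds the 1 back, so the first non-compact mode maps to tag 0 and INTERNAL_REFERENCE_ENCODED still fits under kPoolExtraTag. A tiny standalone check of that round trip; the numeric values below are invented, only the arithmetic matters:

#include <cassert>

// Invented numeric stand-ins; only the relative ordering is meaningful here.
const int LAST_COMPACT_ENUM = 5;              // last mode with a compact encoding
const int LAST_STANDARD_NONCOMPACT_ENUM = 9;  // e.g. INTERNAL_REFERENCE_ENCODED
const int kPoolExtraTag = 4;                  // spare extra-tag values available

// Writer side (as in the hunk above): first non-compact mode maps to tag 0.
int EncodeExtraTag(int rmode) { return rmode - LAST_COMPACT_ENUM - 1; }

// Reader side: add the 1 back when reconstructing the mode.
int DecodeExtraTag(int extra_tag) { return extra_tag + LAST_COMPACT_ENUM + 1; }

int main() {
  for (int rmode = LAST_COMPACT_ENUM + 1;
       rmode <= LAST_STANDARD_NONCOMPACT_ENUM; ++rmode) {
    int tag = EncodeExtraTag(rmode);
    assert(0 <= tag && tag < kPoolExtraTag);  // fits the spare tag space
    assert(DecodeExtraTag(tag) == rmode);     // round trip is exact
  }
}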
@ -832,6 +829,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "external reference";
case RelocInfo::INTERNAL_REFERENCE:
return "internal reference";
case RelocInfo::INTERNAL_REFERENCE_ENCODED:
return "encoded internal reference";
case RelocInfo::DEOPT_REASON:
return "deopt reason";
case RelocInfo::CONST_POOL:
@ -861,8 +860,10 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
os << " (" << Brief(target_object()) << ")";
} else if (rmode_ == EXTERNAL_REFERENCE) {
ExternalReferenceEncoder ref_encoder(isolate);
os << " (" << ref_encoder.NameOfAddress(target_reference()) << ") ("
<< static_cast<const void*>(target_reference()) << ")";
os << " ("
<< ref_encoder.NameOfAddress(isolate, target_external_reference())
<< ") (" << static_cast<const void*>(target_external_reference())
<< ")";
} else if (IsCodeTarget(rmode_)) {
Code* code = Code::GetCodeFromTargetAddress(target_address());
os << " (" << Code::Kind2String(code->kind()) << ") ("
@ -910,13 +911,21 @@ void RelocInfo::Verify(Isolate* isolate) {
CHECK(code->address() == HeapObject::cast(found)->address());
break;
}
case INTERNAL_REFERENCE:
case INTERNAL_REFERENCE_ENCODED: {
Address target = target_internal_reference();
Address pc = target_internal_reference_address();
Code* code = Code::cast(isolate->FindCodeObject(pc));
CHECK(target >= code->instruction_start());
CHECK(target <= code->instruction_end());
break;
}
case RUNTIME_ENTRY:
case JS_RETURN:
case COMMENT:
case POSITION:
case STATEMENT_POSITION:
case EXTERNAL_REFERENCE:
case INTERNAL_REFERENCE:
case DEOPT_REASON:
case CONST_POOL:
case VENEER_POOL:
@ -1223,8 +1232,7 @@ ExternalReference ExternalReference::old_pointer_space_allocation_limit_address(
ExternalReference ExternalReference::old_data_space_allocation_top_address(
Isolate* isolate) {
return ExternalReference(
isolate->heap()->OldDataSpaceAllocationTopAddress());
return ExternalReference(isolate->heap()->OldDataSpaceAllocationTopAddress());
}
@ -1265,18 +1273,6 @@ ExternalReference ExternalReference::address_of_pending_message_obj(
}
ExternalReference ExternalReference::address_of_has_pending_message(
Isolate* isolate) {
return ExternalReference(isolate->has_pending_message_address());
}
ExternalReference ExternalReference::address_of_pending_message_script(
Isolate* isolate) {
return ExternalReference(isolate->pending_message_script_address());
}
ExternalReference ExternalReference::address_of_min_int() {
return ExternalReference(reinterpret_cast<void*>(&double_constants.min_int));
}
@ -1656,9 +1652,11 @@ bool PositionsRecorder::WriteRecordedPositions() {
// Platform specific but identical code for all the platforms.
void Assembler::RecordDeoptReason(const int reason, const int raw_position) {
void Assembler::RecordDeoptReason(const int reason,
const SourcePosition position) {
if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling()) {
EnsureSpace ensure_space(this);
int raw_position = position.IsUnknown() ? 0 : position.raw();
RecordRelocInfo(RelocInfo::POSITION, raw_position);
RecordRelocInfo(RelocInfo::DEOPT_REASON, reason);
}

31
deps/v8/src/assembler.h

@ -379,6 +379,9 @@ class RelocInfo {
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
// Encoded internal reference, used only on MIPS, MIPS64 and PPC.
INTERNAL_REFERENCE_ENCODED,
// Marks constant and veneer pools. Only used on ARM and ARM64.
// They use a custom noncompact encoding.
CONST_POOL,
@ -394,10 +397,6 @@ class RelocInfo {
CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explicitly by
// code aging.
// Encoded internal reference, used only on MIPS and MIPS64.
// Re-uses previous ARM-only encoding, to fit in RealRelocMode space.
INTERNAL_REFERENCE_ENCODED = CONST_POOL,
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
@ -406,7 +405,7 @@ class RelocInfo {
LAST_GCED_ENUM = CELL,
// Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding.
LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID,
LAST_STANDARD_NONCOMPACT_ENUM = INTERNAL_REFERENCE
LAST_STANDARD_NONCOMPACT_ENUM = INTERNAL_REFERENCE_ENCODED
};
RelocInfo() {}
@ -476,6 +475,9 @@ class RelocInfo {
static inline bool IsDebugBreakSlot(Mode mode) {
return mode == DEBUG_BREAK_SLOT;
}
static inline bool IsDebuggerStatement(Mode mode) {
return mode == DEBUG_BREAK;
}
static inline bool IsNone(Mode mode) {
return mode == NONE32 || mode == NONE64;
}
@ -575,9 +577,17 @@ class RelocInfo {
// place, ready to be patched with the target.
INLINE(int target_address_size());
// Read/modify the reference in the instruction this relocation
// applies to; can only be called if rmode_ is external_reference
INLINE(Address target_reference());
// Read the reference in the instruction this relocation
// applies to; can only be called if rmode_ is EXTERNAL_REFERENCE.
INLINE(Address target_external_reference());
// Read the reference in the instruction this relocation
// applies to; can only be called if rmode_ is INTERNAL_REFERENCE.
INLINE(Address target_internal_reference());
// Return the reference address this relocation applies to;
// can only be called if rmode_ is INTERNAL_REFERENCE.
INLINE(Address target_internal_reference_address());
// Read/modify the address of a call instruction. This is used to relocate
// the break points where straight-line code is patched with a call
@ -595,9 +605,6 @@ class RelocInfo {
template<typename StaticVisitor> inline void Visit(Heap* heap);
inline void Visit(Isolate* isolate, ObjectVisitor* v);
// Patch the code with some other code.
void PatchCode(byte* instructions, int instruction_count);
// Patch the code with a call.
void PatchCodeWithCall(Address target, int guard_bytes);
@ -951,8 +958,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference scheduled_exception_address(Isolate* isolate);
static ExternalReference address_of_pending_message_obj(Isolate* isolate);
static ExternalReference address_of_has_pending_message(Isolate* isolate);
static ExternalReference address_of_pending_message_script(Isolate* isolate);
// Static variables containing common double constants.
static ExternalReference address_of_min_int();

12
deps/v8/src/ast-numbering.cc

@ -6,7 +6,6 @@
#include "src/ast.h"
#include "src/ast-numbering.h"
#include "src/compiler.h"
#include "src/scopes.h"
namespace v8 {
@ -18,6 +17,8 @@ class AstNumberingVisitor FINAL : public AstVisitor {
explicit AstNumberingVisitor(Isolate* isolate, Zone* zone)
: AstVisitor(),
next_id_(BailoutId::FirstUsable().ToInt()),
properties_(zone),
ic_slot_cache_(FLAG_vector_ics ? 4 : 0),
dont_optimize_reason_(kNoReason) {
InitializeAstVisitor(isolate, zone);
}
@ -60,14 +61,15 @@ class AstNumberingVisitor FINAL : public AstVisitor {
template <typename Node>
void ReserveFeedbackSlots(Node* node) {
FeedbackVectorRequirements reqs =
node->ComputeFeedbackRequirements(isolate());
node->ComputeFeedbackRequirements(isolate(), &ic_slot_cache_);
if (reqs.slots() > 0) {
node->SetFirstFeedbackSlot(FeedbackVectorSlot(properties_.slots()));
properties_.increase_slots(reqs.slots());
}
if (reqs.ic_slots() > 0) {
int ic_slots = properties_.ic_slots();
node->SetFirstFeedbackICSlot(FeedbackVectorICSlot(ic_slots));
node->SetFirstFeedbackICSlot(FeedbackVectorICSlot(ic_slots),
&ic_slot_cache_);
properties_.increase_ic_slots(reqs.ic_slots());
if (FLAG_vector_ics) {
for (int i = 0; i < reqs.ic_slots(); i++) {
@ -81,6 +83,9 @@ class AstNumberingVisitor FINAL : public AstVisitor {
int next_id_;
AstProperties properties_;
// The slot cache allows us to reuse certain vector IC slots. It's only used
// if FLAG_vector_ics is true.
ICSlotCache ic_slot_cache_;
BailoutReason dont_optimize_reason_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@ -186,7 +191,6 @@ void AstNumberingVisitor::VisitImportDeclaration(ImportDeclaration* node) {
IncrementNodeCount();
DisableOptimization(kImportDeclaration);
VisitVariableProxy(node->proxy());
Visit(node->module());
}

66
deps/v8/src/ast-value-factory.h

@ -230,37 +230,41 @@ class AstValue : public ZoneObject {
// For generating constants.
#define STRING_CONSTANTS(F) \
F(anonymous_function, "(anonymous function)") \
F(arguments, "arguments") \
F(constructor, "constructor") \
F(done, "done") \
F(dot, ".") \
F(dot_for, ".for") \
F(dot_generator, ".generator") \
F(dot_generator_object, ".generator_object") \
F(dot_iterator, ".iterator") \
F(dot_module, ".module") \
F(dot_result, ".result") \
F(empty, "") \
F(eval, "eval") \
F(get_template_callsite, "GetTemplateCallSite") \
F(initialize_const_global, "initializeConstGlobal") \
F(initialize_var_global, "initializeVarGlobal") \
F(is_construct_call, "_IsConstructCall") \
F(let, "let") \
F(make_reference_error, "MakeReferenceErrorEmbedded") \
F(make_syntax_error, "MakeSyntaxErrorEmbedded") \
F(make_type_error, "MakeTypeErrorEmbedded") \
F(native, "native") \
F(new_target, "new.target") \
F(next, "next") \
F(proto, "__proto__") \
F(prototype, "prototype") \
F(this, "this") \
F(use_asm, "use asm") \
F(use_strong, "use strong") \
F(use_strict, "use strict") \
#define STRING_CONSTANTS(F) \
F(anonymous_function, "(anonymous function)") \
F(arguments, "arguments") \
F(constructor, "constructor") \
F(default, "default") \
F(done, "done") \
F(dot, ".") \
F(dot_for, ".for") \
F(dot_generator, ".generator") \
F(dot_generator_object, ".generator_object") \
F(dot_iterator, ".iterator") \
F(dot_module, ".module") \
F(dot_result, ".result") \
F(empty, "") \
F(eval, "eval") \
F(get_template_callsite, "GetTemplateCallSite") \
F(initialize_const_global, "initializeConstGlobal") \
F(initialize_var_global, "initializeVarGlobal") \
F(is_construct_call, "_IsConstructCall") \
F(is_spec_object, "_IsSpecObject") \
F(let, "let") \
F(make_reference_error, "MakeReferenceErrorEmbedded") \
F(make_syntax_error, "MakeSyntaxErrorEmbedded") \
F(make_type_error, "MakeTypeErrorEmbedded") \
F(native, "native") \
F(new_target, "new.target") \
F(next, "next") \
F(proto, "__proto__") \
F(prototype, "prototype") \
F(this, "this") \
F(throw_iterator_result_not_an_object, "ThrowIteratorResultNotAnObject") \
F(to_string, "ToString") \
F(use_asm, "use asm") \
F(use_strong, "use strong") \
F(use_strict, "use strict") \
F(value, "value")
#define OTHER_CONSTANTS(F) \

95
deps/v8/src/ast.cc

@ -59,24 +59,29 @@ bool Expression::IsUndefinedLiteral(Isolate* isolate) const {
}
VariableProxy::VariableProxy(Zone* zone, Variable* var, int position)
: Expression(zone, position),
VariableProxy::VariableProxy(Zone* zone, Variable* var, int start_position,
int end_position)
: Expression(zone, start_position),
bit_field_(IsThisField::encode(var->is_this()) |
IsAssignedField::encode(false) |
IsResolvedField::encode(false)),
variable_feedback_slot_(FeedbackVectorICSlot::Invalid()),
raw_name_(var->raw_name()) {
raw_name_(var->raw_name()),
end_position_(end_position) {
BindTo(var);
}
VariableProxy::VariableProxy(Zone* zone, const AstRawString* name, bool is_this,
int position)
: Expression(zone, position),
bit_field_(IsThisField::encode(is_this) | IsAssignedField::encode(false) |
VariableProxy::VariableProxy(Zone* zone, const AstRawString* name,
Variable::Kind variable_kind, int start_position,
int end_position)
: Expression(zone, start_position),
bit_field_(IsThisField::encode(variable_kind == Variable::THIS) |
IsAssignedField::encode(false) |
IsResolvedField::encode(false)),
variable_feedback_slot_(FeedbackVectorICSlot::Invalid()),
raw_name_(name) {}
raw_name_(name),
end_position_(end_position) {}
void VariableProxy::BindTo(Variable* var) {
@ -87,6 +92,35 @@ void VariableProxy::BindTo(Variable* var) {
}
void VariableProxy::SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
ICSlotCache* cache) {
variable_feedback_slot_ = slot;
if (var()->IsUnallocated()) {
cache->Add(VariableICSlotPair(var(), slot));
}
}
FeedbackVectorRequirements VariableProxy::ComputeFeedbackRequirements(
Isolate* isolate, const ICSlotCache* cache) {
if (UsesVariableFeedbackSlot()) {
// VariableProxies that point to the same Variable within a function can
// make their loads from the same IC slot.
if (var()->IsUnallocated()) {
for (int i = 0; i < cache->length(); i++) {
VariableICSlotPair& pair = cache->at(i);
if (pair.variable() == var()) {
variable_feedback_slot_ = pair.slot();
return FeedbackVectorRequirements(0, 0);
}
}
}
return FeedbackVectorRequirements(0, 1);
}
return FeedbackVectorRequirements(0, 0);
}
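The new ICSlotCache lets every VariableProxy for the same unallocated Variable inside a function share one load IC slot: the first proxy reserves a slot and SetFirstFeedbackICSlot records the (variable, slot) pair, and later proxies find the pair and report zero requirements. A compact sketch of that sharing, with a std::vector of pairs standing in for the zone-allocated list (the names here are illustrative, not the V8 types):

#include <cstdio>
#include <utility>
#include <vector>

struct Variable { const char* name; };

// Hypothetical cache: pairs of (variable, already-assigned IC slot index).
using ICSlotCache = std::vector<std::pair<const Variable*, int>>;

// Returns how many new IC slots this use of `var` needs; 0 if an earlier
// proxy for the same variable already reserved one.
int ComputeFeedbackRequirements(const Variable* var, const ICSlotCache& cache,
                                int* reused_slot) {
  for (const auto& pair : cache) {
    if (pair.first == var) {       // same Variable seen earlier in this function
      *reused_slot = pair.second;  // reuse its slot instead of asking for a new one
      return 0;
    }
  }
  return 1;  // first use: the numbering pass will allocate a fresh slot
}

int main() {
  Variable x{"x"};
  ICSlotCache cache;
  int slot = -1;
  std::printf("%d\n", ComputeFeedbackRequirements(&x, cache, &slot));  // 1
  cache.push_back({&x, 0});  // numbering pass records the assigned slot 0
  std::printf("%d\n", ComputeFeedbackRequirements(&x, cache, &slot));  // 0
  std::printf("%d\n", slot);                                           // 0
}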
Assignment::Assignment(Zone* zone, Token::Value op, Expression* target,
Expression* value, int pos)
: Expression(zone, pos),
@ -562,7 +596,8 @@ bool Call::IsUsingCallFeedbackSlot(Isolate* isolate) const {
}
FeedbackVectorRequirements Call::ComputeFeedbackRequirements(Isolate* isolate) {
FeedbackVectorRequirements Call::ComputeFeedbackRequirements(
Isolate* isolate, const ICSlotCache* cache) {
int ic_slots = IsUsingCallFeedbackICSlot(isolate) ? 1 : 0;
int slots = IsUsingCallFeedbackSlot(isolate) ? 1 : 0;
// A Call uses either a slot or an IC slot.
@ -590,48 +625,6 @@ Call::CallType Call::GetCallType(Isolate* isolate) const {
}
bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
LookupIterator* it) {
target_ = Handle<JSFunction>::null();
cell_ = Handle<Cell>::null();
DCHECK(it->IsFound() && it->GetHolder<JSObject>().is_identical_to(global));
cell_ = it->GetPropertyCell();
if (cell_->value()->IsJSFunction()) {
Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
// If the function is in new space we assume it's more likely to
// change and thus prefer the general IC code.
if (!it->isolate()->heap()->InNewSpace(*candidate)) {
target_ = candidate;
return true;
}
}
return false;
}
void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
FeedbackVectorSlot allocation_site_feedback_slot =
FLAG_pretenuring_call_new ? AllocationSiteFeedbackSlot()
: CallNewFeedbackSlot();
allocation_site_ =
oracle->GetCallNewAllocationSite(allocation_site_feedback_slot);
is_monomorphic_ = oracle->CallNewIsMonomorphic(CallNewFeedbackSlot());
if (is_monomorphic_) {
target_ = oracle->GetCallNewTarget(CallNewFeedbackSlot());
}
}
void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
DCHECK(!is_computed_name());
TypeFeedbackId id = key()->AsLiteral()->LiteralFeedbackId();
SmallMapList maps;
oracle->CollectReceiverTypes(id, &maps);
receiver_type_ = maps.length() == 1 ? maps.at(0)
: Handle<Map>::null();
}
// ----------------------------------------------------------------------------
// Implementation of AstVisitor

172
deps/v8/src/ast.h

@ -165,11 +165,30 @@ class FeedbackVectorRequirements {
};
class VariableICSlotPair FINAL {
public:
VariableICSlotPair(Variable* variable, FeedbackVectorICSlot slot)
: variable_(variable), slot_(slot) {}
VariableICSlotPair()
: variable_(NULL), slot_(FeedbackVectorICSlot::Invalid()) {}
Variable* variable() const { return variable_; }
FeedbackVectorICSlot slot() const { return slot_; }
private:
Variable* variable_;
FeedbackVectorICSlot slot_;
};
typedef List<VariableICSlotPair> ICSlotCache;
class AstProperties FINAL BASE_EMBEDDED {
public:
class Flags : public EnumSet<AstPropertiesFlag, int> {};
AstProperties() : node_count_(0) {}
explicit AstProperties(Zone* zone) : node_count_(0), spec_(zone) {}
Flags* flags() { return &flags_; }
int node_count() { return node_count_; }
@ -181,12 +200,12 @@ class AstProperties FINAL BASE_EMBEDDED {
int ic_slots() const { return spec_.ic_slots(); }
void increase_ic_slots(int count) { spec_.increase_ic_slots(count); }
void SetKind(int ic_slot, Code::Kind kind) { spec_.SetKind(ic_slot, kind); }
const FeedbackVectorSpec& get_spec() const { return spec_; }
const ZoneFeedbackVectorSpec* get_spec() const { return &spec_; }
private:
Flags flags_;
int node_count_;
FeedbackVectorSpec spec_;
ZoneFeedbackVectorSpec spec_;
};
@ -229,11 +248,12 @@ class AstNode: public ZoneObject {
// not really nice, but multiple inheritance would introduce yet another
// vtable entry per node, something we don't want for space reasons.
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
Isolate* isolate) {
Isolate* isolate, const ICSlotCache* cache) {
return FeedbackVectorRequirements(0, 0);
}
virtual void SetFirstFeedbackSlot(FeedbackVectorSlot slot) { UNREACHABLE(); }
virtual void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) {
virtual void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
ICSlotCache* cache) {
UNREACHABLE();
}
// Each ICSlot stores a kind of IC which the participating node should know.
@ -609,23 +629,27 @@ class ImportDeclaration FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(ImportDeclaration)
Module* module() const { return module_; }
const AstRawString* import_name() const { return import_name_; }
const AstRawString* module_specifier() const { return module_specifier_; }
void set_module_specifier(const AstRawString* module_specifier) {
DCHECK(module_specifier_ == NULL);
module_specifier_ = module_specifier;
}
InitializationFlag initialization() const OVERRIDE {
return kCreatedInitialized;
return kNeedsInitialization;
}
protected:
ImportDeclaration(Zone* zone,
VariableProxy* proxy,
Module* module,
Scope* scope,
int pos)
: Declaration(zone, proxy, LET, scope, pos),
module_(module) {
}
ImportDeclaration(Zone* zone, VariableProxy* proxy,
const AstRawString* import_name,
const AstRawString* module_specifier, Scope* scope, int pos)
: Declaration(zone, proxy, IMPORT, scope, pos),
import_name_(import_name),
module_specifier_(module_specifier) {}
private:
Module* module_;
const AstRawString* import_name_;
const AstRawString* module_specifier_;
};
@ -880,7 +904,7 @@ class ForInStatement FINAL : public ForEachStatement {
// Type feedback information.
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
Isolate* isolate) OVERRIDE {
Isolate* isolate, const ICSlotCache* cache) OVERRIDE {
return FeedbackVectorRequirements(1, 0);
}
void SetFirstFeedbackSlot(FeedbackVectorSlot slot) OVERRIDE {
@ -942,12 +966,12 @@ class ForOfStatement FINAL : public ForEachStatement {
return subject();
}
// var iterator = subject[Symbol.iterator]();
// iterator = subject[Symbol.iterator]()
Expression* assign_iterator() const {
return assign_iterator_;
}
// var result = iterator.next();
// result = iterator.next() // with type check
Expression* next_result() const {
return next_result_;
}
@ -1414,7 +1438,6 @@ class ObjectLiteralProperty FINAL : public ZoneObject {
Kind kind() { return kind_; }
// Type feedback information.
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsMonomorphic() { return !receiver_type_.is_null(); }
Handle<Map> GetReceiverType() { return receiver_type_; }
@ -1426,6 +1449,8 @@ class ObjectLiteralProperty FINAL : public ZoneObject {
bool is_static() const { return is_static_; }
bool is_computed_name() const { return is_computed_name_; }
void set_receiver_type(Handle<Map> map) { receiver_type_ = map; }
protected:
friend class AstNodeFactory;
@ -1614,9 +1639,7 @@ class VariableProxy FINAL : public Expression {
public:
DECLARE_NODE_TYPE(VariableProxy)
bool IsValidReferenceExpression() const OVERRIDE {
return !is_resolved() || var()->IsValidReference();
}
bool IsValidReferenceExpression() const OVERRIDE { return !is_this(); }
bool IsArguments() const { return is_resolved() && var()->is_arguments(); }
@ -1647,6 +1670,8 @@ class VariableProxy FINAL : public Expression {
bit_field_ = IsResolvedField::update(bit_field_, true);
}
int end_position() const { return end_position_; }
// Bind this proxy to the variable var.
void BindTo(Variable* var);
@ -1655,13 +1680,10 @@ class VariableProxy FINAL : public Expression {
}
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
Isolate* isolate) OVERRIDE {
return FeedbackVectorRequirements(0, UsesVariableFeedbackSlot() ? 1 : 0);
}
Isolate* isolate, const ICSlotCache* cache) OVERRIDE;
void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
variable_feedback_slot_ = slot;
}
void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
ICSlotCache* cache) OVERRIDE;
Code::Kind FeedbackICSlotKind(int index) OVERRIDE { return Code::LOAD_IC; }
FeedbackVectorICSlot VariableFeedbackSlot() {
DCHECK(!UsesVariableFeedbackSlot() || !variable_feedback_slot_.IsInvalid());
@ -1669,10 +1691,12 @@ class VariableProxy FINAL : public Expression {
}
protected:
VariableProxy(Zone* zone, Variable* var, int position);
VariableProxy(Zone* zone, Variable* var, int start_position,
int end_position);
VariableProxy(Zone* zone, const AstRawString* name, bool is_this,
int position);
VariableProxy(Zone* zone, const AstRawString* name,
Variable::Kind variable_kind, int start_position,
int end_position);
class IsThisField : public BitField8<bool, 0, 1> {};
class IsAssignedField : public BitField8<bool, 1, 1> {};
@ -1686,6 +1710,10 @@ class VariableProxy FINAL : public Expression {
const AstRawString* raw_name_; // if !is_resolved_
Variable* var_; // if is_resolved_
};
// Position is stored in the AstNode superclass, but VariableProxy needs to
// know its end position too (for error messages). It cannot be inferred from
// the variable name length because it can contain escapes.
int end_position_;
};
@ -1738,10 +1766,11 @@ class Property FINAL : public Expression {
}
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
Isolate* isolate) OVERRIDE {
Isolate* isolate, const ICSlotCache* cache) OVERRIDE {
return FeedbackVectorRequirements(0, FLAG_vector_ics ? 1 : 0);
}
void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
ICSlotCache* cache) OVERRIDE {
property_feedback_slot_ = slot;
}
Code::Kind FeedbackICSlotKind(int index) OVERRIDE {
@ -1788,8 +1817,9 @@ class Call FINAL : public Expression {
// Type feedback information.
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
Isolate* isolate) OVERRIDE;
void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
Isolate* isolate, const ICSlotCache* cache) OVERRIDE;
void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
ICSlotCache* cache) OVERRIDE {
ic_slot_or_slot_ = slot.ToInt();
}
void SetFirstFeedbackSlot(FeedbackVectorSlot slot) OVERRIDE {
@ -1832,15 +1862,16 @@ class Call FINAL : public Expression {
Handle<JSFunction> target() { return target_; }
Handle<Cell> cell() { return cell_; }
Handle<AllocationSite> allocation_site() { return allocation_site_; }
void SetKnownGlobalTarget(Handle<JSFunction> target) {
target_ = target;
set_is_uninitialized(false);
}
void set_target(Handle<JSFunction> target) { target_ = target; }
void set_allocation_site(Handle<AllocationSite> site) {
allocation_site_ = site;
}
bool ComputeGlobalTarget(Handle<GlobalObject> global, LookupIterator* it);
static int num_ids() { return parent_num_ids() + 2; }
BailoutId ReturnId() const { return BailoutId(local_id(0)); }
@ -1895,7 +1926,6 @@ class Call FINAL : public Expression {
Expression* expression_;
ZoneList<Expression*>* arguments_;
Handle<JSFunction> target_;
Handle<Cell> cell_;
Handle<AllocationSite> allocation_site_;
class IsUninitializedField : public BitField8<bool, 0, 1> {};
uint8_t bit_field_;
@ -1911,7 +1941,7 @@ class CallNew FINAL : public Expression {
// Type feedback information.
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
Isolate* isolate) OVERRIDE {
Isolate* isolate, const ICSlotCache* cache) OVERRIDE {
return FeedbackVectorRequirements(FLAG_pretenuring_call_new ? 2 : 1, 0);
}
void SetFirstFeedbackSlot(FeedbackVectorSlot slot) OVERRIDE {
@ -1927,7 +1957,6 @@ class CallNew FINAL : public Expression {
return CallNewFeedbackSlot().next();
}
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsMonomorphic() OVERRIDE { return is_monomorphic_; }
Handle<JSFunction> target() const { return target_; }
Handle<AllocationSite> allocation_site() const {
@ -1938,6 +1967,16 @@ class CallNew FINAL : public Expression {
static int feedback_slots() { return 1; }
BailoutId ReturnId() const { return BailoutId(local_id(0)); }
void set_allocation_site(Handle<AllocationSite> site) {
allocation_site_ = site;
}
void set_is_monomorphic(bool monomorphic) { is_monomorphic_ = monomorphic; }
void set_target(Handle<JSFunction> target) { target_ = target; }
void SetKnownGlobalTarget(Handle<JSFunction> target) {
target_ = target;
is_monomorphic_ = true;
}
protected:
CallNew(Zone* zone, Expression* expression, ZoneList<Expression*>* arguments,
int pos)
@ -1980,10 +2019,11 @@ class CallRuntime FINAL : public Expression {
return FLAG_vector_ics && is_jsruntime();
}
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
Isolate* isolate) OVERRIDE {
Isolate* isolate, const ICSlotCache* cache) OVERRIDE {
return FeedbackVectorRequirements(0, HasCallRuntimeFeedbackSlot() ? 1 : 0);
}
void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
ICSlotCache* cache) OVERRIDE {
callruntime_feedback_slot_ = slot;
}
Code::Kind FeedbackICSlotKind(int index) OVERRIDE { return Code::LOAD_IC; }
@ -2072,12 +2112,11 @@ class BinaryOperation FINAL : public Expression {
return TypeFeedbackId(local_id(1));
}
Maybe<int> fixed_right_arg() const {
return has_fixed_right_arg_ ? Maybe<int>(fixed_right_arg_value_)
: Maybe<int>();
return has_fixed_right_arg_ ? Just(fixed_right_arg_value_) : Nothing<int>();
}
void set_fixed_right_arg(Maybe<int> arg) {
has_fixed_right_arg_ = arg.has_value;
if (arg.has_value) fixed_right_arg_value_ = arg.value;
has_fixed_right_arg_ = arg.IsJust();
if (arg.IsJust()) fixed_right_arg_value_ = arg.FromJust();
}
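This hunk is adapting to the reworked Maybe<T> surface: the old struct exposed has_value and value fields, while the replacement is built with Just()/Nothing<T>() and queried with IsJust()/FromJust(). A toy imitation of that interface, just to show the shape of the calls rather than V8's actual implementation:

#include <cassert>

// Toy stand-in for the Maybe<T> API used in the hunk above.
template <typename T>
class Maybe {
 public:
  Maybe() : has_(false), value_() {}
  explicit Maybe(T v) : has_(true), value_(v) {}
  bool IsJust() const { return has_; }
  bool IsNothing() const { return !has_; }
  T FromJust() const { assert(has_); return value_; }
 private:
  bool has_;
  T value_;
};

template <typename T> Maybe<T> Just(T v) { return Maybe<T>(v); }
template <typename T> Maybe<T> Nothing() { return Maybe<T>(); }

int main() {
  Maybe<int> arg = Just(4);                          // was Maybe<int>(4)
  int value = arg.IsJust() ? arg.FromJust() : -1;    // was has_value / value
  Maybe<int> none = Nothing<int>();                  // was Maybe<int>()
  return (value == 4 && none.IsNothing()) ? 0 : 1;
}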
virtual void RecordToBooleanTypeFeedback(
@ -2354,10 +2393,11 @@ class Yield FINAL : public Expression {
return FLAG_vector_ics && (yield_kind() == kDelegating);
}
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
Isolate* isolate) OVERRIDE {
Isolate* isolate, const ICSlotCache* cache) OVERRIDE {
return FeedbackVectorRequirements(0, HasFeedbackSlots() ? 3 : 0);
}
void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
ICSlotCache* cache) OVERRIDE {
yield_first_feedback_slot_ = slot;
}
Code::Kind FeedbackICSlotKind(int index) OVERRIDE {
@ -2534,7 +2574,7 @@ class FunctionLiteral FINAL : public Expression {
void set_ast_properties(AstProperties* ast_properties) {
ast_properties_ = *ast_properties;
}
const FeedbackVectorSpec& feedback_vector_spec() const {
const ZoneFeedbackVectorSpec* feedback_vector_spec() const {
return ast_properties_.get_spec();
}
bool dont_optimize() { return dont_optimize_reason_ != kNoReason; }
@ -2558,6 +2598,7 @@ class FunctionLiteral FINAL : public Expression {
scope_(scope),
body_(body),
raw_inferred_name_(ast_value_factory->empty_string()),
ast_properties_(zone),
dont_optimize_reason_(kNoReason),
materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
@ -2598,7 +2639,7 @@ class FunctionLiteral FINAL : public Expression {
class HasDuplicateParameters : public BitField<ParameterFlag, 3, 1> {};
class IsFunction : public BitField<IsFunctionFlag, 4, 1> {};
class IsParenthesized : public BitField<IsParenthesizedFlag, 5, 1> {};
class FunctionKindBits : public BitField<FunctionKind, 6, 7> {};
class FunctionKindBits : public BitField<FunctionKind, 6, 8> {};
};
@ -2695,10 +2736,11 @@ class SuperReference FINAL : public Expression {
// Type feedback information.
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
Isolate* isolate) OVERRIDE {
Isolate* isolate, const ICSlotCache* cache) OVERRIDE {
return FeedbackVectorRequirements(0, FLAG_vector_ics ? 1 : 0);
}
void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
ICSlotCache* cache) OVERRIDE {
homeobject_feedback_slot_ = slot;
}
Code::Kind FeedbackICSlotKind(int index) OVERRIDE { return Code::LOAD_IC; }
@ -3171,10 +3213,11 @@ class AstNodeFactory FINAL BASE_EMBEDDED {
}
ImportDeclaration* NewImportDeclaration(VariableProxy* proxy,
Module* module,
Scope* scope,
int pos) {
return new (zone_) ImportDeclaration(zone_, proxy, module, scope, pos);
const AstRawString* import_name,
const AstRawString* module_specifier,
Scope* scope, int pos) {
return new (zone_) ImportDeclaration(zone_, proxy, import_name,
module_specifier, scope, pos);
}
ExportDeclaration* NewExportDeclaration(VariableProxy* proxy,
@ -3369,14 +3412,17 @@ class AstNodeFactory FINAL BASE_EMBEDDED {
}
VariableProxy* NewVariableProxy(Variable* var,
int pos = RelocInfo::kNoPosition) {
return new (zone_) VariableProxy(zone_, var, pos);
int start_position = RelocInfo::kNoPosition,
int end_position = RelocInfo::kNoPosition) {
return new (zone_) VariableProxy(zone_, var, start_position, end_position);
}
VariableProxy* NewVariableProxy(const AstRawString* name,
bool is_this,
int position = RelocInfo::kNoPosition) {
return new (zone_) VariableProxy(zone_, name, is_this, position);
Variable::Kind variable_kind,
int start_position = RelocInfo::kNoPosition,
int end_position = RelocInfo::kNoPosition) {
return new (zone_)
VariableProxy(zone_, name, variable_kind, start_position, end_position);
}
Property* NewProperty(Expression* obj, Expression* key, int pos) {

45
deps/v8/src/background-parsing-task.cc

@ -10,27 +10,35 @@ namespace internal {
BackgroundParsingTask::BackgroundParsingTask(
StreamedSource* source, ScriptCompiler::CompileOptions options,
int stack_size, Isolate* isolate)
: source_(source), options_(options), stack_size_(stack_size) {
// Prepare the data for the internalization phase and compilation phase, which
// will happen in the main thread after parsing.
source->info.Reset(new i::CompilationInfoWithZone(source->source_stream.get(),
source->encoding, isolate));
source->info->MarkAsGlobal();
: source_(source), stack_size_(stack_size) {
// We don't set the context to the CompilationInfo yet, because the background
// thread cannot do anything with it anyway. We set it just before compilation
// on the foreground thread.
DCHECK(options == ScriptCompiler::kProduceParserCache ||
options == ScriptCompiler::kProduceCodeCache ||
options == ScriptCompiler::kNoCompileOptions);
source->allow_lazy =
!i::Compiler::DebuggerWantsEagerCompilation(source->info.get());
if (!source->allow_lazy && options_ == ScriptCompiler::kProduceParserCache) {
// Prepare the data for the internalization phase and compilation phase, which
// will happen in the main thread after parsing.
Zone* zone = new Zone();
ParseInfo* info = new ParseInfo(zone);
source->zone.Reset(zone);
source->info.Reset(info);
info->set_isolate(isolate);
info->set_source_stream(source->source_stream.get());
info->set_source_stream_encoding(source->encoding);
info->set_hash_seed(isolate->heap()->HashSeed());
info->set_global();
info->set_unicode_cache(&source_->unicode_cache);
bool disable_lazy = Compiler::DebuggerWantsEagerCompilation(isolate);
if (disable_lazy && options == ScriptCompiler::kProduceParserCache) {
// Producing cached data while parsing eagerly is not supported.
options_ = ScriptCompiler::kNoCompileOptions;
options = ScriptCompiler::kNoCompileOptions;
}
source->hash_seed = isolate->heap()->HashSeed();
info->set_compile_options(options);
info->set_allow_lazy_parsing(!disable_lazy);
}
@ -40,20 +48,19 @@ void BackgroundParsingTask::Run() {
DisallowHandleDereference no_deref;
ScriptData* script_data = NULL;
if (options_ == ScriptCompiler::kProduceParserCache ||
options_ == ScriptCompiler::kProduceCodeCache) {
source_->info->SetCachedData(&script_data, options_);
ScriptCompiler::CompileOptions options = source_->info->compile_options();
if (options == ScriptCompiler::kProduceParserCache ||
options == ScriptCompiler::kProduceCodeCache) {
source_->info->set_cached_data(&script_data);
}
uintptr_t stack_limit =
reinterpret_cast<uintptr_t>(&stack_limit) - stack_size_ * KB;
source_->info->set_stack_limit(stack_limit);
// Parser needs to stay alive for finalizing the parsing on the main
// thread. Passing &parse_info is OK because Parser doesn't store it.
source_->parser.Reset(new Parser(source_->info.get(), stack_limit,
source_->hash_seed,
&source_->unicode_cache));
source_->parser->set_allow_lazy(source_->allow_lazy);
source_->parser.Reset(new Parser(source_->info.get()));
source_->parser->ParseOnBackground(source_->info.get());
if (script_data != NULL) {

13
deps/v8/src/background-parsing-task.h

@ -14,18 +14,13 @@
namespace v8 {
namespace internal {
class Parser;
// Internal representation of v8::ScriptCompiler::StreamedSource. Contains all
// data which needs to be transmitted between threads for background parsing,
// finalizing it on the main thread, and compiling on the main thread.
struct StreamedSource {
StreamedSource(ScriptCompiler::ExternalSourceStream* source_stream,
ScriptCompiler::StreamedSource::Encoding encoding)
: source_stream(source_stream),
encoding(encoding),
hash_seed(0),
allow_lazy(false) {}
: source_stream(source_stream), encoding(encoding) {}
// Internal implementation of v8::ScriptCompiler::StreamedSource.
SmartPointer<ScriptCompiler::ExternalSourceStream> source_stream;
@ -36,9 +31,8 @@ struct StreamedSource {
// between parsing and compilation. These need to be initialized before the
// compilation starts.
UnicodeCache unicode_cache;
SmartPointer<CompilationInfo> info;
uint32_t hash_seed;
bool allow_lazy;
SmartPointer<Zone> zone;
SmartPointer<ParseInfo> info;
SmartPointer<Parser> parser;
private:
@ -58,7 +52,6 @@ class BackgroundParsingTask : public ScriptCompiler::ScriptStreamingTask {
private:
StreamedSource* source_; // Not owned.
ScriptCompiler::CompileOptions options_;
int stack_size_;
};
}

15
deps/v8/src/bailout-reason.h

@ -116,19 +116,10 @@ namespace internal {
"Improper object on prototype chain for store") \
V(kIndexIsNegative, "Index is negative") \
V(kIndexIsTooLarge, "Index is too large") \
V(kInlinedRuntimeFunctionClassOf, "Inlined runtime function: ClassOf") \
V(kInlinedRuntimeFunctionFastOneByteArrayJoin, \
"Inlined runtime function: FastOneByteArrayJoin") \
V(kInlinedRuntimeFunctionGeneratorNext, \
"Inlined runtime function: GeneratorNext") \
V(kInlinedRuntimeFunctionGeneratorThrow, \
"Inlined runtime function: GeneratorThrow") \
V(kInlinedRuntimeFunctionGetFromCache, \
"Inlined runtime function: GetFromCache") \
V(kInlinedRuntimeFunctionIsNonNegativeSmi, \
"Inlined runtime function: IsNonNegativeSmi") \
V(kInlinedRuntimeFunctionIsStringWrapperSafeForDefaultValueOf, \
"Inlined runtime function: IsStringWrapperSafeForDefaultValueOf") \
V(kInliningBailedOut, "Inlining bailed out") \
V(kInputGPRIsExpectedToHaveUpper32Cleared, \
"Input GPR is expected to have upper32 cleared") \
@ -310,7 +301,6 @@ namespace internal {
V(kUnexpectedUnusedPropertiesOfStringWrapper, \
"Unexpected unused properties of string wrapper") \
V(kUnimplemented, "unimplemented") \
V(kUninitializedKSmiConstantRegister, "Uninitialized kSmiConstantRegister") \
V(kUnsupportedConstCompoundAssignment, \
"Unsupported const compound assignment") \
V(kUnsupportedCountOperationWithConst, \
@ -345,7 +335,8 @@ enum BailoutReason {
const char* GetBailoutReason(BailoutReason reason);
}
} // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_BAILOUT_REASON_H_

13
deps/v8/src/base/bits.h

@ -148,17 +148,30 @@ inline uint32_t RoundDownToPowerOfTwo32(uint32_t value) {
}
// Precondition: 0 <= shift < 32
inline uint32_t RotateRight32(uint32_t value, uint32_t shift) {
if (shift == 0) return value;
return (value >> shift) | (value << (32 - shift));
}
// Precondition: 0 <= shift < 32
inline uint32_t RotateLeft32(uint32_t value, uint32_t shift) {
if (shift == 0) return value;
return (value << shift) | (value >> (32 - shift));
}
// Precondition: 0 <= shift < 64
inline uint64_t RotateRight64(uint64_t value, uint64_t shift) {
if (shift == 0) return value;
return (value >> shift) | (value << (64 - shift));
}
// Precondition: 0 <= shift < 64
inline uint64_t RotateLeft64(uint64_t value, uint64_t shift) {
if (shift == 0) return value;
return (value << shift) | (value >> (64 - shift));
}
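The new early return for shift == 0 matters because the complementary shift (32 - shift or 64 - shift) would otherwise equal the operand width, and shifting a value by its full width is undefined behaviour in C++. A quick standalone illustration of the guarded rotate, wrapped in a runnable check under that same assumption:

#include <cstdint>
#include <cstdio>

// Rotate right by 0..31 bits; the early return avoids `value << 32`,
// which would be undefined behaviour.
inline uint32_t RotateRight32(uint32_t value, uint32_t shift) {
  if (shift == 0) return value;
  return (value >> shift) | (value << (32 - shift));
}

int main() {
  std::printf("%08x\n", RotateRight32(0x00000001u, 1));  // 80000000
  std::printf("%08x\n", RotateRight32(0x12345678u, 0));  // 12345678 (guarded case)
}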
// SignedAddOverflow32(lhs,rhs,val) performs a signed summation of |lhs| and
// |rhs| and stores the result into the variable pointed to by |val| and

2
deps/v8/src/base/cpu.cc

@ -385,6 +385,8 @@ CPU::CPU()
case 0x37: // SLM
case 0x4a:
case 0x4d:
case 0x4c: // AMT
case 0x6e:
is_atom_ = true;
}
}

25
deps/v8/src/base/logging.cc

@ -5,10 +5,11 @@
#include "src/base/logging.h"
#if V8_LIBC_GLIBC || V8_OS_BSD
# include <cxxabi.h>
# include <execinfo.h>
#include <cxxabi.h>
#include <dlfcn.h>
#include <execinfo.h>
#elif V8_OS_QNX
# include <backtrace.h>
#include <backtrace.h>
#endif // V8_LIBC_GLIBC || V8_OS_BSD
#include <cstdio>
@ -54,28 +55,24 @@ void DumpBacktrace() {
#if V8_LIBC_GLIBC || V8_OS_BSD
void* trace[100];
int size = backtrace(trace, arraysize(trace));
char** symbols = backtrace_symbols(trace, size);
OS::PrintError("\n==== C stack trace ===============================\n\n");
if (size == 0) {
OS::PrintError("(empty)\n");
} else if (symbols == NULL) {
OS::PrintError("(no symbols)\n");
} else {
for (int i = 1; i < size; ++i) {
OS::PrintError("%2d: ", i);
char mangled[201];
if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT
int status;
size_t length;
char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
OS::PrintError("%s\n", demangled != NULL ? demangled : mangled);
Dl_info info;
char* demangled = NULL;
if (!dladdr(trace[i], &info) || !info.dli_sname) {
OS::PrintError("%p\n", trace[i]);
} else if ((demangled = abi::__cxa_demangle(info.dli_sname, 0, 0, 0))) {
OS::PrintError("%s\n", demangled);
free(demangled);
} else {
OS::PrintError("??\n");
OS::PrintError("%s\n", info.dli_sname);
}
}
}
free(symbols);
#elif V8_OS_QNX
char out[1024];
bt_accessor_t acc;
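The rewritten glibc/BSD path stops parsing the strings returned by backtrace_symbols() and instead resolves each frame with dladdr() and demangles the symbol via abi::__cxa_demangle(), falling back to the raw address or the mangled name. A self-contained sketch of that approach (on glibc, typically compile with -rdynamic and link with -ldl so dladdr can see local symbols; error handling trimmed):

#include <cxxabi.h>
#include <dlfcn.h>
#include <execinfo.h>
#include <cstdio>
#include <cstdlib>

void DumpBacktrace() {
  void* trace[100];
  int size = backtrace(trace, 100);
  for (int i = 1; i < size; ++i) {              // skip frame 0 (this function)
    Dl_info info;
    char* demangled = nullptr;
    if (!dladdr(trace[i], &info) || !info.dli_sname) {
      std::printf("%2d: %p\n", i, trace[i]);    // no symbol: print the address
    } else if ((demangled = abi::__cxa_demangle(info.dli_sname, nullptr,
                                                nullptr, nullptr))) {
      std::printf("%2d: %s\n", i, demangled);   // demangled C++ name
      std::free(demangled);
    } else {
      std::printf("%2d: %s\n", i, info.dli_sname);  // raw (e.g. C) symbol
    }
  }
}

int main() { DumpBacktrace(); }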

6
deps/v8/src/base/platform/platform-freebsd.cc

@ -188,7 +188,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
void* reservation = mmap(OS::GetRandomMmapAddr(),
request_size,
PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
MAP_PRIVATE | MAP_ANON,
kMmapFd,
kMmapFdOffset);
if (reservation == MAP_FAILED) return;
@ -260,7 +260,7 @@ void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(OS::GetRandomMmapAddr(),
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
MAP_PRIVATE | MAP_ANON,
kMmapFd,
kMmapFdOffset);
@ -288,7 +288,7 @@ bool VirtualMemory::UncommitRegion(void* base, size_t size) {
return mmap(base,
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
kMmapFd,
kMmapFdOffset) != MAP_FAILED;
}

5
deps/v8/src/base/platform/platform-posix.cc

@ -358,6 +358,11 @@ bool OS::Remove(const char* path) {
}
bool OS::isDirectorySeparator(const char ch) {
return ch == '/';
}
FILE* OS::OpenTemporaryFile() {
return tmpfile();
}

5
deps/v8/src/base/platform/platform-win32.cc

@ -575,6 +575,11 @@ bool OS::Remove(const char* path) {
}
bool OS::isDirectorySeparator(const char ch) {
return ch == '/' || ch == '\\';
}
FILE* OS::OpenTemporaryFile() {
// tmpfile_s tries to use the root dir, don't use it.
char tempPathBuffer[MAX_PATH];

2
deps/v8/src/base/platform/platform.h

@ -142,6 +142,8 @@ class OS {
static FILE* FOpen(const char* path, const char* mode);
static bool Remove(const char* path);
static bool isDirectorySeparator(const char ch);
// Opens a temporary file; the file is automatically removed on close.
static FILE* OpenTemporaryFile();

268
deps/v8/src/bootstrapper.cc

@ -13,8 +13,8 @@
#include "src/extensions/statistics-extension.h"
#include "src/extensions/trigger-failure-extension.h"
#include "src/isolate-inl.h"
#include "src/natives.h"
#include "src/snapshot.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
#include "third_party/fdlibm/fdlibm.h"
namespace v8 {
@ -140,6 +140,7 @@ class Genesis BASE_EMBEDDED {
Handle<JSFunction> GetGeneratorPoisonFunction();
void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
void CreateStrongModeFunctionMaps(Handle<JSFunction> empty);
// Make the "arguments" and "caller" properties throw a TypeError on access.
void PoisonArgumentsAndCaller(Handle<Map> map);
@ -256,18 +257,21 @@ class Genesis BASE_EMBEDDED {
function_mode == FUNCTION_WITH_READONLY_PROTOTYPE);
}
Handle<Map> CreateFunctionMap(FunctionMode function_mode);
Handle<Map> CreateSloppyFunctionMap(FunctionMode function_mode);
void SetFunctionInstanceDescriptor(Handle<Map> map,
FunctionMode function_mode);
void MakeFunctionInstancePrototypeWritable();
Handle<Map> CreateStrictFunctionMap(
FunctionMode function_mode,
Handle<JSFunction> empty_function);
Handle<Map> CreateStrictFunctionMap(FunctionMode function_mode,
Handle<JSFunction> empty_function);
Handle<Map> CreateStrongFunctionMap(Handle<JSFunction> empty_function,
bool is_constructor);
void SetStrictFunctionInstanceDescriptor(Handle<Map> map,
FunctionMode function_mode);
void SetStrongFunctionInstanceDescriptor(Handle<Map> map);
static bool CompileBuiltin(Isolate* isolate, int index);
static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
@ -335,7 +339,7 @@ void Bootstrapper::DetachGlobal(Handle<Context> env) {
Handle<JSGlobalProxy> global_proxy(JSGlobalProxy::cast(env->global_proxy()));
global_proxy->set_native_context(*factory->null_value());
SetObjectPrototype(global_proxy, factory->null_value());
global_proxy->map()->set_constructor(*factory->null_value());
global_proxy->map()->SetConstructor(*factory->null_value());
if (FLAG_track_detached_contexts) {
env->GetIsolate()->AddDetachedContext(env);
}
@ -378,51 +382,53 @@ void Genesis::SetFunctionInstanceDescriptor(
int size = IsFunctionModeWithPrototype(function_mode) ? 5 : 4;
Map::EnsureDescriptorSlack(map, size);
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE | READ_ONLY);
PropertyAttributes ro_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
PropertyAttributes roc_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
Handle<AccessorInfo> length =
Accessors::FunctionLengthInfo(isolate(), attribs);
Accessors::FunctionLengthInfo(isolate(), roc_attribs);
{ // Add length.
AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
length, attribs);
length, roc_attribs);
map->AppendDescriptor(&d);
}
Handle<AccessorInfo> name =
Accessors::FunctionNameInfo(isolate(), attribs);
Accessors::FunctionNameInfo(isolate(), ro_attribs);
{ // Add name.
AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
attribs);
roc_attribs);
map->AppendDescriptor(&d);
}
Handle<AccessorInfo> args =
Accessors::FunctionArgumentsInfo(isolate(), attribs);
Accessors::FunctionArgumentsInfo(isolate(), ro_attribs);
{ // Add arguments.
AccessorConstantDescriptor d(Handle<Name>(Name::cast(args->name())), args,
attribs);
ro_attribs);
map->AppendDescriptor(&d);
}
Handle<AccessorInfo> caller =
Accessors::FunctionCallerInfo(isolate(), attribs);
Accessors::FunctionCallerInfo(isolate(), ro_attribs);
{ // Add caller.
AccessorConstantDescriptor d(Handle<Name>(Name::cast(caller->name())),
caller, attribs);
caller, ro_attribs);
map->AppendDescriptor(&d);
}
if (IsFunctionModeWithPrototype(function_mode)) {
if (function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE) {
attribs = static_cast<PropertyAttributes>(attribs & ~READ_ONLY);
ro_attribs = static_cast<PropertyAttributes>(ro_attribs & ~READ_ONLY);
}
Handle<AccessorInfo> prototype =
Accessors::FunctionPrototypeInfo(isolate(), attribs);
Accessors::FunctionPrototypeInfo(isolate(), ro_attribs);
AccessorConstantDescriptor d(Handle<Name>(Name::cast(prototype->name())),
prototype, attribs);
prototype, ro_attribs);
map->AppendDescriptor(&d);
}
}
Handle<Map> Genesis::CreateFunctionMap(FunctionMode function_mode) {
Handle<Map> Genesis::CreateSloppyFunctionMap(FunctionMode function_mode) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
SetFunctionInstanceDescriptor(map, function_mode);
map->set_function_with_prototype(IsFunctionModeWithPrototype(function_mode));
@ -437,7 +443,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// Functions with this map will not have a 'prototype' property, and
// can not be used as constructors.
Handle<Map> function_without_prototype_map =
CreateFunctionMap(FUNCTION_WITHOUT_PROTOTYPE);
CreateSloppyFunctionMap(FUNCTION_WITHOUT_PROTOTYPE);
native_context()->set_sloppy_function_without_prototype_map(
*function_without_prototype_map);
@ -445,7 +451,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// of builtins.
// Later the map is replaced with writable prototype map, allocated below.
Handle<Map> function_map =
CreateFunctionMap(FUNCTION_WITH_READONLY_PROTOTYPE);
CreateSloppyFunctionMap(FUNCTION_WITH_READONLY_PROTOTYPE);
native_context()->set_sloppy_function_map(*function_map);
native_context()->set_sloppy_function_with_readonly_prototype_map(
*function_map);
@ -453,7 +459,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// The final map for functions. Writeable prototype.
// This map is installed in MakeFunctionInstancePrototypeWritable.
sloppy_function_map_writable_prototype_ =
CreateFunctionMap(FUNCTION_WITH_WRITEABLE_PROTOTYPE);
CreateSloppyFunctionMap(FUNCTION_WITH_WRITEABLE_PROTOTYPE);
Factory* factory = isolate->factory();
@ -501,7 +507,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// Allocate the function map first and then patch the prototype later
Handle<Map> empty_function_map =
CreateFunctionMap(FUNCTION_WITHOUT_PROTOTYPE);
CreateSloppyFunctionMap(FUNCTION_WITHOUT_PROTOTYPE);
DCHECK(!empty_function_map->is_dictionary_map());
empty_function_map->SetPrototype(object_function_prototype);
empty_function_map->set_is_prototype_map(true);
@ -536,6 +542,8 @@ void Genesis::SetStrictFunctionInstanceDescriptor(
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
PropertyAttributes ro_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
PropertyAttributes roc_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
// Add length.
if (function_mode == BOUND_FUNCTION) {
@ -547,16 +555,16 @@ void Genesis::SetStrictFunctionInstanceDescriptor(
function_mode == FUNCTION_WITH_READONLY_PROTOTYPE ||
function_mode == FUNCTION_WITHOUT_PROTOTYPE);
Handle<AccessorInfo> length =
Accessors::FunctionLengthInfo(isolate(), ro_attribs);
Accessors::FunctionLengthInfo(isolate(), roc_attribs);
AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
length, ro_attribs);
length, roc_attribs);
map->AppendDescriptor(&d);
}
Handle<AccessorInfo> name =
Accessors::FunctionNameInfo(isolate(), ro_attribs);
Accessors::FunctionNameInfo(isolate(), roc_attribs);
{ // Add name.
AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
ro_attribs);
roc_attribs);
map->AppendDescriptor(&d);
}
{ // Add arguments.
@ -583,6 +591,29 @@ void Genesis::SetStrictFunctionInstanceDescriptor(
}
void Genesis::SetStrongFunctionInstanceDescriptor(Handle<Map> map) {
Map::EnsureDescriptorSlack(map, 2);
PropertyAttributes ro_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
Handle<AccessorInfo> length =
Accessors::FunctionLengthInfo(isolate(), ro_attribs);
{ // Add length.
AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
length, ro_attribs);
map->AppendDescriptor(&d);
}
Handle<AccessorInfo> name =
Accessors::FunctionNameInfo(isolate(), ro_attribs);
{ // Add name.
AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
ro_attribs);
map->AppendDescriptor(&d);
}
}
// ECMAScript 5th Edition, 13.2.3
Handle<JSFunction> Genesis::GetStrictPoisonFunction() {
if (strict_poison_function.is_null()) {
@ -628,6 +659,18 @@ Handle<Map> Genesis::CreateStrictFunctionMap(
}
Handle<Map> Genesis::CreateStrongFunctionMap(
Handle<JSFunction> empty_function, bool is_constructor) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
SetStrongFunctionInstanceDescriptor(map);
map->set_function_with_prototype(is_constructor);
map->SetPrototype(empty_function);
map->set_is_extensible(is_constructor);
// TODO(rossberg): mark strong
return map;
}
void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
// Allocate map for the prototype-less strict mode instances.
Handle<Map> strict_function_without_prototype_map =
@ -659,6 +702,16 @@ void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
}
void Genesis::CreateStrongModeFunctionMaps(Handle<JSFunction> empty) {
// Allocate map for strong mode instances, which never have prototypes.
Handle<Map> strong_function_map = CreateStrongFunctionMap(empty, false);
native_context()->set_strong_function_map(*strong_function_map);
// Constructors do, though.
Handle<Map> strong_constructor_map = CreateStrongFunctionMap(empty, true);
native_context()->set_strong_constructor_map(*strong_constructor_map);
}
static void SetAccessors(Handle<Map> map,
Handle<String> name,
Handle<JSFunction> func) {
@ -1264,8 +1317,8 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
map->set_inobject_properties(1);
// Copy constructor from the sloppy arguments boilerplate.
map->set_constructor(
native_context()->sloppy_arguments_map()->constructor());
map->SetConstructor(
native_context()->sloppy_arguments_map()->GetConstructor());
native_context()->set_strict_arguments_map(*map);
@ -1412,8 +1465,8 @@ bool Genesis::CompileScriptCached(Isolate* isolate,
Handle<String> script_name =
factory->NewStringFromUtf8(name).ToHandleChecked();
function_info = Compiler::CompileScript(
source, script_name, 0, 0, false, false, top_context, extension, NULL,
ScriptCompiler::kNoCompileOptions,
source, script_name, 0, 0, false, false, Handle<Object>(), top_context,
extension, NULL, ScriptCompiler::kNoCompileOptions,
use_runtime_context ? NATIVES_CODE : NOT_NATIVES_CODE, false);
if (function_info.is_null()) return false;
if (cache != NULL) cache->Add(name, function_info);
@ -1485,7 +1538,7 @@ static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
void Genesis::InstallNativeFunctions() {
HandleScope scope(isolate());
INSTALL_NATIVE(JSFunction, "CreateDate", create_date_fun);
INSTALL_NATIVE(JSFunction, "$createDate", create_date_fun);
INSTALL_NATIVE(JSFunction, "ToNumber", to_number_fun);
INSTALL_NATIVE(JSFunction, "ToString", to_string_fun);
@ -1501,7 +1554,7 @@ void Genesis::InstallNativeFunctions() {
INSTALL_NATIVE(JSFunction, "ToCompletePropertyDescriptor",
to_complete_property_descriptor);
INSTALL_NATIVE(JSFunction, "IsPromise", is_promise);
INSTALL_NATIVE(Symbol, "promiseStatus", promise_status);
INSTALL_NATIVE(JSFunction, "PromiseCreate", promise_create);
INSTALL_NATIVE(JSFunction, "PromiseResolve", promise_resolve);
INSTALL_NATIVE(JSFunction, "PromiseReject", promise_reject);
@ -1598,9 +1651,7 @@ void Genesis::InitializeBuiltinTypedArrays() {
#define EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(id) \
void Genesis::InstallNativeFunctions_##id() {}
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_scoping)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_modules)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_strings)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_arrays)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_array_includes)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_classes)
@ -1609,12 +1660,12 @@ EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_regexps)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_arrow_functions)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_numeric_literals)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_tostring)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_templates)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_sloppy)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_unicode)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_unicode_regexps)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_computed_property_names)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_rest_parameters)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_reflect)
void Genesis::InstallNativeFunctions_harmony_proxies() {
@ -1631,9 +1682,7 @@ void Genesis::InstallNativeFunctions_harmony_proxies() {
#define EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(id) \
void Genesis::InitializeGlobal_##id() {}
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_scoping)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_modules)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_strings)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_arrays)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_array_includes)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_classes)
@ -1642,7 +1691,6 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_arrow_functions)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_numeric_literals)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tostring)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_proxies)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_templates)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_unicode)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_computed_property_names)
@ -1653,10 +1701,9 @@ void Genesis::InitializeGlobal_harmony_regexps() {
Handle<HeapObject> flag(FLAG_harmony_regexps ? heap()->true_value()
: heap()->false_value());
PropertyAttributes attributes =
static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
Runtime::DefineObjectProperty(builtins, factory()->harmony_regexps_string(),
flag, attributes).Assert();
Runtime::SetObjectProperty(isolate(), builtins,
factory()->harmony_regexps_string(), flag,
STRICT).Assert();
}
@ -1665,11 +1712,51 @@ void Genesis::InitializeGlobal_harmony_unicode_regexps() {
Handle<HeapObject> flag(FLAG_harmony_unicode_regexps ? heap()->true_value()
: heap()->false_value());
PropertyAttributes attributes =
static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
Runtime::DefineObjectProperty(builtins,
factory()->harmony_unicode_regexps_string(),
flag, attributes).Assert();
Runtime::SetObjectProperty(isolate(), builtins,
factory()->harmony_unicode_regexps_string(), flag,
STRICT).Assert();
}
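
Both hunks above now publish the `harmony_regexps` / `harmony_unicode_regexps` booleans onto the builtins object with a plain SetObjectProperty instead of DefineObjectProperty; the experimental regexp natives read these flags. Roughly what the flags gate on the user side (a sketch, assuming the process is started with --harmony_regexps --harmony_unicode_regexps, and that RegExp.prototype.flags is what the harmony-regexp.js native listed later in this diff provides at this revision):

    var re = /^.$/u;              // the 'u' flag is gated by --harmony_unicode_regexps
    re.test('\uD83D\uDE00');      // true: with /u the surrogate pair counts as one code point
    /abc/gim.flags;               // 'gim' -- RegExp.prototype.flags getter from harmony-regexp.js
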
void Genesis::InitializeGlobal_harmony_reflect() {
if (!FLAG_harmony_reflect) return;
Handle<JSObject> builtins(native_context()->builtins());
// Install references to functions of the Reflect object
{
Handle<JSFunction> apply =
InstallFunction(builtins, "ReflectApply", JS_OBJECT_TYPE,
JSObject::kHeaderSize, MaybeHandle<JSObject>(),
Builtins::kReflectApply);
Handle<JSFunction> construct =
InstallFunction(builtins, "ReflectConstruct", JS_OBJECT_TYPE,
JSObject::kHeaderSize, MaybeHandle<JSObject>(),
Builtins::kReflectConstruct);
if (FLAG_vector_ics) {
// Apply embeds an IC, so we need a type vector of size 1 in the shared
// function info.
FeedbackVectorSpec spec(0, Code::CALL_IC);
Handle<TypeFeedbackVector> feedback_vector =
factory()->NewTypeFeedbackVector(&spec);
apply->shared()->set_feedback_vector(*feedback_vector);
feedback_vector = factory()->NewTypeFeedbackVector(&spec);
construct->shared()->set_feedback_vector(*feedback_vector);
}
apply->shared()->set_internal_formal_parameter_count(3);
apply->shared()->set_length(3);
construct->shared()->set_internal_formal_parameter_count(3);
construct->shared()->set_length(2);
}
Handle<JSGlobalObject> global(JSGlobalObject::cast(
native_context()->global_object()));
Handle<String> reflect_string =
factory()->NewStringFromStaticChars("Reflect");
Handle<Object> reflect =
factory()->NewJSObject(isolate()->object_function(), TENURED);
JSObject::AddProperty(global, reflect_string, reflect, DONT_ENUM);
}
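
The new InitializeGlobal_harmony_reflect block wires the ReflectApply/ReflectConstruct builtins (with lengths 3 and 2, per the set_length calls above) into the builtins object and creates an empty global Reflect object for the harmony-reflect.js native to populate. A small usage sketch, assuming --harmony_reflect is passed:

    function sum(a, b, c) { return a + b + c; }
    Reflect.apply(sum, null, [1, 2, 3]);      // 6
    Reflect.apply.length;                     // 3, matching set_length(3) above

    function Point(x, y) { this.x = x; this.y = y; }
    var p = Reflect.construct(Point, [4, 5]); // behaves like `new Point(4, 5)`
    p instanceof Point;                       // true
    Reflect.construct.length;                 // 2, matching set_length(2) above
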
@ -2023,6 +2110,13 @@ bool Genesis::InstallNatives() {
native_context()->set_strict_generator_function_map(
*strict_generator_function_map);
Handle<Map> strong_function_map(native_context()->strong_function_map());
Handle<Map> strong_generator_function_map =
Map::Copy(strong_function_map, "StrongGeneratorFunction");
strong_generator_function_map->SetPrototype(generator_function_prototype);
native_context()->set_strong_generator_function_map(
*strong_generator_function_map);
Handle<JSFunction> object_function(native_context()->object_function());
Handle<Map> generator_object_prototype_map = Map::Create(isolate(), 0);
generator_object_prototype_map->SetPrototype(generator_object_prototype);
@ -2047,14 +2141,12 @@ bool Genesis::InstallNatives() {
}
// Install natives.
for (int i = Natives::GetDebuggerCount();
i < Natives::GetBuiltinsCount();
i++) {
int i = Natives::GetDebuggerCount();
if (!CompileBuiltin(isolate(), i)) return false;
if (!InstallJSBuiltins(builtins)) return false;
for (++i; i < Natives::GetBuiltinsCount(); ++i) {
if (!CompileBuiltin(isolate(), i)) return false;
// TODO(ager): We really only need to install the JS builtin
// functions on the builtins object after compiling and running
// runtime.js.
if (!InstallJSBuiltins(builtins)) return false;
}
InstallNativeFunctions();
@ -2090,10 +2182,9 @@ bool Genesis::InstallNatives() {
if (FLAG_vector_ics) {
// Apply embeds an IC, so we need a type vector of size 1 in the shared
// function info.
FeedbackVectorSpec spec(0, 1);
spec.SetKind(0, Code::CALL_IC);
FeedbackVectorSpec spec(0, Code::CALL_IC);
Handle<TypeFeedbackVector> feedback_vector =
factory()->NewTypeFeedbackVector(spec);
factory()->NewTypeFeedbackVector(&spec);
apply->shared()->set_feedback_vector(*feedback_vector);
}
@ -2126,7 +2217,7 @@ bool Genesis::InstallNatives() {
// Add initial map.
Handle<Map> initial_map =
factory()->NewMap(JS_ARRAY_TYPE, JSRegExpResult::kSize);
initial_map->set_constructor(*array_constructor);
initial_map->SetConstructor(*array_constructor);
// Set prototype on map.
initial_map->set_non_instance_prototype(false);
@ -2213,11 +2304,8 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_array_includes_natives[] = {
"native harmony-array-includes.js", NULL};
static const char* harmony_proxies_natives[] = {"native proxy.js", NULL};
static const char* harmony_strings_natives[] = {"native harmony-string.js",
NULL};
static const char* harmony_classes_natives[] = {NULL};
static const char* harmony_modules_natives[] = {NULL};
static const char* harmony_scoping_natives[] = {NULL};
static const char* harmony_object_literals_natives[] = {NULL};
static const char* harmony_regexps_natives[] = {
"native harmony-regexp.js", NULL};
@ -2225,13 +2313,13 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_numeric_literals_natives[] = {NULL};
static const char* harmony_tostring_natives[] = {"native harmony-tostring.js",
NULL};
static const char* harmony_templates_natives[] = {
"native harmony-templates.js", NULL};
static const char* harmony_sloppy_natives[] = {NULL};
static const char* harmony_unicode_natives[] = {NULL};
static const char* harmony_unicode_regexps_natives[] = {NULL};
static const char* harmony_computed_property_names_natives[] = {NULL};
static const char* harmony_rest_parameters_natives[] = {NULL};
static const char* harmony_reflect_natives[] = {"native harmony-reflect.js",
NULL};
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@ -2269,15 +2357,24 @@ static void InstallBuiltinFunctionId(Handle<JSObject> holder,
void Genesis::InstallBuiltinFunctionIds() {
HandleScope scope(isolate());
struct BuiltinFunctionIds {
const char* holder_expr;
const char* fun_name;
BuiltinFunctionId id;
};
#define INSTALL_BUILTIN_ID(holder_expr, fun_name, name) \
{ \
Handle<JSObject> holder = ResolveBuiltinIdHolder( \
native_context(), #holder_expr); \
BuiltinFunctionId id = k##name; \
InstallBuiltinFunctionId(holder, #fun_name, id); \
}
FUNCTIONS_WITH_ID_LIST(INSTALL_BUILTIN_ID)
{ #holder_expr, #fun_name, k##name } \
,
const BuiltinFunctionIds builtins[] = {
FUNCTIONS_WITH_ID_LIST(INSTALL_BUILTIN_ID)};
#undef INSTALL_BUILTIN_ID
for (const BuiltinFunctionIds& builtin : builtins) {
Handle<JSObject> holder =
ResolveBuiltinIdHolder(native_context(), builtin.holder_expr);
InstallBuiltinFunctionId(holder, builtin.fun_name, builtin.id);
}
}
@ -2543,15 +2640,7 @@ bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
Handle<Object> function_object = Object::GetProperty(
isolate(), builtins, Builtins::GetName(id)).ToHandleChecked();
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
// TODO(mstarzinger): This is just a temporary hack to make TurboFan work,
// the correct solution is to restore the context register after invoking
// builtins from full-codegen.
function->shared()->DisableOptimization(kBuiltinFunctionCannotBeOptimized);
builtins->set_javascript_builtin(id, *function);
if (!Compiler::EnsureCompiled(function, CLEAR_EXCEPTION)) {
return false;
}
builtins->set_javascript_builtin_code(id, function->shared()->code());
}
return true;
}
@ -2650,7 +2739,8 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
DCHECK(!to->HasFastProperties());
// Add to dictionary.
Handle<Object> callbacks(descs->GetCallbacksObject(i), isolate());
PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1);
PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1,
PropertyCellType::kMutable);
JSObject::SetNormalizedProperty(to, key, callbacks, d);
break;
}
@ -2674,8 +2764,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
isolate());
DCHECK(!value->IsCell());
if (value->IsPropertyCell()) {
value = Handle<Object>(PropertyCell::cast(*value)->value(),
isolate());
value = handle(PropertyCell::cast(*value)->value(), isolate());
}
PropertyDetails details = properties->DetailsAt(i);
DCHECK_EQ(kData, details.kind());
@ -2813,6 +2902,7 @@ Genesis::Genesis(Isolate* isolate,
CreateRoots();
Handle<JSFunction> empty_function = CreateEmptyFunction(isolate);
CreateStrictModeFunctionMaps(empty_function);
CreateStrongModeFunctionMaps(empty_function);
Handle<GlobalObject> global_object =
CreateNewGlobals(global_proxy_template, global_proxy);
HookUpGlobalProxy(global_object, global_proxy);
@ -2827,9 +2917,13 @@ Genesis::Genesis(Isolate* isolate,
isolate->counters()->contexts_created_from_scratch()->Increment();
}
// Install experimental natives.
if (!InstallExperimentalNatives()) return;
InitializeExperimentalGlobal();
// Install experimental natives. Do not include them into the snapshot as we
// should be able to turn them off at runtime. Re-installing them after
// they have already been deserialized would also fail.
if (!isolate->serializer_enabled()) {
InitializeExperimentalGlobal();
if (!InstallExperimentalNatives()) return;
}
// The serializer cannot serialize typed arrays. Reset those typed arrays
// for each new context.

13
deps/v8/src/builtins.cc

@ -1044,6 +1044,17 @@ MUST_USE_RESULT static MaybeHandle<Object> HandleApiCallHelper(
DCHECK(!args[0]->IsNull());
if (args[0]->IsUndefined()) args[0] = function->global_proxy();
if (!is_construct && !fun_data->accept_any_receiver()) {
Handle<Object> receiver(&args[0]);
if (receiver->IsJSObject() && receiver->IsAccessCheckNeeded()) {
Handle<JSObject> js_receiver = Handle<JSObject>::cast(receiver);
if (!isolate->MayAccess(js_receiver)) {
isolate->ReportFailedAccessCheck(js_receiver);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
}
}
}
Object* raw_holder = fun_data->GetCompatibleReceiver(isolate, args[0]);
if (raw_holder->IsNull()) {
@ -1185,7 +1196,7 @@ MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
// Get the invocation callback from the function descriptor that was
// used to create the called object.
DCHECK(obj->map()->has_instance_call_handler());
JSFunction* constructor = JSFunction::cast(obj->map()->constructor());
JSFunction* constructor = JSFunction::cast(obj->map()->GetConstructor());
// TODO(ishell): turn this back to a DCHECK.
CHECK(constructor->shared()->IsApiFunction());
Object* handler =

6
deps/v8/src/builtins.h

@ -109,6 +109,8 @@ enum BuiltinExtraArguments {
/* Uses KeyedLoadIC_Initialize; must be after in list. */ \
V(FunctionCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(FunctionApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(ReflectApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(ReflectConstruct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
V(InternalArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(ArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
@ -193,6 +195,8 @@ enum BuiltinExtraArguments {
V(STRING_ADD_LEFT, 1) \
V(STRING_ADD_RIGHT, 1) \
V(APPLY_PREPARE, 1) \
V(REFLECT_APPLY_PREPARE, 1) \
V(REFLECT_CONSTRUCT_PREPARE, 2) \
V(STACK_OVERFLOW, 1)
class BuiltinFunctionTable;
@ -316,6 +320,8 @@ class Builtins {
static void Generate_FunctionCall(MacroAssembler* masm);
static void Generate_FunctionApply(MacroAssembler* masm);
static void Generate_ReflectApply(MacroAssembler* masm);
static void Generate_ReflectConstruct(MacroAssembler* masm);
static void Generate_InternalArrayCode(MacroAssembler* masm);
static void Generate_ArrayCode(MacroAssembler* masm);

7
deps/v8/src/char-predicates-inl.h

@ -35,6 +35,13 @@ inline bool IsInRange(int value, int lower_limit, int higher_limit) {
static_cast<unsigned int>(higher_limit - lower_limit);
}
inline bool IsAsciiIdentifier(uc32 c) {
return IsAlphaNumeric(c) || c == '$' || c == '_';
}
inline bool IsAlphaNumeric(uc32 c) {
return IsInRange(AsciiAlphaToLower(c), 'a', 'z') || IsDecimalDigit(c);
}
inline bool IsDecimalDigit(uc32 c) {
// ECMA-262, 3rd, 7.8.3 (p 16)
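
The new IsAsciiIdentifier/IsAlphaNumeric helpers above classify ASCII identifier characters. A rough JavaScript analogue of what they accept (illustrative only, not part of the diff):

    function isAsciiIdentifierChar(ch) {
      // IsAlphaNumeric(c) || c == '$' || c == '_'
      return /^[A-Za-z0-9$_]$/.test(ch);
    }
    isAsciiIdentifierChar('x');  // true
    isAsciiIdentifierChar('$');  // true
    isAsciiIdentifierChar('-');  // false
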

2
deps/v8/src/char-predicates.h

@ -15,6 +15,8 @@ namespace internal {
inline bool IsCarriageReturn(uc32 c);
inline bool IsLineFeed(uc32 c);
inline bool IsAsciiIdentifier(uc32 c);
inline bool IsAlphaNumeric(uc32 c);
inline bool IsDecimalDigit(uc32 c);
inline bool IsHexDigit(uc32 c);
inline bool IsOctalDigit(uc32 c);

56
deps/v8/src/code-factory.cc

@ -11,6 +11,15 @@
namespace v8 {
namespace internal {
// static
Callable CodeFactory::LoadGlobalIC(Isolate* isolate,
Handle<GlobalObject> global,
Handle<String> name) {
return Callable(LoadIC::load_global(isolate, global, name),
LoadDescriptor(isolate));
}
// static
Callable CodeFactory::LoadIC(Isolate* isolate, ContextualMode mode) {
return Callable(
@ -20,14 +29,15 @@ Callable CodeFactory::LoadIC(Isolate* isolate, ContextualMode mode) {
// static
Callable CodeFactory::LoadICInOptimizedCode(Isolate* isolate,
ContextualMode mode) {
Callable CodeFactory::LoadICInOptimizedCode(
Isolate* isolate, ContextualMode mode,
InlineCacheState initialization_state) {
auto code = LoadIC::initialize_stub_in_optimized_code(
isolate, LoadICState(mode).GetExtraICState(), initialization_state);
if (FLAG_vector_ics) {
return Callable(LoadIC::initialize_stub_in_optimized_code(
isolate, LoadICState(mode).GetExtraICState()),
VectorLoadICDescriptor(isolate));
return Callable(code, VectorLoadICDescriptor(isolate));
}
return CodeFactory::LoadIC(isolate, mode);
return Callable(code, LoadDescriptor(isolate));
}
@ -39,12 +49,14 @@ Callable CodeFactory::KeyedLoadIC(Isolate* isolate) {
// static
Callable CodeFactory::KeyedLoadICInOptimizedCode(Isolate* isolate) {
Callable CodeFactory::KeyedLoadICInOptimizedCode(
Isolate* isolate, InlineCacheState initialization_state) {
auto code = KeyedLoadIC::initialize_stub_in_optimized_code(
isolate, initialization_state);
if (FLAG_vector_ics) {
return Callable(KeyedLoadIC::initialize_stub_in_optimized_code(isolate),
VectorLoadICDescriptor(isolate));
return Callable(code, VectorLoadICDescriptor(isolate));
}
return CodeFactory::KeyedLoadIC(isolate);
return Callable(code, LoadDescriptor(isolate));
}
@ -67,25 +79,35 @@ Callable CodeFactory::CallICInOptimizedCode(Isolate* isolate, int argc,
// static
Callable CodeFactory::StoreIC(Isolate* isolate, LanguageMode language_mode) {
return Callable(StoreIC::initialize_stub(isolate, language_mode),
StoreDescriptor(isolate));
return Callable(
StoreIC::initialize_stub(isolate, language_mode, UNINITIALIZED),
StoreDescriptor(isolate));
}
// static
Callable CodeFactory::KeyedStoreIC(Isolate* isolate,
LanguageMode language_mode) {
Handle<Code> ic = is_strict(language_mode)
? isolate->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate->builtins()->KeyedStoreIC_Initialize();
return Callable(ic, StoreDescriptor(isolate));
return Callable(
KeyedStoreIC::initialize_stub(isolate, language_mode, UNINITIALIZED),
StoreDescriptor(isolate));
}
// static
Callable CodeFactory::KeyedStoreICInOptimizedCode(
Isolate* isolate, LanguageMode language_mode,
InlineCacheState initialization_state) {
return Callable(KeyedStoreIC::initialize_stub(isolate, language_mode,
initialization_state),
StoreDescriptor(isolate));
}
// static
Callable CodeFactory::CompareIC(Isolate* isolate, Token::Value op) {
Handle<Code> code = CompareIC::GetUninitialized(isolate, op);
return Callable(code, BinaryOpDescriptor(isolate));
return Callable(code, CompareDescriptor(isolate));
}

11
deps/v8/src/code-factory.h

@ -32,16 +32,23 @@ class Callable FINAL BASE_EMBEDDED {
class CodeFactory FINAL {
public:
// Initial states for ICs.
static Callable LoadGlobalIC(Isolate* isolate, Handle<GlobalObject> global,
Handle<String> name);
static Callable LoadIC(Isolate* isolate, ContextualMode mode);
static Callable LoadICInOptimizedCode(Isolate* isolate, ContextualMode mode);
static Callable LoadICInOptimizedCode(Isolate* isolate, ContextualMode mode,
InlineCacheState initialization_state);
static Callable KeyedLoadIC(Isolate* isolate);
static Callable KeyedLoadICInOptimizedCode(Isolate* isolate);
static Callable KeyedLoadICInOptimizedCode(
Isolate* isolate, InlineCacheState initialization_state);
static Callable CallIC(Isolate* isolate, int argc,
CallICState::CallType call_type);
static Callable CallICInOptimizedCode(Isolate* isolate, int argc,
CallICState::CallType call_type);
static Callable StoreIC(Isolate* isolate, LanguageMode mode);
static Callable KeyedStoreIC(Isolate* isolate, LanguageMode mode);
static Callable KeyedStoreICInOptimizedCode(
Isolate* isolate, LanguageMode mode,
InlineCacheState initialization_state);
static Callable CompareIC(Isolate* isolate, Token::Value op);

237
deps/v8/src/code-stubs-hydrogen.cc

@ -35,7 +35,7 @@ static LChunk* OptimizeGraph(HGraph* graph) {
class CodeStubGraphBuilderBase : public HGraphBuilder {
public:
explicit CodeStubGraphBuilderBase(CompilationInfoWithZone* info)
explicit CodeStubGraphBuilderBase(CompilationInfo* info)
: HGraphBuilder(info),
arguments_length_(NULL),
info_(info),
@ -100,21 +100,6 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HValue* shared_info,
HValue* native_context);
// Tail calls handler found at array[map_index + 1].
void TailCallHandler(HValue* receiver, HValue* name, HValue* array,
HValue* map_index, HValue* slot, HValue* vector);
// Tail calls handler_code.
void TailCallHandler(HValue* receiver, HValue* name, HValue* slot,
HValue* vector, HValue* handler_code);
void TailCallMiss(HValue* receiver, HValue* name, HValue* slot,
HValue* vector, bool keyed_load);
// Handle MONOMORPHIC and POLYMORPHIC LoadIC and KeyedLoadIC cases.
void HandleArrayCases(HValue* array, HValue* receiver, HValue* name,
HValue* slot, HValue* vector, bool keyed_load);
private:
HValue* BuildArraySingleArgumentConstructor(JSArrayBuilder* builder);
HValue* BuildArrayNArgumentsConstructor(JSArrayBuilder* builder,
@ -122,7 +107,7 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
SmartArrayPointer<HParameter*> parameters_;
HValue* arguments_length_;
CompilationInfoWithZone* info_;
CompilationInfo* info_;
CodeStubDescriptor descriptor_;
HContext* context_;
};
@ -205,7 +190,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
template <class Stub>
class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
public:
explicit CodeStubGraphBuilder(CompilationInfoWithZone* info)
explicit CodeStubGraphBuilder(CompilationInfo* info)
: CodeStubGraphBuilderBase(info) {}
protected:
@ -287,7 +272,8 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
if (FLAG_profile_hydrogen_code_stub_compilation) {
timer.Start();
}
CompilationInfoWithZone info(stub, isolate);
Zone zone;
CompilationInfo info(stub, isolate, &zone);
CodeStubGraphBuilder<Stub> builder(&info);
LChunk* chunk = OptimizeGraph(builder.CreateGraph());
Handle<Code> code = chunk->Codegen();
@ -1354,7 +1340,8 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
StoreGlobalStub::property_cell_placeholder(isolate())));
HValue* cell = Add<HLoadNamedField>(weak_cell, nullptr,
HObjectAccess::ForWeakCellValue());
HObjectAccess access(HObjectAccess::ForCellPayload(isolate()));
Add<HCheckHeapObject>(cell);
HObjectAccess access = HObjectAccess::ForPropertyCellValue();
HValue* cell_contents = Add<HLoadNamedField>(cell, nullptr, access);
if (stub->is_constant()) {
@ -1374,8 +1361,7 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
builder.Then();
builder.Deopt(Deoptimizer::kUnexpectedCellContentsInGlobalStore);
builder.Else();
HStoreNamedField* store = Add<HStoreNamedField>(cell, access, value);
store->MarkReceiverAsCell();
Add<HStoreNamedField>(cell, access, value);
builder.End();
}
@ -1726,7 +1712,7 @@ template <>
class CodeStubGraphBuilder<KeyedLoadGenericStub>
: public CodeStubGraphBuilderBase {
public:
explicit CodeStubGraphBuilder(CompilationInfoWithZone* info)
explicit CodeStubGraphBuilder(CompilationInfo* info)
: CodeStubGraphBuilderBase(info) {}
protected:
@ -2029,211 +2015,6 @@ Handle<Code> KeyedLoadGenericStub::GenerateCode() {
}
void CodeStubGraphBuilderBase::TailCallHandler(HValue* receiver, HValue* name,
HValue* array, HValue* map_index,
HValue* slot, HValue* vector) {
// The handler is at array[map_index + 1]. Compute this with a custom offset
// to HLoadKeyed.
int offset =
GetDefaultHeaderSizeForElementsKind(FAST_ELEMENTS) + kPointerSize;
HValue* handler_code = Add<HLoadKeyed>(
array, map_index, nullptr, FAST_ELEMENTS, NEVER_RETURN_HOLE, offset);
TailCallHandler(receiver, name, slot, vector, handler_code);
}
void CodeStubGraphBuilderBase::TailCallHandler(HValue* receiver, HValue* name,
HValue* slot, HValue* vector,
HValue* handler_code) {
VectorLoadICDescriptor descriptor(isolate());
HValue* op_vals[] = {context(), receiver, name, slot, vector};
Add<HCallWithDescriptor>(handler_code, 0, descriptor,
Vector<HValue*>(op_vals, 5), TAIL_CALL);
// We never return here, it is a tail call.
}
void CodeStubGraphBuilderBase::TailCallMiss(HValue* receiver, HValue* name,
HValue* slot, HValue* vector,
bool keyed_load) {
DCHECK(FLAG_vector_ics);
Add<HTailCallThroughMegamorphicCache>(
receiver, name, slot, vector,
HTailCallThroughMegamorphicCache::ComputeFlags(keyed_load, true));
// We never return here, it is a tail call.
}
void CodeStubGraphBuilderBase::HandleArrayCases(HValue* array, HValue* receiver,
HValue* name, HValue* slot,
HValue* vector,
bool keyed_load) {
HConstant* constant_two = Add<HConstant>(2);
HConstant* constant_three = Add<HConstant>(3);
IfBuilder if_receiver_heap_object(this);
if_receiver_heap_object.IfNot<HIsSmiAndBranch>(receiver);
if_receiver_heap_object.Then();
Push(AddLoadMap(receiver, nullptr));
if_receiver_heap_object.Else();
HConstant* heap_number_map =
Add<HConstant>(isolate()->factory()->heap_number_map());
Push(heap_number_map);
if_receiver_heap_object.End();
HValue* receiver_map = Pop();
HValue* start =
keyed_load ? graph()->GetConstant1() : graph()->GetConstant0();
HValue* weak_cell =
Add<HLoadKeyed>(array, start, nullptr, FAST_ELEMENTS, ALLOW_RETURN_HOLE);
// Load the weak cell value. It may be Smi(0), or a map. Compare nonetheless
// against the receiver_map.
HValue* array_map = Add<HLoadNamedField>(weak_cell, nullptr,
HObjectAccess::ForWeakCellValue());
IfBuilder if_correct_map(this);
if_correct_map.If<HCompareObjectEqAndBranch>(receiver_map, array_map);
if_correct_map.Then();
{ TailCallHandler(receiver, name, array, start, slot, vector); }
if_correct_map.Else();
{
// If our array has more elements, the ic is polymorphic. Look for the
// receiver map in the rest of the array.
HValue* length = AddLoadFixedArrayLength(array, nullptr);
LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement,
constant_two);
start = keyed_load ? constant_three : constant_two;
HValue* key = builder.BeginBody(start, length, Token::LT);
{
HValue* weak_cell = Add<HLoadKeyed>(array, key, nullptr, FAST_ELEMENTS,
ALLOW_RETURN_HOLE);
HValue* array_map = Add<HLoadNamedField>(
weak_cell, nullptr, HObjectAccess::ForWeakCellValue());
IfBuilder if_correct_poly_map(this);
if_correct_poly_map.If<HCompareObjectEqAndBranch>(receiver_map,
array_map);
if_correct_poly_map.Then();
{ TailCallHandler(receiver, name, array, key, slot, vector); }
}
builder.EndBody();
}
if_correct_map.End();
}
template <>
HValue* CodeStubGraphBuilder<VectorLoadStub>::BuildCodeStub() {
HValue* receiver = GetParameter(VectorLoadICDescriptor::kReceiverIndex);
HValue* name = GetParameter(VectorLoadICDescriptor::kNameIndex);
HValue* slot = GetParameter(VectorLoadICDescriptor::kSlotIndex);
HValue* vector = GetParameter(VectorLoadICDescriptor::kVectorIndex);
// If the feedback is an array, then the IC is in the monomorphic or
// polymorphic state.
HValue* feedback =
Add<HLoadKeyed>(vector, slot, nullptr, FAST_ELEMENTS, ALLOW_RETURN_HOLE);
IfBuilder array_checker(this);
array_checker.If<HCompareMap>(feedback,
isolate()->factory()->fixed_array_map());
array_checker.Then();
{ HandleArrayCases(feedback, receiver, name, slot, vector, false); }
array_checker.Else();
{
// Is the IC megamorphic?
IfBuilder mega_checker(this);
HConstant* megamorphic_symbol =
Add<HConstant>(isolate()->factory()->megamorphic_symbol());
mega_checker.If<HCompareObjectEqAndBranch>(feedback, megamorphic_symbol);
mega_checker.Then();
{
// Probe the stub cache.
Add<HTailCallThroughMegamorphicCache>(
receiver, name, slot, vector,
HTailCallThroughMegamorphicCache::ComputeFlags(false, false));
}
mega_checker.End();
}
array_checker.End();
TailCallMiss(receiver, name, slot, vector, false);
return graph()->GetConstant0();
}
Handle<Code> VectorLoadStub::GenerateCode() { return DoGenerateCode(this); }
template <>
HValue* CodeStubGraphBuilder<VectorKeyedLoadStub>::BuildCodeStub() {
HValue* receiver = GetParameter(VectorLoadICDescriptor::kReceiverIndex);
HValue* name = GetParameter(VectorLoadICDescriptor::kNameIndex);
HValue* slot = GetParameter(VectorLoadICDescriptor::kSlotIndex);
HValue* vector = GetParameter(VectorLoadICDescriptor::kVectorIndex);
HConstant* zero = graph()->GetConstant0();
// If the feedback is an array, then the IC is in the monomorphic or
// polymorphic state.
HValue* feedback =
Add<HLoadKeyed>(vector, slot, nullptr, FAST_ELEMENTS, ALLOW_RETURN_HOLE);
IfBuilder array_checker(this);
array_checker.If<HCompareMap>(feedback,
isolate()->factory()->fixed_array_map());
array_checker.Then();
{
// If feedback[0] is 0, then the IC has element handlers and name should be
// a smi. If feedback[0] is a string, verify that it matches name.
HValue* recorded_name = Add<HLoadKeyed>(feedback, zero, nullptr,
FAST_ELEMENTS, ALLOW_RETURN_HOLE);
IfBuilder recorded_name_is_zero(this);
recorded_name_is_zero.If<HCompareObjectEqAndBranch>(recorded_name, zero);
recorded_name_is_zero.Then();
{ Add<HCheckSmi>(name); }
recorded_name_is_zero.Else();
{
IfBuilder strings_match(this);
strings_match.IfNot<HCompareObjectEqAndBranch>(name, recorded_name);
strings_match.Then();
TailCallMiss(receiver, name, slot, vector, true);
strings_match.End();
}
recorded_name_is_zero.End();
HandleArrayCases(feedback, receiver, name, slot, vector, true);
}
array_checker.Else();
{
// Check if the IC is in megamorphic state.
IfBuilder megamorphic_checker(this);
HConstant* megamorphic_symbol =
Add<HConstant>(isolate()->factory()->megamorphic_symbol());
megamorphic_checker.If<HCompareObjectEqAndBranch>(feedback,
megamorphic_symbol);
megamorphic_checker.Then();
{
// Tail-call to the megamorphic KeyedLoadIC, treating it like a handler.
Handle<Code> stub = KeyedLoadIC::ChooseMegamorphicStub(isolate());
HValue* constant_stub = Add<HConstant>(stub);
LoadDescriptor descriptor(isolate());
HValue* op_vals[] = {context(), receiver, name};
Add<HCallWithDescriptor>(constant_stub, 0, descriptor,
Vector<HValue*>(op_vals, 3), TAIL_CALL);
// We never return here, it is a tail call.
}
megamorphic_checker.End();
}
array_checker.End();
TailCallMiss(receiver, name, slot, vector, true);
return zero;
}
Handle<Code> VectorKeyedLoadStub::GenerateCode() {
return DoGenerateCode(this);
}
Handle<Code> MegamorphicLoadStub::GenerateCode() {
return DoGenerateCode(this);
}

24
deps/v8/src/code-stubs.cc

@ -620,26 +620,6 @@ CallInterfaceDescriptor StoreTransitionStub::GetCallInterfaceDescriptor() {
}
static void InitializeVectorLoadStub(Isolate* isolate,
CodeStubDescriptor* descriptor,
Address deoptimization_handler) {
DCHECK(FLAG_vector_ics);
descriptor->Initialize(deoptimization_handler);
}
void VectorLoadStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
InitializeVectorLoadStub(isolate(), descriptor,
FUNCTION_ADDR(LoadIC_MissFromStubFailure));
}
void VectorKeyedLoadStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
InitializeVectorLoadStub(isolate(), descriptor,
FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
}
void MegamorphicLoadStub::InitializeDescriptor(CodeStubDescriptor* d) {}
@ -684,7 +664,7 @@ void CreateWeakCellStub::InitializeDescriptor(CodeStubDescriptor* d) {}
void RegExpConstructResultStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
descriptor->Initialize(
Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
Runtime::FunctionForId(Runtime::kRegExpConstructResultRT)->entry);
}
@ -730,7 +710,7 @@ void BinaryOpWithAllocationSiteStub::InitializeDescriptor(
void StringAddStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(Runtime::FunctionForId(Runtime::kStringAdd)->entry);
descriptor->Initialize(Runtime::FunctionForId(Runtime::kStringAddRT)->entry);
}

66
deps/v8/src/code-stubs.h

@ -85,8 +85,8 @@ namespace internal {
V(StringAdd) \
V(ToBoolean) \
V(TransitionElementsKind) \
V(VectorKeyedLoad) \
V(VectorLoad) \
V(VectorRawKeyedLoad) \
V(VectorRawLoad) \
/* IC Handler stubs */ \
V(LoadConstant) \
V(LoadField) \
@ -614,7 +614,7 @@ class FastNewClosureStub : public HydrogenCodeStub {
private:
STATIC_ASSERT(LANGUAGE_END == 3);
class LanguageModeBits : public BitField<LanguageMode, 0, 2> {};
class FunctionKindBits : public BitField<FunctionKind, 2, 7> {};
class FunctionKindBits : public BitField<FunctionKind, 2, 8> {};
DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewClosure);
DEFINE_HYDROGEN_CODE_STUB(FastNewClosure, HydrogenCodeStub);
@ -1777,6 +1777,15 @@ enum ReceiverCheckMode {
};
enum EmbedMode {
// The code being generated is part of an IC handler, which may MISS
// to an IC in failure cases.
PART_OF_IC_HANDLER,
NOT_PART_OF_IC_HANDLER
};
// Generates code implementing String.prototype.charCodeAt.
//
// Only supports the case when the receiver is a string and the index
@ -1813,7 +1822,7 @@ class StringCharCodeAtGenerator {
// Generates the slow case code. Must not be naturally
// reachable. Expected to be put after a ret instruction (e.g., in
// deferred code). Always jumps back to the fast case.
void GenerateSlow(MacroAssembler* masm,
void GenerateSlow(MacroAssembler* masm, EmbedMode embed_mode,
const RuntimeCallHelper& call_helper);
// Skip handling slow case and directly jump to bailout.
@ -1913,9 +1922,9 @@ class StringCharAtGenerator {
// Generates the slow case code. Must not be naturally
// reachable. Expected to be put after a ret instruction (e.g., in
// deferred code). Always jumps back to the fast case.
void GenerateSlow(MacroAssembler* masm,
void GenerateSlow(MacroAssembler* masm, EmbedMode embed_mode,
const RuntimeCallHelper& call_helper) {
char_code_at_generator_.GenerateSlow(masm, call_helper);
char_code_at_generator_.GenerateSlow(masm, embed_mode, call_helper);
char_from_code_generator_.GenerateSlow(masm, call_helper);
}
@ -2062,38 +2071,49 @@ class MegamorphicLoadStub : public HydrogenCodeStub {
};
class VectorLoadStub : public HydrogenCodeStub {
class VectorRawLoadStub : public PlatformCodeStub {
public:
explicit VectorLoadStub(Isolate* isolate, const LoadICState& state)
: HydrogenCodeStub(isolate) {
set_sub_minor_key(state.GetExtraICState());
explicit VectorRawLoadStub(Isolate* isolate, const LoadICState& state)
: PlatformCodeStub(isolate) {
minor_key_ = state.GetExtraICState();
}
Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
void GenerateForTrampoline(MacroAssembler* masm);
InlineCacheState GetICState() const FINAL { return DEFAULT; }
virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
ExtraICState GetExtraICState() const FINAL {
return static_cast<ExtraICState>(sub_minor_key());
}
virtual InlineCacheState GetICState() const FINAL OVERRIDE { return DEFAULT; }
private:
LoadICState state() const { return LoadICState(GetExtraICState()); }
virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
return static_cast<ExtraICState>(minor_key_);
}
DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorLoadIC);
DEFINE_HYDROGEN_CODE_STUB(VectorLoad, HydrogenCodeStub);
DEFINE_PLATFORM_CODE_STUB(VectorRawLoad, PlatformCodeStub);
protected:
void GenerateImpl(MacroAssembler* masm, bool in_frame);
};
class VectorKeyedLoadStub : public VectorLoadStub {
class VectorRawKeyedLoadStub : public PlatformCodeStub {
public:
explicit VectorKeyedLoadStub(Isolate* isolate)
: VectorLoadStub(isolate, LoadICState(0)) {}
explicit VectorRawKeyedLoadStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
Code::Kind GetCodeKind() const OVERRIDE { return Code::KEYED_LOAD_IC; }
void GenerateForTrampoline(MacroAssembler* masm);
virtual Code::Kind GetCodeKind() const OVERRIDE {
return Code::KEYED_LOAD_IC;
}
virtual InlineCacheState GetICState() const FINAL OVERRIDE { return DEFAULT; }
DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorLoadIC);
DEFINE_HYDROGEN_CODE_STUB(VectorKeyedLoad, VectorLoadStub);
DEFINE_PLATFORM_CODE_STUB(VectorRawKeyedLoad, PlatformCodeStub);
protected:
void GenerateImpl(MacroAssembler* masm, bool in_frame);
};

38
deps/v8/src/codegen.cc

@ -12,6 +12,7 @@
#include "src/compiler.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/parser.h"
#include "src/prettyprinter.h"
#include "src/rewriter.h"
#include "src/runtime/runtime.h"
@ -134,13 +135,13 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
}
#ifdef DEBUG
if (!info->IsStub() && print_source) {
if (info->parse_info() && print_source) {
PrintF("--- Source from AST ---\n%s\n",
PrettyPrinter(info->isolate(), info->zone())
.PrintProgram(info->function()));
}
if (!info->IsStub() && print_ast) {
if (info->parse_info() && print_ast) {
PrintF("--- AST ---\n%s\n", AstPrinter(info->isolate(), info->zone())
.PrintProgram(info->function()));
}
@ -181,14 +182,27 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
(info->IsStub() && FLAG_print_code_stubs) ||
(info->IsOptimizing() && FLAG_print_opt_code));
if (print_code) {
// Print the source code if available.
FunctionLiteral* function = info->function();
bool print_source = code->kind() == Code::OPTIMIZED_FUNCTION ||
code->kind() == Code::FUNCTION;
const char* debug_name;
SmartArrayPointer<char> debug_name_holder;
if (info->IsStub()) {
CodeStub::Major major_key = info->code_stub()->MajorKey();
debug_name = CodeStub::MajorName(major_key, false);
} else {
debug_name_holder =
info->parse_info()->function()->debug_name()->ToCString();
debug_name = debug_name_holder.get();
}
CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
OFStream os(tracing_scope.file());
// Print the source code if available.
FunctionLiteral* function = nullptr;
bool print_source =
info->parse_info() && (code->kind() == Code::OPTIMIZED_FUNCTION ||
code->kind() == Code::FUNCTION);
if (print_source) {
function = info->function();
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
os << "--- Raw source ---\n";
@ -207,10 +221,9 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
}
}
if (info->IsOptimizing()) {
if (FLAG_print_unopt_code) {
if (FLAG_print_unopt_code && info->parse_info()) {
os << "--- Unoptimized code ---\n";
info->closure()->shared()->code()->Disassemble(
function->debug_name()->ToCString().get(), os);
info->closure()->shared()->code()->Disassemble(debug_name, os);
}
os << "--- Optimized code ---\n"
<< "optimization_id = " << info->optimization_id() << "\n";
@ -220,12 +233,7 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
if (print_source) {
os << "source_position = " << function->start_position() << "\n";
}
if (info->IsStub()) {
CodeStub::Major major_key = info->code_stub()->MajorKey();
code->Disassemble(CodeStub::MajorName(major_key, false), os);
} else {
code->Disassemble(function->debug_name()->ToCString().get(), os);
}
code->Disassemble(debug_name, os);
os << "--- End code ---\n";
}
#endif // ENABLE_DISASSEMBLER

41
deps/v8/src/collection.js

@ -20,26 +20,17 @@ function SetConstructor(iterable) {
throw MakeTypeError('constructor_not_function', ['Set']);
}
var iter, adder;
%_SetInitialize(this);
if (!IS_NULL_OR_UNDEFINED(iterable)) {
iter = GetIterator(ToObject(iterable));
adder = this.add;
var adder = this.add;
if (!IS_SPEC_FUNCTION(adder)) {
throw MakeTypeError('property_not_function', ['add', this]);
}
}
%_SetInitialize(this);
if (IS_UNDEFINED(iter)) return;
var next, done;
while (!(next = iter.next()).done) {
if (!IS_SPEC_OBJECT(next)) {
throw MakeTypeError('iterator_result_not_an_object', [next]);
for (var value of iterable) {
%_CallFunction(this, value, adder);
}
%_CallFunction(this, next.value, adder);
}
}
@ -160,30 +151,20 @@ function MapConstructor(iterable) {
throw MakeTypeError('constructor_not_function', ['Map']);
}
var iter, adder;
%_MapInitialize(this);
if (!IS_NULL_OR_UNDEFINED(iterable)) {
iter = GetIterator(ToObject(iterable));
adder = this.set;
var adder = this.set;
if (!IS_SPEC_FUNCTION(adder)) {
throw MakeTypeError('property_not_function', ['set', this]);
}
}
%_MapInitialize(this);
if (IS_UNDEFINED(iter)) return;
var next, done, nextItem;
while (!(next = iter.next()).done) {
if (!IS_SPEC_OBJECT(next)) {
throw MakeTypeError('iterator_result_not_an_object', [next]);
}
nextItem = next.value;
if (!IS_SPEC_OBJECT(nextItem)) {
throw MakeTypeError('iterator_value_not_an_object', [nextItem]);
for (var nextItem of iterable) {
if (!IS_SPEC_OBJECT(nextItem)) {
throw MakeTypeError('iterator_value_not_an_object', [nextItem]);
}
%_CallFunction(this, nextItem[0], nextItem[1], adder);
}
%_CallFunction(this, nextItem[0], nextItem[1], adder);
}
}
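
The collection.js rewrite above replaces the hand-rolled GetIterator/next loops in the Set and Map constructors with for-of over the argument, and runs %_SetInitialize/%_MapInitialize before the adder is looked up. The observable contract should be unchanged; a quick sketch:

    var s = new Set('aab');                 // any iterable works; a string yields its characters
    s.size;                                 // 2 -> {'a', 'b'}

    var m = new Map([['x', 1], ['y', 2]]);  // each entry must still be an object
    m.get('y');                             // 2

    // new Map([1, 2]);                     // still a TypeError: iterator value is not an object
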

1
deps/v8/src/compilation-cache.cc

@ -6,7 +6,6 @@
#include "src/assembler.h"
#include "src/compilation-cache.h"
#include "src/serialize.h"
namespace v8 {
namespace internal {

579
deps/v8/src/compiler.cc

@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/compiler.h"
#include <algorithm>
#include "src/ast-numbering.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
@ -28,13 +28,13 @@
#include "src/scanner-character-streams.h"
#include "src/scopeinfo.h"
#include "src/scopes.h"
#include "src/snapshot/serialize.h"
#include "src/typing.h"
#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
std::ostream& operator<<(std::ostream& os, const SourcePosition& p) {
if (p.IsUnknown()) {
return os << "<?>";
@ -46,137 +46,65 @@ std::ostream& operator<<(std::ostream& os, const SourcePosition& p) {
}
ScriptData::ScriptData(const byte* data, int length)
: owns_data_(false), rejected_(false), data_(data), length_(length) {
if (!IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment)) {
byte* copy = NewArray<byte>(length);
DCHECK(IsAligned(reinterpret_cast<intptr_t>(copy), kPointerAlignment));
CopyBytes(copy, data, length);
data_ = copy;
AcquireDataOwnership();
#define PARSE_INFO_GETTER(type, name) \
type CompilationInfo::name() const { \
CHECK(parse_info()); \
return parse_info()->name(); \
}
}
CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone)
: flags_(kThisHasUses),
script_(script),
source_stream_(NULL),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
optimization_id_(-1),
ast_value_factory_(NULL),
ast_value_factory_owned_(false),
aborted_due_to_dependency_change_(false),
osr_expr_stack_height_(0) {
Initialize(script->GetIsolate(), BASE, zone);
}
#define PARSE_INFO_GETTER_WITH_DEFAULT(type, name, def) \
type CompilationInfo::name() const { \
return parse_info() ? parse_info()->name() : def; \
}
CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
Zone* zone)
: flags_(kLazy | kThisHasUses),
shared_info_(shared_info),
script_(Handle<Script>(Script::cast(shared_info->script()))),
source_stream_(NULL),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
optimization_id_(-1),
ast_value_factory_(NULL),
ast_value_factory_owned_(false),
aborted_due_to_dependency_change_(false),
osr_expr_stack_height_(0) {
Initialize(script_->GetIsolate(), BASE, zone);
}
PARSE_INFO_GETTER(Handle<Script>, script)
PARSE_INFO_GETTER(bool, is_eval)
PARSE_INFO_GETTER(bool, is_native)
PARSE_INFO_GETTER(bool, is_module)
PARSE_INFO_GETTER(LanguageMode, language_mode)
PARSE_INFO_GETTER_WITH_DEFAULT(Handle<JSFunction>, closure,
Handle<JSFunction>::null())
PARSE_INFO_GETTER(FunctionLiteral*, function)
PARSE_INFO_GETTER_WITH_DEFAULT(Scope*, scope, nullptr)
PARSE_INFO_GETTER(Handle<Context>, context)
PARSE_INFO_GETTER(Handle<SharedFunctionInfo>, shared_info)
#undef PARSE_INFO_GETTER
#undef PARSE_INFO_GETTER_WITH_DEFAULT
CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
: flags_(kLazy | kThisHasUses),
closure_(closure),
shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
script_(Handle<Script>(Script::cast(shared_info_->script()))),
source_stream_(NULL),
context_(closure->context()),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
optimization_id_(-1),
ast_value_factory_(NULL),
ast_value_factory_owned_(false),
aborted_due_to_dependency_change_(false),
osr_expr_stack_height_(0) {
Initialize(script_->GetIsolate(), BASE, zone);
}
// Exactly like a CompilationInfo, except being allocated via {new} and it also
// creates and enters a Zone on construction and deallocates it on destruction.
class CompilationInfoWithZone : public CompilationInfo {
public:
explicit CompilationInfoWithZone(Handle<JSFunction> function)
: CompilationInfo(new ParseInfo(&zone_, function)) {}
CompilationInfo::CompilationInfo(CodeStub* stub, Isolate* isolate, Zone* zone)
: flags_(kLazy | kThisHasUses),
source_stream_(NULL),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
optimization_id_(-1),
ast_value_factory_(NULL),
ast_value_factory_owned_(false),
aborted_due_to_dependency_change_(false),
osr_expr_stack_height_(0) {
Initialize(isolate, STUB, zone);
code_stub_ = stub;
}
// Virtual destructor because a CompilationInfoWithZone has to exit the
// zone scope and get rid of dependent maps even when the destructor is
// called when cast as a CompilationInfo.
virtual ~CompilationInfoWithZone() {
DisableFutureOptimization();
RollbackDependencies();
delete parse_info_;
parse_info_ = nullptr;
}
private:
Zone zone_;
};
CompilationInfo::CompilationInfo(
ScriptCompiler::ExternalSourceStream* stream,
ScriptCompiler::StreamedSource::Encoding encoding, Isolate* isolate,
Zone* zone)
: flags_(kThisHasUses),
source_stream_(stream),
source_stream_encoding_(encoding),
osr_ast_id_(BailoutId::None()),
parameter_count_(0),
optimization_id_(-1),
ast_value_factory_(NULL),
ast_value_factory_owned_(false),
aborted_due_to_dependency_change_(false),
osr_expr_stack_height_(0) {
Initialize(isolate, BASE, zone);
bool CompilationInfo::has_shared_info() const {
return parse_info_ && !parse_info_->shared_info().is_null();
}
void CompilationInfo::Initialize(Isolate* isolate,
Mode mode,
Zone* zone) {
isolate_ = isolate;
function_ = NULL;
scope_ = NULL;
script_scope_ = NULL;
extension_ = NULL;
cached_data_ = NULL;
compile_options_ = ScriptCompiler::kNoCompileOptions;
zone_ = zone;
deferred_handles_ = NULL;
code_stub_ = NULL;
prologue_offset_ = Code::kPrologueOffsetNotSet;
opt_count_ = shared_info().is_null() ? 0 : shared_info()->opt_count();
no_frame_ranges_ = isolate->cpu_profiler()->is_profiling()
? new List<OffsetRange>(2) : NULL;
if (FLAG_hydrogen_track_positions) {
inlined_function_infos_ = new List<InlinedFunctionInfo>(5);
inlining_id_to_function_id_ = new List<int>(5);
} else {
inlined_function_infos_ = NULL;
inlining_id_to_function_id_ = NULL;
}
for (int i = 0; i < DependentCode::kGroupCount; i++) {
dependencies_[i] = NULL;
}
if (mode == STUB) {
mode_ = STUB;
return;
}
mode_ = mode;
if (!script_.is_null() && script_->type()->value() == Script::TYPE_NATIVE) {
MarkAsNative();
}
CompilationInfo::CompilationInfo(ParseInfo* parse_info)
: CompilationInfo(parse_info, nullptr, BASE, parse_info->isolate(),
parse_info->zone()) {
// Compiling for the snapshot typically results in different code than
// compiling later on. This means that code recompiled with deoptimization
// support won't be "equivalent" (as defined by SharedFunctionInfo::
@ -187,34 +115,54 @@ void CompilationInfo::Initialize(Isolate* isolate,
if (isolate_->debug()->is_active()) MarkAsDebug();
if (FLAG_context_specialization) MarkAsContextSpecializing();
if (FLAG_turbo_builtin_inlining) MarkAsBuiltinInliningEnabled();
if (FLAG_turbo_inlining) MarkAsInliningEnabled();
if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
if (FLAG_turbo_types) MarkAsTypingEnabled();
if (!shared_info_.is_null()) {
DCHECK(is_sloppy(language_mode()));
SetLanguageMode(shared_info_->language_mode());
}
bailout_reason_ = kNoReason;
if (!shared_info().is_null() && shared_info()->is_compiled()) {
if (has_shared_info() && shared_info()->is_compiled()) {
// We should initialize the CompilationInfo feedback vector from the
// passed in shared info, rather than creating a new one.
feedback_vector_ =
Handle<TypeFeedbackVector>(shared_info()->feedback_vector(), isolate);
feedback_vector_ = Handle<TypeFeedbackVector>(
shared_info()->feedback_vector(), parse_info->isolate());
}
}
CompilationInfo::CompilationInfo(CodeStub* stub, Isolate* isolate, Zone* zone)
: CompilationInfo(nullptr, stub, STUB, isolate, zone) {}
CompilationInfo::CompilationInfo(ParseInfo* parse_info, CodeStub* code_stub,
Mode mode, Isolate* isolate, Zone* zone)
: parse_info_(parse_info),
isolate_(isolate),
flags_(0),
code_stub_(code_stub),
mode_(mode),
osr_ast_id_(BailoutId::None()),
zone_(zone),
deferred_handles_(nullptr),
bailout_reason_(kNoReason),
prologue_offset_(Code::kPrologueOffsetNotSet),
no_frame_ranges_(isolate->cpu_profiler()->is_profiling()
? new List<OffsetRange>(2)
: nullptr),
track_positions_(FLAG_hydrogen_track_positions ||
isolate->cpu_profiler()->is_profiling()),
opt_count_(has_shared_info() ? shared_info()->opt_count() : 0),
parameter_count_(0),
optimization_id_(-1),
aborted_due_to_dependency_change_(false),
osr_expr_stack_height_(0) {
std::fill_n(dependencies_, DependentCode::kGroupCount, nullptr);
}
CompilationInfo::~CompilationInfo() {
if (GetFlag(kDisableFutureOptimization)) {
shared_info()->DisableOptimization(bailout_reason());
}
DisableFutureOptimization();
delete deferred_handles_;
delete no_frame_ranges_;
delete inlined_function_infos_;
delete inlining_id_to_function_id_;
if (ast_value_factory_owned_) delete ast_value_factory_;
#ifdef DEBUG
// Check that no dependent maps have been added or added dependent maps have
// been rolled back or committed.
@ -273,33 +221,21 @@ void CompilationInfo::RollbackDependencies() {
int CompilationInfo::num_parameters() const {
if (IsStub()) {
DCHECK(parameter_count_ > 0);
return parameter_count_;
} else {
return scope()->num_parameters();
}
return has_scope() ? scope()->num_parameters() : parameter_count_;
}
int CompilationInfo::num_heap_slots() const {
if (IsStub()) {
return 0;
} else {
return scope()->num_heap_slots();
}
return has_scope() ? scope()->num_heap_slots() : 0;
}
Code::Flags CompilationInfo::flags() const {
if (IsStub()) {
return Code::ComputeFlags(code_stub()->GetCodeKind(),
code_stub()->GetICState(),
code_stub()->GetExtraICState(),
code_stub()->GetStubType());
} else {
return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
}
return code_stub() != nullptr
? Code::ComputeFlags(
code_stub()->GetCodeKind(), code_stub()->GetICState(),
code_stub()->GetExtraICState(), code_stub()->GetStubType())
: Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
}
@ -307,17 +243,10 @@ Code::Flags CompilationInfo::flags() const {
// profiler, so they trigger their own optimization when they're called
// for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
bool CompilationInfo::ShouldSelfOptimize() {
return FLAG_crankshaft &&
!function()->flags()->Contains(kDontSelfOptimize) &&
!function()->dont_optimize() &&
function()->scope()->AllowsLazyCompilation() &&
(shared_info().is_null() || !shared_info()->optimization_disabled());
}
void CompilationInfo::PrepareForCompilation(Scope* scope) {
DCHECK(scope_ == NULL);
scope_ = scope;
return FLAG_crankshaft && !function()->flags()->Contains(kDontSelfOptimize) &&
!function()->dont_optimize() &&
function()->scope()->AllowsLazyCompilation() &&
(!has_shared_info() || !shared_info()->optimization_disabled());
}
@ -330,87 +259,95 @@ void CompilationInfo::EnsureFeedbackVector() {
bool CompilationInfo::is_simple_parameter_list() {
return scope_->is_simple_parameter_list();
return scope()->is_simple_parameter_list();
}
int CompilationInfo::TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
SourcePosition position) {
if (!FLAG_hydrogen_track_positions) {
return 0;
}
DCHECK(inlined_function_infos_);
DCHECK(inlining_id_to_function_id_);
int id = 0;
for (; id < inlined_function_infos_->length(); id++) {
if (inlined_function_infos_->at(id).shared().is_identical_to(shared)) {
break;
}
}
if (id == inlined_function_infos_->length()) {
inlined_function_infos_->Add(InlinedFunctionInfo(shared));
if (!shared->script()->IsUndefined()) {
Handle<Script> script(Script::cast(shared->script()));
if (!script->source()->IsUndefined()) {
CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
OFStream os(tracing_scope.file());
os << "--- FUNCTION SOURCE (" << shared->DebugName()->ToCString().get()
<< ") id{" << optimization_id() << "," << id << "} ---\n";
{
DisallowHeapAllocation no_allocation;
int start = shared->start_position();
int len = shared->end_position() - start;
String::SubStringRange source(String::cast(script->source()), start,
len);
for (const auto& c : source) {
os << AsReversiblyEscapedUC16(c);
}
SourcePosition position,
int parent_id) {
DCHECK(track_positions_);
int inline_id = static_cast<int>(inlined_function_infos_.size());
InlinedFunctionInfo info(parent_id, position, UnboundScript::kNoScriptId,
shared->start_position());
if (!shared->script()->IsUndefined()) {
Handle<Script> script(Script::cast(shared->script()));
info.script_id = script->id()->value();
if (FLAG_hydrogen_track_positions && !script->source()->IsUndefined()) {
CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
OFStream os(tracing_scope.file());
os << "--- FUNCTION SOURCE (" << shared->DebugName()->ToCString().get()
<< ") id{" << optimization_id() << "," << inline_id << "} ---\n";
{
DisallowHeapAllocation no_allocation;
int start = shared->start_position();
int len = shared->end_position() - start;
String::SubStringRange source(String::cast(script->source()), start,
len);
for (const auto& c : source) {
os << AsReversiblyEscapedUC16(c);
}
os << "\n--- END ---\n";
}
os << "\n--- END ---\n";
}
}
int inline_id = inlining_id_to_function_id_->length();
inlining_id_to_function_id_->Add(id);
inlined_function_infos_.push_back(info);
if (inline_id != 0) {
if (FLAG_hydrogen_track_positions && inline_id != 0) {
CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
OFStream os(tracing_scope.file());
os << "INLINE (" << shared->DebugName()->ToCString().get() << ") id{"
<< optimization_id() << "," << id << "} AS " << inline_id << " AT "
<< position << std::endl;
<< optimization_id() << "," << inline_id << "} AS " << inline_id
<< " AT " << position << std::endl;
}
return inline_id;
}
void CompilationInfo::LogDeoptCallPosition(int pc_offset, int inlining_id) {
if (!track_positions_ || IsStub()) return;
DCHECK_LT(static_cast<size_t>(inlining_id), inlined_function_infos_.size());
inlined_function_infos_.at(inlining_id).deopt_pc_offsets.push_back(pc_offset);
}
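For context, a self-contained sketch (not from the diff) of the bookkeeping pattern used by TraceInlinedFunction and LogDeoptCallPosition above: each inlined function gets an id equal to its index in a vector, and deopt pc offsets are appended to that entry. Class and field names are illustrative.

#include <cassert>
#include <cstddef>
#include <vector>

struct InlineEntry {
  int parent_id;
  int start_position;
  std::vector<size_t> deopt_pc_offsets;
};

class InlineRegistry {
 public:
  // Returns the new entry's index, which serves as its inline id.
  int RegisterInline(int parent_id, int start_position) {
    int inline_id = static_cast<int>(entries_.size());
    entries_.push_back(InlineEntry{parent_id, start_position, {}});
    return inline_id;
  }

  // Records a deopt call position against an already-registered inline id.
  void LogDeoptCallPosition(size_t pc_offset, int inlining_id) {
    assert(static_cast<size_t>(inlining_id) < entries_.size());
    entries_.at(inlining_id).deopt_pc_offsets.push_back(pc_offset);
  }

 private:
  std::vector<InlineEntry> entries_;
};

int main() {
  InlineRegistry registry;
  int outer = registry.RegisterInline(-1, 0);     // -1: no parent (kNoParentId)
  int inner = registry.RegisterInline(outer, 42);
  registry.LogDeoptCallPosition(0x40, inner);
  return 0;
}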
class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
public:
explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info)
: HOptimizedGraphBuilder(info) {
}
#define DEF_VISIT(type) \
void Visit##type(type* node) OVERRIDE { \
if (node->position() != RelocInfo::kNoPosition) { \
SetSourcePosition(node->position()); \
} \
HOptimizedGraphBuilder::Visit##type(node); \
#define DEF_VISIT(type) \
void Visit##type(type* node) OVERRIDE { \
SourcePosition old_position = SourcePosition::Unknown(); \
if (node->position() != RelocInfo::kNoPosition) { \
old_position = source_position(); \
SetSourcePosition(node->position()); \
} \
HOptimizedGraphBuilder::Visit##type(node); \
if (!old_position.IsUnknown()) { \
set_source_position(old_position); \
} \
}
EXPRESSION_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
#define DEF_VISIT(type) \
void Visit##type(type* node) OVERRIDE { \
if (node->position() != RelocInfo::kNoPosition) { \
SetSourcePosition(node->position()); \
} \
HOptimizedGraphBuilder::Visit##type(node); \
#define DEF_VISIT(type) \
void Visit##type(type* node) OVERRIDE { \
SourcePosition old_position = SourcePosition::Unknown(); \
if (node->position() != RelocInfo::kNoPosition) { \
old_position = source_position(); \
SetSourcePosition(node->position()); \
} \
HOptimizedGraphBuilder::Visit##type(node); \
if (!old_position.IsUnknown()) { \
set_source_position(old_position); \
} \
}
STATEMENT_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
@ -501,6 +438,13 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
if (info()->is_osr()) os << " OSR";
os << "]" << std::endl;
}
if (info()->shared_info()->asm_function()) {
info()->MarkAsContextSpecializing();
} else if (FLAG_turbo_type_feedback) {
info()->MarkAsTypeFeedbackEnabled();
}
Timer t(this, &time_taken_to_create_graph_);
compiler::Pipeline pipeline(info());
pipeline.GenerateCode();
@ -509,6 +453,9 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
}
}
// Do not use Crankshaft if the code is intended to be serialized.
if (!isolate()->use_crankshaft()) return SetLastStatus(FAILED);
if (FLAG_trace_opt) {
OFStream os(stdout);
os << "[compiling method " << Brief(*info()->closure())
@ -531,12 +478,12 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
info()->shared_info()->disable_optimization_reason());
}
graph_builder_ = (FLAG_hydrogen_track_positions || FLAG_trace_ic)
? new(info()->zone()) HOptimizedGraphBuilderWithPositions(info())
: new(info()->zone()) HOptimizedGraphBuilder(info());
graph_builder_ = (info()->is_tracking_positions() || FLAG_trace_ic)
? new (info()->zone())
HOptimizedGraphBuilderWithPositions(info())
: new (info()->zone()) HOptimizedGraphBuilder(info());
Timer t(this, &time_taken_to_create_graph_);
info()->set_this_has_uses(false);
graph_ = graph_builder_->CreateGraph();
if (isolate()->has_pending_exception()) {
@ -586,7 +533,8 @@ OptimizedCompileJob::Status OptimizedCompileJob::GenerateCode() {
// TODO(turbofan): Currently everything is done in the first phase.
if (!info()->code().is_null()) {
if (FLAG_turbo_deoptimization) {
info()->context()->native_context()->AddOptimizedCode(*info()->code());
info()->parse_info()->context()->native_context()->AddOptimizedCode(
*info()->code());
}
RecordOptimizationStats();
return last_status();
@ -698,7 +646,7 @@ static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
// enabled as finding the line number is not free.
if (info->isolate()->logger()->is_logging_code_events() ||
info->isolate()->cpu_profiler()->is_profiling()) {
Handle<Script> script = info->script();
Handle<Script> script = info->parse_info()->script();
Handle<Code> code = info->code();
if (code.is_identical_to(info->isolate()->builtins()->CompileLazy())) {
return;
@ -714,16 +662,13 @@ static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
CodeCreateEvent(log_tag, *code, *shared, info, script_name,
line_num, column_num));
}
GDBJIT(AddCode(Handle<String>(shared->DebugName()),
Handle<Script>(info->script()), Handle<Code>(info->code()),
info));
}
static bool CompileUnoptimizedCode(CompilationInfo* info) {
DCHECK(AllowCompilation::IsAllowed(info->isolate()));
if (!Compiler::Analyze(info) || !FullCodeGenerator::MakeCode(info)) {
if (!Compiler::Analyze(info->parse_info()) ||
!FullCodeGenerator::MakeCode(info)) {
Isolate* isolate = info->isolate();
if (!isolate->has_pending_exception()) isolate->StackOverflow();
return false;
@ -738,7 +683,7 @@ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
PostponeInterruptsScope postpone(info->isolate());
// Parse and update CompilationInfo with the results.
if (!Parser::ParseStatic(info)) return MaybeHandle<Code>();
if (!Parser::ParseStatic(info->parse_info())) return MaybeHandle<Code>();
Handle<SharedFunctionInfo> shared = info->shared_info();
FunctionLiteral* lit = info->function();
shared->set_language_mode(lit->language_mode());
@ -814,22 +759,23 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
}
static bool Renumber(CompilationInfo* info) {
if (!AstNumbering::Renumber(info->isolate(), info->zone(),
info->function())) {
static bool Renumber(ParseInfo* parse_info) {
if (!AstNumbering::Renumber(parse_info->isolate(), parse_info->zone(),
parse_info->function())) {
return false;
}
if (!info->shared_info().is_null()) {
FunctionLiteral* lit = info->function();
info->shared_info()->set_ast_node_count(lit->ast_node_count());
MaybeDisableOptimization(info->shared_info(), lit->dont_optimize_reason());
info->shared_info()->set_dont_cache(lit->flags()->Contains(kDontCache));
Handle<SharedFunctionInfo> shared_info = parse_info->shared_info();
if (!shared_info.is_null()) {
FunctionLiteral* lit = parse_info->function();
shared_info->set_ast_node_count(lit->ast_node_count());
MaybeDisableOptimization(shared_info, lit->dont_optimize_reason());
shared_info->set_dont_cache(lit->flags()->Contains(kDontCache));
}
return true;
}
bool Compiler::Analyze(CompilationInfo* info) {
bool Compiler::Analyze(ParseInfo* info) {
DCHECK(info->function() != NULL);
if (!Rewriter::Rewrite(info)) return false;
if (!Scope::Analyze(info)) return false;
@ -839,14 +785,14 @@ bool Compiler::Analyze(CompilationInfo* info) {
}
bool Compiler::ParseAndAnalyze(CompilationInfo* info) {
bool Compiler::ParseAndAnalyze(ParseInfo* info) {
if (!Parser::ParseStatic(info)) return false;
return Compiler::Analyze(info);
}
static bool GetOptimizedCodeNow(CompilationInfo* info) {
if (!Compiler::ParseAndAnalyze(info)) return false;
if (!Compiler::ParseAndAnalyze(info->parse_info())) return false;
TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
@ -883,8 +829,11 @@ static bool GetOptimizedCodeLater(CompilationInfo* info) {
}
CompilationHandleScope handle_scope(info);
if (!Compiler::ParseAndAnalyze(info)) return false;
info->SaveHandles(); // Copy handles to the compilation handle scope.
if (!Compiler::ParseAndAnalyze(info->parse_info())) return false;
// Reopen handles in the new CompilationHandleScope.
info->ReopenHandlesInNewHandleScope();
info->parse_info()->ReopenHandlesInNewHandleScope();
TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
@ -930,14 +879,14 @@ MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
// If the debugger is active, do not compile with turbofan unless we can
// deopt from turbofan code.
if (FLAG_turbo_asm && function->shared()->asm_function() &&
(FLAG_turbo_deoptimization || !isolate->debug()->is_active())) {
(FLAG_turbo_deoptimization || !isolate->debug()->is_active()) &&
!FLAG_turbo_osr) {
CompilationInfoWithZone info(function);
VMState<COMPILER> state(isolate);
PostponeInterruptsScope postpone(isolate);
info.SetOptimizing(BailoutId::None(), handle(function->shared()->code()));
info.MarkAsContextSpecializing();
if (GetOptimizedCodeNow(&info)) {
DCHECK(function->shared()->is_compiled());
@ -957,7 +906,7 @@ MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
ASSIGN_RETURN_ON_EXCEPTION(isolate, result, GetUnoptimizedCodeCommon(&info),
Code);
if (FLAG_always_opt && isolate->use_crankshaft()) {
if (FLAG_always_opt) {
Handle<Code> opt_code;
if (Compiler::GetOptimizedCode(
function, result,
@ -975,7 +924,9 @@ MaybeHandle<Code> Compiler::GetUnoptimizedCode(
DCHECK(!shared->GetIsolate()->has_pending_exception());
DCHECK(!shared->is_compiled());
CompilationInfoWithZone info(shared);
Zone zone;
ParseInfo parse_info(&zone, shared);
CompilationInfo info(&parse_info);
return GetUnoptimizedCodeCommon(&info);
}
@ -1002,14 +953,16 @@ bool Compiler::EnsureCompiled(Handle<JSFunction> function,
bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
DCHECK(info->function() != NULL);
DCHECK(info->scope() != NULL);
if (!info->shared_info()->has_deoptimization_support()) {
Handle<SharedFunctionInfo> shared = info->shared_info();
CompilationInfoWithZone unoptimized(shared);
Handle<SharedFunctionInfo> shared = info->shared_info();
if (!shared->has_deoptimization_support()) {
// TODO(titzer): just reuse the ParseInfo for the unoptimized compile.
CompilationInfoWithZone unoptimized(info->closure());
// Note that we use the same AST that we will use for generating the
// optimized code.
unoptimized.SetFunction(info->function());
unoptimized.PrepareForCompilation(info->scope());
unoptimized.SetContext(info->context());
ParseInfo* parse_info = unoptimized.parse_info();
parse_info->set_literal(info->function());
parse_info->set_scope(info->scope());
parse_info->set_context(info->context());
unoptimized.EnableDeoptimizationSupport();
// If the current code has reloc info for serialization, also include
// reloc info for serialization for the new code, so that deopt support
@ -1079,16 +1032,18 @@ MaybeHandle<Code> Compiler::GetDebugCode(Handle<JSFunction> function) {
void Compiler::CompileForLiveEdit(Handle<Script> script) {
// TODO(635): support extensions.
CompilationInfoWithZone info(script);
Zone zone;
ParseInfo parse_info(&zone, script);
CompilationInfo info(&parse_info);
PostponeInterruptsScope postpone(info.isolate());
VMState<COMPILER> state(info.isolate());
info.MarkAsGlobal();
if (!Parser::ParseStatic(&info)) return;
info.parse_info()->set_global();
if (!Parser::ParseStatic(info.parse_info())) return;
LiveEditFunctionTracker tracker(info.isolate(), info.function());
if (!CompileUnoptimizedCode(&info)) return;
if (!info.shared_info().is_null()) {
if (info.has_shared_info()) {
Handle<ScopeInfo> scope_info =
ScopeInfo::Create(info.isolate(), info.zone(), info.scope());
info.shared_info()->set_scope_info(*scope_info);
@ -1101,40 +1056,44 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
Isolate* isolate = info->isolate();
PostponeInterruptsScope postpone(isolate);
DCHECK(!isolate->native_context().is_null());
Handle<Script> script = info->script();
ParseInfo* parse_info = info->parse_info();
Handle<Script> script = parse_info->script();
// TODO(svenpanne) Obscure place for this, perhaps move to OnBeforeCompile?
FixedArray* array = isolate->native_context()->embedder_data();
script->set_context_data(array->get(0));
script->set_context_data(array->get(v8::Context::kDebugIdIndex));
isolate->debug()->OnBeforeCompile(script);
DCHECK(info->is_eval() || info->is_global() || info->is_module());
DCHECK(parse_info->is_eval() || parse_info->is_global() ||
parse_info->is_module());
info->MarkAsToplevel();
parse_info->set_toplevel();
Handle<SharedFunctionInfo> result;
{ VMState<COMPILER> state(info->isolate());
if (info->function() == NULL) {
if (parse_info->literal() == NULL) {
// Parse the script if needed (if it's already parsed, function() is
// non-NULL).
bool parse_allow_lazy =
(info->compile_options() == ScriptCompiler::kConsumeParserCache ||
String::cast(script->source())->length() >
FLAG_min_preparse_length) &&
!Compiler::DebuggerWantsEagerCompilation(info);
ScriptCompiler::CompileOptions options = parse_info->compile_options();
bool parse_allow_lazy = (options == ScriptCompiler::kConsumeParserCache ||
String::cast(script->source())->length() >
FLAG_min_preparse_length) &&
!Compiler::DebuggerWantsEagerCompilation(isolate);
parse_info->set_allow_lazy_parsing(parse_allow_lazy);
if (!parse_allow_lazy &&
(info->compile_options() == ScriptCompiler::kProduceParserCache ||
info->compile_options() == ScriptCompiler::kConsumeParserCache)) {
(options == ScriptCompiler::kProduceParserCache ||
options == ScriptCompiler::kConsumeParserCache)) {
// We are going to parse eagerly, but we either 1) have cached data
// produced by lazy parsing or 2) are asked to generate cached data.
// Eager parsing cannot benefit from cached data, and producing cached
// data while parsing eagerly is not implemented.
info->SetCachedData(NULL, ScriptCompiler::kNoCompileOptions);
parse_info->set_cached_data(nullptr);
parse_info->set_compile_options(ScriptCompiler::kNoCompileOptions);
}
if (!Parser::ParseStatic(info, parse_allow_lazy)) {
if (!Parser::ParseStatic(parse_info)) {
return Handle<SharedFunctionInfo>::null();
}
}
@ -1177,7 +1136,6 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
PROFILE(isolate, CodeCreateEvent(
log_tag, *info->code(), *result, info, *script_name));
GDBJIT(AddCode(script_name, script, info->code(), info));
// Hint to the runtime system used when allocating space for initial
// property space by setting the expected number of properties for
@ -1214,12 +1172,14 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
if (!maybe_shared_info.ToHandle(&shared_info)) {
Handle<Script> script = isolate->factory()->NewScript(source);
CompilationInfoWithZone info(script);
info.MarkAsEval();
if (context->IsNativeContext()) info.MarkAsGlobal();
info.SetLanguageMode(language_mode);
info.SetParseRestriction(restriction);
info.SetContext(context);
Zone zone;
ParseInfo parse_info(&zone, script);
CompilationInfo info(&parse_info);
parse_info.set_eval();
if (context->IsNativeContext()) parse_info.set_global();
parse_info.set_language_mode(language_mode);
parse_info.set_parse_restriction(restriction);
parse_info.set_context(context);
Debug::RecordEvalCaller(script);
@ -1254,8 +1214,8 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
Handle<SharedFunctionInfo> Compiler::CompileScript(
Handle<String> source, Handle<Object> script_name, int line_offset,
int column_offset, bool is_embedder_debug_script,
bool is_shared_cross_origin, Handle<Context> context,
v8::Extension* extension, ScriptData** cached_data,
bool is_shared_cross_origin, Handle<Object> source_map_url,
Handle<Context> context, v8::Extension* extension, ScriptData** cached_data,
ScriptCompiler::CompileOptions compile_options, NativesFlag natives,
bool is_module) {
Isolate* isolate = source->GetIsolate();
@ -1331,23 +1291,31 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
}
script->set_is_shared_cross_origin(is_shared_cross_origin);
script->set_is_embedder_debug_script(is_embedder_debug_script);
if (!source_map_url.is_null()) {
script->set_source_mapping_url(*source_map_url);
}
// Compile the function and add it to the cache.
CompilationInfoWithZone info(script);
Zone zone;
ParseInfo parse_info(&zone, script);
CompilationInfo info(&parse_info);
if (FLAG_harmony_modules && is_module) {
info.MarkAsModule();
parse_info.set_module();
} else {
info.MarkAsGlobal();
parse_info.set_global();
}
info.SetCachedData(cached_data, compile_options);
info.SetExtension(extension);
info.SetContext(context);
if (compile_options != ScriptCompiler::kNoCompileOptions) {
parse_info.set_cached_data(cached_data);
}
parse_info.set_compile_options(compile_options);
parse_info.set_extension(extension);
parse_info.set_context(context);
if (FLAG_serialize_toplevel &&
compile_options == ScriptCompiler::kProduceCodeCache) {
info.PrepareForSerializing();
}
info.SetLanguageMode(
parse_info.set_language_mode(
static_cast<LanguageMode>(info.language_mode() | language_mode));
result = CompileToplevel(&info);
if (extension == NULL && !result.is_null() && !result->dont_cache()) {
@ -1373,19 +1341,21 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
Handle<SharedFunctionInfo> Compiler::CompileStreamedScript(
CompilationInfo* info, int source_length) {
Isolate* isolate = info->isolate();
Handle<Script> script, ParseInfo* parse_info, int source_length) {
Isolate* isolate = script->GetIsolate();
// TODO(titzer): increment the counters in caller.
isolate->counters()->total_load_size()->Increment(source_length);
isolate->counters()->total_compile_size()->Increment(source_length);
LanguageMode language_mode =
construct_language_mode(FLAG_use_strict, FLAG_use_strong);
info->SetLanguageMode(
static_cast<LanguageMode>(info->language_mode() | language_mode));
parse_info->set_language_mode(
static_cast<LanguageMode>(parse_info->language_mode() | language_mode));
CompilationInfo compile_info(parse_info);
// TODO(marja): FLAG_serialize_toplevel is not honoured and won't be; when the
// real code caching lands, streaming needs to be adapted to use it.
return CompileToplevel(info);
return CompileToplevel(&compile_info);
}
@ -1393,10 +1363,12 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
FunctionLiteral* literal, Handle<Script> script,
CompilationInfo* outer_info) {
// Precondition: code has been parsed and scopes have been analyzed.
CompilationInfoWithZone info(script);
info.SetFunction(literal);
info.PrepareForCompilation(literal->scope());
info.SetLanguageMode(literal->scope()->language_mode());
Zone zone;
ParseInfo parse_info(&zone, script);
CompilationInfo info(&parse_info);
parse_info.set_literal(literal);
parse_info.set_scope(literal->scope());
parse_info.set_language_mode(literal->scope()->language_mode());
if (outer_info->will_serialize()) info.PrepareForSerializing();
Isolate* isolate = info.isolate();
@ -1412,10 +1384,11 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
// of functions without an outer context when setting a breakpoint through
// Debug::FindSharedFunctionInfoInScript.
bool allow_lazy_without_ctx = literal->AllowsLazyCompilationWithoutContext();
bool allow_lazy = literal->AllowsLazyCompilation() &&
!DebuggerWantsEagerCompilation(&info, allow_lazy_without_ctx);
bool allow_lazy =
literal->AllowsLazyCompilation() &&
!DebuggerWantsEagerCompilation(isolate, allow_lazy_without_ctx);
if (outer_info->is_toplevel() && outer_info->will_serialize()) {
if (outer_info->parse_info()->is_toplevel() && outer_info->will_serialize()) {
// Make sure that if the toplevel code may be serialized, the inner
// function is allowed to be compiled lazily.
// This is necessary to serialize toplevel code without inner functions.
@ -1436,7 +1409,8 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
// called.
info.EnsureFeedbackVector();
scope_info = Handle<ScopeInfo>(ScopeInfo::Empty(isolate));
} else if (Renumber(&info) && FullCodeGenerator::MakeCode(&info)) {
} else if (Renumber(info.parse_info()) &&
FullCodeGenerator::MakeCode(&info)) {
// MakeCode will ensure that the feedback vector is present and
// appropriately sized.
DCHECK(!info.code().is_null());
@ -1481,7 +1455,6 @@ MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
Isolate* isolate = info->isolate();
DCHECK(AllowCompilation::IsAllowed(isolate));
VMState<COMPILER> state(isolate);
DCHECK(isolate->use_crankshaft());
DCHECK(!isolate->has_pending_exception());
PostponeInterruptsScope postpone(isolate);
@ -1565,10 +1538,10 @@ Handle<Code> Compiler::GetConcurrentlyOptimizedCode(OptimizedCompileJob* job) {
}
bool Compiler::DebuggerWantsEagerCompilation(CompilationInfo* info,
bool Compiler::DebuggerWantsEagerCompilation(Isolate* isolate,
bool allow_lazy_without_ctx) {
if (LiveEditFunctionTracker::IsActive(info->isolate())) return true;
Debug* debug = info->isolate()->debug();
if (LiveEditFunctionTracker::IsActive(isolate)) return true;
Debug* debug = isolate->debug();
bool debugging = debug->is_active() || debug->has_break_points();
return debugging && !allow_lazy_without_ctx;
}

390
deps/v8/src/compiler.h

@ -15,13 +15,8 @@ namespace internal {
class AstValueFactory;
class HydrogenCodeStub;
// ParseRestriction is used to restrict the set of valid statements in a
// unit of compilation. Restriction violations cause a syntax error.
enum ParseRestriction {
NO_PARSE_RESTRICTION, // All expressions are allowed.
ONLY_SINGLE_FUNCTION_LITERAL // Only a single FunctionLiteral expression.
};
class ParseInfo;
class ScriptData;
struct OffsetRange {
OffsetRange(int from, int to) : from(from), to(to) {}
@ -39,9 +34,9 @@ struct OffsetRange {
// script start.
class SourcePosition {
public:
SourcePosition(const SourcePosition& other) : value_(other.value_) {}
static SourcePosition Unknown() { return SourcePosition(kNoPosition); }
static SourcePosition Unknown() {
return SourcePosition::FromRaw(kNoPosition);
}
bool IsUnknown() const { return value_ == kNoPosition; }
@ -72,10 +67,14 @@ class SourcePosition {
// Offset from the start of the inlined function.
typedef BitField<uint32_t, 9, 23> PositionField;
explicit SourcePosition(uint32_t value) : value_(value) {}
friend class HPositionInfo;
friend class LCodeGenBase;
friend class Deoptimizer;
static SourcePosition FromRaw(uint32_t raw_position) {
SourcePosition position;
position.value_ = raw_position;
return position;
}
// If FLAG_hydrogen_track_positions is set contains bitfields InliningIdField
// and PositionField.
@ -87,52 +86,23 @@ class SourcePosition {
std::ostream& operator<<(std::ostream& os, const SourcePosition& p);
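For readers unfamiliar with the BitField helper used by SourcePosition above, a rough standalone sketch of the packing it implies, assuming InliningIdField occupies the low 9 bits and PositionField the next 23 (matching BitField<uint32_t, 9, 23> earlier in the class); the encode/decode helpers are illustrative, not V8's API.

#include <cstdint>
#include <cstdio>

// Packs an inlining id (9 bits) and a position offset (23 bits) into one
// 32-bit value, mirroring the InliningIdField/PositionField layout.
static const uint32_t kInliningIdBits = 9;
static const uint32_t kPositionBits = 23;
static const uint32_t kInliningIdMask = (1u << kInliningIdBits) - 1;
static const uint32_t kPositionMask = (1u << kPositionBits) - 1;

uint32_t Encode(uint32_t inlining_id, uint32_t position) {
  return (inlining_id & kInliningIdMask) |
         ((position & kPositionMask) << kInliningIdBits);
}

uint32_t InliningId(uint32_t raw) { return raw & kInliningIdMask; }
uint32_t Position(uint32_t raw) {
  return (raw >> kInliningIdBits) & kPositionMask;
}

int main() {
  uint32_t raw = Encode(3, 1234);
  std::printf("id=%u pos=%u\n", (unsigned)InliningId(raw),
              (unsigned)Position(raw));
  return 0;
}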
class InlinedFunctionInfo {
public:
explicit InlinedFunctionInfo(Handle<SharedFunctionInfo> shared)
: shared_(shared), start_position_(shared->start_position()) {}
Handle<SharedFunctionInfo> shared() const { return shared_; }
int start_position() const { return start_position_; }
private:
Handle<SharedFunctionInfo> shared_;
int start_position_;
struct InlinedFunctionInfo {
InlinedFunctionInfo(int parent_id, SourcePosition inline_position,
int script_id, int start_position)
: parent_id(parent_id),
inline_position(inline_position),
script_id(script_id),
start_position(start_position) {}
int parent_id;
SourcePosition inline_position;
int script_id;
int start_position;
std::vector<size_t> deopt_pc_offsets;
static const int kNoParentId = -1;
};
class ScriptData {
public:
ScriptData(const byte* data, int length);
~ScriptData() {
if (owns_data_) DeleteArray(data_);
}
const byte* data() const { return data_; }
int length() const { return length_; }
bool rejected() const { return rejected_; }
void Reject() { rejected_ = true; }
void AcquireDataOwnership() {
DCHECK(!owns_data_);
owns_data_ = true;
}
void ReleaseDataOwnership() {
DCHECK(owns_data_);
owns_data_ = false;
}
private:
bool owns_data_ : 1;
bool rejected_ : 1;
const byte* data_;
int length_;
DISALLOW_COPY_AND_ASSIGN(ScriptData);
};
// CompilationInfo encapsulates some information known at compile time. It
// is constructed based on the resources available at compile-time.
class CompilationInfo {
@ -140,113 +110,67 @@ class CompilationInfo {
// Various configuration flags for a compilation, as well as some properties
// of the compiled code produced by a compilation.
enum Flag {
kLazy = 1 << 0,
kEval = 1 << 1,
kGlobal = 1 << 2,
kStrictMode = 1 << 3,
kStrongMode = 1 << 4,
kThisHasUses = 1 << 5,
kNative = 1 << 6,
kDeferredCalling = 1 << 7,
kNonDeferredCalling = 1 << 8,
kSavesCallerDoubles = 1 << 9,
kRequiresFrame = 1 << 10,
kMustNotHaveEagerFrame = 1 << 11,
kDeoptimizationSupport = 1 << 12,
kDebug = 1 << 13,
kCompilingForDebugging = 1 << 14,
kParseRestriction = 1 << 15,
kSerializing = 1 << 16,
kContextSpecializing = 1 << 17,
kInliningEnabled = 1 << 18,
kTypingEnabled = 1 << 19,
kDisableFutureOptimization = 1 << 20,
kModule = 1 << 21,
kToplevel = 1 << 22,
kSplittingEnabled = 1 << 23
kDeferredCalling = 1 << 0,
kNonDeferredCalling = 1 << 1,
kSavesCallerDoubles = 1 << 2,
kRequiresFrame = 1 << 3,
kMustNotHaveEagerFrame = 1 << 4,
kDeoptimizationSupport = 1 << 5,
kDebug = 1 << 6,
kCompilingForDebugging = 1 << 7,
kSerializing = 1 << 8,
kContextSpecializing = 1 << 9,
kInliningEnabled = 1 << 10,
kTypingEnabled = 1 << 11,
kDisableFutureOptimization = 1 << 12,
kSplittingEnabled = 1 << 13,
kBuiltinInliningEnabled = 1 << 14,
kTypeFeedbackEnabled = 1 << 15
};
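As context for the GetFlag/SetFlag calls appearing elsewhere in this diff, a minimal sketch of the bit-flag pattern the enum above relies on: each Flag value occupies one bit of an unsigned word. The holder class and main function are simplified stand-ins, not V8's actual CompilationInfo.

#include <cstdio>

// Flag values mirror the new enum above: one bit per flag.
enum Flag {
  kDeoptimizationSupport = 1 << 5,
  kContextSpecializing = 1 << 9,
  kTypeFeedbackEnabled = 1 << 15,
};

class FlagHolder {
 public:
  void SetFlag(Flag flag) { flags_ |= flag; }
  void SetFlag(Flag flag, bool value) {
    flags_ = value ? (flags_ | flag) : (flags_ & ~flag);
  }
  bool GetFlag(Flag flag) const { return (flags_ & flag) != 0; }

 private:
  unsigned flags_ = 0;
};

int main() {
  FlagHolder info;
  info.SetFlag(kContextSpecializing);
  info.SetFlag(kTypeFeedbackEnabled, true);
  info.SetFlag(kTypeFeedbackEnabled, false);
  std::printf("%d %d\n", info.GetFlag(kContextSpecializing),
              info.GetFlag(kTypeFeedbackEnabled));
}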
CompilationInfo(Handle<JSFunction> closure, Zone* zone);
CompilationInfo(Handle<Script> script, Zone* zone);
explicit CompilationInfo(ParseInfo* parse_info);
CompilationInfo(CodeStub* stub, Isolate* isolate, Zone* zone);
virtual ~CompilationInfo();
ParseInfo* parse_info() const { return parse_info_; }
// -----------------------------------------------------------
// TODO(titzer): inline and delete accessors of ParseInfo
// -----------------------------------------------------------
Handle<Script> script() const;
bool is_eval() const;
bool is_native() const;
bool is_module() const;
LanguageMode language_mode() const;
Handle<JSFunction> closure() const;
FunctionLiteral* function() const;
Scope* scope() const;
Handle<Context> context() const;
Handle<SharedFunctionInfo> shared_info() const;
bool has_shared_info() const;
// -----------------------------------------------------------
Isolate* isolate() const {
return isolate_;
}
Zone* zone() { return zone_; }
bool is_osr() const { return !osr_ast_id_.IsNone(); }
bool is_lazy() const { return GetFlag(kLazy); }
bool is_eval() const { return GetFlag(kEval); }
bool is_global() const { return GetFlag(kGlobal); }
bool is_module() const { return GetFlag(kModule); }
LanguageMode language_mode() const {
STATIC_ASSERT(LANGUAGE_END == 3);
return construct_language_mode(GetFlag(kStrictMode), GetFlag(kStrongMode));
}
FunctionLiteral* function() const { return function_; }
Scope* scope() const { return scope_; }
Scope* script_scope() const { return script_scope_; }
Handle<Code> code() const { return code_; }
Handle<JSFunction> closure() const { return closure_; }
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
Handle<Script> script() const { return script_; }
void set_script(Handle<Script> script) { script_ = script; }
CodeStub* code_stub() const { return code_stub_; }
v8::Extension* extension() const { return extension_; }
ScriptData** cached_data() const { return cached_data_; }
ScriptCompiler::CompileOptions compile_options() const {
return compile_options_;
}
ScriptCompiler::ExternalSourceStream* source_stream() const {
return source_stream_;
}
ScriptCompiler::StreamedSource::Encoding source_stream_encoding() const {
return source_stream_encoding_;
}
Handle<Context> context() const { return context_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
Handle<Code> unoptimized_code() const { return unoptimized_code_; }
int opt_count() const { return opt_count_; }
int num_parameters() const;
int num_heap_slots() const;
Code::Flags flags() const;
void MarkAsEval() {
DCHECK(!is_lazy());
SetFlag(kEval);
}
void MarkAsGlobal() {
DCHECK(!is_lazy());
SetFlag(kGlobal);
}
void MarkAsModule() {
DCHECK(!is_lazy());
SetFlag(kModule);
}
bool has_scope() const { return scope() != nullptr; }
void set_parameter_count(int parameter_count) {
DCHECK(IsStub());
parameter_count_ = parameter_count;
}
void set_this_has_uses(bool has_no_uses) {
SetFlag(kThisHasUses, has_no_uses);
}
bool this_has_uses() { return GetFlag(kThisHasUses); }
void SetLanguageMode(LanguageMode language_mode) {
STATIC_ASSERT(LANGUAGE_END == 3);
SetFlag(kStrictMode, language_mode & STRICT_BIT);
SetFlag(kStrongMode, language_mode & STRONG_BIT);
}
void MarkAsNative() { SetFlag(kNative); }
bool is_native() const { return GetFlag(kNative); }
bool is_tracking_positions() const { return track_positions_; }
bool is_calling() const {
return GetFlag(kDeferredCalling) || GetFlag(kNonDeferredCalling);
@ -286,17 +210,25 @@ class CompilationInfo {
bool is_context_specializing() const { return GetFlag(kContextSpecializing); }
void MarkAsTypeFeedbackEnabled() { SetFlag(kTypeFeedbackEnabled); }
bool is_type_feedback_enabled() const {
return GetFlag(kTypeFeedbackEnabled);
}
void MarkAsInliningEnabled() { SetFlag(kInliningEnabled); }
bool is_inlining_enabled() const { return GetFlag(kInliningEnabled); }
void MarkAsTypingEnabled() { SetFlag(kTypingEnabled); }
void MarkAsBuiltinInliningEnabled() { SetFlag(kBuiltinInliningEnabled); }
bool is_typing_enabled() const { return GetFlag(kTypingEnabled); }
bool is_builtin_inlining_enabled() const {
return GetFlag(kBuiltinInliningEnabled);
}
void MarkAsToplevel() { SetFlag(kToplevel); }
void MarkAsTypingEnabled() { SetFlag(kTypingEnabled); }
bool is_toplevel() const { return GetFlag(kToplevel); }
bool is_typing_enabled() const { return GetFlag(kTypingEnabled); }
void MarkAsSplittingEnabled() { SetFlag(kSplittingEnabled); }
@ -307,46 +239,11 @@ class CompilationInfo {
!is_debug();
}
void SetParseRestriction(ParseRestriction restriction) {
SetFlag(kParseRestriction, restriction != NO_PARSE_RESTRICTION);
}
ParseRestriction parse_restriction() const {
return GetFlag(kParseRestriction) ? ONLY_SINGLE_FUNCTION_LITERAL
: NO_PARSE_RESTRICTION;
}
void SetFunction(FunctionLiteral* literal) {
DCHECK(function_ == NULL);
function_ = literal;
}
void PrepareForCompilation(Scope* scope);
void SetScriptScope(Scope* script_scope) {
DCHECK(script_scope_ == NULL);
script_scope_ = script_scope;
}
void EnsureFeedbackVector();
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
void SetCode(Handle<Code> code) { code_ = code; }
void SetExtension(v8::Extension* extension) {
DCHECK(!is_lazy());
extension_ = extension;
}
void SetCachedData(ScriptData** cached_data,
ScriptCompiler::CompileOptions compile_options) {
compile_options_ = compile_options;
if (compile_options == ScriptCompiler::kNoCompileOptions) {
cached_data_ = NULL;
} else {
DCHECK(!is_lazy());
cached_data_ = cached_data;
}
}
void SetContext(Handle<Context> context) {
context_ = context;
}
void MarkCompilingForDebugging() { SetFlag(kCompilingForDebugging); }
bool IsCompilingForDebugging() { return GetFlag(kCompilingForDebugging); }
@ -373,13 +270,18 @@ class CompilationInfo {
bool IsOptimizable() const { return mode_ == BASE; }
bool IsStub() const { return mode_ == STUB; }
void SetOptimizing(BailoutId osr_ast_id, Handle<Code> unoptimized) {
DCHECK(!shared_info_.is_null());
DCHECK(!shared_info().is_null());
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
unoptimized_code_ = unoptimized;
optimization_id_ = isolate()->NextOptimizationId();
}
void SetStub(CodeStub* code_stub) {
SetMode(STUB);
code_stub_ = code_stub;
}
// Deoptimization support.
bool HasDeoptimizationSupport() const {
return GetFlag(kDeoptimizationSupport);
@ -409,12 +311,8 @@ class CompilationInfo {
void RollbackDependencies();
void SaveHandles() {
SaveHandle(&closure_);
SaveHandle(&shared_info_);
SaveHandle(&context_);
SaveHandle(&script_);
SaveHandle(&unoptimized_code_);
void ReopenHandlesInNewHandleScope() {
unoptimized_code_ = Handle<Code>(*unoptimized_code_);
}
void AbortOptimization(BailoutReason reason) {
@ -454,14 +352,16 @@ class CompilationInfo {
return result;
}
List<InlinedFunctionInfo>* inlined_function_infos() {
return inlined_function_infos_;
int start_position_for(uint32_t inlining_id) {
return inlined_function_infos_.at(inlining_id).start_position;
}
List<int>* inlining_id_to_function_id() {
return inlining_id_to_function_id_;
const std::vector<InlinedFunctionInfo>& inlined_function_infos() {
return inlined_function_infos_;
}
void LogDeoptCallPosition(int pc_offset, int inlining_id);
int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
SourcePosition position);
SourcePosition position, int parent_id);
Handle<Foreign> object_wrapper() {
if (object_wrapper_.is_null()) {
@ -482,18 +382,11 @@ class CompilationInfo {
}
bool HasSameOsrEntry(Handle<JSFunction> function, BailoutId osr_ast_id) {
return osr_ast_id_ == osr_ast_id && function.is_identical_to(closure_);
return osr_ast_id_ == osr_ast_id && function.is_identical_to(closure());
}
int optimization_id() const { return optimization_id_; }
AstValueFactory* ast_value_factory() const { return ast_value_factory_; }
void SetAstValueFactory(AstValueFactory* ast_value_factory,
bool owned = true) {
ast_value_factory_ = ast_value_factory;
ast_value_factory_owned_ = owned;
}
int osr_expr_stack_height() { return osr_expr_stack_height_; }
void set_osr_expr_stack_height(int height) {
DCHECK(height >= 0);
@ -507,16 +400,15 @@ class CompilationInfo {
bool is_simple_parameter_list();
protected:
CompilationInfo(Handle<SharedFunctionInfo> shared_info,
Zone* zone);
CompilationInfo(ScriptCompiler::ExternalSourceStream* source_stream,
ScriptCompiler::StreamedSource::Encoding encoding,
Isolate* isolate, Zone* zone);
ParseInfo* parse_info_;
void DisableFutureOptimization() {
if (GetFlag(kDisableFutureOptimization) && has_shared_info()) {
shared_info()->DisableOptimization(bailout_reason());
}
}
private:
Isolate* isolate_;
// Compilation mode.
// BASE is generated by the full codegen, optionally prepared for bailouts.
// OPTIMIZE is optimized code generated by the Hydrogen-based backend.
@ -529,7 +421,10 @@ class CompilationInfo {
STUB
};
void Initialize(Isolate* isolate, Mode mode, Zone* zone);
CompilationInfo(ParseInfo* parse_info, CodeStub* code_stub, Mode mode,
Isolate* isolate, Zone* zone);
Isolate* isolate_;
void SetMode(Mode mode) {
mode_ = mode;
@ -545,35 +440,11 @@ class CompilationInfo {
unsigned flags_;
// Fields filled in by the compilation pipeline.
// AST filled in by the parser.
FunctionLiteral* function_;
// The scope of the function literal as a convenience. Set to indicate
// that scopes have been analyzed.
Scope* scope_;
// The script scope provided as a convenience.
Scope* script_scope_;
// For compiled stubs, the stub object
CodeStub* code_stub_;
// The compiled code.
Handle<Code> code_;
// Possible initial inputs to the compilation process.
Handle<JSFunction> closure_;
Handle<SharedFunctionInfo> shared_info_;
Handle<Script> script_;
ScriptCompiler::ExternalSourceStream* source_stream_; // Not owned.
ScriptCompiler::StreamedSource::Encoding source_stream_encoding_;
// Fields possibly needed for eager compilation, NULL by default.
v8::Extension* extension_;
ScriptData** cached_data_;
ScriptCompiler::CompileOptions compile_options_;
// The context of the caller for eval code, and the script context for a
// global script. Will be a null handle otherwise.
Handle<Context> context_;
// Used by codegen, ultimately kept rooted by the SharedFunctionInfo.
Handle<TypeFeedbackVector> feedback_vector_;
@ -593,21 +464,13 @@ class CompilationInfo {
ZoneList<Handle<HeapObject> >* dependencies_[DependentCode::kGroupCount];
template<typename T>
void SaveHandle(Handle<T> *object) {
if (!object->is_null()) {
Handle<T> handle(*(*object));
*object = handle;
}
}
BailoutReason bailout_reason_;
int prologue_offset_;
List<OffsetRange>* no_frame_ranges_;
List<InlinedFunctionInfo>* inlined_function_infos_;
List<int>* inlining_id_to_function_id_;
std::vector<InlinedFunctionInfo> inlined_function_infos_;
bool track_positions_;
// A copy of shared_info()->opt_count() to avoid handle deref
// during graph optimization.
@ -620,9 +483,6 @@ class CompilationInfo {
int optimization_id_;
AstValueFactory* ast_value_factory_;
bool ast_value_factory_owned_;
// This flag is used by the main thread to track whether this compilation
// should be abandoned due to dependency change.
bool aborted_due_to_dependency_change_;
@ -633,35 +493,6 @@ class CompilationInfo {
};
// Exactly like a CompilationInfo, except also creates and enters a
// Zone on construction and deallocates it on exit.
class CompilationInfoWithZone: public CompilationInfo {
public:
explicit CompilationInfoWithZone(Handle<Script> script)
: CompilationInfo(script, &zone_) {}
explicit CompilationInfoWithZone(Handle<SharedFunctionInfo> shared_info)
: CompilationInfo(shared_info, &zone_) {}
explicit CompilationInfoWithZone(Handle<JSFunction> closure)
: CompilationInfo(closure, &zone_) {}
CompilationInfoWithZone(CodeStub* stub, Isolate* isolate)
: CompilationInfo(stub, isolate, &zone_) {}
CompilationInfoWithZone(ScriptCompiler::ExternalSourceStream* stream,
ScriptCompiler::StreamedSource::Encoding encoding,
Isolate* isolate)
: CompilationInfo(stream, encoding, isolate, &zone_) {}
// Virtual destructor because a CompilationInfoWithZone has to exit the
// zone scope and get rid of dependent maps even when the destructor is
// invoked through a CompilationInfo pointer.
virtual ~CompilationInfoWithZone() {
RollbackDependencies();
}
private:
Zone zone_;
};
// A wrapper around a CompilationInfo that detaches the Handles from
// the underlying DeferredHandleScope and stores them in info_ on
// destruction.
@ -786,9 +617,9 @@ class Compiler : public AllStatic {
Handle<JSFunction> function);
// Parser::Parse, then Compiler::Analyze.
static bool ParseAndAnalyze(CompilationInfo* info);
static bool ParseAndAnalyze(ParseInfo* info);
// Rewrite, analyze scopes, and renumber.
static bool Analyze(CompilationInfo* info);
static bool Analyze(ParseInfo* info);
// Adds deoptimization support, requires ParseAndAnalyze.
static bool EnsureDeoptimizationSupport(CompilationInfo* info);
@ -807,11 +638,13 @@ class Compiler : public AllStatic {
static Handle<SharedFunctionInfo> CompileScript(
Handle<String> source, Handle<Object> script_name, int line_offset,
int column_offset, bool is_debugger_script, bool is_shared_cross_origin,
Handle<Context> context, v8::Extension* extension,
ScriptData** cached_data, ScriptCompiler::CompileOptions compile_options,
Handle<Object> source_map_url, Handle<Context> context,
v8::Extension* extension, ScriptData** cached_data,
ScriptCompiler::CompileOptions compile_options,
NativesFlag is_natives_code, bool is_module);
static Handle<SharedFunctionInfo> CompileStreamedScript(CompilationInfo* info,
static Handle<SharedFunctionInfo> CompileStreamedScript(Handle<Script> script,
ParseInfo* info,
int source_length);
// Create a shared function info object (the code may be lazily compiled).
@ -834,8 +667,9 @@ class Compiler : public AllStatic {
// On failure, return the empty handle.
static Handle<Code> GetConcurrentlyOptimizedCode(OptimizedCompileJob* job);
// TODO(titzer): move this method out of the compiler.
static bool DebuggerWantsEagerCompilation(
CompilationInfo* info, bool allow_lazy_without_ctx = false);
Isolate* isolate, bool allow_lazy_without_ctx = false);
};

24
deps/v8/src/compiler/access-builder.cc

@ -61,7 +61,8 @@ FieldAccess AccessBuilder::ForMapInstanceType() {
// static
FieldAccess AccessBuilder::ForStringLength() {
return {kTaggedBase, String::kLengthOffset, Handle<Name>(),
Type::SignedSmall(), kMachAnyTagged};
Type::Intersect(Type::UnsignedSmall(), Type::TaggedSigned()),
kMachAnyTagged};
}
@ -81,6 +82,12 @@ FieldAccess AccessBuilder::ForContextSlot(size_t index) {
}
// static
FieldAccess AccessBuilder::ForStatsCounter() {
return {kUntaggedBase, 0, MaybeHandle<Name>(), Type::Signed32(), kMachInt32};
}
// static
ElementAccess AccessBuilder::ForFixedArrayElement() {
return {kTaggedBase, FixedArray::kHeaderSize, Type::Any(), kMachAnyTagged};
@ -115,6 +122,21 @@ ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
return {kUntaggedBase, 0, Type::None(), kMachNone};
}
// static
ElementAccess AccessBuilder::ForSeqStringChar(String::Encoding encoding) {
switch (encoding) {
case String::ONE_BYTE_ENCODING:
return {kTaggedBase, SeqString::kHeaderSize, Type::Unsigned32(),
kMachUint8};
case String::TWO_BYTE_ENCODING:
return {kTaggedBase, SeqString::kHeaderSize, Type::Unsigned32(),
kMachUint16};
}
UNREACHABLE();
return {kUntaggedBase, 0, Type::None(), kMachNone};
}
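Loosely, the element access returned above amounts to addressing character i of a sequential string at header_size + i * element_size, with a 1-byte element for one-byte strings and a 2-byte element for two-byte strings. A standalone sketch under that assumption follows; the header size and function names are illustrative, not V8's constants.

#include <cstdio>

enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };

// Byte offset of character `index` inside a sequential string whose payload
// starts right after a fixed-size header (size chosen for illustration only).
int CharOffset(Encoding encoding, int header_size, int index) {
  int element_size = (encoding == ONE_BYTE_ENCODING) ? 1 : 2;
  return header_size + index * element_size;
}

int main() {
  std::printf("%d\n", CharOffset(ONE_BYTE_ENCODING, 16, 3));  // 19
  std::printf("%d\n", CharOffset(TWO_BYTE_ENCODING, 16, 3));  // 22
  return 0;
}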
} // namespace compiler
} // namespace internal
} // namespace v8

Some files were not shown because too many files changed in this diff
