
deps: update v8 to 4.4.63.9

Upgrade the bundled V8 and update code in src/ and lib/ to the new API.

Notable backwards-incompatible changes are the removal of the smalloc
module and the loss of CESU-8 decoding support.  CESU-8 support can be
brought back if necessary by doing the UTF-8 decoding ourselves.
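
For illustration only (not part of this commit), a minimal sketch of what
"doing the decoding ourselves" could look like. It assumes well-formed
CESU-8 input; real code must validate lengths and continuation bytes:

    // CESU-8 encodes every UTF-16 code unit -- surrogate halves included --
    // as ordinary one-, two- or three-byte UTF-8, so a decoder to UTF-16
    // never needs the four-byte form.
    #include <cstdint>
    #include <string>
    #include <vector>

    std::vector<uint16_t> DecodeCesu8(const std::string& in) {
      std::vector<uint16_t> out;
      for (size_t i = 0; i < in.size();) {
        const uint8_t b = static_cast<uint8_t>(in[i]);
        if (b < 0x80) {                    // 1 byte: U+0000..U+007F
          out.push_back(b);
          i += 1;
        } else if ((b & 0xE0) == 0xC0) {   // 2 bytes: U+0080..U+07FF
          out.push_back(static_cast<uint16_t>(
              ((b & 0x1F) << 6) | (in[i + 1] & 0x3F)));
          i += 2;
        } else {                           // 3 bytes: U+0800..U+FFFF,
          out.push_back(static_cast<uint16_t>(  // surrogate halves included
              ((b & 0x0F) << 12) | ((in[i + 1] & 0x3F) << 6) |
              (in[i + 2] & 0x3F)));
          i += 3;
        }
      }
      return out;
    }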

This commit includes https://codereview.chromium.org/1192973004 to fix
a build error on Python 2.6 systems.  The original commit log follows:

    Use optparse in js2c.py for python compatibility

    Without this change, V8 won't build on RHEL/CentOS 6 because the
    distro python is too old to know about the argparse module.

PR-URL: https://github.com/nodejs/io.js/pull/2022
Reviewed-By: Rod Vagg <rod@vagg.org>
Reviewed-By: Trevor Norris <trev.norris@gmail.com>
v4.0.0-rc
Ben Noordhuis authored 10 years ago; committed by Rod Vagg
Commit 70d1f32f56
Changed files (lines changed in parentheses):

  1. deps/v8/AUTHORS (1)
  2. deps/v8/BUILD.gn (105)
  3. deps/v8/ChangeLog (478)
  4. deps/v8/DEPS (8)
  5. deps/v8/Makefile (4)
  6. deps/v8/PRESUBMIT.py (4)
  7. deps/v8/WATCHLISTS (12)
  8. deps/v8/build/OWNERS (1)
  9. deps/v8/build/android.gypi (73)
  10. deps/v8/build/features.gypi (4)
  11. deps/v8/build/standalone.gypi (55)
  12. deps/v8/build/toolchain.gypi (30)
  13. deps/v8/include/OWNERS (2)
  14. deps/v8/include/v8-debug.h (8)
  15. deps/v8/include/v8-profiler.h (32)
  16. deps/v8/include/v8-util.h (49)
  17. deps/v8/include/v8-version.h (6)
  18. deps/v8/include/v8.h (571)
  19. deps/v8/include/v8config.h (36)
  20. deps/v8/samples/process.cc (34)
  21. deps/v8/samples/shell.cc (20)
  22. deps/v8/src/accessors.cc (155)
  23. deps/v8/src/accessors.h (9)
  24. deps/v8/src/allocation-site-scopes.h (1)
  25. deps/v8/src/allocation-tracker.cc (4)
  26. deps/v8/src/api-natives.cc (6)
  27. deps/v8/src/api.cc (440)
  28. deps/v8/src/api.h (10)
  29. deps/v8/src/arm/assembler-arm.cc (303)
  30. deps/v8/src/arm/assembler-arm.h (32)
  31. deps/v8/src/arm/builtins-arm.cc (105)
  32. deps/v8/src/arm/code-stubs-arm.cc (216)
  33. deps/v8/src/arm/code-stubs-arm.h (12)
  34. deps/v8/src/arm/disasm-arm.cc (57)
  35. deps/v8/src/arm/full-codegen-arm.cc (172)
  36. deps/v8/src/arm/interface-descriptors-arm.cc (11)
  37. deps/v8/src/arm/lithium-arm.cc (44)
  38. deps/v8/src/arm/lithium-arm.h (443)
  39. deps/v8/src/arm/lithium-codegen-arm.cc (203)
  40. deps/v8/src/arm/lithium-codegen-arm.h (8)
  41. deps/v8/src/arm/lithium-gap-resolver-arm.h (2)
  42. deps/v8/src/arm/macro-assembler-arm.cc (42)
  43. deps/v8/src/arm/macro-assembler-arm.h (17)
  44. deps/v8/src/arm/regexp-macro-assembler-arm.cc (106)
  45. deps/v8/src/arm/simulator-arm.cc (258)
  46. deps/v8/src/arm/simulator-arm.h (2)
  47. deps/v8/src/arm64/builtins-arm64.cc (122)
  48. deps/v8/src/arm64/code-stubs-arm64.cc (238)
  49. deps/v8/src/arm64/code-stubs-arm64.h (12)
  50. deps/v8/src/arm64/full-codegen-arm64.cc (182)
  51. deps/v8/src/arm64/interface-descriptors-arm64.cc (11)
  52. deps/v8/src/arm64/lithium-arm64.cc (51)
  53. deps/v8/src/arm64/lithium-arm64.h (466)
  54. deps/v8/src/arm64/lithium-codegen-arm64.cc (121)
  55. deps/v8/src/arm64/lithium-codegen-arm64.h (6)
  56. deps/v8/src/arm64/macro-assembler-arm64.cc (1)
  57. deps/v8/src/arm64/macro-assembler-arm64.h (2)
  58. deps/v8/src/arm64/regexp-macro-assembler-arm64.cc (107)
  59. deps/v8/src/array-iterator.js (114)
  60. deps/v8/src/array.js (400)
  61. deps/v8/src/arraybuffer.js (56)
  62. deps/v8/src/assembler.cc (34)
  63. deps/v8/src/assembler.h (11)
  64. deps/v8/src/assert-scope.cc (7)
  65. deps/v8/src/ast-numbering.cc (46)
  66. deps/v8/src/ast-value-factory.cc (32)
  67. deps/v8/src/ast-value-factory.h (26)
  68. deps/v8/src/ast.cc (10)
  69. deps/v8/src/ast.h (689)
  70. deps/v8/src/bailout-reason.h (11)
  71. deps/v8/src/base/OWNERS (1)
  72. deps/v8/src/base/adapters.h (51)
  73. deps/v8/src/base/compiler-specific.h (25)
  74. deps/v8/src/base/cpu.cc (45)
  75. deps/v8/src/base/cpu.h (10)
  76. deps/v8/src/base/flags.h (2)
  77. deps/v8/src/base/macros.h (14)
  78. deps/v8/src/base/platform/condition-variable.cc (10)
  79. deps/v8/src/base/platform/condition-variable.h (4)
  80. deps/v8/src/base/platform/elapsed-timer.h (2)
  81. deps/v8/src/base/platform/mutex.h (6)
  82. deps/v8/src/base/platform/platform-aix.cc (49)
  83. deps/v8/src/base/platform/platform-cygwin.cc (48)
  84. deps/v8/src/base/platform/platform-freebsd.cc (48)
  85. deps/v8/src/base/platform/platform-linux.cc (62)
  86. deps/v8/src/base/platform/platform-macos.cc (58)
  87. deps/v8/src/base/platform/platform-openbsd.cc (48)
  88. deps/v8/src/base/platform/platform-posix.cc (81)
  89. deps/v8/src/base/platform/platform-qnx.cc (58)
  90. deps/v8/src/base/platform/platform-solaris.cc (48)
  91. deps/v8/src/base/platform/platform-win32.cc (76)
  92. deps/v8/src/base/platform/platform.h (18)
  93. deps/v8/src/base/platform/semaphore.h (2)
  94. deps/v8/src/base/platform/time.cc (33)
  95. deps/v8/src/base/platform/time.h (6)
  96. deps/v8/src/base/sys-info.h (2)
  97. deps/v8/src/base/utils/random-number-generator.h (2)
  98. deps/v8/src/bit-vector.h (7)
  99. deps/v8/src/bootstrapper.cc (590)
  100. deps/v8/src/bootstrapper.h (15)

1
deps/v8/AUTHORS

@ -76,6 +76,7 @@ Maxim Mossienko <maxim.mossienko@gmail.com>
Michael Lutz <michi@icosahedron.de>
Michael Smith <mike@w3.org>
Mike Gilbert <floppymaster@gmail.com>
Mike Pennisi <mike@mikepennisi.com>
Nicolas Antonius Ernst Leopold Maria Kaiser <nikai@nikai.net>
Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>

105
deps/v8/BUILD.gn

@ -32,9 +32,14 @@ v8_toolset_for_d8 = "host"
# TODO(GYP): For now we only support 32-bit little-endian target builds from an
# x64 Linux host. Eventually we need to support all of the host/target
# configurations v8 runs on.
if (host_cpu == "x64" && host_os == "linux" &&
(target_cpu == "arm" || target_cpu == "mipsel" || target_cpu == "x86")) {
snapshot_toolchain = "//build/toolchain/linux:clang_x86"
if (host_cpu == "x64" && host_os == "linux") {
if (target_cpu == "arm" || target_cpu == "mipsel" || target_cpu == "x86") {
snapshot_toolchain = "//build/toolchain/linux:clang_x86"
} else if (target_cpu == "x64") {
snapshot_toolchain = "//build/toolchain/linux:clang_x64"
} else {
assert(false, "Need environment for this arch")
}
} else {
snapshot_toolchain = default_toolchain
}
@ -196,6 +201,8 @@ action("js2c") {
inputs = [ "tools/jsmin.py" ]
sources = [
"src/macros.py",
"src/messages.h",
"src/runtime.js",
"src/v8natives.js",
"src/symbol.js",
@ -222,7 +229,6 @@ action("js2c") {
"src/mirror-debugger.js",
"src/liveedit-debugger.js",
"src/templates.js",
"src/macros.py",
]
outputs = [
@ -258,6 +264,7 @@ action("js2c_experimental") {
sources = [
"src/macros.py",
"src/messages.h",
"src/proxy.js",
"src/generator.js",
"src/harmony-array.js",
@ -265,7 +272,9 @@ action("js2c_experimental") {
"src/harmony-typedarray.js",
"src/harmony-tostring.js",
"src/harmony-regexp.js",
"src/harmony-reflect.js"
"src/harmony-reflect.js",
"src/harmony-spread.js",
"src/harmony-object.js"
]
outputs = [
@ -287,6 +296,36 @@ action("js2c_experimental") {
}
}
action("js2c_extras") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
script = "tools/js2c.py"
# The script depends on this other script, this rule causes a rebuild if it
# changes.
inputs = [ "tools/jsmin.py" ]
sources = v8_extra_library_files
outputs = [
"$target_gen_dir/extras-libraries.cc",
]
args = [
rebase_path("$target_gen_dir/extras-libraries.cc",
root_build_dir),
"EXTRAS",
] + rebase_path(sources, root_build_dir)
if (v8_use_external_startup_data) {
outputs += [ "$target_gen_dir/libraries_extras.bin" ]
args += [
"--startup_blob",
rebase_path("$target_gen_dir/libraries_extras.bin", root_build_dir),
]
}
}
action("d8_js2c") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
@ -312,11 +351,13 @@ if (v8_use_external_startup_data) {
deps = [
":js2c",
":js2c_experimental",
":js2c_extras",
]
sources = [
"$target_gen_dir/libraries.bin",
"$target_gen_dir/libraries_experimental.bin",
"$target_gen_dir/libraries_extras.bin",
]
outputs = [
@ -330,7 +371,12 @@ if (v8_use_external_startup_data) {
}
action("postmortem-metadata") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
# Only targets in this file and the top-level visibility target can
# depend on this.
visibility = [
":*",
"//:gn_visibility",
]
script = "tools/gen-postmortem-metadata.py"
@ -396,12 +442,14 @@ source_set("v8_nosnapshot") {
deps = [
":js2c",
":js2c_experimental",
":js2c_extras",
":v8_base",
]
sources = [
"$target_gen_dir/libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
"src/snapshot/snapshot-empty.cc",
]
@ -415,11 +463,17 @@ source_set("v8_nosnapshot") {
}
source_set("v8_snapshot") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
# Only targets in this file and the top-level visibility target can
# depend on this.
visibility = [
":*",
"//:gn_visibility",
]
deps = [
":js2c",
":js2c_experimental",
":js2c_extras",
":run_mksnapshot",
":v8_base",
]
@ -427,6 +481,7 @@ source_set("v8_snapshot") {
sources = [
"$target_gen_dir/libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
"$target_gen_dir/snapshot.cc",
]
@ -446,6 +501,7 @@ if (v8_use_external_startup_data) {
deps = [
":js2c",
":js2c_experimental",
":js2c_extras",
":run_mksnapshot",
":v8_base",
":natives_blob",
@ -530,6 +586,8 @@ source_set("v8_base") {
"src/codegen.h",
"src/compilation-cache.cc",
"src/compilation-cache.h",
"src/compilation-dependencies.cc",
"src/compilation-dependencies.h",
"src/compilation-statistics.cc",
"src/compilation-statistics.h",
"src/compiler/access-builder.cc",
@ -555,6 +613,7 @@ source_set("v8_base") {
"src/compiler/common-operator.h",
"src/compiler/control-builders.cc",
"src/compiler/control-builders.h",
"src/compiler/control-equivalence.cc",
"src/compiler/control-equivalence.h",
"src/compiler/control-flow-optimizer.cc",
"src/compiler/control-flow-optimizer.h",
@ -562,6 +621,10 @@ source_set("v8_base") {
"src/compiler/control-reducer.h",
"src/compiler/diamond.h",
"src/compiler/frame.h",
"src/compiler/frame-elider.cc",
"src/compiler/frame-elider.h",
"src/compiler/frame-states.cc",
"src/compiler/frame-states.h",
"src/compiler/gap-resolver.cc",
"src/compiler/gap-resolver.h",
"src/compiler/graph-builder.h",
@ -665,6 +728,8 @@ source_set("v8_base") {
"src/compiler/source-position.h",
"src/compiler/state-values-utils.cc",
"src/compiler/state-values-utils.h",
"src/compiler/tail-call-optimization.cc",
"src/compiler/tail-call-optimization.h",
"src/compiler/typer.cc",
"src/compiler/typer.h",
"src/compiler/value-numbering-reducer.cc",
@ -758,6 +823,8 @@ source_set("v8_base") {
"src/heap/heap-inl.h",
"src/heap/heap.cc",
"src/heap/heap.h",
"src/heap/identity-map.cc",
"src/heap/identity-map.h",
"src/heap/incremental-marking.cc",
"src/heap/incremental-marking.h",
"src/heap/mark-compact-inl.h",
@ -887,8 +954,8 @@ source_set("v8_base") {
"src/objects-printer.cc",
"src/objects.cc",
"src/objects.h",
"src/optimizing-compiler-thread.cc",
"src/optimizing-compiler-thread.h",
"src/optimizing-compile-dispatcher.cc",
"src/optimizing-compile-dispatcher.h",
"src/ostreams.cc",
"src/ostreams.h",
"src/parser.cc",
@ -964,6 +1031,7 @@ source_set("v8_base") {
"src/scopeinfo.h",
"src/scopes.cc",
"src/scopes.h",
"src/signature.h",
"src/small-pointer-list.h",
"src/smart-pointers.h",
"src/snapshot/natives.h",
@ -1006,7 +1074,6 @@ source_set("v8_base") {
"src/unicode-decoder.cc",
"src/unicode-decoder.h",
"src/unique.h",
"src/utils-inl.h",
"src/utils.cc",
"src/utils.h",
"src/v8.cc",
@ -1325,6 +1392,7 @@ source_set("v8_libbase") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
"src/base/adapters.h",
"src/base/atomicops.h",
"src/base/atomicops_internals_arm64_gcc.h",
"src/base/atomicops_internals_arm_gcc.h",
@ -1398,17 +1466,15 @@ source_set("v8_libbase") {
} else if (is_android) {
defines += [ "CAN_USE_VFP_INSTRUCTIONS" ]
if (host_os == "mac") {
if (current_toolchain == host_toolchain) {
if (current_toolchain == host_toolchain) {
libs = [ "dl", "rt" ]
if (host_os == "mac") {
sources += [ "src/base/platform/platform-macos.cc" ]
} else {
sources += [ "src/base/platform/platform-linux.cc" ]
}
} else {
sources += [ "src/base/platform/platform-linux.cc" ]
if (current_toolchain == host_toolchain) {
defines += [ "V8_LIBRT_NOT_AVAILABLE" ]
}
}
} else if (is_mac) {
sources += [ "src/base/platform/platform-macos.cc" ]
@ -1524,7 +1590,7 @@ if (component_mode == "shared_library") {
":toolchain",
]
direct_dependent_configs = [ ":external_config" ]
public_configs = [ ":external_config" ]
libs = []
if (is_android && current_toolchain != host_toolchain) {
@ -1551,7 +1617,7 @@ if (component_mode == "shared_library") {
]
}
direct_dependent_configs = [ ":external_config" ]
public_configs = [ ":external_config" ]
}
}
@ -1568,7 +1634,10 @@ if ((current_toolchain == host_toolchain && v8_toolset_for_d8 == "host") ||
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [
":internal_config",
# Note: don't use :internal_config here because this target will get
# the :external_config applied to it by virtue of depending on :v8, and
# you can't have both applied to the same target.
":internal_config_base",
":features",
":toolchain",
]

478
deps/v8/ChangeLog

@ -1,3 +1,481 @@
2015-05-11: Version 4.4.63

        Let Runtime_GrowArrayElements accept non-Smi numbers as |key| (Chromium
        issue 485410).

        Make one copy for all TypedArray methods (issue 4085).

        Performance and stability improvements on all platforms.

2015-05-09: Version 4.4.62

        [turbofan] Fix handling of OsrLoopEntry in ControlReducer::ConnectNTL()
        (Chromium issue 485908).

        Performance and stability improvements on all platforms.

2015-05-08: Version 4.4.61

        Performance and stability improvements on all platforms.

2015-05-08: Version 4.4.60

        Performance and stability improvements on all platforms.

2015-05-08: Version 4.4.59

        Performance and stability improvements on all platforms.

2015-05-07: Version 4.4.58

        TypedArray.prototype.every method (issue 3578).

        [V8] Reland https://codereview.chromium.org/1121833003/ (Chromium issue
        480652).

        Performance and stability improvements on all platforms.

2015-05-07: Version 4.4.57

        Performance and stability improvements on all platforms.

2015-05-06: Version 4.4.56

        Shard v8_base.lib on Windows to avoid 2G .lib limit (Chromium issue
        485155).

        Implement a 'trial parse' step, that will abort pre-parsing excessively
        long and trivial functions, so that they can be eagerly compiled after
        all. This essentially allows the parser to renege on its earlier
        decision to lazy-parse, if additional information suggests it was a bad
        decision (Chromium issue 470930).

        Performance and stability improvements on all platforms.

2015-05-06: Version 4.4.55

        Handle the case when derived constructor is [[Call]]ed with 0 args
        (Chromium issue 474783).

        freed_nodes in global-handles should be additive (Chromium issues
        479796, 484671).

        [V8] Reland https://codereview.chromium.org/1100993003/ (Chromium issue
        480652).

        [es6] When comparing two symbols we may need to throw a TypeError (issue
        4073).

        Performance and stability improvements on all platforms.

2015-05-06: Version 4.4.54

        Performance and stability improvements on all platforms.

2015-05-05: Version 4.4.53

        Performance and stability improvements on all platforms.

2015-05-05: Version 4.4.52

        Performance and stability improvements on all platforms.

2015-05-04: Version 4.4.51

        Performance and stability improvements on all platforms.

2015-05-04: Version 4.4.50

        Performance and stability improvements on all platforms.

2015-05-01: Version 4.4.49

        Performance and stability improvements on all platforms.

2015-05-01: Version 4.4.48

        [V8] Use previous token location as EOS token location (Chromium issue
        480652).

        Implement kToBeExecutedOnceCodeAge (Chromium issue 470930).

        Performance and stability improvements on all platforms.

2015-04-30: Version 4.4.47

        Performance and stability improvements on all platforms.

2015-04-30: Version 4.4.46

        Performance and stability improvements on all platforms.

2015-04-29: Version 4.4.45

        Performance and stability improvements on all platforms.

2015-04-29: Version 4.4.44

        Pass ArrayBuffer::Allocator via Isolate::CreateParams.

        Fix unobservable constructor replacement on prototype maps (Chromium
        issue 478522).

        Performance and stability improvements on all platforms.

2015-04-29: Version 4.4.43

        Performance and stability improvements on all platforms.

2015-04-28: Version 4.4.42

        MIPS: Fix FP load/store with large offsets from base register (Chromium
        issue 481519).

        Extending v8::GetHeapStatistics to return total available size (Chromium
        issue 476013).

        Performance and stability improvements on all platforms.

2015-04-28: Version 4.4.41

        Performance and stability improvements on all platforms.

2015-04-28: Version 4.4.40

        Do more to avoid last-resort stop-the-world GC (Chromium issue 481433).

        Performance and stability improvements on all platforms.

2015-04-27: Version 4.4.39

        Performance and stability improvements on all platforms.

2015-04-27: Version 4.4.38

        Performance and stability improvements on all platforms.

2015-04-25: Version 4.4.37

        Performance and stability improvements on all platforms.

2015-04-24: Version 4.4.36

        Performance and stability improvements on all platforms.

2015-04-24: Version 4.4.35

        Performance and stability improvements on all platforms.

2015-04-24: Version 4.4.34

        Performance and stability improvements on all platforms.

2015-04-23: Version 4.4.33

        Performance and stability improvements on all platforms.

2015-04-23: Version 4.4.32

        Performance and stability improvements on all platforms.

2015-04-23: Version 4.4.31

        Performance and stability improvements on all platforms.

2015-04-22: Version 4.4.30

        Performance and stability improvements on all platforms.

2015-04-22: Version 4.4.29

        Performance and stability improvements on all platforms.

2015-04-21: Version 4.4.28

        Performance and stability improvements on all platforms.

2015-04-21: Version 4.4.27

        Performance and stability improvements on all platforms.

2015-04-20: Version 4.4.26

        Allow for accessing an ArrayBuffer contents without externalizing it.

        Remove support for externally backed elements from the API (issue 3996).

        Deprecate 3-args ResourceConstraints::ConfigureDefaults.

        Indicate that low-memory-notification triggered GCs are "forced".

        Adding missing V8_EXPORT flag in SpaceStatistics class in v8.h (Chromium
        issues 466141, 476013).

        Performance and stability improvements on all platforms.

2015-04-20: Version 4.4.25

        Turn off SupportsFlexibleFloorAndRound for Arm64 due to a bug (Chromium
        issue 476477).

        Adding V8 api to get memory statistics of spaces in V8::Heap (Chromium
        issues 466141, 476013).

        Performance and stability improvements on all platforms.

2015-04-17: Version 4.4.24

        Performance and stability improvements on all platforms.

2015-04-17: Version 4.4.23

        Don't crash when reporting an access check failure for a detached global
        proxy (Chromium issue 475884).

        Use smaller heap growing factor in idle notification to start
        incremental marking when there is idle time >16ms (Chromium issue
        477323).

        Performance and stability improvements on all platforms.

2015-04-16: Version 4.4.22

        Reduce regexp compiler stack size when not optimizing regexps (Chromium
        issue 475705).

        Performance and stability improvements on all platforms.

2015-04-15: Version 4.4.21

        Remove support for specifying the number of available threads.

        When converting Maybe and MaybeLocal values with a check, always check.

        Performance and stability improvements on all platforms.

2015-04-15: Version 4.4.20

        Performance and stability improvements on all platforms.

2015-04-15: Version 4.4.19

        Reland "Remove support for thread-based recompilation" (issue 3608).

        Performance and stability improvements on all platforms.

2015-04-14: Version 4.4.18

        Reland "Remove support for thread-based recompilation" (issue 3608).

        Performance and stability improvements on all platforms.

2015-04-14: Version 4.4.17

        Performance and stability improvements on all platforms.

2015-04-13: Version 4.4.16

        Expose ArrayBufferView::HasBuffer (issue 3996).

        Performance and stability improvements on all platforms.

2015-04-13: Version 4.4.15

        Performance and stability improvements on all platforms.

2015-04-12: Version 4.4.14

        Performance and stability improvements on all platforms.

2015-04-12: Version 4.4.13

        Performance and stability improvements on all platforms.

2015-04-10: Version 4.4.12

        Performance and stability improvements on all platforms.

2015-04-10: Version 4.4.11

        Performance and stability improvements on all platforms.

2015-04-10: Version 4.4.10

        Don't #define snprintf in VS2015 - it's illegal and unneeded (Chromium
        issue 440500).

        Performance and stability improvements on all platforms.

2015-04-09: Version 4.4.9

        Performance and stability improvements on all platforms.

2015-04-09: Version 4.4.8

        Performance and stability improvements on all platforms.

2015-04-08: Version 4.4.7

        Make GetDebugContext a bit more robust (Chromium issue 474538).

        Performance and stability improvements on all platforms.

2015-04-08: Version 4.4.6

        Performance and stability improvements on all platforms.

2015-04-08: Version 4.4.5

        More robust when allocation fails during compaction (Chromium issue
        473307).

        MIPS: JSEntryTrampoline: check for stack space before pushing arguments
        (Chromium issue 469768).

        Performance and stability improvements on all platforms.

2015-04-07: Version 4.4.4

        Debugger: remove debug command API.

        Remove support for thread-based recompilation (issue 3608).

        JSEntryTrampoline: check for stack space before pushing arguments
        (Chromium issue 469768).

        Performance and stability improvements on all platforms.

2015-04-07: Version 4.4.3

        Performance and stability improvements on all platforms.

2015-04-06: Version 4.4.2

        Performance and stability improvements on all platforms.

2015-04-06: Version 4.4.1

        Support for typed arrays added to Heap::RightTrimFixedArray() (Chromium
        issue 472513).

        Expose an API on ArrayBufferView to copy out content w/o changing the
        buffer (issue 3996).

        Performance and stability improvements on all platforms.

2015-04-02: Version 4.3.66

        Reland: Fix JSON parser Handle leak (previous CL 1041483004) (issue
        3976, Chromium issue 472504).

        Turn off overapproximation of the weak closure again (issue 3862).

        Performance and stability improvements on all platforms.

2015-04-01: Version 4.3.65

        Performance and stability improvements on all platforms.

2015-04-01: Version 4.3.64

        Performance and stability improvements on all platforms.

2015-04-01: Version 4.3.63

        [V8] Don't ignore sourceURL comment in inline scripts in .stack (issue
        3920).

        Deprecate IdleNotification().

        Remove --harmony-numeric-literal flag.

        Performance and stability improvements on all platforms.

2015-03-31: Version 4.3.62

        Put newspace evacuation in an EvacuationScope (Chromium issue 471554).

        Fix libdl dependency on Android and remove librt hack (Chromium issue
        469973).

        Ensure that GC idle notifications either make progress or stop
        requesting more GCs (Chromium issue 470615).

        Layout descriptor must be trimmed when corresponding descriptors array
        is trimmed to stay in sync (Chromium issue 470804).

        Fix JSON parser Handle leak (issue 3976).

        Performance and stability improvements on all platforms.

2015-03-30: Version 4.3.61

        Performance and stability improvements on all platforms.
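
Several entries above concern the hardened Maybe/MaybeLocal API ("When
converting Maybe and MaybeLocal values with a check, always check"). A
hedged sketch of the checked-conversion pattern, not code from this commit,
assuming the Maybe-returning Int32Value(context) overload from this API
generation:

    // FromJust() now always checks and aborts on Nothing, so callers
    // should test before extracting the value.
    bool ToInt32Checked(v8::Local<v8::Context> context,
                        v8::Local<v8::Value> value, int32_t* out) {
      v8::Maybe<int32_t> result = value->Int32Value(context);
      if (result.IsNothing()) return false;  // conversion failed or threw
      *out = result.FromJust();              // safe: checked above
      return true;
    }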

8
deps/v8/DEPS

@ -8,17 +8,17 @@ vars = {
deps = {
"v8/build/gyp":
Var("git_url") + "/external/gyp.git" + "@" + "d174d75bf69c682cb62af9187879e01513b35e52",
Var("git_url") + "/external/gyp.git" + "@" + "0bb67471bca068996e15b56738fa4824dfa19de0",
"v8/third_party/icu":
Var("git_url") + "/chromium/deps/icu.git" + "@" + "7c81740601355556e630da515b74d889ba2f8d08",
Var("git_url") + "/chromium/deps/icu.git" + "@" + "f8c0e585b0a046d83d72b5d37356cb50d5b2031a",
"v8/buildtools":
Var("git_url") + "/chromium/buildtools.git" + "@" + "3b302fef93f7cc58d9b8168466905237484b2772",
Var("git_url") + "/chromium/buildtools.git" + "@" + "b0ede9c89f9d5fbe5387d961ad4c0ec665b6c821",
"v8/testing/gtest":
Var("git_url") + "/external/googletest.git" + "@" + "be1868139ffe0ccd0e8e3b37292b84c821d9c8ad",
"v8/testing/gmock":
Var("git_url") + "/external/googlemock.git" + "@" + "29763965ab52f24565299976b936d1265cb6a271", # from svn revision 501
"v8/tools/clang":
Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "ea2f0a2d96ffc6f5a51c034db704ccc1a6543156",
Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "5bab78c6ced45a71a8e095a09697ca80492e57e1",
}
deps_os = {

4
deps/v8/Makefile

@ -97,6 +97,10 @@ endif
ifeq ($(slowdchecks), off)
GYPFLAGS += -Dv8_enable_slow_dchecks=0
endif
# debugsymbols=on
ifeq ($(debugsymbols), on)
GYPFLAGS += -Drelease_extra_cflags=-ggdb3
endif
# gdbjit=on/off
ifeq ($(gdbjit), on)
GYPFLAGS += -Dv8_enable_gdbjit=1

4
deps/v8/PRESUBMIT.py

@ -249,12 +249,16 @@ def GetPreferredTryMasters(project, change):
'v8_linux_gcc_compile_rel': set(['defaulttests']),
'v8_linux64_rel': set(['defaulttests']),
'v8_linux64_asan_rel': set(['defaulttests']),
'v8_linux64_avx2_rel': set(['defaulttests']),
'v8_win_rel': set(['defaulttests']),
'v8_win_compile_dbg': set(['defaulttests']),
'v8_win_nosnap_shared_compile_rel': set(['defaulttests']),
'v8_win64_rel': set(['defaulttests']),
'v8_mac_rel': set(['defaulttests']),
'v8_linux_arm_rel': set(['defaulttests']),
'v8_linux_arm64_rel': set(['defaulttests']),
'v8_linux_mipsel_compile_rel': set(['defaulttests']),
'v8_linux_mips64el_compile_rel': set(['defaulttests']),
'v8_android_arm_compile_rel': set(['defaulttests']),
'v8_linux_chromium_gn_rel': set(['defaulttests']),
},

12
deps/v8/WATCHLISTS

@ -36,11 +36,23 @@
'public_api': {
'filepath': 'include/',
},
'snapshot': {
'filepath': 'src/snapshot/',
},
'debugger': {
'filepath': 'src/debug\.(cc|h)|src/.*-debugger\.js|src/runtime/runtime-debug\.cc',
},
},
'WATCHLISTS': {
'public_api': [
'phajdan.jr@chromium.org',
],
'snapshot': [
'yangguo@chromium.org',
],
'debugger': [
'yangguo@chromium.org',
],
},
}

1
deps/v8/build/OWNERS

@ -0,0 +1 @@
machenbach@chromium.org

73
deps/v8/build/android.gypi

@ -69,9 +69,6 @@
'android_stlport_libs': '<(android_stlport)/libs',
}],
],
# Enable to use the system stlport, otherwise statically
# link the NDK one?
'use_system_stlport%': '<(android_webview_build)',
'android_stlport_library': 'stlport_static',
}, # variables
'target_defaults': {
@ -108,6 +105,7 @@
# Note: This include is in cflags to ensure that it comes after
# all of the includes.
'-I<(android_include)',
'-I<(android_stlport_include)',
],
'cflags_cc': [
'-Wno-error=non-virtual-dtor', # TODO(michaelbai): Fix warnings.
@ -127,6 +125,8 @@
'ldflags': [
'-nostdlib',
'-Wl,--no-undefined',
'-Wl,-rpath-link=<(android_lib)',
'-L<(android_lib)',
],
'libraries!': [
'-lrt', # librt is built into Bionic.
@ -146,12 +146,6 @@
'-lm',
],
'conditions': [
['android_webview_build==0', {
'ldflags': [
'-Wl,-rpath-link=<(android_lib)',
'-L<(android_lib)',
],
}],
['target_arch == "arm"', {
'ldflags': [
# Enable identical code folding to reduce size.
@ -164,48 +158,23 @@
'-mtune=cortex-a8',
'-mfpu=vfp3',
],
'ldflags': [
'-L<(android_stlport_libs)/armeabi-v7a',
],
}],
# NOTE: The stlport header include paths below are specified in
# cflags rather than include_dirs because they need to come
# after include_dirs. Think of them like system headers, but
# don't use '-isystem' because the arm-linux-androideabi-4.4.3
# toolchain (circa Gingerbread) will exhibit strange errors.
# The include ordering here is important; change with caution.
['use_system_stlport==0', {
'cflags': [
'-I<(android_stlport_include)',
['target_arch=="arm" and arm_version < 7', {
'ldflags': [
'-L<(android_stlport_libs)/armeabi',
],
'conditions': [
['target_arch=="arm" and arm_version==7', {
'ldflags': [
'-L<(android_stlport_libs)/armeabi-v7a',
],
}],
['target_arch=="arm" and arm_version < 7', {
'ldflags': [
'-L<(android_stlport_libs)/armeabi',
],
}],
['target_arch=="mipsel"', {
'ldflags': [
'-L<(android_stlport_libs)/mips',
],
}],
['target_arch=="ia32" or target_arch=="x87"', {
'ldflags': [
'-L<(android_stlport_libs)/x86',
],
}],
['target_arch=="x64"', {
'ldflags': [
'-L<(android_stlport_libs)/x86_64',
],
}],
['target_arch=="arm64"', {
'ldflags': [
'-L<(android_stlport_libs)/arm64-v8a',
],
}],
}],
['target_arch=="x64"', {
'ldflags': [
'-L<(android_stlport_libs)/x86_64',
],
}],
['target_arch=="arm64"', {
'ldflags': [
'-L<(android_stlport_libs)/arm64-v8a',
],
}],
['target_arch=="ia32" or target_arch=="x87"', {
@ -216,6 +185,9 @@
'cflags': [
'-fno-stack-protector',
],
'ldflags': [
'-L<(android_stlport_libs)/x86',
],
}],
['target_arch=="mipsel"', {
# The mips toolchain currently has problems with stack-protector.
@ -226,6 +198,9 @@
'cflags': [
'-fno-stack-protector',
],
'ldflags': [
'-L<(android_stlport_libs)/mips',
],
}],
['(target_arch=="arm" or target_arch=="arm64" or target_arch=="x64") and component!="shared_library"', {
'cflags': [

4
deps/v8/build/features.gypi

@ -102,7 +102,7 @@
'DebugBaseCommon': {
'abstract': 1,
'variables': {
'v8_enable_handle_zapping%': 0,
'v8_enable_handle_zapping%': 1,
},
'conditions': [
['v8_enable_handle_zapping==1', {
@ -112,7 +112,7 @@
}, # Debug
'Release': {
'variables': {
'v8_enable_handle_zapping%': 1,
'v8_enable_handle_zapping%': 0,
},
'conditions': [
['v8_enable_handle_zapping==1', {

55
deps/v8/build/standalone.gypi

@ -35,12 +35,17 @@
'component%': 'static_library',
'clang_dir%': 'third_party/llvm-build/Release+Asserts',
'clang_xcode%': 0,
# Track where uninitialized memory originates from. From fastest to
# slowest: 0 - no tracking, 1 - track only the initial allocation site, 2
# - track the chain of stores leading from allocation site to use site.
'msan_track_origins%': 1,
'visibility%': 'hidden',
'v8_enable_backtrace%': 0,
'v8_enable_i18n_support%': 1,
'v8_deprecation_warnings': 1,
'msvs_multi_core_compile%': '1',
'mac_deployment_target%': '10.5',
'release_extra_cflags%': '',
'variables': {
'variables': {
'variables': {
@ -65,6 +70,10 @@
'host_arch%': '<(host_arch)',
'target_arch%': '<(target_arch)',
'v8_target_arch%': '<(target_arch)',
'asan%': 0,
'lsan%': 0,
'msan%': 0,
'tsan%': 0,
# goma settings.
# 1 to use goma.
@ -86,6 +95,10 @@
'werror%': '-Werror',
'use_goma%': '<(use_goma)',
'gomadir%': '<(gomadir)',
'asan%': '<(asan)',
'lsan%': '<(lsan)',
'msan%': '<(msan)',
'tsan%': '<(tsan)',
# .gyp files or targets should set v8_code to 1 if they build V8 specific
# code, as opposed to external code. This variable is used to control such
@ -157,6 +170,10 @@
}, {
'host_clang%': '0',
}],
['asan==1 or lsan==1 or msan==1 or tsan==1', {
'clang%': 1,
'use_allocator%': 'none',
}],
],
# Default ARM variable settings.
'arm_version%': 'default',
@ -196,7 +213,7 @@
# Xcode insists on this empty entry.
},
'Release': {
# Xcode insists on this empty entry.
'cflags+': ['<@(release_extra_cflags)'],
},
},
'conditions':[
@ -226,6 +243,7 @@
'-Wall',
'-Werror',
'-Wextra',
'-Wshorten-64-to-32',
],
'cflags+': [
# Clang considers the `register` keyword as deprecated, but
@ -302,6 +320,36 @@
],
},
}],
['msan==1 and OS!="mac"', {
'target_defaults': {
'cflags_cc+': [
'-fno-omit-frame-pointer',
'-gline-tables-only',
'-fsanitize=memory',
'-fsanitize-memory-track-origins=<(msan_track_origins)',
'-fPIC',
],
'cflags+': [
'-fPIC',
],
'cflags!': [
'-fno-exceptions',
'-fomit-frame-pointer',
],
'ldflags': [
'-fsanitize=memory',
],
'defines': [
'MEMORY_SANITIZER',
],
'dependencies': [
# Use libc++ (third_party/libc++ and third_party/libc++abi) instead of
# stdlibc++ as standard library. This is intended to use for instrumented
# builds.
'<(DEPTH)/buildtools/third_party/libc++/libc++.gyp:libcxx_proxy',
],
},
}],
['asan==1 and OS=="mac"', {
'target_defaults': {
'xcode_settings': {
@ -342,6 +390,11 @@
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ],
'ldflags': [ '-pthread', ],
'conditions': [
# TODO(arm64): It'd be nice to enable this for arm64 as well,
# but the Assembler requires some serious fixing first.
[ 'clang==1 and v8_target_arch=="x64"', {
'cflags': [ '-Wshorten-64-to-32' ],
}],
[ 'host_arch=="ppc64" and OS!="aix"', {
'cflags': [ '-mminimal-toc' ],
}],

30
deps/v8/build/toolchain.gypi

@ -131,14 +131,6 @@
# Link-Time Optimizations
'use_lto%': 0,
'variables': {
# This is set when building the Android WebView inside the Android build
# system, using the 'android' gyp backend.
'android_webview_build%': 0,
},
# Copy it out one scope.
'android_webview_build%': '<(android_webview_build)',
},
'conditions': [
['host_arch=="ia32" or host_arch=="x64" or \
@ -203,7 +195,7 @@
'target_conditions': [
['_toolset=="host"', {
'conditions': [
['v8_target_arch==host_arch and android_webview_build==0', {
['v8_target_arch==host_arch', {
# Host built with an Arm CXX compiler.
'conditions': [
[ 'arm_version==7', {
@ -246,7 +238,7 @@
}], # _toolset=="host"
['_toolset=="target"', {
'conditions': [
['v8_target_arch==target_arch and android_webview_build==0', {
['v8_target_arch==target_arch', {
# Target built with an Arm CXX compiler.
'conditions': [
[ 'arm_version==7', {
@ -370,7 +362,7 @@
'target_conditions': [
['_toolset=="target"', {
'conditions': [
['v8_target_arch==target_arch and android_webview_build==0', {
['v8_target_arch==target_arch', {
# Target built with a Mips CXX compiler.
'cflags': [
'-EB',
@ -557,7 +549,7 @@
'target_conditions': [
['_toolset=="target"', {
'conditions': [
['v8_target_arch==target_arch and android_webview_build==0', {
['v8_target_arch==target_arch', {
# Target built with a Mips CXX compiler.
'cflags': [
'-EL',
@ -761,7 +753,7 @@
'target_conditions': [
['_toolset=="target"', {
'conditions': [
['v8_target_arch==target_arch and android_webview_build==0', {
['v8_target_arch==target_arch', {
'cflags': [
'-EL',
'-Wno-error=array-bounds', # Workaround https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56273
@ -929,12 +921,6 @@
'cflags': [ '-m32' ],
'ldflags': [ '-m32' ],
}],
# Enable feedback-directed optimisation when building in android.
[ 'android_webview_build == 1', {
'aosp_build_settings': {
'LOCAL_FDO_SUPPORT': 'true',
},
}],
],
'xcode_settings': {
'ARCHS': [ 'i386' ],
@ -960,12 +946,6 @@
'cflags': [ '-m64' ],
'ldflags': [ '-m64' ],
}],
# Enable feedback-directed optimisation when building in android.
[ 'android_webview_build == 1', {
'aosp_build_settings': {
'LOCAL_FDO_SUPPORT': 'true',
},
}],
]
}],
],

2
deps/v8/include/OWNERS

@ -0,0 +1,2 @@
danno@chromium.org
jochen@chromium.org

8
deps/v8/include/v8-debug.h

@ -22,7 +22,6 @@ enum DebugEvent {
CompileError = 6,
PromiseEvent = 7,
AsyncTaskEvent = 8,
BreakForCommand = 9
};
@ -170,13 +169,6 @@ class V8_EXPORT Debug {
// Check if a debugger break is scheduled in the given isolate.
static bool CheckDebugBreak(Isolate* isolate);
// Break execution of JavaScript in the given isolate (this method
// can be invoked from a non-VM thread) for further client command
// execution on a VM thread. Client data is then passed in
// EventDetails to EventCallback2 at the moment when the VM actually
// stops.
static void DebugBreakForCommand(Isolate* isolate, ClientData* data);
// Message based interface. The message protocol is JSON.
static void SetMessageHandler(MessageHandler handler);

32
deps/v8/include/v8-profiler.h

@ -5,6 +5,7 @@
#ifndef V8_V8_PROFILER_H_
#define V8_V8_PROFILER_H_
#include <vector>
#include "v8.h"
/**
@ -17,6 +18,34 @@ struct HeapStatsUpdate;
typedef uint32_t SnapshotObjectId;
struct CpuProfileDeoptFrame {
int script_id;
size_t position;
};
} // namespace v8
#ifdef V8_OS_WIN
template class V8_EXPORT std::vector<v8::CpuProfileDeoptFrame>;
#endif
namespace v8 {
struct V8_EXPORT CpuProfileDeoptInfo {
/** A pointer to a static string owned by v8. */
const char* deopt_reason;
std::vector<CpuProfileDeoptFrame> stack;
};
} // namespace v8
#ifdef V8_OS_WIN
template class V8_EXPORT std::vector<v8::CpuProfileDeoptInfo>;
#endif
namespace v8 {
/**
* CpuProfileNode represents a node in a call graph.
*/
@ -85,6 +114,9 @@ class V8_EXPORT CpuProfileNode {
/** Retrieves a child node by index. */
const CpuProfileNode* GetChild(int index) const;
/** Retrieves deopt infos for the node. */
const std::vector<CpuProfileDeoptInfo>& GetDeoptInfos() const;
static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
};
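
A hedged sketch (not from this commit) of consuming the deopt info the new
profiler surface above exposes. It assumes `node` came from a completed
v8::CpuProfile, e.g. profile->GetTopDownRoot():

    #include <cstdio>
    #include <include/v8-profiler.h>

    void PrintDeoptInfos(const v8::CpuProfileNode* node) {
      for (const v8::CpuProfileDeoptInfo& info : node->GetDeoptInfos()) {
        std::printf("deopt: %s\n", info.deopt_reason);  // static, owned by V8
        for (const v8::CpuProfileDeoptFrame& frame : info.stack) {
          std::printf("  script %d, position %zu\n", frame.script_id,
                      frame.position);
        }
      }
      for (int i = 0; i < node->GetChildrenCount(); ++i) {
        PrintDeoptInfos(node->GetChild(i));  // walk the call tree
      }
    }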

49
deps/v8/include/v8-util.h

@ -117,26 +117,25 @@ class DefaultGlobalMapTraits : public StdMapTraits<K, V> {
public:
// Weak callback & friends:
static const PersistentContainerCallbackType kCallbackType = kNotWeak;
typedef PersistentValueMap<K, V, DefaultGlobalMapTraits<K, V> > MapType;
typedef void WeakCallbackInfoType;
typedef GlobalValueMap<K, V, DefaultGlobalMapTraits<K, V> > MapType;
typedef void WeakCallbackDataType;
static WeakCallbackInfoType* WeakCallbackParameter(MapType* map, const K& key,
static WeakCallbackDataType* WeakCallbackParameter(MapType* map, const K& key,
Local<V> value) {
return nullptr;
}
static MapType* MapFromWeakCallbackInfo(
const WeakCallbackInfo<WeakCallbackInfoType>& data) {
const WeakCallbackInfo<WeakCallbackDataType>& data) {
return nullptr;
}
static K KeyFromWeakCallbackInfo(
const WeakCallbackInfo<WeakCallbackInfoType>& data) {
const WeakCallbackInfo<WeakCallbackDataType>& data) {
return K();
}
static void DisposeCallbackData(WeakCallbackInfoType* data) {}
static void DisposeCallbackData(WeakCallbackDataType* data) {}
static void Dispose(Isolate* isolate, Global<V> value, K key) {}
static void DisposeWeak(Isolate* isolate,
const WeakCallbackInfo<WeakCallbackInfoType>& data,
K key) {}
// This is a second pass callback, so SetSecondPassCallback cannot be called.
static void DisposeWeak(const WeakCallbackInfo<WeakCallbackDataType>& data) {}
private:
template <typename T>
@ -453,7 +452,7 @@ class GlobalValueMap : public PersistentValueMapBase<K, V, Traits> {
: WeakCallbackType::kParameter;
Local<V> value(Local<V>::New(this->isolate(), *persistent));
persistent->template SetWeak<typename Traits::WeakCallbackDataType>(
Traits::WeakCallbackParameter(this, key, value), WeakCallback,
Traits::WeakCallbackParameter(this, key, value), FirstWeakCallback,
callback_type);
}
PersistentContainerValue old_value =
@ -472,16 +471,20 @@ class GlobalValueMap : public PersistentValueMapBase<K, V, Traits> {
}
private:
static void WeakCallback(
static void FirstWeakCallback(
const WeakCallbackInfo<typename Traits::WeakCallbackDataType>& data) {
if (Traits::kCallbackType != kNotWeak) {
GlobalValueMap<K, V, Traits>* persistentValueMap =
Traits::MapFromWeakCallbackInfo(data);
auto map = Traits::MapFromWeakCallbackInfo(data);
K key = Traits::KeyFromWeakCallbackInfo(data);
persistentValueMap->RemoveWeak(key);
Traits::DisposeWeak(data.GetIsolate(), data, key);
map->RemoveWeak(key);
data.SetSecondPassCallback(SecondWeakCallback);
}
}
static void SecondWeakCallback(
const WeakCallbackInfo<typename Traits::WeakCallbackDataType>& data) {
Traits::DisposeWeak(data);
}
};
@ -501,6 +504,22 @@ class StdPersistentValueMap : public PersistentValueMap<K, V, Traits> {
};
/**
* A map that uses Global as value and std::map as the backing
* implementation. Globals are held non-weak.
*
* C++11 embedders don't need this class, as they can use
* Global directly in std containers.
*/
template <typename K, typename V,
typename Traits = DefaultGlobalMapTraits<K, V> >
class StdGlobalValueMap : public GlobalValueMap<K, V, Traits> {
public:
explicit StdGlobalValueMap(Isolate* isolate)
: GlobalValueMap<K, V, Traits>(isolate) {}
};
class DefaultPersistentValueVectorTraits {
public:
typedef std::vector<PersistentContainerValue> Impl;
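
The FirstWeakCallback/SecondWeakCallback split above is the new two-pass
weak-callback protocol: the first pass may only reset handles and request a
second pass, which then does the real cleanup. A standalone hedged sketch of
the same pattern (Payload and the function names are illustrative):

    struct Payload {                        // hypothetical embedder state
      v8::Global<v8::Object> handle;
    };

    void SecondPass(const v8::WeakCallbackInfo<Payload>& data) {
      delete data.GetParameter();           // heavy cleanup happens here
    }

    void FirstPass(const v8::WeakCallbackInfo<Payload>& data) {
      data.GetParameter()->handle.Reset();  // only reset handles in pass one
      data.SetSecondPassCallback(SecondPass);
    }

    // Installed with:
    //   payload->handle.SetWeak(payload, FirstPass,
    //                           v8::WeakCallbackType::kParameter);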

6
deps/v8/include/v8-version.h

@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 4
#define V8_MINOR_VERSION 3
#define V8_BUILD_NUMBER 61
#define V8_PATCH_LEVEL 21
#define V8_MINOR_VERSION 4
#define V8_BUILD_NUMBER 63
#define V8_PATCH_LEVEL 9
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)

571
deps/v8/include/v8.h

File diff suppressed because it is too large

36
deps/v8/include/v8config.h

@ -162,9 +162,6 @@
// V8_HAS_CXX11_ALIGNAS - alignas specifier supported
// V8_HAS_CXX11_ALIGNOF - alignof(type) operator supported
// V8_HAS_CXX11_STATIC_ASSERT - static_assert() supported
// V8_HAS_CXX11_DELETE - deleted functions supported
// V8_HAS_CXX11_FINAL - final marker supported
// V8_HAS_CXX11_OVERRIDE - override marker supported
//
// Compiler-specific feature detection
//
@ -190,7 +187,6 @@
// V8_HAS_DECLSPEC_DEPRECATED - __declspec(deprecated) supported
// V8_HAS_DECLSPEC_NOINLINE - __declspec(noinline) supported
// V8_HAS_DECLSPEC_SELECTANY - __declspec(selectany) supported
// V8_HAS___FINAL - __final supported in non-C++11 mode
// V8_HAS___FORCEINLINE - __forceinline supported
//
// Note that testing for compilers and/or features must be done using #if
@ -230,9 +226,6 @@
# define V8_HAS_CXX11_ALIGNAS (__has_feature(cxx_alignas))
# define V8_HAS_CXX11_STATIC_ASSERT (__has_feature(cxx_static_assert))
# define V8_HAS_CXX11_DELETE (__has_feature(cxx_deleted_functions))
# define V8_HAS_CXX11_FINAL (__has_feature(cxx_override_control))
# define V8_HAS_CXX11_OVERRIDE (__has_feature(cxx_override_control))
#elif defined(__GNUC__)
@ -274,12 +267,6 @@
# define V8_HAS_CXX11_ALIGNAS (V8_GNUC_PREREQ(4, 8, 0))
# define V8_HAS_CXX11_ALIGNOF (V8_GNUC_PREREQ(4, 8, 0))
# define V8_HAS_CXX11_STATIC_ASSERT (V8_GNUC_PREREQ(4, 3, 0))
# define V8_HAS_CXX11_DELETE (V8_GNUC_PREREQ(4, 4, 0))
# define V8_HAS_CXX11_OVERRIDE (V8_GNUC_PREREQ(4, 7, 0))
# define V8_HAS_CXX11_FINAL (V8_GNUC_PREREQ(4, 7, 0))
# else
// '__final' is a non-C++11 GCC synonym for 'final', per GCC r176655.
# define V8_HAS___FINAL (V8_GNUC_PREREQ(4, 7, 0))
# endif
#elif defined(_MSC_VER)
@ -288,9 +275,6 @@
# define V8_HAS___ALIGNOF 1
# define V8_HAS_CXX11_FINAL 1
# define V8_HAS_CXX11_OVERRIDE 1
# define V8_HAS_DECLSPEC_ALIGN 1
# define V8_HAS_DECLSPEC_DEPRECATED 1
# define V8_HAS_DECLSPEC_NOINLINE 1
@ -357,26 +341,6 @@ declarator __attribute__((deprecated))
#endif
// A macro to specify that a method is deleted from the corresponding class.
// Any attempt to use the method will always produce an error at compile time
// when this macro can be implemented (i.e. if the compiler supports C++11).
// If the current compiler does not support C++11, use of the annotated method
// will still cause an error, but the error will most likely occur at link time
// rather than at compile time. As a backstop, method declarations using this
// macro should be private.
// Use like:
// class A {
// private:
// A(const A& other) V8_DELETE;
// A& operator=(const A& other) V8_DELETE;
// };
#if V8_HAS_CXX11_DELETE
# define V8_DELETE = delete
#else
# define V8_DELETE /* NOT SUPPORTED */
#endif
// This macro allows to specify memory alignment for structs, classes, etc.
// Use like:
// class V8_ALIGNED(16) MyClass { ... };
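
With the toolchain floor now at C++11 there is no fallback case left for
V8_DELETE, so the removed macro's usage example reduces to the language
feature itself; a sketch adapted from the deleted comment above:

    // What `A(const A&) V8_DELETE;` becomes once C++11 is assumed:
    class A {
     public:
      A() {}
      A(const A& other) = delete;             // compile-time error on copy
      A& operator=(const A& other) = delete;  // compile-time error on assign
    };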

34
deps/v8/samples/process.cc

@ -29,12 +29,26 @@
#include <include/libplatform/libplatform.h>
#include <stdlib.h>
#include <string.h>
#include <map>
#include <string>
using namespace std;
using namespace v8;
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
virtual void* Allocate(size_t length) {
void* data = AllocateUninitialized(length);
return data == NULL ? data : memset(data, 0, length);
}
virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
virtual void Free(void* data, size_t) { free(data); }
};
// These interfaces represent an existing request processing interface.
// The idea is to imagine a real application that uses these interfaces
// and then add scripting capabilities that allow you to interact with
@ -595,18 +609,21 @@ Handle<String> ReadFile(Isolate* isolate, const string& name) {
if (file == NULL) return Handle<String>();
fseek(file, 0, SEEK_END);
int size = ftell(file);
size_t size = ftell(file);
rewind(file);
char* chars = new char[size + 1];
chars[size] = '\0';
for (int i = 0; i < size;) {
int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
i += read;
for (size_t i = 0; i < size;) {
i += fread(&chars[i], 1, size - i, file);
if (ferror(file)) {
fclose(file);
return Handle<String>();
}
}
fclose(file);
Handle<String> result =
String::NewFromUtf8(isolate, chars, String::kNormalString, size);
Handle<String> result = String::NewFromUtf8(
isolate, chars, String::kNormalString, static_cast<int>(size));
delete[] chars;
return result;
}
@ -653,7 +670,10 @@ int main(int argc, char* argv[]) {
fprintf(stderr, "No script was specified.\n");
return 1;
}
Isolate* isolate = Isolate::New();
ArrayBufferAllocator array_buffer_allocator;
Isolate::CreateParams create_params;
create_params.array_buffer_allocator = &array_buffer_allocator;
Isolate* isolate = Isolate::New(create_params);
Isolate::Scope isolate_scope(isolate);
HandleScope scope(isolate);
Handle<String> source = ReadFile(isolate, file);

20
deps/v8/samples/shell.cc

@ -82,8 +82,9 @@ int main(int argc, char* argv[]) {
v8::V8::Initialize();
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
ShellArrayBufferAllocator array_buffer_allocator;
v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
v8::Isolate* isolate = v8::Isolate::New();
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = &array_buffer_allocator;
v8::Isolate* isolate = v8::Isolate::New(create_params);
run_shell = (argc == 1);
int result;
{
@ -238,18 +239,21 @@ v8::Handle<v8::String> ReadFile(v8::Isolate* isolate, const char* name) {
if (file == NULL) return v8::Handle<v8::String>();
fseek(file, 0, SEEK_END);
int size = ftell(file);
size_t size = ftell(file);
rewind(file);
char* chars = new char[size + 1];
chars[size] = '\0';
for (int i = 0; i < size;) {
int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
i += read;
for (size_t i = 0; i < size;) {
i += fread(&chars[i], 1, size - i, file);
if (ferror(file)) {
fclose(file);
return v8::Handle<v8::String>();
}
}
fclose(file);
v8::Handle<v8::String> result =
v8::String::NewFromUtf8(isolate, chars, v8::String::kNormalString, size);
v8::Handle<v8::String> result = v8::String::NewFromUtf8(
isolate, chars, v8::String::kNormalString, static_cast<int>(size));
delete[] chars;
return result;
}
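
Taken together, the sample diffs show the new isolate-creation idiom: the
ArrayBuffer allocator travels through Isolate::CreateParams instead of the
removed V8::SetArrayBufferAllocator() call. A minimal hedged sketch
mirroring shell.cc above (MallocAllocator and NewIsolate are illustrative
names):

    #include <cstdlib>
    #include <cstring>
    #include <include/v8.h>

    class MallocAllocator : public v8::ArrayBuffer::Allocator {
     public:
      virtual void* Allocate(size_t length) {
        void* data = AllocateUninitialized(length);
        return data == NULL ? data : memset(data, 0, length);
      }
      virtual void* AllocateUninitialized(size_t length) {
        return malloc(length);
      }
      virtual void Free(void* data, size_t) { free(data); }
    };

    v8::Isolate* NewIsolate() {
      static MallocAllocator allocator;  // must outlive the isolate
      v8::Isolate::CreateParams create_params;
      create_params.array_buffer_allocator = &allocator;
      return v8::Isolate::New(create_params);  // pre-4.4: Isolate::New()
    }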

155
deps/v8/src/accessors.cc

@ -13,6 +13,7 @@
#include "src/frames-inl.h"
#include "src/isolate.h"
#include "src/list-inl.h"
#include "src/messages.h"
#include "src/property-details.h"
#include "src/prototype.h"
@ -78,24 +79,9 @@ bool Accessors::IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
return
CheckForName(name, isolate->factory()->length_string(),
JSArray::kLengthOffset, object_offset);
case JS_TYPED_ARRAY_TYPE:
return
CheckForName(name, isolate->factory()->length_string(),
JSTypedArray::kLengthOffset, object_offset) ||
CheckForName(name, isolate->factory()->byte_length_string(),
JSTypedArray::kByteLengthOffset, object_offset) ||
CheckForName(name, isolate->factory()->byte_offset_string(),
JSTypedArray::kByteOffsetOffset, object_offset);
case JS_ARRAY_BUFFER_TYPE:
return
CheckForName(name, isolate->factory()->byte_length_string(),
JSArrayBuffer::kByteLengthOffset, object_offset);
case JS_DATA_VIEW_TYPE:
return
CheckForName(name, isolate->factory()->byte_length_string(),
JSDataView::kByteLengthOffset, object_offset) ||
CheckForName(name, isolate->factory()->byte_offset_string(),
JSDataView::kByteOffsetOffset, object_offset);
return CheckForName(name, isolate->factory()->byte_length_string(),
JSArrayBuffer::kByteLengthOffset, object_offset);
default:
if (map->instance_type() < FIRST_NONSTRING_TYPE) {
return CheckForName(name, isolate->factory()->length_string(),
@ -107,6 +93,39 @@ bool Accessors::IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
}
bool Accessors::IsJSArrayBufferViewFieldAccessor(Handle<Map> map,
Handle<Name> name,
int* object_offset) {
Isolate* isolate = name->GetIsolate();
switch (map->instance_type()) {
case JS_TYPED_ARRAY_TYPE:
// %TypedArray%.prototype is non-configurable, and so are the following
// named properties on %TypedArray%.prototype, so we can directly inline
// the field-load for typed array maps that still use their
// %TypedArray%.prototype.
if (JSFunction::cast(map->GetConstructor())->prototype() !=
map->prototype()) {
return false;
}
return CheckForName(name, isolate->factory()->length_string(),
JSTypedArray::kLengthOffset, object_offset) ||
CheckForName(name, isolate->factory()->byte_length_string(),
JSTypedArray::kByteLengthOffset, object_offset) ||
CheckForName(name, isolate->factory()->byte_offset_string(),
JSTypedArray::kByteOffsetOffset, object_offset);
case JS_DATA_VIEW_TYPE:
return CheckForName(name, isolate->factory()->byte_length_string(),
JSDataView::kByteLengthOffset, object_offset) ||
CheckForName(name, isolate->factory()->byte_offset_string(),
JSDataView::kByteOffsetOffset, object_offset);
default:
return false;
}
}
bool SetPropertyOnInstanceIfInherited(
Isolate* isolate, const v8::PropertyCallbackInfo<void>& info,
v8::Local<v8::Name> name, Handle<Object> value) {
@ -242,8 +261,8 @@ void Accessors::ArrayLengthSetter(
return;
}
Handle<Object> exception = isolate->factory()->NewRangeError(
"invalid_array_length", HandleVector<Object>(NULL, 0));
Handle<Object> exception =
isolate->factory()->NewRangeError(MessageTemplate::kInvalidArrayLength);
isolate->ScheduleThrow(*exception);
}
@ -304,98 +323,6 @@ Handle<AccessorInfo> Accessors::StringLengthInfo(
}
template <typename Char>
inline int CountRequiredEscapes(Handle<String> source) {
DisallowHeapAllocation no_gc;
int escapes = 0;
Vector<const Char> src = source->GetCharVector<Char>();
for (int i = 0; i < src.length(); i++) {
if (src[i] == '/' && (i == 0 || src[i - 1] != '\\')) escapes++;
}
return escapes;
}
template <typename Char, typename StringType>
inline Handle<StringType> WriteEscapedRegExpSource(Handle<String> source,
Handle<StringType> result) {
DisallowHeapAllocation no_gc;
Vector<const Char> src = source->GetCharVector<Char>();
Vector<Char> dst(result->GetChars(), result->length());
int s = 0;
int d = 0;
while (s < src.length()) {
if (src[s] == '/' && (s == 0 || src[s - 1] != '\\')) dst[d++] = '\\';
dst[d++] = src[s++];
}
DCHECK_EQ(result->length(), d);
return result;
}
MaybeHandle<String> EscapeRegExpSource(Isolate* isolate,
Handle<String> source) {
String::Flatten(source);
if (source->length() == 0) return isolate->factory()->query_colon_string();
bool one_byte = source->IsOneByteRepresentationUnderneath();
int escapes = one_byte ? CountRequiredEscapes<uint8_t>(source)
: CountRequiredEscapes<uc16>(source);
if (escapes == 0) return source;
int length = source->length() + escapes;
if (one_byte) {
Handle<SeqOneByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
isolate->factory()->NewRawOneByteString(length),
String);
return WriteEscapedRegExpSource<uint8_t>(source, result);
} else {
Handle<SeqTwoByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
isolate->factory()->NewRawTwoByteString(length),
String);
return WriteEscapedRegExpSource<uc16>(source, result);
}
}
// Implements ECMA262 ES6 draft 21.2.5.9
void Accessors::RegExpSourceGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> holder =
Utils::OpenHandle(*v8::Local<v8::Value>(info.Holder()));
Handle<JSRegExp> regexp = Handle<JSRegExp>::cast(holder);
Handle<String> result;
if (regexp->TypeTag() == JSRegExp::NOT_COMPILED) {
result = isolate->factory()->empty_string();
} else {
Handle<String> pattern(regexp->Pattern(), isolate);
MaybeHandle<String> maybe = EscapeRegExpSource(isolate, pattern);
if (!maybe.ToHandle(&result)) {
isolate->OptionalRescheduleException(false);
return;
}
}
info.GetReturnValue().Set(Utils::ToLocal(result));
}
void Accessors::RegExpSourceSetter(v8::Local<v8::Name> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE();
}
Handle<AccessorInfo> Accessors::RegExpSourceInfo(
Isolate* isolate, PropertyAttributes attributes) {
return MakeAccessor(isolate, isolate->factory()->source_string(),
&RegExpSourceGetter, &RegExpSourceSetter, attributes);
}
//
// Accessors::ScriptColumnOffset
//
@ -1102,7 +1029,7 @@ MUST_USE_RESULT static MaybeHandle<Object> ReplaceAccessorWithDataProperty(
CHECK_EQ(LookupIterator::ACCESSOR, it.state());
DCHECK(it.HolderIsReceiverOrHiddenPrototype());
it.ReconfigureDataProperty(value, it.property_details().attributes());
value = it.WriteDataValue(value);
it.WriteDataValue(value);
if (is_observed && !old_value->SameValue(*value)) {
return JSObject::EnqueueChangeRecord(object, "update", name, old_value);
@ -1517,7 +1444,7 @@ static void ModuleGetExport(
Handle<String> name = v8::Utils::OpenHandle(*property);
Handle<Object> exception = isolate->factory()->NewReferenceError(
"not_defined", HandleVector(&name, 1));
MessageTemplate::kNotDefined, name);
isolate->ScheduleThrow(*exception);
return;
}
@ -1538,7 +1465,7 @@ static void ModuleSetExport(
if (old_value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
Handle<Object> exception = isolate->factory()->NewReferenceError(
"not_defined", HandleVector(&name, 1));
MessageTemplate::kNotDefined, name);
isolate->ScheduleThrow(*exception);
return;
}

9
deps/v8/src/accessors.h

@ -21,7 +21,6 @@ namespace internal {
V(FunctionName) \
V(FunctionLength) \
V(FunctionPrototype) \
V(RegExpSource) \
V(ScriptColumnOffset) \
V(ScriptCompilationType) \
V(ScriptContextData) \
@ -81,6 +80,14 @@ class Accessors : public AllStatic {
static bool IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
int* object_offset);
// Returns true for properties that are accessors to ArrayBufferView and
// derived classes fields. If true, *object_offset contains offset of
// object field. The caller still has to check whether the underlying
// buffer was neutered.
static bool IsJSArrayBufferViewFieldAccessor(Handle<Map> map,
Handle<Name> name,
int* object_offset);
static Handle<AccessorInfo> MakeAccessor(
Isolate* isolate,
Handle<Name> name,

1
deps/v8/src/allocation-site-scopes.h

@ -75,7 +75,6 @@ class AllocationSiteUsageContext : public AllocationSiteContext {
// Advance current site
Object* nested_site = current()->nested_site();
// Something is wrong if we advance to the end of the list here.
DCHECK(nested_site->IsAllocationSite());
update_current_site(AllocationSite::cast(nested_site));
}
return Handle<AllocationSite>(*current(), isolate());

4
deps/v8/src/allocation-tracker.cc

@ -262,8 +262,8 @@ static uint32_t SnapshotObjectIdHash(SnapshotObjectId id) {
unsigned AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
SnapshotObjectId id) {
HashMap::Entry* entry = id_to_function_info_index_.Lookup(
reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), true);
HashMap::Entry* entry = id_to_function_info_index_.LookupOrInsert(
reinterpret_cast<void*>(id), SnapshotObjectIdHash(id));
if (entry->value == NULL) {
FunctionInfo* info = new FunctionInfo();
info->name = names_->GetFunctionName(shared->DebugName());

6
deps/v8/src/api-natives.cc

@ -3,11 +3,15 @@
// found in the LICENSE file.
#include "src/api-natives.h"
#include "src/isolate-inl.h"
#include "src/api.h"
#include "src/isolate.h"
#include "src/lookup.h"
namespace v8 {
namespace internal {
namespace {
MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,

440
deps/v8/src/api.cc

@ -30,6 +30,7 @@
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/global-handles.h"
#include "src/heap/spaces.h"
#include "src/heap-profiler.h"
#include "src/heap-snapshot-generator-inl.h"
#include "src/icu_util.h"
@ -217,14 +218,10 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
heap_stats.new_space_size = &new_space_size;
int new_space_capacity;
heap_stats.new_space_capacity = &new_space_capacity;
intptr_t old_pointer_space_size;
heap_stats.old_pointer_space_size = &old_pointer_space_size;
intptr_t old_pointer_space_capacity;
heap_stats.old_pointer_space_capacity = &old_pointer_space_capacity;
intptr_t old_data_space_size;
heap_stats.old_data_space_size = &old_data_space_size;
intptr_t old_data_space_capacity;
heap_stats.old_data_space_capacity = &old_data_space_capacity;
intptr_t old_space_size;
heap_stats.old_space_size = &old_space_size;
intptr_t old_space_capacity;
heap_stats.old_space_capacity = &old_space_capacity;
intptr_t code_space_size;
heap_stats.code_space_size = &code_space_size;
intptr_t code_space_capacity;
@@ -233,10 +230,6 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
heap_stats.map_space_size = &map_space_size;
intptr_t map_space_capacity;
heap_stats.map_space_capacity = &map_space_capacity;
intptr_t cell_space_size;
heap_stats.cell_space_size = &cell_space_size;
intptr_t cell_space_capacity;
heap_stats.cell_space_capacity = &cell_space_capacity;
intptr_t lo_space_size;
heap_stats.lo_space_size = &lo_space_size;
int global_handle_count;
@@ -327,8 +320,25 @@ bool RunExtraCode(Isolate* isolate, const char* utf8_source) {
}
namespace {
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
virtual void* Allocate(size_t length) {
void* data = AllocateUninitialized(length);
return data == NULL ? data : memset(data, 0, length);
}
virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
virtual void Free(void* data, size_t) { free(data); }
};
} // namespace
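The zero-filling allocator above is what snapshot creation uses; embedders now pass their own allocator through the CreateParams API that appears later in this diff. A rough sketch of the wiring, illustrative only and not part of the commit:

  ArrayBufferAllocator allocator;            // the class defined above
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = &allocator;
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  // ... use the isolate; the allocator must outlive it ...
  isolate->Dispose();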
StartupData V8::CreateSnapshotDataBlob(const char* custom_source) {
i::Isolate* internal_isolate = new i::Isolate(true);
ArrayBufferAllocator allocator;
internal_isolate->set_array_buffer_allocator(&allocator);
Isolate* isolate = reinterpret_cast<Isolate*>(internal_isolate);
StartupData result = {NULL, 0};
{
@@ -353,7 +363,7 @@ StartupData V8::CreateSnapshotDataBlob(const char* custom_source) {
{
HandleScope scope(isolate);
for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
internal_isolate->bootstrapper()->NativesSourceLookup(i);
internal_isolate->bootstrapper()->SourceLookup<i::Natives>(i);
}
}
// If we don't do this then we end up with a stray root pointing at the
@@ -452,6 +462,11 @@ ResourceConstraints::ResourceConstraints()
void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
uint64_t virtual_memory_limit,
uint32_t number_of_processors) {
ConfigureDefaults(physical_memory, virtual_memory_limit);
}
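The three-argument overload now forwards to a new two-argument one, since the processor count only fed the max_available_threads logic removed below. A typical call site, sketched with placeholder numbers:

  v8::ResourceConstraints constraints;
  constraints.ConfigureDefaults(2ull << 30,  // physical memory, in bytes
                                0);          // no virtual-memory limit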
void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
uint64_t virtual_memory_limit) {
#if V8_OS_ANDROID
// Android has higher physical memory requirements before raising the maximum
// heap size limits since it has no swap space.
@@ -482,8 +497,6 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
set_max_executable_size(i::Heap::kMaxExecutableSizeHugeMemoryDevice);
}
set_max_available_threads(i::Max(i::Min(number_of_processors, 4u), 1u));
if (virtual_memory_limit > 0 && i::kRequiresCodeRange) {
// Reserve no more than 1/8 of the memory for the code range, but at most
// kMaximalCodeRangeSize.
@@ -509,8 +522,6 @@ void SetResourceConstraints(i::Isolate* isolate,
uintptr_t limit = reinterpret_cast<uintptr_t>(constraints.stack_limit());
isolate->stack_guard()->SetStackLimit(limit);
}
isolate->set_max_available_threads(constraints.max_available_threads());
}
@@ -592,8 +603,8 @@ Local<Value> V8::GetEternal(Isolate* v8_isolate, int index) {
}
void V8::CheckIsJust(bool is_just) {
Utils::ApiCheck(is_just, "v8::FromJust", "Maybe value is Nothing.");
void V8::FromJustIsNothing() {
Utils::ApiCheck(false, "v8::FromJust", "Maybe value is Nothing.");
}
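The rename matches how the check is reached: Maybe<T>::FromJust() only calls out of line when the value is absent, so the happy path stays inline. On the embedder side the pattern is roughly as follows (assuming a Local<Message> message and Local<Context> context in scope):

  v8::Maybe<int> line = message->GetLineNumber(context);
  if (line.IsJust()) {
    int n = line.FromJust();  // would trip FromJustIsNothing() if empty
    printf("thrown from line %d\n", n);
  }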
@@ -1187,8 +1198,9 @@ void FunctionTemplate::RemovePrototype() {
// --- O b j e c t T e m p l a t e ---
Local<ObjectTemplate> ObjectTemplate::New(Isolate* isolate) {
return New(reinterpret_cast<i::Isolate*>(isolate), Local<FunctionTemplate>());
Local<ObjectTemplate> ObjectTemplate::New(
Isolate* isolate, v8::Handle<FunctionTemplate> constructor) {
return New(reinterpret_cast<i::Isolate*>(isolate), constructor);
}
@@ -1696,8 +1708,8 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
}
i::Handle<i::String> str = Utils::OpenHandle(*(source->source_string));
i::SharedFunctionInfo* raw_result = NULL;
{ i::HandleScope scope(isolate);
i::Handle<i::SharedFunctionInfo> result;
{
i::HistogramTimerScope total(isolate->counters()->compile_script(), true);
i::Handle<i::Object> name_obj;
i::Handle<i::Object> source_map_url;
@@ -1726,7 +1738,7 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
if (!source->source_map_url.IsEmpty()) {
source_map_url = Utils::OpenHandle(*(source->source_map_url));
}
i::Handle<i::SharedFunctionInfo> result = i::Compiler::CompileScript(
result = i::Compiler::CompileScript(
str, name_obj, line_offset, column_offset, is_embedder_debug_script,
is_shared_cross_origin, source_map_url, isolate->native_context(), NULL,
&script_data, options, i::NOT_NATIVES_CODE, is_module);
@@ -1739,7 +1751,6 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
script_data = NULL;
}
RETURN_ON_FAILED_EXECUTION(UnboundScript);
raw_result = *result;
if ((options == kProduceParserCache || options == kProduceCodeCache) &&
script_data != NULL) {
@@ -1753,7 +1764,6 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
}
delete script_data;
}
i::Handle<i::SharedFunctionInfo> result(raw_result, isolate);
RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
}
@@ -1952,59 +1962,53 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
const ScriptOrigin& origin) {
PREPARE_FOR_EXECUTION(context, "v8::ScriptCompiler::Compile()", Script);
i::StreamedSource* source = v8_source->impl();
i::SharedFunctionInfo* raw_result = nullptr;
{
i::HandleScope scope(isolate);
i::Handle<i::String> str = Utils::OpenHandle(*(full_source_string));
i::Handle<i::Script> script = isolate->factory()->NewScript(str);
if (!origin.ResourceName().IsEmpty()) {
script->set_name(*Utils::OpenHandle(*(origin.ResourceName())));
}
if (!origin.ResourceLineOffset().IsEmpty()) {
script->set_line_offset(i::Smi::FromInt(
static_cast<int>(origin.ResourceLineOffset()->Value())));
}
if (!origin.ResourceColumnOffset().IsEmpty()) {
script->set_column_offset(i::Smi::FromInt(
static_cast<int>(origin.ResourceColumnOffset()->Value())));
}
if (!origin.ResourceIsSharedCrossOrigin().IsEmpty()) {
script->set_is_shared_cross_origin(
origin.ResourceIsSharedCrossOrigin()->IsTrue());
}
if (!origin.ResourceIsEmbedderDebugScript().IsEmpty()) {
script->set_is_embedder_debug_script(
origin.ResourceIsEmbedderDebugScript()->IsTrue());
}
if (!origin.SourceMapUrl().IsEmpty()) {
script->set_source_mapping_url(
*Utils::OpenHandle(*(origin.SourceMapUrl())));
}
source->info->set_script(script);
source->info->set_context(isolate->native_context());
i::Handle<i::String> str = Utils::OpenHandle(*(full_source_string));
i::Handle<i::Script> script = isolate->factory()->NewScript(str);
if (!origin.ResourceName().IsEmpty()) {
script->set_name(*Utils::OpenHandle(*(origin.ResourceName())));
}
if (!origin.ResourceLineOffset().IsEmpty()) {
script->set_line_offset(i::Smi::FromInt(
static_cast<int>(origin.ResourceLineOffset()->Value())));
}
if (!origin.ResourceColumnOffset().IsEmpty()) {
script->set_column_offset(i::Smi::FromInt(
static_cast<int>(origin.ResourceColumnOffset()->Value())));
}
if (!origin.ResourceIsSharedCrossOrigin().IsEmpty()) {
script->set_is_shared_cross_origin(
origin.ResourceIsSharedCrossOrigin()->IsTrue());
}
if (!origin.ResourceIsEmbedderDebugScript().IsEmpty()) {
script->set_is_embedder_debug_script(
origin.ResourceIsEmbedderDebugScript()->IsTrue());
}
if (!origin.SourceMapUrl().IsEmpty()) {
script->set_source_mapping_url(
*Utils::OpenHandle(*(origin.SourceMapUrl())));
}
// Do the parsing tasks which need to be done on the main thread. This will
// also handle parse errors.
source->parser->Internalize(isolate, script,
source->info->function() == nullptr);
source->parser->HandleSourceURLComments(isolate, script);
source->info->set_script(script);
source->info->set_context(isolate->native_context());
i::Handle<i::SharedFunctionInfo> result;
if (source->info->function() != nullptr) {
// Parsing has succeeded.
result = i::Compiler::CompileStreamedScript(script, source->info.get(),
str->length());
}
has_pending_exception = result.is_null();
if (has_pending_exception) isolate->ReportPendingMessages();
RETURN_ON_FAILED_EXECUTION(Script);
// Do the parsing tasks which need to be done on the main thread. This will
// also handle parse errors.
source->parser->Internalize(isolate, script,
source->info->function() == nullptr);
source->parser->HandleSourceURLComments(isolate, script);
source->info->clear_script(); // because script goes out of scope.
raw_result = *result; // TODO(titzer): use CloseAndEscape?
i::Handle<i::SharedFunctionInfo> result;
if (source->info->function() != nullptr) {
// Parsing has succeeded.
result = i::Compiler::CompileStreamedScript(script, source->info.get(),
str->length());
}
has_pending_exception = result.is_null();
if (has_pending_exception) isolate->ReportPendingMessages();
RETURN_ON_FAILED_EXECUTION(Script);
source->info->clear_script(); // because script goes out of scope.
i::Handle<i::SharedFunctionInfo> result(raw_result, isolate);
Local<UnboundScript> generic = ToApiHandle<UnboundScript>(result);
if (generic.IsEmpty()) return Local<Script>();
Local<Script> bound = generic->BindToCurrentContext();
@@ -2285,8 +2289,8 @@ Maybe<int> Message::GetLineNumber(Local<Context> context) const {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Message::GetLineNumber()", int);
i::Handle<i::Object> result;
has_pending_exception =
!CallV8HeapFunction(isolate, "GetLineNumber", Utils::OpenHandle(this))
.ToHandle(&result);
!CallV8HeapFunction(isolate, "$messageGetLineNumber",
Utils::OpenHandle(this)).ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int);
return Just(static_cast<int>(result->Number()));
}
@@ -2315,8 +2319,9 @@ Maybe<int> Message::GetStartColumn(Local<Context> context) const {
int);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> start_col_obj;
has_pending_exception = !CallV8HeapFunction(isolate, "GetPositionInLine",
self).ToHandle(&start_col_obj);
has_pending_exception =
!CallV8HeapFunction(isolate, "$messageGetPositionInLine", self)
.ToHandle(&start_col_obj);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int);
return Just(static_cast<int>(start_col_obj->Number()));
}
@@ -2333,8 +2338,9 @@ Maybe<int> Message::GetEndColumn(Local<Context> context) const {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Message::GetEndColumn()", int);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> start_col_obj;
has_pending_exception = !CallV8HeapFunction(isolate, "GetPositionInLine",
self).ToHandle(&start_col_obj);
has_pending_exception =
!CallV8HeapFunction(isolate, "$messageGetPositionInLine", self)
.ToHandle(&start_col_obj);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int);
int start = self->start_position();
int end = self->end_position();
@@ -2363,8 +2369,8 @@ MaybeLocal<String> Message::GetSourceLine(Local<Context> context) const {
PREPARE_FOR_EXECUTION(context, "v8::Message::GetSourceLine()", String);
i::Handle<i::Object> result;
has_pending_exception =
!CallV8HeapFunction(isolate, "GetSourceLine", Utils::OpenHandle(this))
.ToHandle(&result);
!CallV8HeapFunction(isolate, "$messageGetSourceLine",
Utils::OpenHandle(this)).ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(String);
Local<String> str;
if (result->IsString()) {
@@ -2688,11 +2694,11 @@ bool Value::IsTypedArray() const {
}
#define VALUE_IS_TYPED_ARRAY(Type, typeName, TYPE, ctype, size) \
bool Value::Is##Type##Array() const { \
i::Handle<i::Object> obj = Utils::OpenHandle(this); \
return obj->IsJSTypedArray() && \
i::JSTypedArray::cast(*obj)->type() == kExternal##Type##Array; \
#define VALUE_IS_TYPED_ARRAY(Type, typeName, TYPE, ctype, size) \
bool Value::Is##Type##Array() const { \
i::Handle<i::Object> obj = Utils::OpenHandle(this); \
return obj->IsJSTypedArray() && \
i::JSTypedArray::cast(*obj)->type() == i::kExternal##Type##Array; \
}
TYPED_ARRAYS(VALUE_IS_TYPED_ARRAY)
@@ -3106,11 +3112,10 @@ void v8::TypedArray::CheckCast(Value* that) {
#define CHECK_TYPED_ARRAY_CAST(Type, typeName, TYPE, ctype, size) \
void v8::Type##Array::CheckCast(Value* that) { \
i::Handle<i::Object> obj = Utils::OpenHandle(that); \
Utils::ApiCheck(obj->IsJSTypedArray() && \
i::JSTypedArray::cast(*obj)->type() == \
kExternal##Type##Array, \
"v8::" #Type "Array::Cast()", \
"Could not convert to " #Type "Array"); \
Utils::ApiCheck( \
obj->IsJSTypedArray() && \
i::JSTypedArray::cast(*obj)->type() == i::kExternal##Type##Array, \
"v8::" #Type "Array::Cast()", "Could not convert to " #Type "Array"); \
}
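Apart from layout, the only change is the i:: prefix on kExternal##Type##Array; the macro still expands to one CheckCast per typed-array type, guarding casts such as this hypothetical helper:

  bool IsNonEmptyF64(v8::Local<v8::Value> value) {
    if (!value->IsFloat64Array()) return false;  // Cast() would abort otherwise
    v8::Float64Array* arr = v8::Float64Array::Cast(*value);
    return arr->Length() > 0;
  }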
@@ -3585,7 +3590,7 @@ MaybeLocal<Value> v8::Object::GetOwnPropertyDescriptor(Local<Context> context,
i::Handle<i::Object> args[] = { obj, key_name };
i::Handle<i::Object> result;
has_pending_exception =
!CallV8HeapFunction(isolate, "ObjectGetOwnPropertyDescriptor",
!CallV8HeapFunction(isolate, "$objectGetOwnPropertyDescriptor",
isolate->factory()->undefined_value(),
arraysize(args), args).ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Value);
@@ -4212,147 +4217,6 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
}
namespace {
static i::ElementsKind GetElementsKindFromExternalArrayType(
ExternalArrayType array_type) {
switch (array_type) {
#define ARRAY_TYPE_TO_ELEMENTS_KIND(Type, type, TYPE, ctype, size) \
case kExternal##Type##Array: \
return i::EXTERNAL_##TYPE##_ELEMENTS;
TYPED_ARRAYS(ARRAY_TYPE_TO_ELEMENTS_KIND)
#undef ARRAY_TYPE_TO_ELEMENTS_KIND
}
UNREACHABLE();
return i::DICTIONARY_ELEMENTS;
}
void PrepareExternalArrayElements(i::Handle<i::JSObject> object,
void* data,
ExternalArrayType array_type,
int length) {
i::Isolate* isolate = object->GetIsolate();
i::Handle<i::ExternalArray> array =
isolate->factory()->NewExternalArray(length, array_type, data);
i::Handle<i::Map> external_array_map =
i::JSObject::GetElementsTransitionMap(
object,
GetElementsKindFromExternalArrayType(array_type));
i::JSObject::SetMapAndElements(object, external_array_map, array);
}
} // namespace
void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
auto self = Utils::OpenHandle(this);
auto isolate = self->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
if (!Utils::ApiCheck(length >= 0 &&
length <= i::ExternalUint8ClampedArray::kMaxLength,
"v8::Object::SetIndexedPropertiesToPixelData()",
"length exceeds max acceptable value")) {
return;
}
if (!Utils::ApiCheck(!self->IsJSArray(),
"v8::Object::SetIndexedPropertiesToPixelData()",
"JSArray is not supported")) {
return;
}
PrepareExternalArrayElements(self, data, kExternalUint8ClampedArray, length);
}
bool v8::Object::HasIndexedPropertiesInPixelData() {
auto self = Utils::OpenHandle(this);
return self->HasExternalUint8ClampedElements();
}
uint8_t* v8::Object::GetIndexedPropertiesPixelData() {
auto self = Utils::OpenHandle(this);
if (self->HasExternalUint8ClampedElements()) {
return i::ExternalUint8ClampedArray::cast(self->elements())->
external_uint8_clamped_pointer();
}
return nullptr;
}
int v8::Object::GetIndexedPropertiesPixelDataLength() {
auto self = Utils::OpenHandle(this);
if (self->HasExternalUint8ClampedElements()) {
return i::ExternalUint8ClampedArray::cast(self->elements())->length();
}
return -1;
}
void v8::Object::SetIndexedPropertiesToExternalArrayData(
void* data,
ExternalArrayType array_type,
int length) {
auto self = Utils::OpenHandle(this);
auto isolate = self->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
if (!Utils::ApiCheck(length >= 0 && length <= i::ExternalArray::kMaxLength,
"v8::Object::SetIndexedPropertiesToExternalArrayData()",
"length exceeds max acceptable value")) {
return;
}
if (!Utils::ApiCheck(!self->IsJSArray(),
"v8::Object::SetIndexedPropertiesToExternalArrayData()",
"JSArray is not supported")) {
return;
}
PrepareExternalArrayElements(self, data, array_type, length);
}
bool v8::Object::HasIndexedPropertiesInExternalArrayData() {
auto self = Utils::OpenHandle(this);
return self->HasExternalArrayElements();
}
void* v8::Object::GetIndexedPropertiesExternalArrayData() {
auto self = Utils::OpenHandle(this);
if (self->HasExternalArrayElements()) {
return i::ExternalArray::cast(self->elements())->external_pointer();
}
return nullptr;
}
ExternalArrayType v8::Object::GetIndexedPropertiesExternalArrayDataType() {
auto self = Utils::OpenHandle(this);
switch (self->elements()->map()->instance_type()) {
#define INSTANCE_TYPE_TO_ARRAY_TYPE(Type, type, TYPE, ctype, size) \
case i::EXTERNAL_##TYPE##_ARRAY_TYPE: \
return kExternal##Type##Array;
TYPED_ARRAYS(INSTANCE_TYPE_TO_ARRAY_TYPE)
#undef INSTANCE_TYPE_TO_ARRAY_TYPE
default:
return static_cast<ExternalArrayType>(-1);
}
}
int v8::Object::GetIndexedPropertiesExternalArrayDataLength() {
auto self = Utils::OpenHandle(this);
if (self->HasExternalArrayElements()) {
return i::ExternalArray::cast(self->elements())->length();
}
return -1;
}
bool v8::Object::IsCallable() {
auto self = Utils::OpenHandle(this);
return self->IsCallable();
@@ -5477,6 +5341,13 @@ HeapStatistics::HeapStatistics(): total_heap_size_(0),
heap_size_limit_(0) { }
HeapSpaceStatistics::HeapSpaceStatistics(): space_name_(0),
space_size_(0),
space_used_size_(0),
space_available_size_(0),
physical_space_size_(0) { }
bool v8::V8::InitializeICU(const char* icu_data_file) {
return i::InitializeICU(icu_data_file);
}
@@ -6275,7 +6146,7 @@ Maybe<bool> Promise::Resolver::Resolve(Local<Context> context,
void Promise::Resolver::Resolve(Handle<Value> value) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
Resolve(context, value);
USE(Resolve(context, value));
}
@@ -6297,7 +6168,7 @@ Maybe<bool> Promise::Resolver::Reject(Local<Context> context,
void Promise::Resolver::Reject(Handle<Value> value) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
Reject(context, value);
USE(Reject(context, value));
}
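Both deprecated overloads forward to the Maybe-returning versions and discard the result deliberately via USE(). New callers would check it instead, roughly:

  v8::Maybe<bool> done = resolver->Resolve(context, value);
  if (done.IsNothing()) {
    // Resolution threw; an exception is pending on the isolate.
  }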
@@ -6383,9 +6254,13 @@ bool v8::ArrayBuffer::IsNeuterable() const {
v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
Utils::ApiCheck(!self->is_external(), "v8::ArrayBuffer::Externalize",
"ArrayBuffer already externalized");
self->set_is_external(true);
isolate->heap()->UnregisterArrayBuffer(isolate->heap()->InNewSpace(*self),
self->backing_store());
return GetContents();
}
@@ -6462,31 +6337,21 @@ Local<ArrayBuffer> v8::ArrayBufferView::Buffer() {
size_t v8::ArrayBufferView::CopyContents(void* dest, size_t byte_length) {
i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
size_t byte_offset = i::NumberToSize(isolate, obj->byte_offset());
i::Handle<i::JSArrayBufferView> self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
size_t byte_offset = i::NumberToSize(isolate, self->byte_offset());
size_t bytes_to_copy =
i::Min(byte_length, i::NumberToSize(isolate, obj->byte_length()));
i::Min(byte_length, i::NumberToSize(isolate, self->byte_length()));
if (bytes_to_copy) {
i::DisallowHeapAllocation no_gc;
const char* source = nullptr;
if (obj->IsJSDataView()) {
i::Handle<i::JSDataView> data_view(i::JSDataView::cast(*obj));
i::Handle<i::JSArrayBuffer> buffer(
i::JSArrayBuffer::cast(data_view->buffer()));
source = reinterpret_cast<char*>(buffer->backing_store());
} else {
DCHECK(obj->IsJSTypedArray());
i::Handle<i::JSTypedArray> typed_array(i::JSTypedArray::cast(*obj));
if (typed_array->buffer()->IsSmi()) {
i::Handle<i::FixedTypedArrayBase> fixed_array(
i::FixedTypedArrayBase::cast(typed_array->elements()));
source = reinterpret_cast<char*>(fixed_array->DataPtr());
} else {
i::Handle<i::JSArrayBuffer> buffer(
i::JSArrayBuffer::cast(typed_array->buffer()));
source = reinterpret_cast<char*>(buffer->backing_store());
}
i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(self->buffer()));
const char* source = reinterpret_cast<char*>(buffer->backing_store());
if (source == nullptr) {
DCHECK(self->IsJSTypedArray());
i::Handle<i::JSTypedArray> typed_array(i::JSTypedArray::cast(*self));
i::Handle<i::FixedTypedArrayBase> fixed_array(
i::FixedTypedArrayBase::cast(typed_array->elements()));
source = reinterpret_cast<char*>(fixed_array->DataPtr());
}
memcpy(dest, source + byte_offset, bytes_to_copy);
}
@@ -6495,11 +6360,9 @@ size_t v8::ArrayBufferView::CopyContents(void* dest, size_t byte_length) {
bool v8::ArrayBufferView::HasBuffer() const {
i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
if (obj->IsJSDataView()) return true;
DCHECK(obj->IsJSTypedArray());
i::Handle<i::JSTypedArray> typed_array(i::JSTypedArray::cast(*obj));
return !typed_array->buffer()->IsSmi();
i::Handle<i::JSArrayBufferView> self = Utils::OpenHandle(this);
i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(self->buffer()));
return buffer->backing_store() != nullptr;
}
@@ -6536,7 +6399,7 @@ size_t v8::TypedArray::Length() {
} \
i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer); \
i::Handle<i::JSTypedArray> obj = isolate->factory()->NewJSTypedArray( \
v8::kExternal##Type##Array, buffer, byte_offset, length); \
i::kExternal##Type##Array, buffer, byte_offset, length); \
return Utils::ToLocal##Type##Array(obj); \
}
@@ -6888,9 +6751,20 @@ Isolate* Isolate::GetCurrent() {
}
Isolate* Isolate::New() {
Isolate::CreateParams create_params;
return New(create_params);
}
Isolate* Isolate::New(const Isolate::CreateParams& params) {
i::Isolate* isolate = new i::Isolate(false);
Isolate* v8_isolate = reinterpret_cast<Isolate*>(isolate);
if (params.array_buffer_allocator != NULL) {
isolate->set_array_buffer_allocator(params.array_buffer_allocator);
} else {
isolate->set_array_buffer_allocator(i::V8::ArrayBufferAllocator());
}
if (params.snapshot_blob != NULL) {
isolate->set_snapshot_blob(params.snapshot_blob);
} else {
@@ -7017,11 +6891,37 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
heap_statistics->total_heap_size_executable_ =
heap->CommittedMemoryExecutable();
heap_statistics->total_physical_size_ = heap->CommittedPhysicalMemory();
heap_statistics->total_available_size_ = heap->Available();
heap_statistics->used_heap_size_ = heap->SizeOfObjects();
heap_statistics->heap_size_limit_ = heap->MaxReserved();
}
size_t Isolate::NumberOfHeapSpaces() {
return i::LAST_SPACE - i::FIRST_SPACE + 1;
}
bool Isolate::GetHeapSpaceStatistics(HeapSpaceStatistics* space_statistics,
size_t index) {
if (!space_statistics)
return false;
if (index > i::LAST_SPACE || index < i::FIRST_SPACE)
return false;
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
i::Heap* heap = isolate->heap();
i::Space* space = heap->space(static_cast<int>(index));
space_statistics->space_name_ = heap->GetSpaceName(static_cast<int>(index));
space_statistics->space_size_ = space->CommittedMemory();
space_statistics->space_used_size_ = space->SizeOfObjects();
space_statistics->space_available_size_ = space->Available();
space_statistics->physical_space_size_ = space->CommittedPhysicalMemory();
return true;
}
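NumberOfHeapSpaces() and GetHeapSpaceStatistics() together let an embedder walk every space; the accessors correspond one-to-one to the fields initialized above. A minimal consumer, illustrative:

  v8::HeapSpaceStatistics stats;
  for (size_t i = 0; i < isolate->NumberOfHeapSpaces(); i++) {
    if (isolate->GetHeapSpaceStatistics(&stats, i)) {
      printf("%s: %zu bytes used of %zu committed\n", stats.space_name(),
             stats.space_used_size(), stats.space_size());
    }
  }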
void Isolate::GetStackSample(const RegisterState& state, void** frames,
size_t frames_limit, SampleInfo* sample_info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -7408,12 +7308,6 @@ bool Debug::CheckDebugBreak(Isolate* isolate) {
}
void Debug::DebugBreakForCommand(Isolate* isolate, ClientData* data) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
internal_isolate->debug()->EnqueueDebugCommand(data);
}
void Debug::SetMessageHandler(v8::Debug::MessageHandler handler) {
i::Isolate* isolate = i::Isolate::Current();
ENTER_V8(isolate);
@@ -7590,6 +7484,12 @@ const CpuProfileNode* CpuProfileNode::GetChild(int index) const {
}
const std::vector<CpuProfileDeoptInfo>& CpuProfileNode::GetDeoptInfos() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
return node->deopt_infos();
}
void CpuProfile::Delete() {
i::Isolate* isolate = i::Isolate::Current();
i::CpuProfiler* profiler = isolate->cpu_profiler();

10
deps/v8/src/api.h

@@ -339,11 +339,11 @@ inline bool ToLocal(v8::internal::MaybeHandle<v8::internal::Object> maybe,
}
#define MAKE_TO_LOCAL_TYPED_ARRAY(Type, typeName, TYPE, ctype, size) \
Local<v8::Type##Array> Utils::ToLocal##Type##Array( \
v8::internal::Handle<v8::internal::JSTypedArray> obj) { \
DCHECK(obj->type() == kExternal##Type##Array); \
return Convert<v8::internal::JSTypedArray, v8::Type##Array>(obj); \
#define MAKE_TO_LOCAL_TYPED_ARRAY(Type, typeName, TYPE, ctype, size) \
Local<v8::Type##Array> Utils::ToLocal##Type##Array( \
v8::internal::Handle<v8::internal::JSTypedArray> obj) { \
DCHECK(obj->type() == v8::internal::kExternal##Type##Array); \
return Convert<v8::internal::JSTypedArray, v8::Type##Array>(obj); \
}

303
deps/v8/src/arm/assembler-arm.cc

@@ -228,7 +228,7 @@ const char* DwVfpRegister::AllocationIndexToString(int index) {
// Implementation of RelocInfo
// static
const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
const int RelocInfo::kApplyMask = 0;
bool RelocInfo::IsCodedSpecially() {
@@ -776,20 +776,14 @@ int Assembler::target_at(int pos) {
// Emitted link to a label, not part of a branch.
return instr;
}
if ((instr & 7 * B25) == 5 * B25) {
int imm26 = ((instr & kImm24Mask) << 8) >> 6;
// b, bl, or blx imm24
if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
((instr & B24) != 0)) {
// blx uses bit 24 to encode bit 2 of imm26
imm26 += 2;
}
return pos + kPcLoadDelta + imm26;
DCHECK_EQ(5 * B25, instr & 7 * B25); // b, bl, or blx imm24
int imm26 = ((instr & kImm24Mask) << 8) >> 6;
if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
((instr & B24) != 0)) {
// blx uses bit 24 to encode bit 2 of imm26
imm26 += 2;
}
// Internal reference to the label.
DCHECK_EQ(7 * B25 | 1 * B0, instr & (7 * B25 | 1 * B0));
int imm26 = (((instr >> 1) & kImm24Mask) << 8) >> 6;
return pos + imm26;
return pos + kPcLoadDelta + imm26;
}
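The decode ((instr & kImm24Mask) << 8) >> 6 sign-extends the 24-bit immediate and scales it by four in one step: the left shift by 8 parks the field's sign bit at bit 31, and the arithmetic right shift by 6 comes back two bits short of where it started. Worked example with 32-bit ints, using the same shift idiom as the source:

  int32_t field = 0xFFFFFE;           // 24-bit immediate encoding -2
  int32_t imm26 = (field << 8) >> 6;  // 0xFFFFFE00 >> 6 == -8, i.e. -2 * 4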
@@ -863,25 +857,19 @@ void Assembler::target_at_put(int pos, int target_pos) {
}
return;
}
if ((instr & 7 * B25) == 5 * B25) {
// b, bl, or blx imm24
int imm26 = target_pos - (pos + kPcLoadDelta);
if (Instruction::ConditionField(instr) == kSpecialCondition) {
// blx uses bit 24 to encode bit 2 of imm26
DCHECK((imm26 & 1) == 0);
instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1) * B24;
} else {
DCHECK((imm26 & 3) == 0);
instr &= ~kImm24Mask;
}
int imm24 = imm26 >> 2;
DCHECK(is_int24(imm24));
instr_at_put(pos, instr | (imm24 & kImm24Mask));
return;
int imm26 = target_pos - (pos + kPcLoadDelta);
DCHECK_EQ(5 * B25, instr & 7 * B25); // b, bl, or blx imm24
if (Instruction::ConditionField(instr) == kSpecialCondition) {
// blx uses bit 24 to encode bit 2 of imm26
DCHECK_EQ(0, imm26 & 1);
instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1) * B24;
} else {
DCHECK_EQ(0, imm26 & 3);
instr &= ~kImm24Mask;
}
// Patch internal reference to label.
DCHECK_EQ(7 * B25 | 1 * B0, instr & (7 * B25 | 1 * B0));
instr_at_put(pos, reinterpret_cast<Instr>(buffer_ + target_pos));
int imm24 = imm26 >> 2;
DCHECK(is_int24(imm24));
instr_at_put(pos, instr | (imm24 & kImm24Mask));
}
@@ -2221,6 +2209,7 @@ void Assembler::vldr(const DwVfpRegister dst,
// Vd(15-12) | 1011(11-8) | offset
int u = 1;
if (offset < 0) {
CHECK(offset != kMinInt);
offset = -offset;
u = 0;
}
@@ -2317,6 +2306,7 @@ void Assembler::vstr(const DwVfpRegister src,
// Vd(15-12) | 1011(11-8) | (offset/4)
int u = 1;
if (offset < 0) {
CHECK(offset != kMinInt);
offset = -offset;
u = 0;
}
@@ -2365,6 +2355,7 @@ void Assembler::vstr(const SwVfpRegister src,
// Vdst(15-12) | 1010(11-8) | (offset/4)
int u = 1;
if (offset < 0) {
CHECK(offset != kMinInt);
offset = -offset;
u = 0;
}
@@ -2945,6 +2936,21 @@ void Assembler::vneg(const DwVfpRegister dst,
}
void Assembler::vneg(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8-968.
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
// 101(11-9) | sz=0(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | B16 | vd * B12 | 0x5 * B9 |
B6 | m * B5 | vm);
}
void Assembler::vabs(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
@@ -2960,6 +2966,20 @@ void Assembler::vabs(const DwVfpRegister dst,
}
void Assembler::vabs(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8-524.
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
// 101(11-9) | sz=0(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | B7 | B6 |
m * B5 | vm);
}
void Assembler::vadd(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
@@ -2980,6 +3000,24 @@ void Assembler::vadd(const DwVfpRegister dst,
}
void Assembler::vadd(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond) {
// Sd = vadd(Sn, Sm) single precision floating point addition.
// Sd = D:Vd; Sm=M:Vm; Sn=N:Vm.
// Instruction details available in ARM DDI 0406C.b, A8-830.
// cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
emit(cond | 0x1C * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 |
0x5 * B9 | n * B7 | m * B5 | vm);
}
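Every new single-precision overload follows the same recipe: split_code() separates each S register into its low four bits (Vd/Vn/Vm) and top bit (D/N/M), and the pieces are OR-ed into the fixed opcode with sz=0 selecting single precision. Call sites mirror the double-precision forms; a sketch assuming an Assembler named assm:

  assm.vadd(s0, s1, s2);      // s0 = s1 + s2, unconditional
  assm.vsub(s3, s4, s5, ne);  // predicated single-precision subtract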
void Assembler::vsub(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
@@ -3000,6 +3038,24 @@ void Assembler::vsub(const DwVfpRegister dst,
}
void Assembler::vsub(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond) {
// Sd = vsub(Sn, Sm) single precision floating point subtraction.
// Sd = D:Vd; Sm=M:Vm; Sn=N:Vm.
// Instruction details available in ARM DDI 0406C.b, A8-1086.
// cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
emit(cond | 0x1C * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 |
0x5 * B9 | n * B7 | B6 | m * B5 | vm);
}
void Assembler::vmul(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
@@ -3020,6 +3076,24 @@ void Assembler::vmul(const DwVfpRegister dst,
}
void Assembler::vmul(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond) {
// Sd = vmul(Sn, Sm) single precision floating point multiplication.
// Sd = D:Vd; Sm=M:Vm; Sn=N:Vm.
// Instruction details available in ARM DDI 0406C.b, A8-960.
// cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
emit(cond | 0x1C * B23 | d * B22 | 0x2 * B20 | vn * B16 | vd * B12 |
0x5 * B9 | n * B7 | m * B5 | vm);
}
void Assembler::vmla(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
@@ -3038,6 +3112,22 @@ void Assembler::vmla(const DwVfpRegister dst,
}
void Assembler::vmla(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8-932.
// cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
emit(cond | 0x1C * B23 | d * B22 | vn * B16 | vd * B12 | 0x5 * B9 | n * B7 |
m * B5 | vm);
}
void Assembler::vmls(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
@@ -3056,6 +3146,22 @@ void Assembler::vmls(const DwVfpRegister dst,
}
void Assembler::vmls(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8-932.
// cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
emit(cond | 0x1C * B23 | d * B22 | vn * B16 | vd * B12 | 0x5 * B9 | n * B7 |
B6 | m * B5 | vm);
}
void Assembler::vdiv(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
@@ -3076,6 +3182,24 @@ void Assembler::vdiv(const DwVfpRegister dst,
}
void Assembler::vdiv(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond) {
// Sd = vdiv(Sn, Sm) single precision floating point division.
// Sd = D:Vd; Sm=M:Vm; Sn=N:Vm.
// Instruction details available in ARM DDI 0406C.b, A8-882.
// cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
emit(cond | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 | 0x5 * B9 | n * B7 |
m * B5 | vm);
}
void Assembler::vcmp(const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond) {
@@ -3092,6 +3216,21 @@ void Assembler::vcmp(const DwVfpRegister src1,
}
void Assembler::vcmp(const SwVfpRegister src1, const SwVfpRegister src2,
const Condition cond) {
// vcmp(Sd, Sm) single precision floating point comparison.
// Instruction details available in ARM DDI 0406C.b, A8-864.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
// Vd(15-12) | 101(11-9) | sz=0(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
int vd, d;
src1.split_code(&vd, &d);
int vm, m;
src2.split_code(&vm, &m);
emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x4 * B16 | vd * B12 |
0x5 * B9 | B6 | m * B5 | vm);
}
void Assembler::vcmp(const DwVfpRegister src1,
const double src2,
const Condition cond) {
@@ -3106,21 +3245,17 @@ void Assembler::vcmp(const DwVfpRegister src1,
}
void Assembler::vmsr(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
emit(cond | 0xE*B24 | 0xE*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
void Assembler::vmrs(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
emit(cond | 0xE*B24 | 0xF*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
void Assembler::vcmp(const SwVfpRegister src1, const float src2,
const Condition cond) {
// vcmp(Sd, #0.0) single precision floating point comparison.
// Instruction details available in ARM DDI 0406C.b, A8-864.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
// Vd(15-12) | 101(11-9) | sz=0(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
DCHECK(src2 == 0.0);
int vd, d;
src1.split_code(&vd, &d);
emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x5 * B16 | vd * B12 |
0x5 * B9 | B6);
}
@@ -3139,6 +3274,36 @@ void Assembler::vsqrt(const DwVfpRegister dst,
}
void Assembler::vsqrt(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8-1058.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
// Vd(15-12) | 101(11-9) | sz=0(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | B16 | vd * B12 | 0x5 * B9 |
0x3 * B6 | m * B5 | vm);
}
void Assembler::vmsr(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
emit(cond | 0xE * B24 | 0xE * B20 | B16 | dst.code() * B12 | 0xA * B8 | B4);
}
void Assembler::vmrs(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
emit(cond | 0xE * B24 | 0xF * B20 | B16 | dst.code() * B12 | 0xA * B8 | B4);
}
void Assembler::vrinta(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
@@ -3387,16 +3552,9 @@ void Assembler::GrowBuffer() {
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
// Relocate internal references.
for (RelocIterator it(desc); !it.done(); it.next()) {
if (it.rinfo()->rmode() == RelocInfo::INTERNAL_REFERENCE) {
// Don't patch unbound internal references (bit 0 set); those are still
// hooked up in the Label chain and will be automatically patched once
// the label is bound.
int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
if ((*p & 1 * B0) == 0) *p += pc_delta;
}
}
// None of our relocation types are pc relative pointing outside the code
// buffer nor pc absolute pointing inside the code buffer, so there is no need
// to relocate any emitted relocation entries.
// Relocate pending relocation entries.
for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
@@ -3440,37 +3598,6 @@ void Assembler::dd(uint32_t data) {
}
void Assembler::dd(Label* label) {
CheckBuffer();
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
if (label->is_bound()) {
uint32_t data = reinterpret_cast<uint32_t>(buffer_ + label->pos());
DCHECK_EQ(0u, data & 1 * B0);
*reinterpret_cast<uint32_t*>(pc_) = data;
pc_ += sizeof(uint32_t);
} else {
int target_pos;
if (label->is_linked()) {
// Point to previous instruction that uses the link.
target_pos = label->pos();
} else {
// First entry of the link chain points to itself.
target_pos = pc_offset();
}
label->link_to(pc_offset());
// Encode internal reference to unbound label. We set the least significant
// bit to distinguish unbound internal references in GrowBuffer() below.
int imm26 = target_pos - pc_offset();
DCHECK_EQ(0, imm26 & 3);
int imm24 = imm26 >> 2;
DCHECK(is_int24(imm24));
// We use bit pattern 0000111<imm24>1 because that doesn't match any branch
// or load that would also appear on the label chain.
emit(7 * B25 | ((imm24 & kImm24Mask) << 1) | 1 * B0);
}
}
void Assembler::emit_code_stub_address(Code* stub) {
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) =

32
deps/v8/src/arm/assembler-arm.h

@@ -162,9 +162,9 @@ const Register r3 = { kRegister_r3_Code };
const Register r4 = { kRegister_r4_Code };
const Register r5 = { kRegister_r5_Code };
const Register r6 = { kRegister_r6_Code };
// Used as constant pool pointer register if FLAG_enable_ool_constant_pool.
const Register r7 = { kRegister_r7_Code };
// Used as context register.
const Register r7 = {kRegister_r7_Code};
// Used as constant pool pointer register if FLAG_enable_ool_constant_pool.
const Register r8 = { kRegister_r8_Code };
// Used as lithium codegen scratch register.
const Register r9 = { kRegister_r9_Code };
@@ -1251,49 +1251,70 @@ class Assembler : public AssemblerBase {
int fraction_bits,
const Condition cond = al);
void vmrs(const Register dst, const Condition cond = al);
void vmsr(const Register dst, const Condition cond = al);
void vneg(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
void vneg(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond = al);
void vabs(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
void vabs(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond = al);
void vadd(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void vadd(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond = al);
void vsub(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void vsub(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond = al);
void vmul(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void vmul(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond = al);
void vmla(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void vmla(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond = al);
void vmls(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void vmls(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond = al);
void vdiv(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void vdiv(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond = al);
void vcmp(const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void vcmp(const SwVfpRegister src1, const SwVfpRegister src2,
const Condition cond = al);
void vcmp(const DwVfpRegister src1,
const double src2,
const Condition cond = al);
void vmrs(const Register dst,
const Condition cond = al);
void vmsr(const Register dst,
void vcmp(const SwVfpRegister src1, const float src2,
const Condition cond = al);
void vsqrt(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
void vsqrt(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond = al);
// ARMv8 rounding instructions.
void vrinta(const DwVfpRegister dst, const DwVfpRegister src);
@@ -1434,7 +1455,6 @@ class Assembler : public AssemblerBase {
// are not emitted as part of the tables generated.
void db(uint8_t data);
void dd(uint32_t data);
void dd(Label* label);
// Emits the address of the code stub's first instruction.
void emit_code_stub_address(Code* stub);

105
deps/v8/src/arm/builtins-arm.cc

@@ -830,6 +830,42 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
}
enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
// Clobbers r2; preserves all other registers.
static void Generate_CheckStackOverflow(MacroAssembler* masm,
const int calleeOffset, Register argc,
IsTagged argc_is_tagged) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
__ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
// Make r2 the space we have left. The stack might already be overflowed
// here which will cause r2 to become negative.
__ sub(r2, sp, r2);
// Check if the arguments will overflow the stack.
if (argc_is_tagged == kArgcIsSmiTagged) {
__ cmp(r2, Operand::PointerOffsetFromSmiKey(argc));
} else {
DCHECK(argc_is_tagged == kArgcIsUntaggedInt);
__ cmp(r2, Operand(argc, LSL, kPointerSizeLog2));
}
__ b(gt, &okay); // Signed comparison.
// Out of stack space.
__ ldr(r1, MemOperand(fp, calleeOffset));
if (argc_is_tagged == kArgcIsUntaggedInt) {
__ SmiTag(argc);
}
__ Push(r1, argc);
__ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
__ bind(&okay);
}
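In pseudo-C the emitted sequence computes the headroom below the real stack limit and compares it, signed, against the argument area; signed matters because sp can already be below the limit. Hypothetical names, sketch only:

  intptr_t headroom = sp - real_stack_limit;        // may already be negative
  if (headroom <= argc * (intptr_t)kPointerSize) {  // arguments would not fit
    // push callee and smi-tagged argc, invoke Builtins::STACK_OVERFLOW
  }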
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from Generate_JS_Entry
@@ -857,6 +893,14 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ push(r1);
__ push(r2);
// Check if we have enough stack space to push all arguments.
// The function is the first thing that was pushed above after entering
// the internal frame.
const int kFunctionOffset =
InternalFrameConstants::kCodeOffset - kPointerSize;
// Clobbers r2.
Generate_CheckStackOverflow(masm, kFunctionOffset, r3, kArgcIsUntaggedInt);
// Copy arguments to the stack in a loop.
// r1: function
// r3: argc
@@ -1018,6 +1062,11 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
}
void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
SaveFPRegsMode save_doubles) {
{
@@ -1336,61 +1385,41 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
static void Generate_CheckStackOverflow(MacroAssembler* masm,
const int calleeOffset) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
__ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
// Make r2 the space we have left. The stack might already be overflowed
// here which will cause r2 to become negative.
__ sub(r2, sp, r2);
// Check if the arguments will overflow the stack.
__ cmp(r2, Operand::PointerOffsetFromSmiKey(r0));
__ b(gt, &okay); // Signed comparison.
// Out of stack space.
__ ldr(r1, MemOperand(fp, calleeOffset));
__ Push(r1, r0);
__ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
__ bind(&okay);
}
static void Generate_PushAppliedArguments(MacroAssembler* masm,
const int argumentsOffset,
const int indexOffset,
const int limitOffset) {
Label entry, loop;
__ ldr(r0, MemOperand(fp, indexOffset));
Register receiver = LoadDescriptor::ReceiverRegister();
Register key = LoadDescriptor::NameRegister();
__ ldr(key, MemOperand(fp, indexOffset));
__ b(&entry);
// Load the current argument from the arguments array and push it to the
// stack.
// r0: current argument index
// Load the current argument from the arguments array.
__ bind(&loop);
__ ldr(r1, MemOperand(fp, argumentsOffset));
__ Push(r1, r0);
__ ldr(receiver, MemOperand(fp, argumentsOffset));
// Use inline caching to speed up access to arguments.
Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Megamorphic();
__ Call(ic, RelocInfo::CODE_TARGET);
// Call the runtime to access the property in the arguments array.
__ CallRuntime(Runtime::kGetProperty, 2);
// Push the nth argument.
__ push(r0);
// Use inline caching to access the arguments.
__ ldr(r0, MemOperand(fp, indexOffset));
__ add(r0, r0, Operand(1 << kSmiTagSize));
__ str(r0, MemOperand(fp, indexOffset));
__ ldr(key, MemOperand(fp, indexOffset));
__ add(key, key, Operand(1 << kSmiTagSize));
__ str(key, MemOperand(fp, indexOffset));
// Test if the copy loop has finished copying all the elements from the
// arguments object.
__ bind(&entry);
__ ldr(r1, MemOperand(fp, limitOffset));
__ cmp(r0, r1);
__ cmp(key, r1);
__ b(ne, &loop);
// On exit, the pushed arguments count is in r0, untagged
__ mov(r0, key);
__ SmiUntag(r0);
}
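The rewritten loop keeps the smi-tagged index in the IC's name register and loads each element through KeyedLoadIC_Megamorphic instead of a Runtime::kGetProperty call, saving a runtime round-trip per argument. Equivalent logic, sketched with hypothetical helpers:

  // index and limit are smi-tagged loop bounds spilled on the frame
  for (intptr_t key = index; key != limit; key += 1 << kSmiTagSize) {
    Push(KeyedLoad(arguments_object, key));  // the IC stands in for GetProperty
  }
  // r0 ends up holding the untagged argument count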
@@ -1416,7 +1445,7 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
}
Generate_CheckStackOverflow(masm, kFunctionOffset);
Generate_CheckStackOverflow(masm, kFunctionOffset, r0, kArgcIsSmiTagged);
// Push current limit and index.
const int kIndexOffset =
@@ -1544,7 +1573,7 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
__ push(r0);
__ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION);
Generate_CheckStackOverflow(masm, kFunctionOffset);
Generate_CheckStackOverflow(masm, kFunctionOffset, r0, kArgcIsSmiTagged);
// Push current limit and index.
const int kIndexOffset =

216
deps/v8/src/arm/code-stubs-arm.cc

@@ -253,6 +253,8 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
if (cond == lt || cond == gt) {
__ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, slow);
__ cmp(r4, Operand(SYMBOL_TYPE));
__ b(eq, slow);
} else {
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ b(eq, &heap_number);
@@ -260,6 +262,8 @@
if (cond != eq) {
__ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
__ b(ge, slow);
__ cmp(r4, Operand(SYMBOL_TYPE));
__ b(eq, slow);
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@@ -920,6 +924,8 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
TypeofStub::GenerateAheadOfTime(isolate);
}
@@ -1004,16 +1010,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ VFPEnsureFPSCRState(r2);
// Runtime functions should not return 'the hole'. Allowing it to escape may
// lead to crashes in the IC code later.
if (FLAG_debug_code) {
Label okay;
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
__ b(ne, &okay);
__ stop("The hole escaped");
__ bind(&okay);
}
// Check result for exception sentinel.
Label exception_returned;
__ CompareRoot(r0, Heap::kExceptionRootIndex);
@@ -1058,7 +1054,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Ask the runtime for help to determine the handler. This will set r0 to
// contain the current pending exception, don't clobber it.
ExternalReference find_handler(Runtime::kFindExceptionHandler, isolate());
ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
isolate());
{
FrameScope scope(masm, StackFrame::MANUAL);
__ PrepareCallCFunction(3, 0, r0);
@@ -1333,6 +1330,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ GetRelocatedValueLocation(r9, map_load_offset, scratch);
__ ldr(map_load_offset, MemOperand(map_load_offset));
__ str(map, FieldMemOperand(map_load_offset, Cell::kValueOffset));
__ mov(r8, map);
// |map_load_offset| points at the beginning of the cell. Calculate the
// field containing the map.
__ add(function, map_load_offset, Operand(Cell::kValueOffset - 1));
__ RecordWriteField(map_load_offset, Cell::kValueOffset, r8, function,
kLRHasNotBeenSaved, kDontSaveFPRegs,
OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
}
// Register mapping: r3 is object map and r4 is function prototype.
@@ -2370,6 +2375,24 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// r0 : number of arguments to the construct function
// r2 : Feedback vector
// r3 : slot in feedback vector (Smi)
// r1 : the function to call
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(r0);
__ Push(r3, r2, r1, r0);
__ CallStub(stub);
__ Pop(r3, r2, r1, r0);
__ SmiUntag(r0);
}
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
@@ -2391,16 +2414,31 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
__ cmp(r4, r1);
// We don't know if r4 is a WeakCell or a Symbol, but it's harmless to read at
// this position in a symbol (see static asserts in type-feedback-vector.h).
Label check_allocation_site;
Register feedback_map = r5;
Register weak_value = r8;
__ ldr(weak_value, FieldMemOperand(r4, WeakCell::kValueOffset));
__ cmp(r1, weak_value);
__ b(eq, &done);
__ CompareRoot(r4, Heap::kmegamorphic_symbolRootIndex);
__ b(eq, &done);
__ ldr(feedback_map, FieldMemOperand(r4, HeapObject::kMapOffset));
__ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
__ b(ne, FLAG_pretenuring_call_new ? &miss : &check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(weak_value, &initialize);
__ jmp(&megamorphic);
if (!FLAG_pretenuring_call_new) {
__ bind(&check_allocation_site);
// If we came here, we need to see if we are the array function.
// If we didn't have a matching function, and we didn't find the megamorph
// sentinel, then we have in the slot either some other function or an
// AllocationSite. Do a map check on the object in ecx.
__ ldr(r5, FieldMemOperand(r4, 0));
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
// AllocationSite.
__ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
__ b(ne, &miss);
// Make sure the function is the Array() function
@@ -2436,33 +2474,15 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// The target function is the Array constructor,
// Create an AllocationSite if we don't already have it, store it in the
// slot.
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Arguments register must be smi-tagged to call out.
__ SmiTag(r0);
__ Push(r3, r2, r1, r0);
CreateAllocationSiteStub create_stub(masm->isolate());
__ CallStub(&create_stub);
__ Pop(r3, r2, r1, r0);
__ SmiUntag(r0);
}
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
__ b(&done);
__ bind(&not_array_function);
}
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
__ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ str(r1, MemOperand(r4, 0));
__ Push(r4, r2, r1);
__ RecordWrite(r2, r4, r1, kLRHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Pop(r4, r2, r1);
CreateWeakCellStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
__ bind(&done);
}
@@ -4367,21 +4387,15 @@ void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
static void HandleArrayCases(MacroAssembler* masm, Register receiver,
Register key, Register vector, Register slot,
Register feedback, Register scratch1,
Register scratch2, Register scratch3,
Register feedback, Register receiver_map,
Register scratch1, Register scratch2,
bool is_polymorphic, Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label load_smi_map, compare_map;
Label start_polymorphic;
Register receiver_map = scratch1;
Register cached_map = scratch2;
Register cached_map = scratch1;
// Receiver might not be a heap object.
__ JumpIfSmi(receiver, &load_smi_map);
__ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ bind(&compare_map);
__ ldr(cached_map,
FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
__ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
@@ -4393,7 +4407,7 @@ static void HandleArrayCases(MacroAssembler* masm, Register receiver,
__ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
Register length = scratch3;
Register length = scratch2;
__ bind(&start_polymorphic);
__ ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
if (!is_polymorphic) {
@@ -4413,9 +4427,9 @@
// ^ ^
// | |
// pointer_reg too_far
// aka feedback scratch3
// also need receiver_map (aka scratch1)
// use cached_map (scratch2) to look in the weak map values.
// aka feedback scratch2
// also need receiver_map
// use cached_map (scratch1) to look in the weak map values.
__ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(length));
__ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(pointer_reg, feedback,
@@ -4436,42 +4450,23 @@
// We exhausted our array of map handler pairs.
__ jmp(miss);
__ bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&compare_map);
}
static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
Register key, Register vector, Register slot,
Register weak_cell, Register scratch,
Label* miss) {
// feedback initially contains the feedback array
Label compare_smi_map;
Register receiver_map = scratch;
Register cached_map = weak_cell;
// Move the weak map into the weak_cell register.
__ ldr(cached_map, FieldMemOperand(weak_cell, WeakCell::kValueOffset));
// Receiver might not be a heap object.
__ JumpIfSmi(receiver, &compare_smi_map);
Register receiver_map, Register feedback,
Register vector, Register slot,
Register scratch, Label* compare_map,
Label* load_smi_map, Label* try_array) {
__ JumpIfSmi(receiver, load_smi_map);
__ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ bind(compare_map);
Register cached_map = scratch;
// Move the weak map into the weak_cell register.
__ ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
__ cmp(cached_map, receiver_map);
__ b(ne, miss);
Register handler = weak_cell;
__ add(handler, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(handler,
FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
__ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
// In microbenchmarks, it made sense to unroll this code so that the call to
// the handler is duplicated for a HeapObject receiver and a Smi receiver.
__ bind(&compare_smi_map);
__ CompareRoot(weak_cell, Heap::kHeapNumberMapRootIndex);
__ b(ne, miss);
__ b(ne, try_array);
Register handler = feedback;
__ add(handler, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(handler,
FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
@ -4485,26 +4480,27 @@ void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register vector = VectorLoadICDescriptor::VectorRegister(); // r3
Register slot = VectorLoadICDescriptor::SlotRegister(); // r0
Register feedback = r4;
Register scratch1 = r5;
Register receiver_map = r5;
Register scratch1 = r8;
__ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Is it a weak cell?
Label try_array;
Label not_array, smi_key, key_okay, miss;
__ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(scratch1, Heap::kWeakCellMapRootIndex);
__ b(ne, &try_array);
HandleMonomorphicCase(masm, receiver, name, vector, slot, feedback, scratch1,
&miss);
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak cell in feedback. We do know it's safe to look
// at WeakCell::kValueOffset.
Label try_array, load_smi_map, compare_map;
Label not_array, miss;
HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
scratch1, &compare_map, &load_smi_map, &try_array);
// Is it a fixed array?
__ bind(&try_array);
__ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
__ b(ne, &not_array);
HandleArrayCases(masm, receiver, name, vector, slot, feedback, scratch1, r8,
r9, true, &miss);
HandleArrayCases(masm, receiver, name, vector, slot, feedback, receiver_map,
scratch1, r9, true, &miss);
__ bind(&not_array);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
@ -4513,10 +4509,15 @@ void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
false, receiver, name, feedback,
scratch1, r8, r9);
receiver_map, scratch1, r9);
__ bind(&miss);
LoadIC::GenerateMiss(masm);
__ bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&compare_map);
}
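The load_smi_map tail is the subtle part of this rewrite: a Smi receiver has no map word, so the IC substitutes the heap number map and jumps back to compare_map. In the same simplified model (kHeapNumberMap is an assumed sentinel; the 0 tag bit matches V8's Smi tagging on 32-bit ARM):

    #include <cstdint>

    using Map = std::uintptr_t;
    constexpr Map kHeapNumberMap = 0x1001;  // illustrative stand-in for the root map

    inline bool IsSmi(std::intptr_t value) { return (value & 1) == 0; }

    inline Map LoadReceiverMap(std::intptr_t receiver,
                               Map (*load_map_of)(std::intptr_t)) {
      // Receiver might not be a heap object: a Smi borrows the heap number
      // map so the same map/handler comparison runs unchanged.
      if (IsSmi(receiver)) return kHeapNumberMap;
      return load_map_of(receiver);
    }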
@ -4536,30 +4537,31 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register vector = VectorLoadICDescriptor::VectorRegister(); // r3
Register slot = VectorLoadICDescriptor::SlotRegister(); // r0
Register feedback = r4;
Register scratch1 = r5;
Register receiver_map = r5;
Register scratch1 = r8;
__ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Is it a weak cell?
Label try_array;
Label not_array, smi_key, key_okay, miss;
__ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(scratch1, Heap::kWeakCellMapRootIndex);
__ b(ne, &try_array);
HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, scratch1,
&miss);
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak cell in feedback. We do know it's safe to look
// at WeakCell::kValueOffset.
Label try_array, load_smi_map, compare_map;
Label not_array, miss;
HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
scratch1, &compare_map, &load_smi_map, &try_array);
__ bind(&try_array);
// Is it a fixed array?
__ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
__ b(ne, &not_array);
// We have a polymorphic element handler.
Label polymorphic, try_poly_name;
__ bind(&polymorphic);
HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, r8,
r9, true, &miss);
HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
scratch1, r9, true, &miss);
__ bind(&not_array);
// Is it generic?
@ -4578,11 +4580,15 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, r8,
r9, false, &miss);
HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
scratch1, r9, false, &miss);
__ bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
__ bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&compare_map);
}

12
deps/v8/src/arm/code-stubs-arm.h

@ -74,7 +74,7 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
bool SometimesSetsUpAFrame() OVERRIDE { return false; }
bool SometimesSetsUpAFrame() override { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
@ -197,9 +197,9 @@ class RecordWriteStub: public PlatformCodeStub {
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
inline Major MajorKey() const FINAL { return RecordWrite; }
inline Major MajorKey() const final { return RecordWrite; }
void Generate(MacroAssembler* masm) OVERRIDE;
void Generate(MacroAssembler* masm) override;
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
@ -207,7 +207,7 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
void Activate(Code* code) OVERRIDE {
void Activate(Code* code) override {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
@ -255,7 +255,7 @@ class DirectCEntryStub: public PlatformCodeStub {
void GenerateCall(MacroAssembler* masm, Register target);
private:
bool NeedsImmovableCode() OVERRIDE { return true; }
bool NeedsImmovableCode() override { return true; }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
@ -287,7 +287,7 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
Register r0,
Register r1);
bool SometimesSetsUpAFrame() OVERRIDE { return false; }
bool SometimesSetsUpAFrame() override { return false; }
private:
static const int kInlinedProbes = 4;
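The OVERRIDE/FINAL churn in this header (and throughout the commit) is mechanical: V8 previously hid the keywords behind macros so pre-C++11 toolchains could compile them away, and this V8 version switches to the plain C++11 keywords. A minimal example of what they enforce:

    struct CodeStubBase {
      virtual ~CodeStubBase() = default;
      virtual bool SometimesSetsUpAFrame() { return true; }
    };

    struct RecordWriteLikeStub final : CodeStubBase {  // 'final': no further deriving
      // 'override' makes the compiler reject any signature mismatch here.
      bool SometimesSetsUpAFrame() override { return false; }
    };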

57
deps/v8/src/arm/disasm-arm.cc

@ -1324,17 +1324,27 @@ int Decoder::DecodeType7(Instruction* instr) {
// vcvt: Sd = Dm
// vcvt.f64.s32 Dd, Dd, #<fbits>
// Dd = vabs(Dm)
// Sd = vabs(Sm)
// Dd = vneg(Dm)
// Sd = vneg(Sm)
// Dd = vadd(Dn, Dm)
// Sd = vadd(Sn, Sm)
// Dd = vsub(Dn, Dm)
// Sd = vsub(Sn, Sm)
// Dd = vmul(Dn, Dm)
// Sd = vmul(Sn, Sm)
// Dd = vmla(Dn, Dm)
// Sd = vmla(Sn, Sm)
// Dd = vmls(Dn, Dm)
// Sd = vmls(Sn, Sm)
// Dd = vdiv(Dn, Dm)
// Sd = vdiv(Sn, Sm)
// vcmp(Dd, Dm)
// vcmp(Sd, Sm)
// Dd = vsqrt(Dm)
// Sd = vsqrt(Sm)
// vmrs
// vmsr
// Dd = vsqrt(Dm)
void Decoder::DecodeTypeVFP(Instruction* instr) {
VERIFY((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
VERIFY(instr->Bits(11, 9) == 0x5);
@ -1351,10 +1361,18 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
}
} else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
// vabs
Format(instr, "vabs'cond.f64 'Dd, 'Dm");
if (instr->SzValue() == 0x1) {
Format(instr, "vabs'cond.f64 'Dd, 'Dm");
} else {
Format(instr, "vabs'cond.f32 'Sd, 'Sm");
}
} else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
// vneg
Format(instr, "vneg'cond.f64 'Dd, 'Dm");
if (instr->SzValue() == 0x1) {
Format(instr, "vneg'cond.f64 'Dd, 'Dm");
} else {
Format(instr, "vneg'cond.f32 'Sd, 'Sm");
}
} else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
@ -1373,7 +1391,11 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
(instr->Opc3Value() & 0x1)) {
DecodeVCMP(instr);
} else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
Format(instr, "vsqrt'cond.f64 'Dd, 'Dm");
if (instr->SzValue() == 0x1) {
Format(instr, "vsqrt'cond.f64 'Dd, 'Dm");
} else {
Format(instr, "vsqrt'cond.f32 'Sd, 'Sm");
}
} else if (instr->Opc3Value() == 0x0) {
if (instr->SzValue() == 0x1) {
Format(instr, "vmov'cond.f64 'Dd, 'd");
@ -1381,12 +1403,11 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
Unknown(instr); // Not used by V8.
}
} else if (((instr->Opc2Value() == 0x6)) && instr->Opc3Value() == 0x3) {
bool dp_operation = (instr->SzValue() == 1);
// vrintz - round towards zero (truncate)
if (dp_operation) {
if (instr->SzValue() == 0x1) {
Format(instr, "vrintz'cond.f64.f64 'Dd, 'Dm");
} else {
Unknown(instr); // Not used by V8.
Format(instr, "vrintz'cond.f32.f32 'Sd, 'Sm");
}
} else {
Unknown(instr); // Not used by V8.
@ -1399,31 +1420,35 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
Format(instr, "vadd'cond.f64 'Dd, 'Dn, 'Dm");
}
} else {
Unknown(instr); // Not used by V8.
if (instr->Opc3Value() & 0x1) {
Format(instr, "vsub'cond.f32 'Sd, 'Sn, 'Sm");
} else {
Format(instr, "vadd'cond.f32 'Sd, 'Sn, 'Sm");
}
}
} else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
if (instr->SzValue() == 0x1) {
Format(instr, "vmul'cond.f64 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
Format(instr, "vmul'cond.f32 'Sd, 'Sn, 'Sm");
}
} else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) {
if (instr->SzValue() == 0x1) {
Format(instr, "vmla'cond.f64 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
Format(instr, "vmla'cond.f32 'Sd, 'Sn, 'Sm");
}
} else if ((instr->Opc1Value() == 0x0) && (instr->Opc3Value() & 0x1)) {
if (instr->SzValue() == 0x1) {
Format(instr, "vmls'cond.f64 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
Format(instr, "vmls'cond.f32 'Sd, 'Sn, 'Sm");
}
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
if (instr->SzValue() == 0x1) {
Format(instr, "vdiv'cond.f64 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
Format(instr, "vdiv'cond.f32 'Sd, 'Sn, 'Sm");
}
} else {
Unknown(instr); // Not used by V8.
@ -1501,6 +1526,14 @@ void Decoder::DecodeVCMP(Instruction* instr) {
} else {
Unknown(instr); // invalid
}
} else if (!raise_exception_for_qnan) {
if (instr->Opc2Value() == 0x4) {
Format(instr, "vcmp'cond.f32 'Sd, 'Sm");
} else if (instr->Opc2Value() == 0x5) {
Format(instr, "vcmp'cond.f32 'Sd, #0.0");
} else {
Unknown(instr); // invalid
}
} else {
Unknown(instr); // Not used by V8.
}
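Every disassembler addition above follows the same pattern: the VFP size bit chooses between the double-precision (.f64, D registers) form and the newly decoded single-precision (.f32, S registers) form of one operation. A condensed standalone model, assuming the sz bit sits at bit 8 of the instruction word (where VFP data-processing encodings keep it):

    #include <cstdint>
    #include <cstdio>

    // Print the precision-correct mnemonic for a vabs-shaped instruction.
    void DisassembleVabsLike(std::uint32_t instr) {
      const bool is_double = (instr >> 8) & 1;  // Sz bit
      if (is_double) {
        std::printf("vabs.f64 Dd, Dm\n");
      } else {
        std::printf("vabs.f32 Sd, Sm\n");
      }
    }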

172
deps/v8/src/arm/full-codegen-arm.cc

@ -13,7 +13,6 @@
#include "src/debug.h"
#include "src/full-codegen.h"
#include "src/ic/ic.h"
#include "src/isolate-inl.h"
#include "src/parser.h"
#include "src/scopes.h"
@ -127,7 +126,8 @@ void FullCodeGenerator::Generate() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
if (is_sloppy(info->language_mode()) && !info->is_native()) {
if (is_sloppy(info->language_mode()) && !info->is_native() &&
info->MayUseThis()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ ldr(r2, MemOperand(sp, receiver_offset));
@ -962,38 +962,6 @@ void FullCodeGenerator::VisitFunctionDeclaration(
}
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
Variable* variable = declaration->proxy()->var();
ModuleDescriptor* descriptor = declaration->module()->descriptor();
DCHECK(variable->location() == Variable::CONTEXT);
DCHECK(descriptor->IsFrozen());
Comment cmnt(masm_, "[ ModuleDeclaration");
EmitDebugCheckDeclarationContext(variable);
// Load instance object.
__ LoadContext(r1, scope_->ContextChainLength(scope_->ScriptScope()));
__ ldr(r1, ContextOperand(r1, descriptor->Index()));
__ ldr(r1, ContextOperand(r1, Context::EXTENSION_INDEX));
// Assign it.
__ str(r1, ContextOperand(cp, variable->index()));
// We know that we have written a module, which is not a smi.
__ RecordWriteContextSlot(cp,
Context::SlotOffset(variable->index()),
r1,
r3,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
// Traverse into body.
Visit(declaration->module());
}
void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
@ -1285,6 +1253,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(r1); // Enumerable.
__ push(r3); // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ mov(r3, Operand(r0), SetCC);
__ b(eq, loop_statement.continue_label());
@ -2482,7 +2451,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
Handle<Code> code = CodeFactory::BinaryOpIC(
isolate(), op, language_mode()).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@ -2626,7 +2596,8 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ pop(r1);
Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
Handle<Code> code = CodeFactory::BinaryOpIC(
isolate(), op, language_mode()).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@ -3118,6 +3089,22 @@ void FullCodeGenerator::EmitLoadSuperConstructor() {
}
void FullCodeGenerator::EmitInitializeThisAfterSuper(
SuperReference* super_ref) {
Variable* this_var = super_ref->this_var()->var();
GetVar(r1, this_var);
__ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
Label uninitialized_this;
__ b(eq, &uninitialized_this);
__ mov(r0, Operand(this_var->name()));
__ Push(r0);
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&uninitialized_this);
EmitVariableAssignment(this_var, Token::INIT_CONST);
}
void FullCodeGenerator::VisitCall(Call* expr) {
#ifdef DEBUG
// We want to verify that RecordJSReturnSite gets called on all paths
@ -3341,18 +3328,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
RecordJSReturnSite(expr);
SuperReference* super_ref = expr->expression()->AsSuperReference();
Variable* this_var = super_ref->this_var()->var();
GetVar(r1, this_var);
__ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
Label uninitialized_this;
__ b(eq, &uninitialized_this);
__ mov(r0, Operand(this_var->name()));
__ Push(r0);
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&uninitialized_this);
EmitVariableAssignment(this_var, Token::INIT_CONST);
EmitInitializeThisAfterSuper(expr->expression()->AsSuperReference());
context()->Plug(r0);
}
@ -4608,27 +4584,81 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
// Assert: expr === CallRuntime("ReflectConstruct")
CallRuntime* call = expr->arguments()->at(0)->AsCallRuntime();
ZoneList<Expression*>* args = call->arguments();
DCHECK_EQ(3, args->length());
SuperReference* super_reference = args->at(0)->AsSuperReference();
// Load ReflectConstruct function
EmitLoadJSRuntimeFunction(call);
// Push the target function under the receiver.
__ ldr(ip, MemOperand(sp, 0));
__ push(ip);
__ str(r0, MemOperand(sp, kPointerSize));
// Push super
EmitLoadSuperConstructor();
__ Push(result_register());
// Push arguments array
VisitForStackValue(args->at(1));
// Push NewTarget
DCHECK(args->at(2)->IsVariableProxy());
VisitForStackValue(args->at(2));
EmitCallJSRuntimeFunction(call);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, r0);
EmitInitializeThisAfterSuper(super_reference);
}
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push the builtins object as the receiver.
Register receiver = LoadDescriptor::ReceiverRegister();
__ ldr(receiver, GlobalObjectOperand());
__ ldr(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset));
__ push(receiver);
// Load the function from the receiver.
__ mov(LoadDescriptor::NameRegister(), Operand(expr->name()));
if (FLAG_vector_ics) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL);
} else {
CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
}
}
void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
// Record source position of the IC call.
SetSourcePosition(expr->position());
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
}
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
if (expr->is_jsruntime()) {
Comment cmnt(masm_, "[ CallRuntime");
// Push the builtins object as the receiver.
Register receiver = LoadDescriptor::ReceiverRegister();
__ ldr(receiver, GlobalObjectOperand());
__ ldr(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset));
__ push(receiver);
// Load the function from the receiver.
__ mov(LoadDescriptor::NameRegister(), Operand(expr->name()));
if (FLAG_vector_ics) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL);
} else {
CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
}
EmitLoadJSRuntimeFunction(expr);
// Push the target function under the receiver.
__ ldr(ip, MemOperand(sp, 0));
@ -4640,11 +4670,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
VisitForStackValue(args->at(i));
}
// Record source position of the IC call.
SetSourcePosition(expr->position());
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
EmitCallJSRuntimeFunction(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@ -4773,10 +4799,13 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::TYPEOF: {
Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
{ StackValueContext context(this);
{
AccumulatorValueContext context(this);
VisitForTypeofValue(expr->expression());
}
__ CallRuntime(Runtime::kTypeof, 1);
__ mov(r3, r0);
TypeofStub typeof_stub(isolate());
__ CallStub(&typeof_stub);
context()->Plug(r0);
break;
}
@ -4947,7 +4976,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Record position before stub call.
SetSourcePosition(expr->position());
Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
Handle<Code> code = CodeFactory::BinaryOpIC(
isolate(), Token::ADD, language_mode()).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);

11
deps/v8/src/arm/interface-descriptors-arm.cc

@ -54,6 +54,11 @@ const Register MathPowIntegerDescriptor::exponent() {
}
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
const Register GrowArrayElementsDescriptor::CapacityRegister() { return r2; }
void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r2};
data->Initialize(arraysize(registers), registers, NULL);
@ -78,6 +83,12 @@ void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
}
void TypeofDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r3};
data->Initialize(arraysize(registers), registers, NULL);
}
void FastCloneShallowArrayDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r3, r2, r1};

44
deps/v8/src/arm/lithium-arm.cc

@ -1112,17 +1112,10 @@ LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
LOperand* slot = NULL;
LOperand* vector = NULL;
if (FLAG_vector_ics) {
slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
vector =
UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
}
// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
context, receiver_register, name_register, slot, vector);
context, receiver_register, name_register);
}
@ -2053,6 +2046,15 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
}
LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
HCheckArrayBufferNotNeutered* instr) {
LOperand* view = UseRegisterAtStart(instr->value());
LCheckArrayBufferNotNeutered* result =
new (zone()) LCheckArrayBufferNotNeutered(view);
return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LInstruction* result = new(zone()) LCheckInstanceType(value);
@ -2241,14 +2243,21 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
}
if ((instr->is_external() || instr->is_fixed_typed_array()) ?
// see LCodeGen::DoLoadKeyedExternalArray
((elements_kind == EXTERNAL_UINT32_ELEMENTS ||
elements_kind == UINT32_ELEMENTS) &&
!instr->CheckFlag(HInstruction::kUint32)) :
// see LCodeGen::DoLoadKeyedFixedDoubleArray and
// LCodeGen::DoLoadKeyedFixedArray
instr->RequiresHoleCheck()) {
bool needs_environment;
if (instr->is_external() || instr->is_fixed_typed_array()) {
// see LCodeGen::DoLoadKeyedExternalArray
needs_environment = (elements_kind == EXTERNAL_UINT32_ELEMENTS ||
elements_kind == UINT32_ELEMENTS) &&
!instr->CheckFlag(HInstruction::kUint32);
} else {
// see LCodeGen::DoLoadKeyedFixedDoubleArray and
// LCodeGen::DoLoadKeyedFixedArray
needs_environment =
instr->RequiresHoleCheck() ||
(instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub());
}
if (needs_environment) {
result = AssignEnvironment(result);
}
return result;
@ -2541,7 +2550,8 @@ LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LTypeof* result = new(zone()) LTypeof(context, UseFixed(instr->value(), r0));
LOperand* value = UseFixed(instr->value(), r3);
LTypeof* result = new (zone()) LTypeof(context, value);
return MarkAsCall(DefineFixed(result, r0), instr);
}

443
deps/v8/src/arm/lithium-arm.h

File diff suppressed because it is too large

203
deps/v8/src/arm/lithium-codegen-arm.cc

@ -18,7 +18,7 @@ namespace v8 {
namespace internal {
class SafepointGenerator FINAL : public CallWrapper {
class SafepointGenerator final : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@ -28,9 +28,9 @@ class SafepointGenerator FINAL : public CallWrapper {
deopt_mode_(mode) { }
virtual ~SafepointGenerator() {}
void BeforeCall(int call_size) const OVERRIDE {}
void BeforeCall(int call_size) const override {}
void AfterCall() const OVERRIDE {
void AfterCall() const override {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@ -120,7 +120,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
if (graph()->this_has_uses() && is_sloppy(info_->language_mode()) &&
if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
@ -180,6 +180,7 @@ bool LCodeGen::GeneratePrologue() {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is in r1.
DCHECK(!info()->scope()->is_script_scope());
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
@ -2173,7 +2174,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(r0));
DCHECK(ToRegister(instr->result()).is(r0));
Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
Handle<Code> code = CodeFactory::BinaryOpIC(
isolate(), instr->op(), instr->language_mode()).code();
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
@ -2782,16 +2784,16 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
class DeferredInstanceOfKnownGlobal final : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
void Generate() OVERRIDE {
void Generate() override {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
&load_bool_);
}
LInstruction* instr() OVERRIDE { return instr_; }
LInstruction* instr() override { return instr_; }
Label* map_check() { return &map_check_; }
Label* load_bool() { return &load_bool_; }
@ -3208,7 +3210,6 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
elements_kind == FLOAT32_ELEMENTS ||
elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
int base_offset = instr->base_offset();
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
? Operand(constant_key << element_size_shift)
@ -3347,6 +3348,23 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ cmp(result, scratch);
DeoptimizeIf(eq, instr, Deoptimizer::kHole);
}
} else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
Label done;
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ cmp(result, scratch);
__ b(ne, &done);
if (info()->IsStub()) {
// A stub can safely convert the hole to undefined only if the array
// protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
// it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ ldr(result, FieldMemOperand(result, Cell::kValueOffset));
__ cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
DeoptimizeIf(ne, instr, Deoptimizer::kHole);
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ bind(&done);
}
}
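The protector-cell check deserves a gloss: the optimized code may rewrite a hole read as undefined only while the isolate-wide array protector cell still holds its valid sentinel; once the Array prototype chain has been tampered with, the cell is invalidated and the stub must deoptimize instead of leaking the hole. A sketch with assumed stand-in types (the sentinel value is illustrative):

    #include <optional>

    constexpr int kArrayProtectorValid = 1;  // assumed sentinel value

    struct IsolateModel {
      int array_protector_cell = kArrayProtectorValid;  // cleared on prototype change
    };

    // Returns the substitute for the hole, or nullopt to request a deopt.
    std::optional<const char*> ConvertHoleToUndefined(const IsolateModel& isolate) {
      if (isolate.array_protector_cell != kArrayProtectorValid) {
        return std::nullopt;  // the Deoptimizer::kHole path above
      }
      return "undefined";
    }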
@ -3398,7 +3416,7 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
if (FLAG_vector_ics) {
if (instr->hydrogen()->HasVectorAndSlot()) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
@ -3726,14 +3744,14 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
void Generate() OVERRIDE {
void Generate() override {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
LInstruction* instr() OVERRIDE { return instr_; }
LInstruction* instr() override { return instr_; }
private:
LMathAbs* instr_;
@ -3956,32 +3974,13 @@ void LCodeGen::DoTailCallThroughMegamorphicCache(
Register extra2 = r6;
Register extra3 = r9;
#ifdef DEBUG
Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;
DCHECK(!FLAG_vector_ics ||
!AreAliased(slot, vector, scratch, extra, extra2, extra3));
#endif
// Important for the tail-call.
bool must_teardown_frame = NeedsEagerFrame();
if (!instr->hydrogen()->is_just_miss()) {
DCHECK(!instr->hydrogen()->is_keyed_load());
// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(
masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
receiver, name, scratch, extra, extra2, extra3);
}
// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(
masm(), Code::LOAD_IC, instr->hydrogen()->flags(), false, receiver, name,
scratch, extra, extra2, extra3);
// Tail call to miss if we ended up here.
if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
if (instr->hydrogen()->is_keyed_load()) {
KeyedLoadIC::GenerateMiss(masm());
} else {
LoadIC::GenerateMiss(masm());
}
LoadIC::GenerateMiss(masm());
}
@ -4018,8 +4017,12 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
PlatformInterfaceDescriptor* call_descriptor =
instr->descriptor().platform_specific_descriptor();
__ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
call_descriptor->storage_mode());
if (call_descriptor != NULL) {
__ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
call_descriptor->storage_mode());
} else {
__ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al);
}
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
@ -4110,7 +4113,14 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->result()).is(r0));
__ mov(r0, Operand(instr->arity()));
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
if (instr->arity() == 1) {
// We only need the allocation site for the case where we have a length
// argument. That case may bail out to the runtime, which will determine
// the correct elements kind with the site.
__ Move(r2, instr->hydrogen()->site());
} else {
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
}
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
@ -4556,12 +4566,12 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
class DeferredStringCharCodeAt FINAL : public LDeferredCode {
class DeferredStringCharCodeAt final : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
void Generate() OVERRIDE { codegen()->DoDeferredStringCharCodeAt(instr_); }
LInstruction* instr() OVERRIDE { return instr_; }
void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
LInstruction* instr() override { return instr_; }
private:
LStringCharCodeAt* instr_;
@ -4611,14 +4621,14 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
class DeferredStringCharFromCode FINAL : public LDeferredCode {
class DeferredStringCharFromCode final : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
void Generate() OVERRIDE {
void Generate() override {
codegen()->DoDeferredStringCharFromCode(instr_);
}
LInstruction* instr() OVERRIDE { return instr_; }
LInstruction* instr() override { return instr_; }
private:
LStringCharFromCode* instr_;
@ -4689,18 +4699,18 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI FINAL : public LDeferredCode {
class DeferredNumberTagI final : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
void Generate() OVERRIDE {
void Generate() override {
codegen()->DoDeferredNumberTagIU(instr_,
instr_->value(),
instr_->temp1(),
instr_->temp2(),
SIGNED_INT32);
}
LInstruction* instr() OVERRIDE { return instr_; }
LInstruction* instr() override { return instr_; }
private:
LNumberTagI* instr_;
@ -4717,18 +4727,18 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
class DeferredNumberTagU FINAL : public LDeferredCode {
class DeferredNumberTagU final : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
void Generate() OVERRIDE {
void Generate() override {
codegen()->DoDeferredNumberTagIU(instr_,
instr_->value(),
instr_->temp1(),
instr_->temp2(),
UNSIGNED_INT32);
}
LInstruction* instr() OVERRIDE { return instr_; }
LInstruction* instr() override { return instr_; }
private:
LNumberTagU* instr_;
@ -4812,12 +4822,12 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
class DeferredNumberTagD FINAL : public LDeferredCode {
class DeferredNumberTagD final : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
void Generate() OVERRIDE { codegen()->DoDeferredNumberTagD(instr_); }
LInstruction* instr() OVERRIDE { return instr_; }
void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
LInstruction* instr() override { return instr_; }
private:
LNumberTagD* instr_;
@ -5030,12 +5040,12 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
class DeferredTaggedToI FINAL : public LDeferredCode {
class DeferredTaggedToI final : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
void Generate() OVERRIDE { codegen()->DoDeferredTaggedToI(instr_); }
LInstruction* instr() OVERRIDE { return instr_; }
void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
LInstruction* instr() override { return instr_; }
private:
LTaggedToI* instr_;
@ -5148,6 +5158,18 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
}
void LCodeGen::DoCheckArrayBufferNotNeutered(
LCheckArrayBufferNotNeutered* instr) {
Register view = ToRegister(instr->view());
Register scratch = scratch0();
__ ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
__ ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
__ tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds);
}
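A standalone model of that check, with an assumed bit position (in V8 the shift comes from the JSArrayBuffer::WasNeutered bit field):

    #include <cstdint>

    constexpr std::uint32_t kWasNeuteredShift = 3;  // assumed, for illustration

    // Mirrors the tst above: the typed-array view is usable only while the
    // buffer's neutered bit is clear; otherwise deopt with kOutOfBounds.
    bool BufferNotNeutered(std::uint32_t bit_field) {
      return (bit_field & (1u << kWasNeuteredShift)) == 0;
    }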
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
Register input = ToRegister(instr->value());
Register scratch = scratch0();
@ -5224,17 +5246,17 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
class DeferredCheckMaps FINAL : public LDeferredCode {
class DeferredCheckMaps final : public LDeferredCode {
public:
DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
: LDeferredCode(codegen), instr_(instr), object_(object) {
SetExit(check_maps());
}
void Generate() OVERRIDE {
void Generate() override {
codegen()->DoDeferredInstanceMigration(instr_, object_);
}
Label* check_maps() { return &check_maps_; }
LInstruction* instr() OVERRIDE { return instr_; }
LInstruction* instr() override { return instr_; }
private:
LCheckMaps* instr_;
@ -5355,12 +5377,12 @@ void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate FINAL : public LDeferredCode {
class DeferredAllocate final : public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
void Generate() OVERRIDE { codegen()->DoDeferredAllocate(instr_); }
LInstruction* instr() OVERRIDE { return instr_; }
void Generate() override { codegen()->DoDeferredAllocate(instr_); }
LInstruction* instr() override { return instr_; }
private:
LAllocate* instr_;
@ -5378,13 +5400,9 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
} else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
if (instr->size()->IsConstantOperand()) {
@ -5446,13 +5464,9 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
int flags = AllocateDoubleAlignFlag::encode(
instr->hydrogen()->MustAllocateDoubleAligned());
if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
} else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
flags = AllocateTargetSpace::update(flags, OLD_SPACE);
} else {
flags = AllocateTargetSpace::update(flags, NEW_SPACE);
}
@ -5536,9 +5550,17 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
void LCodeGen::DoTypeof(LTypeof* instr) {
Register input = ToRegister(instr->value());
__ push(input);
CallRuntime(Runtime::kTypeof, 1, instr);
DCHECK(ToRegister(instr->value()).is(r3));
DCHECK(ToRegister(instr->result()).is(r0));
Label end, do_call;
Register value_register = ToRegister(instr->value());
__ JumpIfNotSmi(value_register, &do_call);
__ mov(r0, Operand(isolate()->factory()->number_string()));
__ jmp(&end);
__ bind(&do_call);
TypeofStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&end);
}
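The new DoTypeof splits typeof into an inline Smi fast path plus a TypeofStub call; a Smi is always a number, so the stub can be skipped outright. A sketch with assumed stand-ins (the 0 tag bit matches 32-bit Smi tagging; the slow path below is a placeholder, not the stub's real dispatch):

    #include <cstdint>
    #include <string>

    inline bool IsSmi(std::intptr_t value) { return (value & 1) == 0; }

    inline std::string TypeofSlowPath(std::intptr_t) {
      return "object";  // placeholder for the generic TypeofStub
    }

    std::string TypeofWithSmiFastPath(std::intptr_t value) {
      if (IsSmi(value)) return "number";  // no stub call needed
      return TypeofSlowPath(value);       // everything else goes to the stub
    }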
@ -5719,12 +5741,12 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
void LCodeGen::DoStackCheck(LStackCheck* instr) {
class DeferredStackCheck FINAL : public LDeferredCode {
class DeferredStackCheck final : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
void Generate() OVERRIDE { codegen()->DoDeferredStackCheck(instr_); }
LInstruction* instr() OVERRIDE { return instr_; }
void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
LInstruction* instr() override { return instr_; }
private:
LStackCheck* instr_;
@ -5782,15 +5804,6 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
DeoptimizeIf(eq, instr, Deoptimizer::kUndefined);
Register null_value = r5;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ cmp(r0, null_value);
DeoptimizeIf(eq, instr, Deoptimizer::kNull);
__ SmiTst(r0);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
@ -5799,6 +5812,8 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);
Label use_cache, call_runtime;
Register null_value = r5;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ CheckEnumCache(null_value, &call_runtime);
__ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
@ -5865,7 +5880,7 @@ void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
class DeferredLoadMutableDouble FINAL : public LDeferredCode {
class DeferredLoadMutableDouble final : public LDeferredCode {
public:
DeferredLoadMutableDouble(LCodeGen* codegen,
LLoadFieldByIndex* instr,
@ -5878,10 +5893,10 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
object_(object),
index_(index) {
}
void Generate() OVERRIDE {
void Generate() override {
codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
}
LInstruction* instr() OVERRIDE { return instr_; }
LInstruction* instr() override { return instr_; }
private:
LLoadFieldByIndex* instr_;

8
deps/v8/src/arm/lithium-codegen-arm.h

@ -169,7 +169,7 @@ class LCodeGen: public LCodeGenBase {
// Code generation passes. Returns true if code generation should
// continue.
void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
void GenerateBodyInstructionPre(LInstruction* instr) override;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateJumpTable();
@ -265,7 +265,7 @@ class LCodeGen: public LCodeGenBase {
int arguments,
Safepoint::DeoptMode mode);
void RecordAndWritePosition(int position) OVERRIDE;
void RecordAndWritePosition(int position) override;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
@ -314,7 +314,7 @@ class LCodeGen: public LCodeGenBase {
int* offset,
AllocationSiteMode mode);
void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
void EnsureSpaceForLazyDeopt(int space_needed) override;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@ -344,7 +344,7 @@ class LCodeGen: public LCodeGenBase {
Safepoint::Kind expected_safepoint_kind_;
class PushSafepointRegistersScope FINAL BASE_EMBEDDED {
class PushSafepointRegistersScope final BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {

2
deps/v8/src/arm/lithium-gap-resolver-arm.h

@ -15,7 +15,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
class LGapResolver FINAL BASE_EMBEDDED {
class LGapResolver final BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);

42
deps/v8/src/arm/macro-assembler-arm.cc

@ -14,7 +14,6 @@
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/isolate-inl.h"
#include "src/runtime/runtime.h"
namespace v8 {
@ -861,6 +860,21 @@ void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
}
void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
const SwVfpRegister src2,
const Condition cond) {
// Compare and move FPSCR flags to the normal condition flags.
VFPCompareAndLoadFlags(src1, src2, pc, cond);
}
void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
const float src2,
const Condition cond) {
// Compare and move FPSCR flags to the normal condition flags.
VFPCompareAndLoadFlags(src1, src2, pc, cond);
}
void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond) {
@ -876,6 +890,25 @@ void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
}
void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
const SwVfpRegister src2,
const Register fpscr_flags,
const Condition cond) {
// Compare and load FPSCR.
vcmp(src1, src2, cond);
vmrs(fpscr_flags, cond);
}
void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
const float src2,
const Register fpscr_flags,
const Condition cond) {
// Compare and load FPSCR.
vcmp(src1, src2, cond);
vmrs(fpscr_flags, cond);
}
void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
const DwVfpRegister src2,
const Register fpscr_flags,
@ -894,6 +927,7 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
vmrs(fpscr_flags, cond);
}
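An assumed usage sketch for the new single-precision overloads, mirroring how the double-precision helpers are already called from generated-code stubs (illustrative, not code from this commit):

    #define __ masm->

    void ExampleFloatCompare(MacroAssembler* masm, Label* on_greater) {
      __ VFPCompareAndSetFlags(s0, s1);  // vcmp.f32 s0, s1; then vmrs to CPSR
      __ b(gt, on_greater);              // ordinary condition codes now apply
    }

    #undef __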
void MacroAssembler::Vmov(const DwVfpRegister dst,
const double imm,
const Register scratch) {
@ -1677,12 +1711,11 @@ void MacroAssembler::Allocate(int object_size,
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
b(eq, &aligned);
if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
if ((flags & PRETENURE) != 0) {
cmp(result, Operand(ip));
b(hs, gc_required);
}
@ -1791,12 +1824,11 @@ void MacroAssembler::Allocate(Register object_size,
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
b(eq, &aligned);
if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
if ((flags & PRETENURE) != 0) {
cmp(result, Operand(ip));
b(hs, gc_required);
}

17
deps/v8/src/arm/macro-assembler-arm.h

@ -483,6 +483,12 @@ class MacroAssembler: public Assembler {
VFPCanonicalizeNaN(value, value, cond);
}
// Compare single values and move the result to the normal condition flags.
void VFPCompareAndSetFlags(const SwVfpRegister src1, const SwVfpRegister src2,
const Condition cond = al);
void VFPCompareAndSetFlags(const SwVfpRegister src1, const float src2,
const Condition cond = al);
// Compare double values and move the result to the normal condition flags.
void VFPCompareAndSetFlags(const DwVfpRegister src1,
const DwVfpRegister src2,
@ -491,6 +497,15 @@ class MacroAssembler: public Assembler {
const double src2,
const Condition cond = al);
// Compare single values and then load the fpscr flags to a register.
void VFPCompareAndLoadFlags(const SwVfpRegister src1,
const SwVfpRegister src2,
const Register fpscr_flags,
const Condition cond = al);
void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2,
const Register fpscr_flags,
const Condition cond = al);
// Compare double values and then load the fpscr flags to a register.
void VFPCompareAndLoadFlags(const DwVfpRegister src1,
const DwVfpRegister src2,
@ -709,7 +724,7 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Allocation support
// Allocate an object in new space or old pointer space. The object_size is
// Allocate an object in new space or old space. The object_size is
// specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
// is passed. If the space is exhausted control continues at the gc_required
// label. The allocated object is returned in result. If the flag

106
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@ -1040,102 +1040,22 @@ static T& frame_entry(Address re_frame, int frame_offset) {
}
template <typename T>
static T* frame_entry_address(Address re_frame, int frame_offset) {
return reinterpret_cast<T*>(re_frame + frame_offset);
}
int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
StackLimitCheck check(isolate);
if (check.JsHasOverflowed()) {
isolate->StackOverflow();
return EXCEPTION;
}
// If it is not a real stack overflow, the stack guard was used to
// interrupt execution for another purpose.
// If this is a direct call from JavaScript, retry the RegExp, forcing the
// call through the runtime system. Currently the direct call cannot
// handle a GC.
if (frame_entry<int>(re_frame, kDirectCall) == 1) {
return RETRY;
}
// Prepare for possible GC.
HandleScope handles(isolate);
Handle<Code> code_handle(re_code);
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
DCHECK(re_code->instruction_start() <= *return_address);
DCHECK(*return_address <=
re_code->instruction_start() + re_code->instruction_size());
Object* result = isolate->stack_guard()->HandleInterrupts();
if (*code_handle != re_code) { // Return address no longer valid
int delta = code_handle->address() - re_code->address();
// Overwrite the return address on the stack.
*return_address += delta;
}
if (result->IsException()) {
return EXCEPTION;
}
Handle<String> subject_tmp = subject;
int slice_offset = 0;
// Extract the underlying string and the slice offset.
if (StringShape(*subject_tmp).IsCons()) {
subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
} else if (StringShape(*subject_tmp).IsSliced()) {
SlicedString* slice = SlicedString::cast(*subject_tmp);
subject_tmp = Handle<String>(slice->parent());
slice_offset = slice->offset();
}
// String might have changed.
if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
// If we changed between a Latin1 and a UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
return RETRY;
}
// Otherwise, the content of the string might have moved. It must still
// be a sequential or external string with the same content.
// Update the start and end pointers in the stack frame to the current
// location (whether it has actually moved or not).
DCHECK(StringShape(*subject_tmp).IsSequential() ||
StringShape(*subject_tmp).IsExternal());
// The original start address of the characters to match.
const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
// Find the current start address of the same character at the current string
// position.
int start_index = frame_entry<int>(re_frame, kStartIndex);
const byte* new_address = StringCharacterPosition(*subject_tmp,
start_index + slice_offset);
if (start_address != new_address) {
// If there is a difference, update the object pointer and start and end
// addresses in the RegExp stack frame to match the new value.
const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
int byte_length = static_cast<int>(end_address - start_address);
frame_entry<const String*>(re_frame, kInputString) = *subject;
frame_entry<const byte*>(re_frame, kInputStart) = new_address;
frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
} else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
// Subject string might have been a ConsString that underwent
// short-circuiting during GC. That will not change start_address but
// will change the pointer inside the subject handle.
frame_entry<const String*>(re_frame, kInputString) = *subject;
}
return 0;
return NativeRegExpMacroAssembler::CheckStackGuardState(
frame_entry<Isolate*>(re_frame, kIsolate),
frame_entry<int>(re_frame, kStartIndex),
frame_entry<int>(re_frame, kDirectCall) == 1, return_address, re_code,
frame_entry_address<String*>(re_frame, kInputString),
frame_entry_address<const byte*>(re_frame, kInputStart),
frame_entry_address<const byte*>(re_frame, kInputEnd));
}

258
deps/v8/src/arm/simulator-arm.cc

@ -657,9 +657,8 @@ void Simulator::FlushICache(v8::internal::HashMap* i_cache,
CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
v8::internal::HashMap::Entry* entry = i_cache->Lookup(page,
ICacheHash(page),
true);
v8::internal::HashMap::Entry* entry =
i_cache->LookupOrInsert(page, ICacheHash(page));
if (entry->value == NULL) {
CachePage* new_page = new CachePage();
entry->value = new_page;
@ -1309,6 +1308,33 @@ bool Simulator::OverflowFrom(int32_t alu_out,
// Support for VFP comparisons.
void Simulator::Compute_FPSCR_Flags(float val1, float val2) {
if (std::isnan(val1) || std::isnan(val2)) {
n_flag_FPSCR_ = false;
z_flag_FPSCR_ = false;
c_flag_FPSCR_ = true;
v_flag_FPSCR_ = true;
// All non-NaN cases.
} else if (val1 == val2) {
n_flag_FPSCR_ = false;
z_flag_FPSCR_ = true;
c_flag_FPSCR_ = true;
v_flag_FPSCR_ = false;
} else if (val1 < val2) {
n_flag_FPSCR_ = true;
z_flag_FPSCR_ = false;
c_flag_FPSCR_ = false;
v_flag_FPSCR_ = false;
} else {
// Case when (val1 > val2).
n_flag_FPSCR_ = false;
z_flag_FPSCR_ = false;
c_flag_FPSCR_ = true;
v_flag_FPSCR_ = false;
}
}
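The float overload reproduces the standard ARM NZCV encoding for floating-point comparisons, the same mapping the existing double overload already used. A compact standalone restatement of the four cases:

    #include <cmath>

    struct NZCV { bool n, z, c, v; };

    NZCV FloatCompareFlags(float a, float b) {
      if (std::isnan(a) || std::isnan(b)) return {false, false, true, true};  // unordered
      if (a == b) return {false, true, true, false};                          // equal
      if (a < b) return {true, false, false, false};                          // less than
      return {false, false, true, false};                                     // greater than
    }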
void Simulator::Compute_FPSCR_Flags(double val1, double val2) {
if (std::isnan(val1) || std::isnan(val2)) {
n_flag_FPSCR_ = false;
@ -1914,6 +1940,17 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
float Simulator::canonicalizeNaN(float value) {
// Default NaN value, see "NaN handling" in "IEEE 754 standard implementation
// choices" of the ARM Reference Manual.
const uint32_t kDefaultNaN = 0x7FC00000u;
if (FPSCR_default_NaN_mode_ && std::isnan(value)) {
value = bit_cast<float>(kDefaultNaN);
}
return value;
}
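0x7FC00000 is the single-precision default quiet NaN (sign 0, all-ones exponent, top mantissa bit set), the 32-bit sibling of the constant the double overload below uses. A standalone illustration of the canonicalization, with memcpy standing in for V8's bit_cast:

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    float CanonicalizeNaN(float value, bool default_nan_mode) {
      const std::uint32_t kDefaultNaN = 0x7FC00000u;
      if (default_nan_mode && std::isnan(value)) {
        std::memcpy(&value, &kDefaultNaN, sizeof(value));  // install default NaN bits
      }
      return value;
    }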
double Simulator::canonicalizeNaN(double value) {
// Default NaN value, see "NaN handling" in "IEEE 754 standard implementation
// choices" of the ARM Reference Manual.
@ -3009,18 +3046,30 @@ void Simulator::DecodeType7(Instruction* instr) {
// vcvt: Sd = Dm
// vcvt.f64.s32 Dd, Dd, #<fbits>
// Dd = vabs(Dm)
// Sd = vabs(Sm)
// Dd = vneg(Dm)
// Sd = vneg(Sm)
// Dd = vadd(Dn, Dm)
// Sd = vadd(Sn, Sm)
// Dd = vsub(Dn, Dm)
// Sd = vsub(Sn, Sm)
// Dd = vmul(Dn, Dm)
// Sd = vmul(Sn, Sm)
// Dd = vdiv(Dn, Dm)
// Sd = vdiv(Sn, Sm)
// vcmp(Dd, Dm)
// vmrs
// vcmp(Sd, Sm)
// Dd = vsqrt(Dm)
// Sd = vsqrt(Sm)
// vmrs
void Simulator::DecodeTypeVFP(Instruction* instr) {
DCHECK((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
DCHECK(instr->Bits(11, 9) == 0x5);
// Obtain single precision register codes.
int m = instr->VFPMRegValue(kSinglePrecision);
int d = instr->VFPDRegValue(kSinglePrecision);
int n = instr->VFPNRegValue(kSinglePrecision);
// Obtain double precision register codes.
int vm = instr->VFPMRegValue(kDoublePrecision);
int vd = instr->VFPDRegValue(kDoublePrecision);
@ -3032,28 +3081,38 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) {
// vmov register to register.
if (instr->SzValue() == 0x1) {
int m = instr->VFPMRegValue(kDoublePrecision);
int d = instr->VFPDRegValue(kDoublePrecision);
uint32_t data[2];
get_d_register(m, data);
set_d_register(d, data);
get_d_register(vm, data);
set_d_register(vd, data);
} else {
int m = instr->VFPMRegValue(kSinglePrecision);
int d = instr->VFPDRegValue(kSinglePrecision);
set_s_register_from_float(d, get_float_from_s_register(m));
set_s_register(d, get_s_register(m));
}
} else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
// vabs
double dm_value = get_double_from_d_register(vm);
double dd_value = std::fabs(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
if (instr->SzValue() == 0x1) {
double dm_value = get_double_from_d_register(vm);
double dd_value = std::fabs(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sm_value = get_float_from_s_register(m);
float sd_value = std::fabs(sm_value);
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
}
} else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
// vneg
double dm_value = get_double_from_d_register(vm);
double dd_value = -dm_value;
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
if (instr->SzValue() == 0x1) {
double dm_value = get_double_from_d_register(vm);
double dd_value = -dm_value;
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sm_value = get_float_from_s_register(m);
float sd_value = -sm_value;
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
}
} else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
@ -3073,10 +3132,17 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
DecodeVCMP(instr);
} else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
// vsqrt
double dm_value = get_double_from_d_register(vm);
double dd_value = fast_sqrt(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
if (instr->SzValue() == 0x1) {
double dm_value = get_double_from_d_register(vm);
double dd_value = fast_sqrt(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sm_value = get_float_from_s_register(m);
float sd_value = fast_sqrt(sm_value);
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
}
} else if (instr->Opc3Value() == 0x0) {
// vmov immediate.
if (instr->SzValue() == 0x1) {
@ -3094,72 +3160,103 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
UNREACHABLE(); // Not used by V8.
}
} else if (instr->Opc1Value() == 0x3) {
if (instr->SzValue() != 0x1) {
UNREACHABLE(); // Not used by V8.
}
if (instr->Opc3Value() & 0x1) {
// vsub
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value - dm_value;
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
if (instr->SzValue() == 0x1) {
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value - dm_value;
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sn_value = get_float_from_s_register(n);
float sm_value = get_float_from_s_register(m);
float sd_value = sn_value - sm_value;
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
}
} else {
// vadd
if (instr->SzValue() == 0x1) {
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value + dm_value;
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sn_value = get_float_from_s_register(n);
float sm_value = get_float_from_s_register(m);
float sd_value = sn_value + sm_value;
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
}
}
} else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
// vmul
if (instr->SzValue() == 0x1) {
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value + dm_value;
double dd_value = dn_value * dm_value;
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sn_value = get_float_from_s_register(n);
float sm_value = get_float_from_s_register(m);
float sd_value = sn_value * sm_value;
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
}
} else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
// vmul
if (instr->SzValue() != 0x1) {
UNREACHABLE(); // Not used by V8.
}
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value * dm_value;
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else if ((instr->Opc1Value() == 0x0)) {
// vmla, vmls
const bool is_vmls = (instr->Opc3Value() & 0x1);
if (instr->SzValue() != 0x1) {
UNREACHABLE(); // Not used by V8.
}
const double dd_val = get_double_from_d_register(vd);
const double dn_val = get_double_from_d_register(vn);
const double dm_val = get_double_from_d_register(vm);
// Note: we do the mul and add/sub in separate steps to avoid getting a
// result with too high precision.
set_d_register_from_double(vd, dn_val * dm_val);
if (is_vmls) {
set_d_register_from_double(
vd,
canonicalizeNaN(dd_val - get_double_from_d_register(vd)));
if (instr->SzValue() == 0x1) {
const double dd_val = get_double_from_d_register(vd);
const double dn_val = get_double_from_d_register(vn);
const double dm_val = get_double_from_d_register(vm);
// Note: we do the mul and add/sub in separate steps to avoid getting a
// result with too high precision.
set_d_register_from_double(vd, dn_val * dm_val);
if (is_vmls) {
set_d_register_from_double(
vd, canonicalizeNaN(dd_val - get_double_from_d_register(vd)));
} else {
set_d_register_from_double(
vd, canonicalizeNaN(dd_val + get_double_from_d_register(vd)));
}
} else {
set_d_register_from_double(
vd,
canonicalizeNaN(dd_val + get_double_from_d_register(vd)));
const float sd_val = get_float_from_s_register(d);
const float sn_val = get_float_from_s_register(n);
const float sm_val = get_float_from_s_register(m);
// Note: we do the mul and add/sub in separate steps to avoid getting a
// result with too high precision.
set_s_register_from_float(d, sn_val * sm_val);
if (is_vmls) {
set_s_register_from_float(
d, canonicalizeNaN(sd_val - get_float_from_s_register(d)));
} else {
set_s_register_from_float(
d, canonicalizeNaN(sd_val + get_float_from_s_register(d)));
}
}
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
// vdiv
if (instr->SzValue() != 0x1) {
UNREACHABLE(); // Not used by V8.
if (instr->SzValue() == 0x1) {
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value / dm_value;
div_zero_vfp_flag_ = (dm_value == 0);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sn_value = get_float_from_s_register(n);
float sm_value = get_float_from_s_register(m);
float sd_value = sn_value / sm_value;
div_zero_vfp_flag_ = (sm_value == 0);
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
}
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value / dm_value;
div_zero_vfp_flag_ = (dm_value == 0);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
UNIMPLEMENTED(); // Not used by V8.
}
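The "separate steps" note in the vmla/vmls arm above is worth unpacking: VFP VMLA/VMLS round after the multiply and again after the accumulate, so the simulator must keep the two operations apart or a C++ compiler could contract dn_val * dm_val + dd_val into a single fused multiply-add. A minimal sketch of the difference, assuming only <cmath> (not part of this diff):

#include <cmath>

double vmla_two_roundings(double acc, double n, double m) {
  double product = n * m;  // rounded to double precision here...
  return acc + product;    // ...and rounded again after the accumulate
}

double vmla_fused(double acc, double n, double m) {
  return std::fma(n, m, acc);  // single rounding; may differ in the last bit
}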
@ -3264,7 +3361,7 @@ void Simulator::DecodeVCMP(Instruction* instr) {
// Comparison.
VFPRegPrecision precision = kSinglePrecision;
if (instr->SzValue() == 1) {
if (instr->SzValue() == 0x1) {
precision = kDoublePrecision;
}
@ -3290,7 +3387,20 @@ void Simulator::DecodeVCMP(Instruction* instr) {
Compute_FPSCR_Flags(dd_value, dm_value);
} else {
UNIMPLEMENTED(); // Not used by V8.
float sd_value = get_float_from_s_register(d);
float sm_value = 0.0;
if (instr->Opc2Value() == 0x4) {
sm_value = get_float_from_s_register(m);
}
// Raise exceptions for quiet NaNs if necessary.
if (instr->Bit(7) == 1) {
if (std::isnan(sd_value)) {
inv_op_vfp_flag_ = true;
}
}
Compute_FPSCR_Flags(sd_value, sm_value);
}
}
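For reference, the float path added to DecodeVCMP above has to reproduce the FPSCR condition flags the hardware sets for VCMP, including the unordered case that the quiet-NaN check feeds. A hedged sketch of those semantics (per the ARM ARM; not simulator code):

#include <cmath>

struct VfpFlags { bool n, z, c, v; };

VfpFlags ComputeVcmpFlags(float lhs, float rhs) {
  if (std::isnan(lhs) || std::isnan(rhs))
    return {false, false, true, true};                 // unordered: C and V
  if (lhs == rhs) return {false, true, true, false};   // equal: Z and C
  if (lhs < rhs)  return {true, false, false, false};  // less: N only
  return {false, false, true, false};                  // greater: C only
}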

2
deps/v8/src/arm/simulator-arm.h

@ -265,8 +265,10 @@ class Simulator {
}
// Support for VFP.
void Compute_FPSCR_Flags(float val1, float val2);
void Compute_FPSCR_Flags(double val1, double val2);
void Copy_FPSCR_to_APSR();
inline float canonicalizeNaN(float value);
inline double canonicalizeNaN(double value);
// Helper functions to decode common "addressing" modes
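The new float overload of canonicalizeNaN declared above presumably mirrors the existing double version: when the simulated FPSCR is in default-NaN mode, any NaN result collapses to the default quiet NaN. A standalone sketch under that assumption:

#include <cmath>
#include <limits>

inline float CanonicalizeNaN(float value) {
  // Collapse every NaN to the default quiet NaN (default-NaN mode assumed).
  return std::isnan(value) ? std::numeric_limits<float>::quiet_NaN() : value;
}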

122
deps/v8/src/arm64/builtins-arm64.cc

@ -799,6 +799,49 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
}
enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
// Clobbers x10, x15; preserves all other registers.
static void Generate_CheckStackOverflow(MacroAssembler* masm,
const int calleeOffset, Register argc,
IsTagged argc_is_tagged) {
Register function = x15;
// Check the stack for overflow.
// We are not trying to catch interruptions (e.g. debug break and
// preemption) here, so the "real stack limit" is checked.
Label enough_stack_space;
__ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
__ Ldr(function, MemOperand(fp, calleeOffset));
// Make x10 the space we have left. The stack might already be overflowed
// here which will cause x10 to become negative.
// TODO(jbramley): Check that the stack usage here is safe.
__ Sub(x10, jssp, x10);
// Check if the arguments will overflow the stack.
if (argc_is_tagged == kArgcIsSmiTagged) {
__ Cmp(x10, Operand::UntagSmiAndScale(argc, kPointerSizeLog2));
} else {
DCHECK(argc_is_tagged == kArgcIsUntaggedInt);
__ Cmp(x10, Operand(argc, LSL, kPointerSizeLog2));
}
__ B(gt, &enough_stack_space);
// There is not enough stack space, so use a builtin to throw an appropriate
// error.
if (argc_is_tagged == kArgcIsUntaggedInt) {
__ SmiTag(argc);
}
__ Push(function, argc);
__ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
// We should never return from the STACK_OVERFLOW builtin.
if (__ emit_debug_code()) {
__ Unreachable();
}
__ Bind(&enough_stack_space);
}
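The arithmetic in Generate_CheckStackOverflow is compact: x10 ends up holding the headroom between jssp and the real stack limit, and the branch succeeds only if that headroom exceeds one pointer-sized slot per argument (argc is untag-and-scaled when it arrives smi-tagged, or shifted directly when untagged). The same check in plain C++, signedness included (illustrative, not V8 API):

#include <cstdint>

bool EnoughStackSpace(uintptr_t jssp, uintptr_t real_stack_limit,
                      intptr_t argc) {
  // Signed on purpose: if the stack is already past the limit, the
  // subtraction wraps to a negative value and the check must fail.
  intptr_t headroom = static_cast<intptr_t>(jssp - real_stack_limit);
  return headroom > (argc << 3);  // kPointerSizeLog2 == 3 on arm64
}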
// Input:
// x0: code entry.
// x1: function.
@ -832,6 +875,15 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Push the function and the receiver onto the stack.
__ Push(function, receiver);
// Check if we have enough stack space to push all arguments.
// The function is the first thing that was pushed above after entering
// the internal frame.
const int kFunctionOffset =
InternalFrameConstants::kCodeOffset - kPointerSize;
// Expects the argument count in argc; clobbers x10 and x15.
Generate_CheckStackOverflow(masm, kFunctionOffset, argc,
kArgcIsUntaggedInt);
// Copy arguments to the stack in a loop, in reverse order.
// x3: argc.
// x4: argv.
@ -1006,6 +1058,11 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
}
void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
SaveFPRegsMode save_doubles) {
{
@ -1324,71 +1381,42 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
static void Generate_CheckStackOverflow(MacroAssembler* masm,
const int calleeOffset) {
Register argc = x0;
Register function = x15;
// Check the stack for overflow.
// We are not trying to catch interruptions (e.g. debug break and
// preemption) here, so the "real stack limit" is checked.
Label enough_stack_space;
__ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
__ Ldr(function, MemOperand(fp, calleeOffset));
// Make x10 the space we have left. The stack might already be overflowed
// here which will cause x10 to become negative.
// TODO(jbramley): Check that the stack usage here is safe.
__ Sub(x10, jssp, x10);
// Check if the arguments will overflow the stack.
__ Cmp(x10, Operand::UntagSmiAndScale(argc, kPointerSizeLog2));
__ B(gt, &enough_stack_space);
// There is not enough stack space, so use a builtin to throw an appropriate
// error.
__ Push(function, argc);
__ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
// We should never return from the STACK_OVERFLOW builtin.
if (__ emit_debug_code()) {
__ Unreachable();
}
__ Bind(&enough_stack_space);
}
static void Generate_PushAppliedArguments(MacroAssembler* masm,
const int argumentsOffset,
const int indexOffset,
const int limitOffset) {
Label entry, loop;
Register current = x0;
__ Ldr(current, MemOperand(fp, indexOffset));
Register receiver = LoadDescriptor::ReceiverRegister();
Register key = LoadDescriptor::NameRegister();
__ Ldr(key, MemOperand(fp, indexOffset));
__ B(&entry);
// Load the current argument from the arguments array.
__ Bind(&loop);
// Load the current argument from the arguments array and push it.
// TODO(all): Couldn't we optimize this for JS arrays?
__ Ldr(receiver, MemOperand(fp, argumentsOffset));
__ Ldr(x1, MemOperand(fp, argumentsOffset));
__ Push(x1, current);
// Use inline caching to speed up access to arguments.
Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Megamorphic();
__ Call(ic, RelocInfo::CODE_TARGET);
// Call the runtime to access the property in the arguments array.
__ CallRuntime(Runtime::kGetProperty, 2);
// Push the nth argument.
__ Push(x0);
// Use inline caching to access the arguments.
__ Ldr(current, MemOperand(fp, indexOffset));
__ Add(current, current, Smi::FromInt(1));
__ Str(current, MemOperand(fp, indexOffset));
__ Ldr(key, MemOperand(fp, indexOffset));
__ Add(key, key, Smi::FromInt(1));
__ Str(key, MemOperand(fp, indexOffset));
// Test if the copy loop has finished copying all the elements from the
// arguments object.
__ Bind(&entry);
__ Ldr(x1, MemOperand(fp, limitOffset));
__ Cmp(current, x1);
__ Cmp(key, x1);
__ B(ne, &loop);
// On exit, the pushed arguments count is in x0, untagged
__ SmiUntag(current);
__ Mov(x0, key);
__ SmiUntag(x0);
}
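The rewritten loop above swaps a Runtime::kGetProperty call per argument for the megamorphic keyed-load IC, with the index round-tripping through the frame as a smi. Its shape, modeled on a plain container (stand-in types, not the stub's calling convention):

#include <vector>

std::vector<int> PushAppliedArguments(const std::vector<int>& args,
                                      int index, int limit) {
  std::vector<int> stack;
  while (index != limit) {
    stack.push_back(args[index]);  // KeyedLoadIC_Megamorphic in the stub
    ++index;                       // stored back to the frame smi-tagged
  }
  return stack;  // the stub leaves the count, untagged, in x0
}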
@ -1422,7 +1450,7 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
}
Register argc = x0;
Generate_CheckStackOverflow(masm, kFunctionOffset);
Generate_CheckStackOverflow(masm, kFunctionOffset, argc, kArgcIsSmiTagged);
// Push current limit and index.
__ Mov(x1, 0); // Initial index.
@ -1549,7 +1577,7 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
__ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION);
Register argc = x0;
Generate_CheckStackOverflow(masm, kFunctionOffset);
Generate_CheckStackOverflow(masm, kFunctionOffset, argc, kArgcIsSmiTagged);
// Push current limit and index, constructor & newTarget
__ Mov(x1, 0); // Initial index.

238
deps/v8/src/arm64/code-stubs-arm64.cc

@ -221,18 +221,22 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// so we do the second best thing - test it ourselves.
// They are both equal and they are not both Smis so both of them are not
// Smis. If it's not a heap number, then return equal.
Register right_type = scratch;
if ((cond == lt) || (cond == gt)) {
__ JumpIfObjectType(right, scratch, scratch, FIRST_SPEC_OBJECT_TYPE, slow,
ge);
__ JumpIfObjectType(right, right_type, right_type, FIRST_SPEC_OBJECT_TYPE,
slow, ge);
__ Cmp(right_type, SYMBOL_TYPE);
__ B(eq, slow);
} else if (cond == eq) {
__ JumpIfHeapNumber(right, &heap_number);
} else {
Register right_type = scratch;
__ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
&heap_number);
// Comparing JS objects with <=, >= is complicated.
__ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
__ B(ge, slow);
__ Cmp(right_type, SYMBOL_TYPE);
__ B(eq, slow);
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@ -979,6 +983,8 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
TypeofStub::GenerateAheadOfTime(isolate);
}
@ -1202,7 +1208,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Ask the runtime for help to determine the handler. This will set x0 to
// contain the current pending exception, don't clobber it.
ExternalReference find_handler(Runtime::kFindExceptionHandler, isolate());
ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
isolate());
DCHECK(csp.Is(masm->StackPointer()));
{
FrameScope scope(masm, StackFrame::MANUAL);
@ -1543,6 +1550,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// We have a cell, so need another level of dereferencing.
__ Ldr(scratch1, MemOperand(scratch1));
__ Str(map, FieldMemOperand(scratch1, Cell::kValueOffset));
__ Mov(x14, map);
// |scratch1| points at the beginning of the cell. Calculate the
// field containing the map.
__ Add(function, scratch1, Operand(Cell::kValueOffset - 1));
__ RecordWriteField(scratch1, Cell::kValueOffset, x14, function,
kLRHasNotBeenSaved, kDontSaveFPRegs,
OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
} else {
__ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
__ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
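The RecordWriteField call added above matters because the Str stores a heap pointer (the map) into the cell: without a write barrier, incremental marking could miss the new edge and the GC might reclaim a live map. Conceptually (stand-in functions, not the V8 barrier API):

void RecordWrite(void** slot, void* value) { (void)slot; (void)value; }

void StoreMapIntoCell(void** cell_value_slot, void* map) {
  *cell_value_slot = map;             // the Str above
  RecordWrite(cell_value_slot, map);  // the new RecordWriteField call
}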
@ -2733,16 +2748,32 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
static void GenerateRecordCallTarget(MacroAssembler* masm,
Register argc,
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
Register argc, Register function,
Register feedback_vector,
Register index) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(argc);
__ Push(argc, function, feedback_vector, index);
DCHECK(feedback_vector.Is(x2) && index.Is(x3));
__ CallStub(stub);
__ Pop(index, feedback_vector, function, argc);
__ SmiUntag(argc);
}
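CallStubInRecordCallTarget smi-tags argc before pushing it because the GC may walk this frame during the stub call: a raw integer on the stack could be misread as a heap pointer, while a smi is skipped. On arm64 the tag is a 32-bit shift (sketch; 32-bit ports shift by one):

#include <cstdint>

constexpr int kSmiShift = 32;  // arm64 keeps smis in the upper word

intptr_t SmiTag(intptr_t value) { return value << kSmiShift; }
intptr_t SmiUntag(intptr_t value) { return value >> kSmiShift; }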
static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
Register function,
Register feedback_vector,
Register index,
Register scratch1,
Register scratch2) {
Register feedback_vector, Register index,
Register scratch1, Register scratch2,
Register scratch3) {
ASM_LOCATION("GenerateRecordCallTarget");
DCHECK(!AreAliased(scratch1, scratch2,
argc, function, feedback_vector, index));
DCHECK(!AreAliased(scratch1, scratch2, scratch3, argc, function,
feedback_vector, index));
// Cache the called function in a feedback vector slot. Cache states are
// uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
// argc : number of arguments to the construct function
@ -2757,22 +2788,39 @@ static void GenerateRecordCallTarget(MacroAssembler* masm,
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state.
__ Add(scratch1, feedback_vector,
Register feedback = scratch1;
Register feedback_map = scratch2;
Register feedback_value = scratch3;
__ Add(feedback, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ Ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
__ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
__ Cmp(scratch1, function);
// We don't know if feedback value is a WeakCell or a Symbol, but it's
// harmless to read at this position in a symbol (see static asserts in
// type-feedback-vector.h).
Label check_allocation_site;
__ Ldr(feedback_value, FieldMemOperand(feedback, WeakCell::kValueOffset));
__ Cmp(function, feedback_value);
__ B(eq, &done);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ B(eq, &done);
__ Ldr(feedback_map, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
__ B(ne, FLAG_pretenuring_call_new ? &miss : &check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(feedback_value, &initialize);
__ B(&megamorphic);
if (!FLAG_pretenuring_call_new) {
__ bind(&check_allocation_site);
// If we came here, we need to see if we are the array function.
// If we didn't have a matching function, and we didn't find the megamorph
// sentinel, then we have in the slot either some other function or an
// AllocationSite. Do a map check on the object in scratch1 register.
__ Ldr(scratch2, FieldMemOperand(scratch1, AllocationSite::kMapOffset));
__ JumpIfNotRoot(scratch2, Heap::kAllocationSiteMapRootIndex, &miss);
// AllocationSite.
__ JumpIfNotRoot(feedback_map, Heap::kAllocationSiteMapRootIndex, &miss);
// Make sure the function is the Array() function
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
@ -2808,39 +2856,17 @@ static void GenerateRecordCallTarget(MacroAssembler* masm,
// The target function is the Array constructor,
// Create an AllocationSite if we don't already have it, store it in the
// slot.
{
FrameScope scope(masm, StackFrame::INTERNAL);
CreateAllocationSiteStub create_stub(masm->isolate());
// Arguments register must be smi-tagged to call out.
__ SmiTag(argc);
__ Push(argc, function, feedback_vector, index);
// CreateAllocationSiteStub expect the feedback vector in x2 and the slot
// index in x3.
DCHECK(feedback_vector.Is(x2) && index.Is(x3));
__ CallStub(&create_stub);
__ Pop(index, feedback_vector, function, argc);
__ SmiUntag(argc);
}
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub, argc, function,
feedback_vector, index);
__ B(&done);
__ Bind(&not_array_function);
}
// An uninitialized cache is patched with the function.
__ Add(scratch1, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
__ Str(function, MemOperand(scratch1, 0));
__ Push(function);
__ RecordWrite(feedback_vector, scratch1, function, kLRHasNotBeenSaved,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Pop(function);
CreateWeakCellStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub, argc, function,
feedback_vector, index);
__ Bind(&done);
}
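Taken together, GenerateRecordCallTarget implements a small state machine per feedback slot, with the monomorphic state now held in a WeakCell so a cached function can be collected (a cleared cell re-initializes rather than going megamorphic; the Array() path additionally caches an AllocationSite, which this sketch leaves out). One way to write the transitions down (illustrative enum, not V8 types):

enum class SlotState { kUninitialized, kMonomorphic, kMegamorphic };

SlotState NextState(SlotState state, bool same_function) {
  switch (state) {
    case SlotState::kUninitialized:
      return SlotState::kMonomorphic;   // patched with a WeakCell(function)
    case SlotState::kMonomorphic:
      return same_function ? SlotState::kMonomorphic   // cache hit
                           : SlotState::kMegamorphic;  // give up on the slot
    case SlotState::kMegamorphic:
      return SlotState::kMegamorphic;   // sticky
  }
  return state;  // unreachable
}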
@ -2979,7 +3005,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
&slow);
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5);
GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11);
__ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
if (FLAG_pretenuring_call_new) {
@ -4494,21 +4520,16 @@ void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
static void HandleArrayCases(MacroAssembler* masm, Register receiver,
Register key, Register vector, Register slot,
Register feedback, Register scratch1,
Register scratch2, Register scratch3,
Register feedback, Register receiver_map,
Register scratch1, Register scratch2,
bool is_polymorphic, Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label load_smi_map, compare_map;
Label start_polymorphic;
Register receiver_map = scratch1;
Register cached_map = scratch2;
Register cached_map = scratch1;
// Receiver might not be a heap object.
__ JumpIfSmi(receiver, &load_smi_map);
__ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Bind(&compare_map);
__ Ldr(cached_map,
FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
__ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
@ -4520,7 +4541,7 @@ static void HandleArrayCases(MacroAssembler* masm, Register receiver,
__ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
__ Jump(feedback);
Register length = scratch3;
Register length = scratch2;
__ Bind(&start_polymorphic);
__ Ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
if (!is_polymorphic) {
@ -4538,9 +4559,9 @@ static void HandleArrayCases(MacroAssembler* masm, Register receiver,
// ^ ^
// | |
// pointer_reg too_far
// aka feedback scratch3
// also need receiver_map (aka scratch1)
// use cached_map (scratch2) to look in the weak map values.
// aka feedback scratch2
// also need receiver_map
// use cached_map (scratch1) to look in the weak map values.
__ Add(too_far, feedback,
Operand::UntagSmiAndScale(length, kPointerSizeLog2));
__ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
@ -4563,43 +4584,24 @@ static void HandleArrayCases(MacroAssembler* masm, Register receiver,
// We exhausted our array of map handler pairs.
__ jmp(miss);
__ Bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&compare_map);
}
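HandleArrayCases scans the (cached map, handler) pairs sketched in the layout comment above; a cleared WeakCell simply never matches the receiver map. As plain C++ over a pair list (stand-in types):

#include <utility>
#include <vector>

using Map = const void*;
using Handler = const void*;

Handler FindHandler(const std::vector<std::pair<Map, Handler>>& feedback,
                    Map receiver_map) {
  for (const auto& entry : feedback) {
    if (entry.first == receiver_map) return entry.second;  // hit: tail-call
  }
  return nullptr;  // exhausted the pairs: fall through to the miss label
}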
static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
Register key, Register vector, Register slot,
Register weak_cell, Register scratch,
Label* miss) {
// feedback initially contains the feedback array
Label compare_smi_map;
Register receiver_map = scratch;
Register cached_map = weak_cell;
// Move the weak map into the weak_cell register.
__ Ldr(cached_map, FieldMemOperand(weak_cell, WeakCell::kValueOffset));
// Receiver might not be a heap object.
__ JumpIfSmi(receiver, &compare_smi_map);
Register receiver_map, Register feedback,
Register vector, Register slot,
Register scratch, Label* compare_map,
Label* load_smi_map, Label* try_array) {
__ JumpIfSmi(receiver, load_smi_map);
__ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ bind(compare_map);
Register cached_map = scratch;
// Move the weak map into the weak_cell register.
__ Ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
__ Cmp(cached_map, receiver_map);
__ B(ne, miss);
__ B(ne, try_array);
Register handler = weak_cell;
__ Add(handler, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
__ Ldr(handler,
FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
__ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
__ Jump(weak_cell);
// In microbenchmarks, it made sense to unroll this code so that the call to
// the handler is duplicated for a HeapObject receiver and a Smi receiver.
// TODO(mvstanton): does this hold on ARM?
__ Bind(&compare_smi_map);
__ JumpIfNotRoot(weak_cell, Heap::kHeapNumberMapRootIndex, miss);
Register handler = feedback;
__ Add(handler, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
__ Ldr(handler,
FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
@ -4614,24 +4616,26 @@ void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register vector = VectorLoadICDescriptor::VectorRegister(); // x3
Register slot = VectorLoadICDescriptor::SlotRegister(); // x0
Register feedback = x4;
Register scratch1 = x5;
Register receiver_map = x5;
Register scratch1 = x6;
__ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
__ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Is it a weak cell?
Label try_array;
Label not_array, smi_key, key_okay, miss;
__ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ JumpIfNotRoot(scratch1, Heap::kWeakCellMapRootIndex, &try_array);
HandleMonomorphicCase(masm, receiver, name, vector, slot, feedback, scratch1,
&miss);
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak cell in feedback. We do know it's safe to look
// at WeakCell::kValueOffset.
Label try_array, load_smi_map, compare_map;
Label not_array, miss;
HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
scratch1, &compare_map, &load_smi_map, &try_array);
// Is it a fixed array?
__ Bind(&try_array);
__ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
HandleArrayCases(masm, receiver, name, vector, slot, feedback, scratch1, x6,
x7, true, &miss);
HandleArrayCases(masm, receiver, name, vector, slot, feedback, receiver_map,
scratch1, x7, true, &miss);
__ Bind(&not_array);
__ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss);
@ -4639,10 +4643,14 @@ void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
false, receiver, name, feedback,
scratch1, x6, x7);
receiver_map, scratch1, x7);
__ Bind(&miss);
LoadIC::GenerateMiss(masm);
__ Bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&compare_map);
}
@ -4662,28 +4670,30 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register vector = VectorLoadICDescriptor::VectorRegister(); // x3
Register slot = VectorLoadICDescriptor::SlotRegister(); // x0
Register feedback = x4;
Register scratch1 = x5;
Register receiver_map = x5;
Register scratch1 = x6;
__ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
__ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Is it a weak cell?
Label try_array;
Label not_array, smi_key, key_okay, miss;
__ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ JumpIfNotRoot(scratch1, Heap::kWeakCellMapRootIndex, &try_array);
HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, scratch1,
&miss);
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak cell in feedback. We do know it's safe to look
// at WeakCell::kValueOffset.
Label try_array, load_smi_map, compare_map;
Label not_array, miss;
HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
scratch1, &compare_map, &load_smi_map, &try_array);
__ Bind(&try_array);
// Is it a fixed array?
__ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
// We have a polymorphic element handler.
Label polymorphic, try_poly_name;
__ Bind(&polymorphic);
HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, x6,
x7, true, &miss);
HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
scratch1, x7, true, &miss);
__ Bind(&not_array);
// Is it generic?
@ -4702,11 +4712,15 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
__ Ldr(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, x6,
x7, false, &miss);
HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
scratch1, x7, false, &miss);
__ Bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
__ Bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&compare_map);
}

12
deps/v8/src/arm64/code-stubs-arm64.h

@ -97,7 +97,7 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
bool SometimesSetsUpAFrame() OVERRIDE { return false; }
bool SometimesSetsUpAFrame() override { return false; }
static Mode GetMode(Code* stub) {
// Find the mode depending on the first two instructions.
@ -275,9 +275,9 @@ class RecordWriteStub: public PlatformCodeStub {
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
inline Major MajorKey() const FINAL { return RecordWrite; }
inline Major MajorKey() const final { return RecordWrite; }
void Generate(MacroAssembler* masm) OVERRIDE;
void Generate(MacroAssembler* masm) override;
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
@ -285,7 +285,7 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
void Activate(Code* code) OVERRIDE {
void Activate(Code* code) override {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
@ -328,7 +328,7 @@ class DirectCEntryStub: public PlatformCodeStub {
void GenerateCall(MacroAssembler* masm, Register target);
private:
bool NeedsImmovableCode() OVERRIDE { return true; }
bool NeedsImmovableCode() override { return true; }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
@ -360,7 +360,7 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
Register scratch1,
Register scratch2);
bool SometimesSetsUpAFrame() OVERRIDE { return false; }
bool SometimesSetsUpAFrame() override { return false; }
private:
static const int kInlinedProbes = 4;

182
deps/v8/src/arm64/full-codegen-arm64.cc

@ -13,7 +13,6 @@
#include "src/debug.h"
#include "src/full-codegen.h"
#include "src/ic/ic.h"
#include "src/isolate-inl.h"
#include "src/parser.h"
#include "src/scopes.h"
@ -125,7 +124,8 @@ void FullCodeGenerator::Generate() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
if (is_sloppy(info->language_mode()) && !info->is_native()) {
if (is_sloppy(info->language_mode()) && !info->is_native() &&
info->MayUseThis()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kXRegSize;
__ Peek(x10, receiver_offset);
@ -959,38 +959,6 @@ void FullCodeGenerator::VisitFunctionDeclaration(
}
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
Variable* variable = declaration->proxy()->var();
ModuleDescriptor* descriptor = declaration->module()->descriptor();
DCHECK(variable->location() == Variable::CONTEXT);
DCHECK(descriptor->IsFrozen());
Comment cmnt(masm_, "[ ModuleDeclaration");
EmitDebugCheckDeclarationContext(variable);
// Load instance object.
__ LoadContext(x1, scope_->ContextChainLength(scope_->ScriptScope()));
__ Ldr(x1, ContextMemOperand(x1, descriptor->Index()));
__ Ldr(x1, ContextMemOperand(x1, Context::EXTENSION_INDEX));
// Assign it.
__ Str(x1, ContextMemOperand(cp, variable->index()));
// We know that we have written a module, which is not a smi.
__ RecordWriteContextSlot(cp,
Context::SlotOffset(variable->index()),
x1,
x3,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
// Traverse info body.
Visit(declaration->module());
}
void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
@ -1272,6 +1240,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// just skip it.
__ Push(x1, x3);
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ Mov(x3, x0);
__ Cbz(x0, loop_statement.continue_label());
@ -1740,19 +1709,17 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
}
__ Peek(x0, 0);
__ Push(x0);
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
// Duplicate receiver on stack.
__ Peek(x0, 0);
__ Push(x0);
VisitForStackValue(key);
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
__ Mov(x0, Smi::FromInt(SLOPPY)); // Language mode
__ Push(x0);
__ CallRuntime(Runtime::kSetProperty, 4);
} else {
VisitForEffect(key);
VisitForEffect(value);
__ Drop(3);
}
break;
case ObjectLiteral::Property::PROTOTYPE:
@ -2152,7 +2119,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ Bind(&stub_call);
Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
Handle<Code> code = CodeFactory::BinaryOpIC(
isolate(), op, language_mode()).code();
{
Assembler::BlockPoolsScope scope(masm_);
CallIC(code, expr->BinaryOperationFeedbackId());
@ -2234,7 +2202,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ Pop(x1);
Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
Handle<Code> code = CodeFactory::BinaryOpIC(
isolate(), op, language_mode()).code();
JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code.
{
Assembler::BlockPoolsScope scope(masm_);
@ -2805,6 +2774,21 @@ void FullCodeGenerator::EmitLoadSuperConstructor() {
}
void FullCodeGenerator::EmitInitializeThisAfterSuper(
SuperReference* super_ref) {
Variable* this_var = super_ref->this_var()->var();
GetVar(x1, this_var);
Label uninitialized_this;
__ JumpIfRoot(x1, Heap::kTheHoleValueRootIndex, &uninitialized_this);
__ Mov(x0, Operand(this_var->name()));
__ Push(x0);
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&uninitialized_this);
EmitVariableAssignment(this_var, Token::INIT_CONST);
}
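The hole check in EmitInitializeThisAfterSuper encodes the ES6 rule for derived constructors: `this` starts out uninitialized (the hole) and is bound exactly once by super(), so finding a non-hole value means super() already ran and a ReferenceError is due. The semantics in miniature (not the generated code):

#include <stdexcept>

enum class ThisSlot { kTheHole, kBound };

void InitializeThisAfterSuper(ThisSlot& slot) {
  if (slot != ThisSlot::kTheHole)
    throw std::runtime_error("ReferenceError");  // Runtime::kThrowReferenceError
  slot = ThisSlot::kBound;  // EmitVariableAssignment(..., Token::INIT_CONST)
}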
void FullCodeGenerator::VisitCall(Call* expr) {
#ifdef DEBUG
// We want to verify that RecordJSReturnSite gets called on all paths
@ -3029,17 +3013,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
RecordJSReturnSite(expr);
SuperReference* super_ref = expr->expression()->AsSuperReference();
Variable* this_var = super_ref->this_var()->var();
GetVar(x1, this_var);
Label uninitialized_this;
__ JumpIfRoot(x1, Heap::kTheHoleValueRootIndex, &uninitialized_this);
__ Mov(x0, Operand(this_var->name()));
__ Push(x0);
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&uninitialized_this);
EmitVariableAssignment(this_var, Token::INIT_CONST);
EmitInitializeThisAfterSuper(expr->expression()->AsSuperReference());
context()->Plug(x0);
}
@ -4294,28 +4268,81 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
void FullCodeGenerator::EmitCallSuperWithSpread(CallRuntime* expr) {
// Assert: expr === CallRuntime("ReflectConstruct")
CallRuntime* call = expr->arguments()->at(0)->AsCallRuntime();
ZoneList<Expression*>* args = call->arguments();
DCHECK_EQ(3, args->length());
SuperReference* super_reference = args->at(0)->AsSuperReference();
// Load ReflectConstruct function
EmitLoadJSRuntimeFunction(call);
// Push the target function under the receiver.
__ Pop(x10);
__ Push(x0, x10);
// Push super
EmitLoadSuperConstructor();
__ Push(result_register());
// Push arguments array
VisitForStackValue(args->at(1));
// Push NewTarget
DCHECK(args->at(2)->IsVariableProxy());
VisitForStackValue(args->at(2));
EmitCallJSRuntimeFunction(call);
// Restore context register.
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, x0);
EmitInitializeThisAfterSuper(super_reference);
}
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push the builtins object as the receiver.
__ Ldr(x10, GlobalObjectMemOperand());
__ Ldr(LoadDescriptor::ReceiverRegister(),
FieldMemOperand(x10, GlobalObject::kBuiltinsOffset));
__ Push(LoadDescriptor::ReceiverRegister());
// Load the function from the receiver.
Handle<String> name = expr->name();
__ Mov(LoadDescriptor::NameRegister(), Operand(name));
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
SmiFromSlot(expr->CallRuntimeFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL);
} else {
CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
}
}
void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
// Record source position of the IC call.
SetSourcePosition(expr->position());
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ Peek(x1, (arg_count + 1) * kPointerSize);
__ CallStub(&stub);
}
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
if (expr->is_jsruntime()) {
Comment cmnt(masm_, "[ CallRunTime");
// Push the builtins object as the receiver.
__ Ldr(x10, GlobalObjectMemOperand());
__ Ldr(LoadDescriptor::ReceiverRegister(),
FieldMemOperand(x10, GlobalObject::kBuiltinsOffset));
__ Push(LoadDescriptor::ReceiverRegister());
// Load the function from the receiver.
Handle<String> name = expr->name();
__ Mov(LoadDescriptor::NameRegister(), Operand(name));
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
SmiFromSlot(expr->CallRuntimeFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL);
} else {
CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
}
EmitLoadJSRuntimeFunction(expr);
// Push the target function under the receiver.
__ Pop(x10);
@ -4325,11 +4352,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
VisitForStackValue(args->at(i));
}
// Record source position of the IC call.
SetSourcePosition(expr->position());
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ Peek(x1, (arg_count + 1) * kPointerSize);
__ CallStub(&stub);
EmitCallJSRuntimeFunction(expr);
// Restore context register.
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@ -4459,10 +4482,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::TYPEOF: {
Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
{
StackValueContext context(this);
AccumulatorValueContext context(this);
VisitForTypeofValue(expr->expression());
}
__ CallRuntime(Runtime::kTypeof, 1);
__ Mov(x3, x0);
TypeofStub typeof_stub(isolate());
__ CallStub(&typeof_stub);
context()->Plug(x0);
break;
}
@ -4629,7 +4654,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{
Assembler::BlockPoolsScope scope(masm_);
Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
Handle<Code> code = CodeFactory::BinaryOpIC(
isolate(), Token::ADD, language_mode()).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
}

11
deps/v8/src/arm64/interface-descriptors-arm64.cc

@ -60,6 +60,11 @@ const Register MathPowTaggedDescriptor::exponent() { return x11; }
const Register MathPowIntegerDescriptor::exponent() { return x12; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
const Register GrowArrayElementsDescriptor::CapacityRegister() { return x2; }
void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
// x2: function info
@ -92,6 +97,12 @@ void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
}
void TypeofDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, x3};
data->Initialize(arraysize(registers), registers, NULL);
}
void FastCloneShallowArrayDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
// cp: context

51
deps/v8/src/arm64/lithium-arm64.cc

@ -1221,6 +1221,15 @@ LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
}
LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
HCheckArrayBufferNotNeutered* instr) {
LOperand* view = UseRegisterAtStart(instr->value());
LCheckArrayBufferNotNeutered* result =
new (zone()) LCheckArrayBufferNotNeutered(view);
return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
@ -1586,17 +1595,10 @@ LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
LOperand* slot = NULL;
LOperand* vector = NULL;
if (FLAG_vector_ics) {
slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
vector =
UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
}
// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
context, receiver_register, name_register, slot, vector);
context, receiver_register, name_register);
}
@ -1719,21 +1721,24 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
instr->RequiresHoleCheck())
? TempRegister()
: NULL;
LLoadKeyedFixedDouble* result =
new(zone()) LLoadKeyedFixedDouble(elements, key, temp);
return instr->RequiresHoleCheck()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
LInstruction* result = DefineAsRegister(
new (zone()) LLoadKeyedFixedDouble(elements, key, temp));
if (instr->RequiresHoleCheck()) {
result = AssignEnvironment(result);
}
return result;
} else {
DCHECK(instr->representation().IsSmiOrTagged() ||
instr->representation().IsInteger32());
LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
LLoadKeyedFixed* result =
new(zone()) LLoadKeyedFixed(elements, key, temp);
return instr->RequiresHoleCheck()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
LInstruction* result =
DefineAsRegister(new (zone()) LLoadKeyedFixed(elements, key, temp));
if (instr->RequiresHoleCheck() ||
(instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED &&
info()->IsStub())) {
result = AssignEnvironment(result);
}
return result;
}
} else {
DCHECK((instr->representation().IsInteger32() &&
@ -2564,12 +2569,8 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LOperand* context = UseFixed(instr->context(), cp);
// TODO(jbramley): In ARM, this uses UseFixed to force the input to x0.
// However, LCodeGen::DoTypeof just pushes it to the stack (for CallRuntime)
// anyway, so the input doesn't have to be in x0. We might be able to improve
// the ARM back-end a little by relaxing this restriction.
LTypeof* result =
new(zone()) LTypeof(context, UseRegisterAtStart(instr->value()));
LOperand* value = UseFixed(instr->value(), x3);
LTypeof* result = new (zone()) LTypeof(context, value);
return MarkAsCall(DefineFixed(result, x0), instr);
}

466
deps/v8/src/arm64/lithium-arm64.h

File diff suppressed because it is too large

121
deps/v8/src/arm64/lithium-codegen-arm64.cc

@ -18,7 +18,7 @@ namespace v8 {
namespace internal {
class SafepointGenerator FINAL : public CallWrapper {
class SafepointGenerator final : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@ -460,7 +460,15 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->constructor()).is(x1));
__ Mov(x0, Operand(instr->arity()));
__ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
if (instr->arity() == 1) {
// We only need the allocation site for the case where we have a length
// argument. That case may bail out to the runtime, which will determine
// the correct elements kind from the site.
__ Mov(x2, instr->hydrogen()->site());
} else {
__ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
}
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
@ -660,7 +668,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
if (graph()->this_has_uses() && is_sloppy(info_->language_mode()) &&
if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
@ -703,6 +711,7 @@ bool LCodeGen::GeneratePrologue() {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is in x1.
DCHECK(!info()->scope()->is_script_scope());
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
@ -1552,13 +1561,9 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
} else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
if (instr->size()->IsConstantOperand()) {
@ -1613,13 +1618,9 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
}
int flags = AllocateDoubleAlignFlag::encode(
instr->hydrogen()->MustAllocateDoubleAligned());
if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
} else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
flags = AllocateTargetSpace::update(flags, OLD_SPACE);
} else {
flags = AllocateTargetSpace::update(flags, NEW_SPACE);
}
@ -1771,7 +1772,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(x0));
DCHECK(ToRegister(instr->result()).is(x0));
Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
Handle<Code> code = CodeFactory::BinaryOpIC(
isolate(), instr->op(), instr->language_mode()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@ -2031,29 +2033,14 @@ void LCodeGen::DoTailCallThroughMegamorphicCache(
Register extra = x5;
Register extra2 = x6;
Register extra3 = x7;
DCHECK(!FLAG_vector_ics ||
!AreAliased(ToRegister(instr->slot()), ToRegister(instr->vector()),
scratch, extra, extra2, extra3));
// Important for the tail-call.
bool must_teardown_frame = NeedsEagerFrame();
if (!instr->hydrogen()->is_just_miss()) {
DCHECK(!instr->hydrogen()->is_keyed_load());
// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(
masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
receiver, name, scratch, extra, extra2, extra3);
}
// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(
masm(), Code::LOAD_IC, instr->hydrogen()->flags(), false, receiver, name,
scratch, extra, extra2, extra3);
// Tail call to miss if we ended up here.
if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
if (instr->hydrogen()->is_keyed_load()) {
KeyedLoadIC::GenerateMiss(masm());
} else {
LoadIC::GenerateMiss(masm());
}
LoadIC::GenerateMiss(masm());
}
@ -2242,6 +2229,19 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
}
void LCodeGen::DoCheckArrayBufferNotNeutered(
LCheckArrayBufferNotNeutered* instr) {
UseScratchRegisterScope temps(masm());
Register view = ToRegister(instr->view());
Register scratch = temps.AcquireX();
__ Ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
__ Ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
__ Tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds);
}
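DoCheckArrayBufferNotNeutered reduces to a single bit test on the buffer's bit field; a set WasNeutered bit deopts with kOutOfBounds rather than letting a detached buffer be read. The test in isolation (the shift value here is illustrative; the real one is JSArrayBuffer::WasNeutered::kShift):

#include <cstdint>

constexpr uint32_t kWasNeuteredShift = 3;  // illustrative value

bool WasNeutered(uint32_t buffer_bit_field) {
  return (buffer_bit_field & (1u << kWasNeuteredShift)) != 0;  // ne => deopt
}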
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
Register input = ToRegister(instr->value());
Register scratch = ToRegister(instr->temp());
@ -2911,13 +2911,6 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(object.Is(x0));
DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr,
Deoptimizer::kUndefined);
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ Cmp(object, null_value);
DeoptimizeIf(eq, instr, Deoptimizer::kNull);
DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
@ -2925,6 +2918,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject);
Label use_cache, call_runtime;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
__ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
@ -3634,6 +3628,22 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
Deoptimizer::kHole);
}
} else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
Label done;
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
__ B(ne, &done);
if (info()->IsStub()) {
// A stub can safely convert the hole to undefined only if the array
// protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
// it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
__ Cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
DeoptimizeIf(ne, instr, Deoptimizer::kHole);
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ Bind(&done);
}
}
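The CONVERT_HOLE_TO_UNDEFINED branch above is only sound while the array protector cell still holds Isolate::kArrayProtectorValid, i.e. no element was ever installed on the array prototypes; otherwise a hole must deopt, since the prototype chain could now supply a value. Straight-line version of that decision (illustrative types; the assert stands in for the deopt):

#include <cassert>

enum class Value { kTheHole, kUndefined, kSomeElement };

Value LoadHoleyElement(Value element, bool is_stub, bool protector_valid) {
  if (element != Value::kTheHole) return element;
  assert(!is_stub || protector_valid);  // else: deopt with Deoptimizer::kHole
  return Value::kUndefined;
}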
@ -3642,7 +3652,8 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
if (FLAG_vector_ics) {
if (instr->hydrogen()->HasVectorAndSlot()) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
@ -4338,7 +4349,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
Register left =
is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left());
int32_t right = ToInteger32(instr->right());
DCHECK((right > -kMaxInt) || (right < kMaxInt));
DCHECK((right > -kMaxInt) && (right < kMaxInt));
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
bool bailout_on_minus_zero =
@ -5784,9 +5795,17 @@ void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
void LCodeGen::DoTypeof(LTypeof* instr) {
Register input = ToRegister(instr->value());
__ Push(input);
CallRuntime(Runtime::kTypeof, 1, instr);
DCHECK(ToRegister(instr->value()).is(x3));
DCHECK(ToRegister(instr->result()).is(x0));
Label end, do_call;
Register value_register = ToRegister(instr->value());
__ JumpIfNotSmi(value_register, &do_call);
__ Mov(x0, Immediate(isolate()->factory()->number_string()));
__ B(&end);
__ Bind(&do_call);
TypeofStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ Bind(&end);
}
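The new DoTypeof fast path rests on the tagging scheme: any smi is a number, so the stub call can be skipped whenever the tag bit is clear. Reduced to ordinary C++ (the heap-object branch is a stand-in for TypeofStub):

#include <cstdint>

const char* TypeofHeapObject(intptr_t) { return "object"; }  // stand-in

const char* TypeofWithSmiFastPath(intptr_t value) {
  if ((value & 1) == 0) return "number";  // smi tag bit clear: a smi
  return TypeofHeapObject(value);         // heap object: full TypeofStub
}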
@ -5960,7 +5979,7 @@ void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
class DeferredLoadMutableDouble FINAL : public LDeferredCode {
class DeferredLoadMutableDouble final : public LDeferredCode {
public:
DeferredLoadMutableDouble(LCodeGen* codegen,
LLoadFieldByIndex* instr,
@ -5973,10 +5992,10 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
object_(object),
index_(index) {
}
void Generate() OVERRIDE {
void Generate() override {
codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
}
LInstruction* instr() OVERRIDE { return instr_; }
LInstruction* instr() override { return instr_; }
private:
LLoadFieldByIndex* instr_;

6
deps/v8/src/arm64/lithium-codegen-arm64.h

@ -276,7 +276,7 @@ class LCodeGen: public LCodeGenBase {
void RestoreCallerDoubles();
// Code generation steps. Returns true if code generation should continue.
void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
void GenerateBodyInstructionPre(LInstruction* instr) override;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateJumpTable();
@ -324,7 +324,7 @@ class LCodeGen: public LCodeGenBase {
LInstruction* instr);
// Support for recording safepoint and position information.
void RecordAndWritePosition(int position) OVERRIDE;
void RecordAndWritePosition(int position) override;
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
@ -337,7 +337,7 @@ class LCodeGen: public LCodeGenBase {
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
void EnsureSpaceForLazyDeopt(int space_needed) override;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;

1
deps/v8/src/arm64/macro-assembler-arm64.cc

@ -12,7 +12,6 @@
#include "src/codegen.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/isolate-inl.h"
#include "src/runtime/runtime.h"
namespace v8 {

2
deps/v8/src/arm64/macro-assembler-arm64.h

@ -1284,7 +1284,7 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// Allocation support
// Allocate an object in new space or old pointer space. The object_size is
// Allocate an object in new space or old space. The object_size is
// specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
// is passed. The allocated object is returned in result.
//

107
deps/v8/src/arm64/regexp-macro-assembler-arm64.cc

@ -1285,104 +1285,19 @@ static T& frame_entry(Address re_frame, int frame_offset) {
}
int RegExpMacroAssemblerARM64::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame,
int start_offset,
const byte** input_start,
const byte** input_end) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
StackLimitCheck check(isolate);
if (check.JsHasOverflowed()) {
isolate->StackOverflow();
return EXCEPTION;
}
// If it was not a real stack overflow, the stack guard was used to
// interrupt execution for another purpose.
// If this is a direct call from JavaScript, retry the RegExp, forcing the
// call through the runtime system. Currently the direct call cannot handle
// a GC.
if (frame_entry<int>(re_frame, kDirectCall) == 1) {
return RETRY;
}
// Prepare for possible GC.
HandleScope handles(isolate);
Handle<Code> code_handle(re_code);
Handle<String> subject(frame_entry<String*>(re_frame, kInput));
// Current string.
bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
DCHECK(re_code->instruction_start() <= *return_address);
DCHECK(*return_address <=
re_code->instruction_start() + re_code->instruction_size());
Object* result = isolate->stack_guard()->HandleInterrupts();
if (*code_handle != re_code) { // Return address no longer valid
int delta = code_handle->address() - re_code->address();
// Overwrite the return address on the stack.
*return_address += delta;
}
if (result->IsException()) {
return EXCEPTION;
}
Handle<String> subject_tmp = subject;
int slice_offset = 0;
// Extract the underlying string and the slice offset.
if (StringShape(*subject_tmp).IsCons()) {
subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
} else if (StringShape(*subject_tmp).IsSliced()) {
SlicedString* slice = SlicedString::cast(*subject_tmp);
subject_tmp = Handle<String>(slice->parent());
slice_offset = slice->offset();
}
// String might have changed.
if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
// If we changed between a Latin1 and a UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
return RETRY;
}
template <typename T>
static T* frame_entry_address(Address re_frame, int frame_offset) {
return reinterpret_cast<T*>(re_frame + frame_offset);
}
// Otherwise, the content of the string might have moved. It must still
// be a sequential or external string with the same content.
// Update the start and end pointers in the stack frame to the current
// location (whether it has actually moved or not).
DCHECK(StringShape(*subject_tmp).IsSequential() ||
StringShape(*subject_tmp).IsExternal());
// The original start address of the characters to match.
const byte* start_address = *input_start;
// Find the current start address of the same character at the current string
// position.
const byte* new_address = StringCharacterPosition(*subject_tmp,
start_offset + slice_offset);
if (start_address != new_address) {
// If there is a difference, update the object pointer and start and end
// addresses in the RegExp stack frame to match the new value.
const byte* end_address = *input_end;
int byte_length = static_cast<int>(end_address - start_address);
frame_entry<const String*>(re_frame, kInput) = *subject;
*input_start = new_address;
*input_end = new_address + byte_length;
} else if (frame_entry<const String*>(re_frame, kInput) != *subject) {
// Subject string might have been a ConsString that underwent
// short-circuiting during GC. That will not change start_address but
// will change the pointer inside the subject handle.
frame_entry<const String*>(re_frame, kInput) = *subject;
}
return 0;
int RegExpMacroAssemblerARM64::CheckStackGuardState(
Address* return_address, Code* re_code, Address re_frame, int start_index,
const byte** input_start, const byte** input_end) {
return NativeRegExpMacroAssembler::CheckStackGuardState(
frame_entry<Isolate*>(re_frame, kIsolate), start_index,
frame_entry<int>(re_frame, kDirectCall) == 1, return_address, re_code,
frame_entry_address<String*>(re_frame, kInput), input_start, input_end);
}

114
deps/v8/src/array-iterator.js

@ -2,13 +2,35 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
var $iteratorCreateResultObject;
var $arrayValues;
(function(global, shared, exports) {
"use strict";
%CheckIsBootstrapping();
// This file relies on the fact that the following declaration has been made
// in runtime.js:
// var $Array = global.Array;
var GlobalArray = global.Array;
var GlobalObject = global.Object;
macro TYPED_ARRAYS(FUNCTION)
FUNCTION(Uint8Array)
FUNCTION(Int8Array)
FUNCTION(Uint16Array)
FUNCTION(Int16Array)
FUNCTION(Uint32Array)
FUNCTION(Int32Array)
FUNCTION(Float32Array)
FUNCTION(Float64Array)
FUNCTION(Uint8ClampedArray)
endmacro
macro COPY_FROM_GLOBAL(NAME)
var GlobalNAME = global.NAME;
endmacro
TYPED_ARRAYS(COPY_FROM_GLOBAL)
var arrayIteratorObjectSymbol = GLOBAL_PRIVATE("ArrayIterator#object");
var arrayIteratorNextIndexSymbol = GLOBAL_PRIVATE("ArrayIterator#next");
@ -25,7 +47,7 @@ function ArrayIterator() {}
// 15.4.5.1 CreateArrayIterator Abstract Operation
function CreateArrayIterator(array, kind) {
var object = ToObject(array);
var object = $toObject(array);
var iterator = new ArrayIterator;
SET_PRIVATE(iterator, arrayIteratorObjectSymbol, object);
SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, 0);
@ -48,11 +70,11 @@ function ArrayIteratorIterator() {
// 15.4.5.2.2 ArrayIterator.prototype.next( )
function ArrayIteratorNext() {
var iterator = ToObject(this);
var iterator = $toObject(this);
if (!HAS_DEFINED_PRIVATE(iterator, arrayIteratorNextIndexSymbol)) {
throw MakeTypeError('incompatible_method_receiver',
['Array Iterator.prototype.next']);
throw MakeTypeError(kIncompatibleMethodReceiver,
'Array Iterator.prototype.next', this);
}
var array = GET_PRIVATE(iterator, arrayIteratorObjectSymbol);
@ -100,60 +122,38 @@ function ArrayKeys() {
}
function SetUpArrayIterator() {
%CheckIsBootstrapping();
%FunctionSetPrototype(ArrayIterator, new $Object());
%FunctionSetInstanceClassName(ArrayIterator, 'Array Iterator');
InstallFunctions(ArrayIterator.prototype, DONT_ENUM, $Array(
'next', ArrayIteratorNext
));
%FunctionSetName(ArrayIteratorIterator, '[Symbol.iterator]');
%AddNamedProperty(ArrayIterator.prototype, symbolIterator,
ArrayIteratorIterator, DONT_ENUM);
%AddNamedProperty(ArrayIterator.prototype, symbolToStringTag,
"Array Iterator", READ_ONLY | DONT_ENUM);
}
SetUpArrayIterator();
function ExtendArrayPrototype() {
%CheckIsBootstrapping();
%FunctionSetPrototype(ArrayIterator, new GlobalObject());
%FunctionSetInstanceClassName(ArrayIterator, 'Array Iterator');
InstallFunctions($Array.prototype, DONT_ENUM, $Array(
// No 'values' since it breaks webcompat: http://crbug.com/409858
'entries', ArrayEntries,
'keys', ArrayKeys
));
$installFunctions(ArrayIterator.prototype, DONT_ENUM, [
'next', ArrayIteratorNext
]);
$setFunctionName(ArrayIteratorIterator, symbolIterator);
%AddNamedProperty(ArrayIterator.prototype, symbolIterator,
ArrayIteratorIterator, DONT_ENUM);
%AddNamedProperty(ArrayIterator.prototype, symbolToStringTag,
"Array Iterator", READ_ONLY | DONT_ENUM);
%AddNamedProperty($Array.prototype, symbolIterator, ArrayValues, DONT_ENUM);
}
ExtendArrayPrototype();
$installFunctions(GlobalArray.prototype, DONT_ENUM, [
// No 'values' since it breaks webcompat: http://crbug.com/409858
'entries', ArrayEntries,
'keys', ArrayKeys
]);
function ExtendTypedArrayPrototypes() {
%CheckIsBootstrapping();
macro TYPED_ARRAYS(FUNCTION)
FUNCTION(Uint8Array)
FUNCTION(Int8Array)
FUNCTION(Uint16Array)
FUNCTION(Int16Array)
FUNCTION(Uint32Array)
FUNCTION(Int32Array)
FUNCTION(Float32Array)
FUNCTION(Float64Array)
FUNCTION(Uint8ClampedArray)
endmacro
%AddNamedProperty(GlobalArray.prototype, symbolIterator, ArrayValues,
DONT_ENUM);
macro EXTEND_TYPED_ARRAY(NAME)
%AddNamedProperty($NAME.prototype, 'entries', ArrayEntries, DONT_ENUM);
%AddNamedProperty($NAME.prototype, 'values', ArrayValues, DONT_ENUM);
%AddNamedProperty($NAME.prototype, 'keys', ArrayKeys, DONT_ENUM);
%AddNamedProperty($NAME.prototype, symbolIterator, ArrayValues, DONT_ENUM);
%AddNamedProperty(GlobalNAME.prototype, 'entries', ArrayEntries, DONT_ENUM);
%AddNamedProperty(GlobalNAME.prototype, 'values', ArrayValues, DONT_ENUM);
%AddNamedProperty(GlobalNAME.prototype, 'keys', ArrayKeys, DONT_ENUM);
%AddNamedProperty(GlobalNAME.prototype, symbolIterator, ArrayValues,
DONT_ENUM);
endmacro
TYPED_ARRAYS(EXTEND_TYPED_ARRAY)
}
ExtendTypedArrayPrototypes();
TYPED_ARRAYS(EXTEND_TYPED_ARRAY)
$iteratorCreateResultObject = CreateIteratorResultObject;
$arrayValues = ArrayValues;
})
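A quick JS sketch (plain ECMAScript, nothing V8-specific assumed) of the iteration protocol the methods above implement:

var it = [10, 20].entries();
console.log(it.next());  // { value: [0, 10], done: false }
console.log(it.next());  // { value: [1, 20], done: false }
console.log(it.next());  // { value: undefined, done: true }

Calling next() on a receiver that is not an array iterator, e.g. [10].entries().next.call({}), takes the kIncompatibleMethodReceiver path above and throws a TypeError.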

400
deps/v8/src/array.js

@ -2,11 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
var $arrayConcat;
var $arrayJoin;
var $arrayPush;
var $arrayPop;
var $arrayShift;
var $arraySlice;
var $arraySplice;
var $arrayUnshift;
(function(global, shared, exports) {
"use strict";
// This file relies on the fact that the following declarations have been made
// in runtime.js:
// var $Array = global.Array;
%CheckIsBootstrapping();
var GlobalArray = global.Array;
// -------------------------------------------------------------------
@ -185,7 +196,7 @@ function ConvertToString(x) {
// Assumes x is a non-string.
if (IS_NUMBER(x)) return %_NumberToString(x);
if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
return (IS_NULL_OR_UNDEFINED(x)) ? '' : %ToString(%DefaultString(x));
return (IS_NULL_OR_UNDEFINED(x)) ? '' : $toString($defaultString(x));
}
@ -196,8 +207,8 @@ function ConvertToLocaleString(e) {
// According to ES5, section 15.4.4.3, the toLocaleString conversion
// must throw a TypeError if ToObject(e).toLocaleString isn't
// callable.
var e_obj = ToObject(e);
return %ToString(e_obj.toLocaleString());
var e_obj = $toObject(e);
return $toString(e_obj.toLocaleString());
}
}
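The ES5 section 15.4.4.3 requirement cited above is observable from plain JS: a non-callable toLocaleString on an element makes the array-level call throw.

try {
  [{ toLocaleString: 42 }].toLocaleString();
} catch (e) {
  console.log(e instanceof TypeError);  // true: element's toLocaleString isn't callable
}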
@ -357,18 +368,18 @@ function ArrayToString() {
}
array = this;
} else {
array = ToObject(this);
array = $toObject(this);
func = array.join;
}
if (!IS_SPEC_FUNCTION(func)) {
return %_CallFunction(array, DefaultObjectToString);
return %_CallFunction(array, $objectToString);
}
return %_CallFunction(array, func);
}
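A plain JS sketch of the fallback above: when the join lookup is not callable, Array.prototype.toString falls back to the default object toString.

var weird = [1, 2];
weird.join = null;               // make the join lookup non-callable
console.log(weird.toString());   // "[object Array]"
console.log([1, 2].toString());  // "1,2": the normal join path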
function ArrayToLocaleString() {
var array = ToObject(this);
var array = $toObject(this);
var arrayLen = array.length;
var len = TO_UINT32(arrayLen);
if (len === 0) return "";
@ -384,7 +395,7 @@ function ArrayJoin(separator) {
if (IS_UNDEFINED(separator)) {
separator = ',';
} else if (!IS_STRING(separator)) {
separator = NonStringToString(separator);
separator = $nonStringToString(separator);
}
var result = %_FastOneByteArrayJoin(array, separator);
@ -395,7 +406,7 @@ function ArrayJoin(separator) {
var e = array[0];
if (IS_STRING(e)) return e;
if (IS_NULL_OR_UNDEFINED(e)) return '';
return NonStringToString(e);
return $nonStringToString(e);
}
return Join(array, length, separator, ConvertToString);
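Separator handling above, as observable JS: undefined means ',', and any other non-string separator is stringified first.

console.log([1, 2].join());           // "1,2"
console.log([1, 2].join(undefined));  // "1,2"
console.log([1, 2].join(0));          // "102": separator 0 becomes "0"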
@ -407,17 +418,18 @@ function ObservedArrayPop(n) {
var value = this[n];
try {
BeginPerformSplice(this);
$observeBeginPerformSplice(this);
delete this[n];
this.length = n;
} finally {
EndPerformSplice(this);
EnqueueSpliceRecord(this, n, [value], 0);
$observeEndPerformSplice(this);
$observeEnqueueSpliceRecord(this, n, [value], 0);
}
return value;
}
// Removes the last element from the array and returns it. See
// ECMA-262, section 15.4.4.6.
function ArrayPop() {
@ -435,7 +447,7 @@ function ArrayPop() {
n--;
var value = array[n];
Delete(array, ToName(n), true);
$delete(array, $toName(n), true);
array.length = n;
return value;
}
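The pop behavior implemented above, as a plain JS sketch:

var a = [1, 2, 3];
console.log(a.pop());   // 3
console.log(a.length);  // 2: length shrinks by one
console.log([].pop());  // undefined: empty array, length stays 0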
@ -446,20 +458,21 @@ function ObservedArrayPush() {
var m = %_ArgumentsLength();
try {
BeginPerformSplice(this);
$observeBeginPerformSplice(this);
for (var i = 0; i < m; i++) {
this[i+n] = %_Arguments(i);
}
var new_length = n + m;
this.length = new_length;
} finally {
EndPerformSplice(this);
EnqueueSpliceRecord(this, n, [], m);
$observeEndPerformSplice(this);
$observeEnqueueSpliceRecord(this, n, [], m);
}
return new_length;
}
// Appends the arguments to the end of the array and returns the new
// length of the array. See ECMA-262, section 15.4.4.7.
function ArrayPush() {
@ -488,7 +501,7 @@ function ArrayPush() {
function ArrayConcatJS(arg1) { // length == 1
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.concat");
var array = ToObject(this);
var array = $toObject(this);
var arg_count = %_ArgumentsLength();
var arrays = new InternalArray(1 + arg_count);
arrays[0] = array;
@ -584,17 +597,18 @@ function ObservedArrayShift(len) {
var first = this[0];
try {
BeginPerformSplice(this);
$observeBeginPerformSplice(this);
SimpleMove(this, 0, 1, len, 0);
this.length = len - 1;
} finally {
EndPerformSplice(this);
EnqueueSpliceRecord(this, 0, [first], 0);
$observeEndPerformSplice(this);
$observeEnqueueSpliceRecord(this, 0, [first], 0);
}
return first;
}
function ArrayShift() {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.shift");
@ -606,10 +620,7 @@ function ArrayShift() {
return;
}
if (ObjectIsSealed(array)) {
throw MakeTypeError("array_functions_change_sealed",
["Array.prototype.shift"]);
}
if ($objectIsSealed(array)) throw MakeTypeError(kArrayFunctionsOnSealed);
if (%IsObserved(array))
return ObservedArrayShift.call(array, len);
@ -627,12 +638,13 @@ function ArrayShift() {
return first;
}
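The kArrayFunctionsOnSealed guard above, from the JS side; in this implementation the check fires before any element is moved.

var a = Object.seal([1, 2]);
try {
  a.shift();
} catch (e) {
  console.log(e instanceof TypeError);  // true
}
console.log(a.length);  // 2: still unchanged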
function ObservedArrayUnshift() {
var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
try {
BeginPerformSplice(this);
$observeBeginPerformSplice(this);
SimpleMove(this, 0, 0, len, num_arguments);
for (var i = 0; i < num_arguments; i++) {
this[i] = %_Arguments(i);
@ -640,13 +652,14 @@ function ObservedArrayUnshift() {
var new_length = len + num_arguments;
this.length = new_length;
} finally {
EndPerformSplice(this);
EnqueueSpliceRecord(this, 0, [], num_arguments);
$observeEndPerformSplice(this);
$observeEnqueueSpliceRecord(this, 0, [], num_arguments);
}
return new_length;
}
function ArrayUnshift(arg1) { // length == 1
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.unshift");
@ -658,7 +671,7 @@ function ArrayUnshift(arg1) { // length == 1
var num_arguments = %_ArgumentsLength();
if (len > 0 && UseSparseVariant(array, len, IS_ARRAY(array), len) &&
!ObjectIsSealed(array)) {
!$objectIsSealed(array)) {
SparseMove(array, 0, 0, len, num_arguments);
} else {
SimpleMove(array, 0, 0, len, num_arguments);
@ -758,7 +771,7 @@ function ObservedArraySplice(start, delete_count) {
var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
try {
BeginPerformSplice(this);
$observeBeginPerformSplice(this);
SimpleSlice(this, start_i, del_count, len, deleted_elements);
SimpleMove(this, start_i, del_count, len, num_elements_to_add);
@ -774,12 +787,12 @@ function ObservedArraySplice(start, delete_count) {
this.length = len - del_count + num_elements_to_add;
} finally {
EndPerformSplice(this);
$observeEndPerformSplice(this);
if (deleted_elements.length || num_elements_to_add) {
EnqueueSpliceRecord(this,
start_i,
deleted_elements.slice(),
num_elements_to_add);
$observeEnqueueSpliceRecord(this,
start_i,
deleted_elements.slice(),
num_elements_to_add);
}
}
@ -804,12 +817,10 @@ function ArraySplice(start, delete_count) {
deleted_elements.length = del_count;
var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
if (del_count != num_elements_to_add && ObjectIsSealed(array)) {
throw MakeTypeError("array_functions_change_sealed",
["Array.prototype.splice"]);
} else if (del_count > 0 && ObjectIsFrozen(array)) {
throw MakeTypeError("array_functions_on_frozen",
["Array.prototype.splice"]);
if (del_count != num_elements_to_add && $objectIsSealed(array)) {
throw MakeTypeError(kArrayFunctionsOnSealed);
} else if (del_count > 0 && $objectIsFrozen(array)) {
throw MakeTypeError(kArrayFunctionsOnFrozen);
}
var changed_elements = del_count;
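The corresponding splice guards above, observable from JS: a length-changing splice on a sealed array, or any removal from a frozen one, throws.

var f = Object.freeze([1, 2, 3]);
try {
  f.splice(0, 1);                       // del_count > 0 on a frozen array
} catch (e) {
  console.log(e instanceof TypeError);  // true
}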
@ -855,20 +866,18 @@ function ArraySort(comparefn) {
if (%_IsSmi(x) && %_IsSmi(y)) {
return %SmiLexicographicCompare(x, y);
}
x = ToString(x);
y = ToString(y);
x = $toString(x);
y = $toString(y);
if (x == y) return 0;
else return x < y ? -1 : 1;
};
}
var receiver = %GetDefaultReceiver(comparefn);
var InsertionSort = function InsertionSort(a, from, to) {
for (var i = from + 1; i < to; i++) {
var element = a[i];
for (var j = i - 1; j >= from; j--) {
var tmp = a[j];
var order = %_CallFunction(receiver, tmp, element, comparefn);
var order = %_CallFunction(UNDEFINED, tmp, element, comparefn);
if (order > 0) {
a[j + 1] = tmp;
} else {
@ -887,7 +896,7 @@ function ArraySort(comparefn) {
t_array[j] = [i, a[i]];
}
%_CallFunction(t_array, function(a, b) {
return %_CallFunction(receiver, a[1], b[1], comparefn);
return %_CallFunction(UNDEFINED, a[1], b[1], comparefn);
}, ArraySort);
var third_index = t_array[t_array.length >> 1][0];
return third_index;
@ -910,14 +919,14 @@ function ArraySort(comparefn) {
var v0 = a[from];
var v1 = a[to - 1];
var v2 = a[third_index];
var c01 = %_CallFunction(receiver, v0, v1, comparefn);
var c01 = %_CallFunction(UNDEFINED, v0, v1, comparefn);
if (c01 > 0) {
// v1 < v0, so swap them.
var tmp = v0;
v0 = v1;
v1 = tmp;
} // v0 <= v1.
var c02 = %_CallFunction(receiver, v0, v2, comparefn);
var c02 = %_CallFunction(UNDEFINED, v0, v2, comparefn);
if (c02 >= 0) {
// v2 <= v0 <= v1.
var tmp = v0;
@ -926,7 +935,7 @@ function ArraySort(comparefn) {
v1 = tmp;
} else {
// v0 <= v1 && v0 < v2
var c12 = %_CallFunction(receiver, v1, v2, comparefn);
var c12 = %_CallFunction(UNDEFINED, v1, v2, comparefn);
if (c12 > 0) {
// v0 <= v2 < v1
var tmp = v1;
@ -947,7 +956,7 @@ function ArraySort(comparefn) {
// From i to high_start are elements that haven't been compared yet.
partition: for (var i = low_end + 1; i < high_start; i++) {
var element = a[i];
var order = %_CallFunction(receiver, element, pivot, comparefn);
var order = %_CallFunction(UNDEFINED, element, pivot, comparefn);
if (order < 0) {
a[i] = a[low_end];
a[low_end] = element;
@ -957,7 +966,7 @@ function ArraySort(comparefn) {
high_start--;
if (high_start == i) break partition;
var top_elem = a[high_start];
order = %_CallFunction(receiver, top_elem, pivot, comparefn);
order = %_CallFunction(UNDEFINED, top_elem, pivot, comparefn);
} while (order > 0);
a[i] = a[high_start];
a[high_start] = element;
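A quick JS illustration of the default comparator defined at the top of ArraySort: it compares string representations, which is why numbers sort lexicographically when no comparefn is supplied.

console.log([10, 9, 1].sort());  // [1, 10, 9]: "10" < "9" as strings
console.log([10, 9, 1].sort(function (a, b) { return a - b; }));  // [1, 9, 10]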
@ -1139,20 +1148,18 @@ function ArrayFilter(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var length = ToUint32(array.length);
var array = $toObject(this);
var length = $toUint32(array.length);
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
var needs_wrapper = false;
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else {
if (IS_NULL(receiver)) {
if (%IsSloppyModeFunction(f)) receiver = UNDEFINED;
} else if (!IS_UNDEFINED(receiver)) {
needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
}
var result = new $Array();
var result = new GlobalArray();
var accumulator = new InternalArray();
var accumulator_length = 0;
var is_array = IS_ARRAY(array);
@ -1162,7 +1169,7 @@ function ArrayFilter(f, receiver) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
var new_receiver = needs_wrapper ? ToObject(receiver) : receiver;
var new_receiver = needs_wrapper ? $toObject(receiver) : receiver;
if (%_CallFunction(new_receiver, element, i, array, f)) {
accumulator[accumulator_length++] = element;
}
@ -1178,16 +1185,14 @@ function ArrayForEach(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var array = $toObject(this);
var length = TO_UINT32(array.length);
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
var needs_wrapper = false;
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else {
if (IS_NULL(receiver)) {
if (%IsSloppyModeFunction(f)) receiver = UNDEFINED;
} else if (!IS_UNDEFINED(receiver)) {
needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
}
@ -1198,7 +1203,7 @@ function ArrayForEach(f, receiver) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
var new_receiver = needs_wrapper ? ToObject(receiver) : receiver;
var new_receiver = needs_wrapper ? $toObject(receiver) : receiver;
%_CallFunction(new_receiver, element, i, array, f);
}
}
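Why the SHOULD_CREATE_WRAPPER/$toObject handling above is needed, as an observable JS sketch: a sloppy-mode callback sees a primitive thisArg boxed to an object.

[1].forEach(function () {
  console.log(typeof this);  // "object": the primitive 42 was wrapped
}, 42);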
@ -1212,16 +1217,14 @@ function ArraySome(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var array = $toObject(this);
var length = TO_UINT32(array.length);
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
var needs_wrapper = false;
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else {
if (IS_NULL(receiver)) {
if (%IsSloppyModeFunction(f)) receiver = UNDEFINED;
} else if (!IS_UNDEFINED(receiver)) {
needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
}
@ -1232,7 +1235,7 @@ function ArraySome(f, receiver) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
var new_receiver = needs_wrapper ? ToObject(receiver) : receiver;
var new_receiver = needs_wrapper ? $toObject(receiver) : receiver;
if (%_CallFunction(new_receiver, element, i, array, f)) return true;
}
}
@ -1245,16 +1248,14 @@ function ArrayEvery(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var array = $toObject(this);
var length = TO_UINT32(array.length);
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
var needs_wrapper = false;
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else {
if (IS_NULL(receiver)) {
if (%IsSloppyModeFunction(f)) receiver = UNDEFINED;
} else if (!IS_UNDEFINED(receiver)) {
needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
}
@ -1265,32 +1266,31 @@ function ArrayEvery(f, receiver) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
var new_receiver = needs_wrapper ? ToObject(receiver) : receiver;
var new_receiver = needs_wrapper ? $toObject(receiver) : receiver;
if (!%_CallFunction(new_receiver, element, i, array, f)) return false;
}
}
return true;
}
function ArrayMap(f, receiver) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.map");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var array = $toObject(this);
var length = TO_UINT32(array.length);
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
var needs_wrapper = false;
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else {
if (IS_NULL(receiver)) {
if (%IsSloppyModeFunction(f)) receiver = UNDEFINED;
} else if (!IS_UNDEFINED(receiver)) {
needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
}
var result = new $Array();
var result = new GlobalArray();
var accumulator = new InternalArray(length);
var is_array = IS_ARRAY(array);
var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
@ -1299,7 +1299,7 @@ function ArrayMap(f, receiver) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
var new_receiver = needs_wrapper ? ToObject(receiver) : receiver;
var new_receiver = needs_wrapper ? $toObject(receiver) : receiver;
accumulator[i] = %_CallFunction(new_receiver, element, i, array, f);
}
}
@ -1423,11 +1423,11 @@ function ArrayReduce(callback, current) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var length = ToUint32(array.length);
var array = $toObject(this);
var length = $toUint32(array.length);
if (!IS_SPEC_FUNCTION(callback)) {
throw MakeTypeError('called_non_callable', [callback]);
throw MakeTypeError(kCalledNonCallable, callback);
}
var is_array = IS_ARRAY(array);
@ -1439,32 +1439,32 @@ function ArrayReduce(callback, current) {
break find_initial;
}
}
throw MakeTypeError('reduce_no_initial', []);
throw MakeTypeError(kReduceNoInitial);
}
var receiver = %GetDefaultReceiver(callback);
var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(callback);
for (; i < length; i++) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(callback);
current = %_CallFunction(receiver, current, element, i, array, callback);
current = %_CallFunction(UNDEFINED, current, element, i, array, callback);
}
}
return current;
}
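The kReduceNoInitial path above, from the JS side: reducing an empty array with no initial value throws.

try {
  [].reduce(function (a, b) { return a + b; });
} catch (e) {
  console.log(e instanceof TypeError);  // true
}
console.log([1, 2, 3].reduce(function (a, b) { return a + b; }, 0));  // 6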
function ArrayReduceRight(callback, current) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reduceRight");
// Pull out the length so that side effects are visible before the
// callback function is checked.
var array = ToObject(this);
var length = ToUint32(array.length);
var array = $toObject(this);
var length = $toUint32(array.length);
if (!IS_SPEC_FUNCTION(callback)) {
throw MakeTypeError('called_non_callable', [callback]);
throw MakeTypeError(kCalledNonCallable, callback);
}
var is_array = IS_ARRAY(array);
@ -1476,17 +1476,16 @@ function ArrayReduceRight(callback, current) {
break find_initial;
}
}
throw MakeTypeError('reduce_no_initial', []);
throw MakeTypeError(kReduceNoInitial);
}
var receiver = %GetDefaultReceiver(callback);
var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(callback);
for (; i >= 0; i--) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(callback);
current = %_CallFunction(receiver, current, element, i, array, callback);
current = %_CallFunction(UNDEFINED, current, element, i, array, callback);
}
}
return current;
@ -1500,91 +1499,100 @@ function ArrayIsArray(obj) {
// -------------------------------------------------------------------
function SetUpArray() {
%CheckIsBootstrapping();
// Set up non-enumerable constructor property on the Array.prototype
// object.
%AddNamedProperty($Array.prototype, "constructor", $Array, DONT_ENUM);
// Set up unscopable properties on the Array.prototype object.
var unscopables = {
__proto__: null,
copyWithin: true,
entries: true,
fill: true,
find: true,
findIndex: true,
keys: true,
};
%AddNamedProperty($Array.prototype, symbolUnscopables, unscopables,
DONT_ENUM | READ_ONLY);
// Set up non-enumerable functions on the Array object.
InstallFunctions($Array, DONT_ENUM, $Array(
"isArray", ArrayIsArray
));
var specialFunctions = %SpecialArrayFunctions();
var getFunction = function(name, jsBuiltin, len) {
var f = jsBuiltin;
if (specialFunctions.hasOwnProperty(name)) {
f = specialFunctions[name];
}
if (!IS_UNDEFINED(len)) {
%FunctionSetLength(f, len);
}
return f;
};
// Set up non-enumerable functions of the Array.prototype object and
// set their names.
// Manipulate the length of some of the functions to meet
// expectations set by ECMA-262 or Mozilla.
InstallFunctions($Array.prototype, DONT_ENUM, $Array(
"toString", getFunction("toString", ArrayToString),
"toLocaleString", getFunction("toLocaleString", ArrayToLocaleString),
"join", getFunction("join", ArrayJoin),
"pop", getFunction("pop", ArrayPop),
"push", getFunction("push", ArrayPush, 1),
"concat", getFunction("concat", ArrayConcatJS, 1),
"reverse", getFunction("reverse", ArrayReverse),
"shift", getFunction("shift", ArrayShift),
"unshift", getFunction("unshift", ArrayUnshift, 1),
"slice", getFunction("slice", ArraySlice, 2),
"splice", getFunction("splice", ArraySplice, 2),
"sort", getFunction("sort", ArraySort),
"filter", getFunction("filter", ArrayFilter, 1),
"forEach", getFunction("forEach", ArrayForEach, 1),
"some", getFunction("some", ArraySome, 1),
"every", getFunction("every", ArrayEvery, 1),
"map", getFunction("map", ArrayMap, 1),
"indexOf", getFunction("indexOf", ArrayIndexOf, 1),
"lastIndexOf", getFunction("lastIndexOf", ArrayLastIndexOf, 1),
"reduce", getFunction("reduce", ArrayReduce, 1),
"reduceRight", getFunction("reduceRight", ArrayReduceRight, 1)
));
%FinishArrayPrototypeSetup($Array.prototype);
// The internal Array prototype doesn't need to be fancy, since it's never
// exposed to user code.
// Adding only the functions that are actually used.
SetUpLockedPrototype(InternalArray, $Array(), $Array(
"concat", getFunction("concat", ArrayConcatJS),
"indexOf", getFunction("indexOf", ArrayIndexOf),
"join", getFunction("join", ArrayJoin),
"pop", getFunction("pop", ArrayPop),
"push", getFunction("push", ArrayPush),
"splice", getFunction("splice", ArraySplice)
));
SetUpLockedPrototype(InternalPackedArray, $Array(), $Array(
"join", getFunction("join", ArrayJoin),
"pop", getFunction("pop", ArrayPop),
"push", getFunction("push", ArrayPush)
));
}
SetUpArray();
// Set up non-enumerable constructor property on the Array.prototype
// object.
%AddNamedProperty(GlobalArray.prototype, "constructor", GlobalArray,
DONT_ENUM);
// Set up unscopable properties on the Array.prototype object.
var unscopables = {
__proto__: null,
copyWithin: true,
entries: true,
fill: true,
find: true,
findIndex: true,
keys: true,
};
%AddNamedProperty(GlobalArray.prototype, symbolUnscopables, unscopables,
DONT_ENUM | READ_ONLY);
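The unscopables object installed above has a directly observable effect: inside a with block over an array, the listed names keep resolving to the outer scope.

var keys = "outer";
with ([]) {
  console.log(keys);  // "outer": Array.prototype[Symbol.unscopables].keys blocks the lookup
}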
// Set up non-enumerable functions on the Array object.
$installFunctions(GlobalArray, DONT_ENUM, [
"isArray", ArrayIsArray
]);
var specialFunctions = %SpecialArrayFunctions();
var getFunction = function(name, jsBuiltin, len) {
var f = jsBuiltin;
if (specialFunctions.hasOwnProperty(name)) {
f = specialFunctions[name];
}
if (!IS_UNDEFINED(len)) {
%FunctionSetLength(f, len);
}
return f;
};
// Set up non-enumerable functions of the Array.prototype object and
// set their names.
// Manipulate the length of some of the functions to meet
// expectations set by ECMA-262 or Mozilla.
$installFunctions(GlobalArray.prototype, DONT_ENUM, [
"toString", getFunction("toString", ArrayToString),
"toLocaleString", getFunction("toLocaleString", ArrayToLocaleString),
"join", getFunction("join", ArrayJoin),
"pop", getFunction("pop", ArrayPop),
"push", getFunction("push", ArrayPush, 1),
"concat", getFunction("concat", ArrayConcatJS, 1),
"reverse", getFunction("reverse", ArrayReverse),
"shift", getFunction("shift", ArrayShift),
"unshift", getFunction("unshift", ArrayUnshift, 1),
"slice", getFunction("slice", ArraySlice, 2),
"splice", getFunction("splice", ArraySplice, 2),
"sort", getFunction("sort", ArraySort),
"filter", getFunction("filter", ArrayFilter, 1),
"forEach", getFunction("forEach", ArrayForEach, 1),
"some", getFunction("some", ArraySome, 1),
"every", getFunction("every", ArrayEvery, 1),
"map", getFunction("map", ArrayMap, 1),
"indexOf", getFunction("indexOf", ArrayIndexOf, 1),
"lastIndexOf", getFunction("lastIndexOf", ArrayLastIndexOf, 1),
"reduce", getFunction("reduce", ArrayReduce, 1),
"reduceRight", getFunction("reduceRight", ArrayReduceRight, 1)
]);
%FinishArrayPrototypeSetup(GlobalArray.prototype);
// The internal Array prototype doesn't need to be fancy, since it's never
// exposed to user code.
// Adding only the functions that are actually used.
$setUpLockedPrototype(InternalArray, GlobalArray(), [
"concat", getFunction("concat", ArrayConcatJS),
"indexOf", getFunction("indexOf", ArrayIndexOf),
"join", getFunction("join", ArrayJoin),
"pop", getFunction("pop", ArrayPop),
"push", getFunction("push", ArrayPush),
"shift", getFunction("shift", ArrayShift),
"splice", getFunction("splice", ArraySplice)
]);
$setUpLockedPrototype(InternalPackedArray, GlobalArray(), [
"join", getFunction("join", ArrayJoin),
"pop", getFunction("pop", ArrayPop),
"push", getFunction("push", ArrayPush),
"shift", getFunction("shift", ArrayShift)
]);
$arrayConcat = ArrayConcatJS;
$arrayJoin = ArrayJoin;
$arrayPush = ArrayPush;
$arrayPop = ArrayPop;
$arrayShift = ArrayShift;
$arraySlice = ArraySlice;
$arraySplice = ArraySplice;
$arrayUnshift = ArrayUnshift;
})

56
deps/v8/src/arraybuffer.js

@ -2,25 +2,30 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
(function(global, shared, exports) {
"use strict";
var $ArrayBuffer = global.ArrayBuffer;
%CheckIsBootstrapping();
var GlobalArrayBuffer = global.ArrayBuffer;
var GlobalObject = global.Object;
// -------------------------------------------------------------------
function ArrayBufferConstructor(length) { // length = 1
if (%_IsConstructCall()) {
var byteLength = ToPositiveInteger(length, 'invalid_array_buffer_length');
var byteLength = $toPositiveInteger(length, kInvalidArrayBufferLength);
%ArrayBufferInitialize(this, byteLength);
} else {
throw MakeTypeError('constructor_not_function', ["ArrayBuffer"]);
throw MakeTypeError(kConstructorNotFunction, "ArrayBuffer");
}
}
function ArrayBufferGetByteLen() {
if (!IS_ARRAYBUFFER(this)) {
throw MakeTypeError('incompatible_method_receiver',
['ArrayBuffer.prototype.byteLength', this]);
throw MakeTypeError(kIncompatibleMethodReceiver,
'ArrayBuffer.prototype.byteLength', this);
}
return %_ArrayBufferGetByteLength(this);
}
@ -28,8 +33,8 @@ function ArrayBufferGetByteLen() {
// ES6 Draft 15.13.5.5.3
function ArrayBufferSlice(start, end) {
if (!IS_ARRAYBUFFER(this)) {
throw MakeTypeError('incompatible_method_receiver',
['ArrayBuffer.prototype.slice', this]);
throw MakeTypeError(kIncompatibleMethodReceiver,
'ArrayBuffer.prototype.slice', this);
}
var relativeStart = TO_INTEGER(start);
@ -56,7 +61,7 @@ function ArrayBufferSlice(start, end) {
}
var newLen = fin - first;
// TODO(dslomov): implement inheritance
var result = new $ArrayBuffer(newLen);
var result = new GlobalArrayBuffer(newLen);
%ArrayBufferSliceImpl(this, result, first);
return result;
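The clamping logic above, as observable JS (negative indices count from the end; the end index is clamped to at least the start):

var buf = new ArrayBuffer(8);
console.log(buf.slice(2).byteLength);       // 6
console.log(buf.slice(-4, -2).byteLength);  // 2
console.log(buf.slice(6, 2).byteLength);    // 0: end below start yields empty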
@ -66,29 +71,26 @@ function ArrayBufferIsViewJS(obj) {
return %ArrayBufferIsView(obj);
}
function SetUpArrayBuffer() {
%CheckIsBootstrapping();
// Set up the ArrayBuffer constructor function.
%SetCode($ArrayBuffer, ArrayBufferConstructor);
%FunctionSetPrototype($ArrayBuffer, new $Object());
// Set up the ArrayBuffer constructor function.
%SetCode(GlobalArrayBuffer, ArrayBufferConstructor);
%FunctionSetPrototype(GlobalArrayBuffer, new GlobalObject());
// Set up the constructor property on the ArrayBuffer prototype object.
%AddNamedProperty(
$ArrayBuffer.prototype, "constructor", $ArrayBuffer, DONT_ENUM);
// Set up the constructor property on the ArrayBuffer prototype object.
%AddNamedProperty(
GlobalArrayBuffer.prototype, "constructor", GlobalArrayBuffer, DONT_ENUM);
%AddNamedProperty($ArrayBuffer.prototype,
symbolToStringTag, "ArrayBuffer", DONT_ENUM | READ_ONLY);
%AddNamedProperty(GlobalArrayBuffer.prototype,
symbolToStringTag, "ArrayBuffer", DONT_ENUM | READ_ONLY);
InstallGetter($ArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLen);
$installGetter(GlobalArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLen);
InstallFunctions($ArrayBuffer, DONT_ENUM, $Array(
"isView", ArrayBufferIsViewJS
));
$installFunctions(GlobalArrayBuffer, DONT_ENUM, [
"isView", ArrayBufferIsViewJS
]);
InstallFunctions($ArrayBuffer.prototype, DONT_ENUM, $Array(
"slice", ArrayBufferSlice
));
}
$installFunctions(GlobalArrayBuffer.prototype, DONT_ENUM, [
"slice", ArrayBufferSlice
]);
SetUpArrayBuffer();
})

34
deps/v8/src/assembler.cc

@ -40,6 +40,7 @@
#include "src/base/functional.h"
#include "src/base/lazy-instance.h"
#include "src/base/platform/platform.h"
#include "src/base/utils/random-number-generator.h"
#include "src/builtins.h"
#include "src/codegen.h"
#include "src/counters.h"
@ -49,7 +50,6 @@
#include "src/execution.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate-inl.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
#include "src/regexp-stack.h"
@ -1216,30 +1216,15 @@ ExternalReference ExternalReference::new_space_allocation_limit_address(
}
ExternalReference ExternalReference::old_pointer_space_allocation_top_address(
ExternalReference ExternalReference::old_space_allocation_top_address(
Isolate* isolate) {
return ExternalReference(
isolate->heap()->OldPointerSpaceAllocationTopAddress());
}
ExternalReference ExternalReference::old_pointer_space_allocation_limit_address(
Isolate* isolate) {
return ExternalReference(
isolate->heap()->OldPointerSpaceAllocationLimitAddress());
return ExternalReference(isolate->heap()->OldSpaceAllocationTopAddress());
}
ExternalReference ExternalReference::old_data_space_allocation_top_address(
ExternalReference ExternalReference::old_space_allocation_limit_address(
Isolate* isolate) {
return ExternalReference(isolate->heap()->OldDataSpaceAllocationTopAddress());
}
ExternalReference ExternalReference::old_data_space_allocation_limit_address(
Isolate* isolate) {
return ExternalReference(
isolate->heap()->OldDataSpaceAllocationLimitAddress());
return ExternalReference(isolate->heap()->OldSpaceAllocationLimitAddress());
}
@ -1630,19 +1615,20 @@ bool PositionsRecorder::WriteRecordedPositions() {
EnsureSpace ensure_space(assembler_);
assembler_->RecordRelocInfo(RelocInfo::STATEMENT_POSITION,
state_.current_statement_position);
state_.written_statement_position = state_.current_statement_position;
written = true;
}
state_.written_statement_position = state_.current_statement_position;
// Write the position if it is different from what was written last time and
// also different from the written statement position.
// also different from the statement position that was just written.
if (state_.current_position != state_.written_position &&
state_.current_position != state_.written_statement_position) {
(state_.current_position != state_.written_statement_position ||
!written)) {
EnsureSpace ensure_space(assembler_);
assembler_->RecordRelocInfo(RelocInfo::POSITION, state_.current_position);
state_.written_position = state_.current_position;
written = true;
}
state_.written_position = state_.current_position;
// Return whether something was written.
return written;

11
deps/v8/src/assembler.h

@ -435,6 +435,7 @@ class RelocInfo {
static inline bool IsEmbeddedObject(Mode mode) {
return mode == EMBEDDED_OBJECT;
}
static inline bool IsCell(Mode mode) { return mode == CELL; }
static inline bool IsRuntimeEntry(Mode mode) {
return mode == RUNTIME_ENTRY;
}
@ -939,14 +940,8 @@ class ExternalReference BASE_EMBEDDED {
// Used for fast allocation in generated code.
static ExternalReference new_space_allocation_top_address(Isolate* isolate);
static ExternalReference new_space_allocation_limit_address(Isolate* isolate);
static ExternalReference old_pointer_space_allocation_top_address(
Isolate* isolate);
static ExternalReference old_pointer_space_allocation_limit_address(
Isolate* isolate);
static ExternalReference old_data_space_allocation_top_address(
Isolate* isolate);
static ExternalReference old_data_space_allocation_limit_address(
Isolate* isolate);
static ExternalReference old_space_allocation_top_address(Isolate* isolate);
static ExternalReference old_space_allocation_limit_address(Isolate* isolate);
static ExternalReference mod_two_doubles_operation(Isolate* isolate);
static ExternalReference power_double_double_function(Isolate* isolate);

7
deps/v8/src/assert-scope.cc

@ -6,7 +6,8 @@
#include "src/base/lazy-instance.h"
#include "src/base/platform/platform.h"
#include "src/isolate-inl.h"
#include "src/debug.h"
#include "src/isolate.h"
#include "src/utils.h"
namespace v8 {
@ -14,7 +15,7 @@ namespace internal {
namespace {
struct PerThreadAssertKeyConstructTrait FINAL {
struct PerThreadAssertKeyConstructTrait final {
static void Construct(base::Thread::LocalStorageKey* key) {
*key = base::Thread::CreateThreadLocalKey();
}
@ -31,7 +32,7 @@ PerThreadAssertKey kPerThreadAssertKey;
} // namespace
class PerThreadAssertData FINAL {
class PerThreadAssertData final {
public:
PerThreadAssertData() : nesting_level_(0) {
for (int i = 0; i < LAST_PER_THREAD_ASSERT_TYPE; i++) {

46
deps/v8/src/ast-numbering.cc

@ -12,7 +12,7 @@ namespace v8 {
namespace internal {
class AstNumberingVisitor FINAL : public AstVisitor {
class AstNumberingVisitor final : public AstVisitor {
public:
explicit AstNumberingVisitor(Isolate* isolate, Zone* zone)
: AstVisitor(),
@ -27,14 +27,14 @@ class AstNumberingVisitor FINAL : public AstVisitor {
private:
// AST node visitor interface.
#define DEFINE_VISIT(type) virtual void Visit##type(type* node) OVERRIDE;
#define DEFINE_VISIT(type) virtual void Visit##type(type* node) override;
AST_NODE_LIST(DEFINE_VISIT)
#undef DEFINE_VISIT
bool Finish(FunctionLiteral* node);
void VisitStatements(ZoneList<Statement*>* statements) OVERRIDE;
void VisitDeclarations(ZoneList<Declaration*>* declarations) OVERRIDE;
void VisitStatements(ZoneList<Statement*>* statements) override;
void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
void VisitArguments(ZoneList<Expression*>* arguments);
void VisitObjectLiteralProperty(ObjectLiteralProperty* property);
@ -106,12 +106,6 @@ void AstNumberingVisitor::VisitExportDeclaration(ExportDeclaration* node) {
}
void AstNumberingVisitor::VisitModuleUrl(ModuleUrl* node) {
IncrementNodeCount();
DisableOptimization(kModuleUrl);
}
void AstNumberingVisitor::VisitEmptyStatement(EmptyStatement* node) {
IncrementNodeCount();
}
@ -179,14 +173,6 @@ void AstNumberingVisitor::VisitSuperReference(SuperReference* node) {
}
void AstNumberingVisitor::VisitModuleDeclaration(ModuleDeclaration* node) {
IncrementNodeCount();
DisableOptimization(kModuleDeclaration);
VisitVariableProxy(node->proxy());
Visit(node->module());
}
void AstNumberingVisitor::VisitImportDeclaration(ImportDeclaration* node) {
IncrementNodeCount();
DisableOptimization(kImportDeclaration);
@ -194,20 +180,6 @@ void AstNumberingVisitor::VisitImportDeclaration(ImportDeclaration* node) {
}
void AstNumberingVisitor::VisitModulePath(ModulePath* node) {
IncrementNodeCount();
DisableOptimization(kModulePath);
Visit(node->module());
}
void AstNumberingVisitor::VisitModuleStatement(ModuleStatement* node) {
IncrementNodeCount();
DisableOptimization(kModuleStatement);
Visit(node->body());
}
void AstNumberingVisitor::VisitExpressionStatement(ExpressionStatement* node) {
IncrementNodeCount();
Visit(node->expression());
@ -266,13 +238,6 @@ void AstNumberingVisitor::VisitFunctionDeclaration(FunctionDeclaration* node) {
}
void AstNumberingVisitor::VisitModuleLiteral(ModuleLiteral* node) {
IncrementNodeCount();
DisableCaching(kModuleLiteral);
VisitBlock(node->body());
}
void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
IncrementNodeCount();
ReserveFeedbackSlots(node);
@ -362,6 +327,9 @@ void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
}
void AstNumberingVisitor::VisitSpread(Spread* node) { UNREACHABLE(); }
void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();

32
deps/v8/src/ast-value-factory.cc

@ -56,20 +56,20 @@ class AstRawStringInternalizationKey : public HashTableKey {
explicit AstRawStringInternalizationKey(const AstRawString* string)
: string_(string) {}
bool IsMatch(Object* other) OVERRIDE {
bool IsMatch(Object* other) override {
if (string_->is_one_byte_)
return String::cast(other)->IsOneByteEqualTo(string_->literal_bytes_);
return String::cast(other)->IsTwoByteEqualTo(
Vector<const uint16_t>::cast(string_->literal_bytes_));
}
uint32_t Hash() OVERRIDE { return string_->hash() >> Name::kHashShift; }
uint32_t Hash() override { return string_->hash() >> Name::kHashShift; }
uint32_t HashForObject(Object* key) OVERRIDE {
uint32_t HashForObject(Object* key) override {
return String::cast(key)->Hash();
}
Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
Handle<Object> AsHandle(Isolate* isolate) override {
if (string_->is_one_byte_)
return isolate->factory()->NewOneByteInternalizedString(
string_->literal_bytes_, string_->hash());
@ -114,19 +114,6 @@ bool AstRawString::IsOneByteEqualTo(const char* data) const {
}
bool AstRawString::Compare(void* a, void* b) {
return *static_cast<AstRawString*>(a) == *static_cast<AstRawString*>(b);
}
bool AstRawString::operator==(const AstRawString& rhs) const {
if (is_one_byte_ != rhs.is_one_byte_) return false;
if (hash_ != rhs.hash_) return false;
int len = literal_bytes_.length();
if (rhs.literal_bytes_.length() != len) return false;
return memcmp(literal_bytes_.start(), rhs.literal_bytes_.start(), len) == 0;
}
void AstConsString::Internalize(Isolate* isolate) {
// AstRawStrings are internalized before AstConsStrings so left and right are
// already internalized.
@ -363,7 +350,7 @@ AstRawString* AstValueFactory::GetString(uint32_t hash, bool is_one_byte,
// against the AstRawStrings which are in the string_table_. We should not
// return this AstRawString.
AstRawString key(is_one_byte, literal_bytes, hash);
HashMap::Entry* entry = string_table_.Lookup(&key, hash, true);
HashMap::Entry* entry = string_table_.LookupOrInsert(&key, hash);
if (entry->value == NULL) {
// Copy literal contents for later comparison.
int length = literal_bytes.length();
@ -382,4 +369,13 @@ AstRawString* AstValueFactory::GetString(uint32_t hash, bool is_one_byte,
}
bool AstValueFactory::AstRawStringCompare(void* a, void* b) {
const AstRawString* lhs = static_cast<AstRawString*>(a);
const AstRawString* rhs = static_cast<AstRawString*>(b);
if (lhs->is_one_byte() != rhs->is_one_byte()) return false;
if (lhs->hash() != rhs->hash()) return false;
int len = lhs->byte_length();
if (rhs->byte_length() != len) return false;
return memcmp(lhs->raw_data(), rhs->raw_data(), len) == 0;
}
} } // namespace v8::internal

26
deps/v8/src/ast-value-factory.h

@ -64,13 +64,15 @@ class AstString : public ZoneObject {
class AstRawString : public AstString {
public:
int length() const OVERRIDE {
int length() const override {
if (is_one_byte_)
return literal_bytes_.length();
return literal_bytes_.length() / 2;
}
void Internalize(Isolate* isolate) OVERRIDE;
int byte_length() const { return literal_bytes_.length(); }
void Internalize(Isolate* isolate) override;
bool AsArrayIndex(uint32_t* index) const;
@ -92,9 +94,6 @@ class AstRawString : public AstString {
uint32_t hash() const {
return hash_;
}
static bool Compare(void* a, void* b);
bool operator==(const AstRawString& rhs) const;
private:
friend class AstValueFactory;
@ -122,9 +121,9 @@ class AstConsString : public AstString {
: left_(left),
right_(right) {}
int length() const OVERRIDE { return left_->length() + right_->length(); }
int length() const override { return left_->length() + right_->length(); }
void Internalize(Isolate* isolate) OVERRIDE;
void Internalize(Isolate* isolate) override;
private:
friend class AstValueFactory;
@ -245,7 +244,7 @@ class AstValue : public ZoneObject {
F(dot_result, ".result") \
F(empty, "") \
F(eval, "eval") \
F(get_template_callsite, "GetTemplateCallSite") \
F(get_template_callsite, "$getTemplateCallSite") \
F(initialize_const_global, "initializeConstGlobal") \
F(initialize_var_global, "initializeVarGlobal") \
F(is_construct_call, "_IsConstructCall") \
@ -259,9 +258,14 @@ class AstValue : public ZoneObject {
F(next, "next") \
F(proto, "__proto__") \
F(prototype, "prototype") \
F(reflect_apply, "$reflectApply") \
F(reflect_construct, "$reflectConstruct") \
F(spread_arguments, "$spreadArguments") \
F(spread_iterable, "$spreadIterable") \
F(this, "this") \
F(throw_iterator_result_not_an_object, "ThrowIteratorResultNotAnObject") \
F(to_string, "ToString") \
F(to_string, "$toString") \
F(undefined, "undefined") \
F(use_asm, "use asm") \
F(use_strong, "use strong") \
F(use_strict, "use strict") \
@ -277,7 +281,7 @@ class AstValue : public ZoneObject {
class AstValueFactory {
public:
AstValueFactory(Zone* zone, uint32_t hash_seed)
: string_table_(AstRawString::Compare),
: string_table_(AstRawStringCompare),
zone_(zone),
isolate_(NULL),
hash_seed_(hash_seed) {
@ -340,6 +344,8 @@ class AstValueFactory {
AstRawString* GetString(uint32_t hash, bool is_one_byte,
Vector<const byte> literal_bytes);
static bool AstRawStringCompare(void* a, void* b);
// All strings are copied here, one after another (no NULLs in between).
HashMap string_table_;
// For keeping track of all AstValues and AstRawStrings we've created (so that

10
deps/v8/src/ast.cc

@ -271,7 +271,7 @@ void ObjectLiteral::CalculateEmitStore(Zone* zone) {
// If there is an existing entry do not emit a store unless the previous
// entry was also an accessor.
uint32_t hash = literal->Hash();
ZoneHashMap::Entry* entry = table.Lookup(literal, hash, true, allocator);
ZoneHashMap::Entry* entry = table.LookupOrInsert(literal, hash, allocator);
if (entry->value != NULL) {
auto previous_kind =
static_cast<ObjectLiteral::Property*>(entry->value)->kind();
@ -789,12 +789,12 @@ bool RegExpCapture::IsAnchoredAtEnd() {
// in as many cases as possible, to make it more difficult for incorrect
// parses to look like correct ones, which is likely if the input and
// output formats are alike.
class RegExpUnparser FINAL : public RegExpVisitor {
class RegExpUnparser final : public RegExpVisitor {
public:
RegExpUnparser(std::ostream& os, Zone* zone) : os_(os), zone_(zone) {}
void VisitCharacterRange(CharacterRange that);
#define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, \
void* data) OVERRIDE;
#define MAKE_CASE(Name) \
virtual void* Visit##Name(RegExp##Name*, void* data) override;
FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
#undef MAKE_CASE
private:
@ -1004,7 +1004,7 @@ uint32_t Literal::Hash() {
bool Literal::Match(void* literal1, void* literal2) {
const AstValue* x = static_cast<Literal*>(literal1)->raw_value();
const AstValue* y = static_cast<Literal*>(literal2)->raw_value();
return (x->IsString() && y->IsString() && *x->AsString() == *y->AsString()) ||
return (x->IsString() && y->IsString() && x->AsString() == y->AsString()) ||
(x->IsNumber() && y->IsNumber() && x->AsNumber() == y->AsNumber());
}

689
deps/v8/src/ast.h

File diff suppressed because it is too large

11
deps/v8/src/bailout-reason.h

@ -94,7 +94,6 @@ namespace internal {
V(kExternalStringExpectedButNotFound, \
"External string expected, but not found") \
V(kFailedBailedOutLastTime, "Failed/bailed out last time") \
V(kForInStatementIsNotFastCase, "ForInStatement is not fast case") \
V(kForInStatementOptimizationIsDisabled, \
"ForInStatement optimization is disabled") \
V(kForInStatementWithNonLocalEachVariable, \
@ -157,14 +156,7 @@ namespace internal {
V(kMapBecameDeprecated, "Map became deprecated") \
V(kMapBecameUnstable, "Map became unstable") \
V(kMapIsNoLongerInEax, "Map is no longer in eax") \
V(kModuleDeclaration, "Module declaration") \
V(kModuleLiteral, "Module literal") \
V(kModulePath, "Module path") \
V(kModuleStatement, "Module statement") \
V(kModuleVariable, "Module variable") \
V(kModuleUrl, "Module url") \
V(kNativeFunctionLiteral, "Native function literal") \
V(kSuperReference, "Super reference") \
V(kNeedSmiLiteral, "Need a Smi literal here") \
V(kNoCasesLeft, "No cases left") \
V(kNoEmptyArraysHereInEmitFastOneByteArrayJoin, \
@ -215,10 +207,12 @@ namespace internal {
V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
V(kRhsHasBeenClobbered, "Rhs has been clobbered") \
V(kScopedBlock, "ScopedBlock") \
V(kScriptContext, "Allocation of script context") \
V(kSmiAdditionOverflow, "Smi addition overflow") \
V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
V(kSuperReference, "Super reference") \
V(kTheCurrentStackPointerIsBelowCsp, \
"The current stack pointer is below csp") \
V(kTheInstructionShouldBeALis, "The instruction should be a lis") \
@ -323,7 +317,6 @@ namespace internal {
"Wrong address or value passed to RecordWrite") \
V(kShouldNotDirectlyEnterOsrFunction, \
"Should not directly enter OSR-compiled function") \
V(kOsrCompileFailed, "OSR compilation failed") \
V(kYield, "Yield")

1
deps/v8/src/base/OWNERS

@ -0,0 +1 @@
jochen@chromium.org

51
deps/v8/src/base/adapters.h

@ -0,0 +1,51 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Slightly adapted for inclusion in V8.
// Copyright 2014 the V8 project authors. All rights reserved.
#ifndef V8_BASE_ADAPTERS_H_
#define V8_BASE_ADAPTERS_H_
#include "src/base/macros.h"
namespace v8 {
namespace base {
// Internal adapter class for implementing base::Reversed.
template <typename T>
class ReversedAdapter {
public:
typedef decltype(static_cast<T*>(nullptr)->rbegin()) Iterator;
explicit ReversedAdapter(T& t) : t_(t) {}
ReversedAdapter(const ReversedAdapter& ra) : t_(ra.t_) {}
Iterator begin() const { return t_.rbegin(); }
Iterator end() const { return t_.rend(); }
private:
T& t_;
DISALLOW_ASSIGN(ReversedAdapter);
};
// Reversed returns a container adapter usable in a range-based "for" statement
// for iterating a reversible container in reverse order.
//
// Example:
//
// std::vector<int> v = ...;
// for (int i : base::Reversed(v)) {
// // iterates through v from back to front
// }
template <typename T>
ReversedAdapter<T> Reversed(T& t) {
return ReversedAdapter<T>(t);
}
} // namespace base
} // namespace v8
#endif // V8_BASE_ADAPTERS_H_

25
deps/v8/src/base/compiler-specific.h

@ -17,31 +17,6 @@
#endif
// Annotate a virtual method indicating it must be overriding a virtual
// method in the parent class.
// Use like:
// virtual void bar() OVERRIDE;
#if V8_HAS_CXX11_OVERRIDE
#define OVERRIDE override
#else
#define OVERRIDE /* NOT SUPPORTED */
#endif
// Annotate a virtual method indicating that subclasses must not override it,
// or annotate a class to indicate that it cannot be subclassed.
// Use like:
// class B FINAL : public A {};
// virtual void bar() FINAL;
#if V8_HAS_CXX11_FINAL
#define FINAL final
#elif V8_HAS___FINAL
#define FINAL __final
#else
#define FINAL /* NOT SUPPORTED */
#endif
// Annotate a function indicating the caller must examine the return value.
// Use like:
// int foo() WARN_UNUSED_RESULT;

45
deps/v8/src/base/cpu.cc

@ -52,22 +52,23 @@ namespace base {
#if !V8_LIBC_MSVCRT
static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
// Clear ecx to align with __cpuid() of MSVC:
// https://msdn.microsoft.com/en-us/library/hskdteyh.aspx
#if defined(__i386__) && defined(__pic__)
// Make sure to preserve ebx, which contains the pointer
// to the GOT in case we're generating PIC.
__asm__ volatile (
"mov %%ebx, %%edi\n\t"
"cpuid\n\t"
"xchg %%edi, %%ebx\n\t"
: "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
: "a"(info_type)
);
__asm__ volatile(
"mov %%ebx, %%edi\n\t"
"cpuid\n\t"
"xchg %%edi, %%ebx\n\t"
: "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]),
"=d"(cpu_info[3])
: "a"(info_type), "c"(0));
#else
__asm__ volatile (
"cpuid \n\t"
: "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
: "a"(info_type)
);
__asm__ volatile("cpuid \n\t"
: "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]),
"=d"(cpu_info[3])
: "a"(info_type), "c"(0));
#endif // defined(__i386__) && defined(__pic__)
}
@ -177,7 +178,7 @@ int __detect_mips_arch_revision(void) {
#endif
// Extract the information exposed by the kernel via /proc/cpuinfo.
class CPUInfo FINAL {
class CPUInfo final {
public:
CPUInfo() : datalen_(0) {
// Get the size of the cpuinfo file by reading it until the end. This is
@ -325,6 +326,10 @@ CPU::CPU()
has_osxsave_(false),
has_avx_(false),
has_fma3_(false),
has_bmi1_(false),
has_bmi2_(false),
has_lzcnt_(false),
has_popcnt_(false),
has_idiva_(false),
has_neon_(false),
has_thumb2_(false),
@ -371,6 +376,7 @@ CPU::CPU()
has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
has_popcnt_ = (cpu_info[2] & 0x00800000) != 0;
has_osxsave_ = (cpu_info[2] & 0x08000000) != 0;
has_avx_ = (cpu_info[2] & 0x10000000) != 0;
has_fma3_ = (cpu_info[2] & 0x00001000) != 0;
@ -392,10 +398,13 @@ CPU::CPU()
}
}
#if V8_HOST_ARCH_IA32
// SAHF is always available in compat/legacy mode.
has_sahf_ = true;
#else
// There are separate feature flags for VEX-encoded GPR instructions.
if (num_ids >= 7) {
__cpuid(cpu_info, 7);
has_bmi1_ = (cpu_info[1] & 0x00000008) != 0;
has_bmi2_ = (cpu_info[1] & 0x00000100) != 0;
}
// Query extended IDs.
__cpuid(cpu_info, 0x80000000);
unsigned num_ext_ids = cpu_info[0];
@ -403,10 +412,10 @@ CPU::CPU()
// Interpret extended CPU feature information.
if (num_ext_ids > 0x80000000) {
__cpuid(cpu_info, 0x80000001);
has_lzcnt_ = (cpu_info[2] & 0x00000020) != 0;
// SAHF must be probed in long mode.
has_sahf_ = (cpu_info[2] & 0x00000001) != 0;
}
#endif
#elif V8_HOST_ARCH_ARM

10
deps/v8/src/base/cpu.h

@ -28,7 +28,7 @@ namespace base {
// architectures. For each architecture the file cpu_<arch>.cc contains the
// implementation of these static functions.
class CPU FINAL {
class CPU final {
public:
CPU();
@ -86,6 +86,10 @@ class CPU FINAL {
bool has_osxsave() const { return has_osxsave_; }
bool has_avx() const { return has_avx_; }
bool has_fma3() const { return has_fma3_; }
bool has_bmi1() const { return has_bmi1_; }
bool has_bmi2() const { return has_bmi2_; }
bool has_lzcnt() const { return has_lzcnt_; }
bool has_popcnt() const { return has_popcnt_; }
bool is_atom() const { return is_atom_; }
// arm features
@ -125,6 +129,10 @@ class CPU FINAL {
bool has_osxsave_;
bool has_avx_;
bool has_fma3_;
bool has_bmi1_;
bool has_bmi2_;
bool has_lzcnt_;
bool has_popcnt_;
bool has_idiva_;
bool has_neon_;
bool has_thumb2_;

2
deps/v8/src/base/flags.h

@ -20,7 +20,7 @@ namespace base {
// other enum value and passed on to a function that takes an int or unsigned
// int.
template <typename T, typename S = int>
class Flags FINAL {
class Flags final {
public:
typedef T flag_type;
typedef S mask_type;

14
deps/v8/src/base/macros.h

@ -228,11 +228,15 @@ V8_INLINE Dest bit_cast(Source const& source) {
}
// Put this in the private: declarations for a class to be unassignable.
#define DISALLOW_ASSIGN(TypeName) void operator=(const TypeName&)
// A macro to disallow the evil copy constructor and operator= functions
// This should be used in the private: declarations for a class
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&) V8_DELETE; \
void operator=(const TypeName&) V8_DELETE
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&) = delete; \
void operator=(const TypeName&) = delete
// A macro to disallow all the implicit constructors, namely the
@ -241,8 +245,8 @@ V8_INLINE Dest bit_cast(Source const& source) {
// This should be used in the private: declarations for a class
// that wants to prevent anyone from instantiating it. This is
// especially useful for classes containing only static methods.
#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
TypeName() V8_DELETE; \
#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
TypeName() = delete; \
DISALLOW_COPY_AND_ASSIGN(TypeName)

10
deps/v8/src/base/platform/condition-variable.cc

@ -15,11 +15,8 @@ namespace base {
#if V8_OS_POSIX
ConditionVariable::ConditionVariable() {
// TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary
// hack to support cross-compiling Chrome for Android in AOSP. Remove
// this once AOSP is fixed.
#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \
(V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE
(V8_OS_LINUX && V8_LIBC_GLIBC))
// On Free/Net/OpenBSD and Linux with glibc we can change the time
// source for pthread_cond_timedwait() to use the monotonic clock.
pthread_condattr_t attr;
@@ -81,11 +78,8 @@ bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
result = pthread_cond_timedwait_relative_np(
&native_handle_, &mutex->native_handle(), &ts);
#else
// TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary
// hack to support cross-compiling Chrome for Android in AOSP. Remove
// this once AOSP is fixed.
#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \
(V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE
(V8_OS_LINUX && V8_LIBC_GLIBC))
// On Free/Net/OpenBSD and Linux with glibc we can change the time
// source for pthread_cond_timedwait() to use the monotonic clock.
result = clock_gettime(CLOCK_MONOTONIC, &ts);
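Both surviving branches bind the condition variable to CLOCK_MONOTONIC: the constructor selects it with pthread_condattr_setclock(), and WaitFor() then builds its deadline with clock_gettime(CLOCK_MONOTONIC, ...), so timeouts survive wall-clock adjustments. A minimal sketch of that pairing, assuming a platform in the #if list above (error handling elided):

#include <pthread.h>
#include <time.h>

pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t cond;

void InitMonotonicCond() {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);
  pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);  // not the wall clock
  pthread_cond_init(&cond, &attr);
  pthread_condattr_destroy(&attr);
}

// Caller must hold |mutex|. Waits up to one second on the monotonic clock.
int WaitOneSecond() {
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);  // same clock the condvar is bound to
  ts.tv_sec += 1;
  return pthread_cond_timedwait(&cond, &mutex, &ts);
}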

4
deps/v8/src/base/platform/condition-variable.h

@@ -28,7 +28,7 @@ class TimeDelta;
// the mutex and suspend the execution of the calling thread. When the condition
// variable is notified, the thread is awakened, and the mutex is reacquired.
class ConditionVariable FINAL {
class ConditionVariable final {
public:
ConditionVariable();
~ConditionVariable();
@@ -63,7 +63,7 @@ class ConditionVariable FINAL {
typedef pthread_cond_t NativeHandle;
#elif V8_OS_WIN
struct Event;
class NativeHandle FINAL {
class NativeHandle final {
public:
NativeHandle() : waitlist_(NULL), freelist_(NULL) {}
~NativeHandle();

2
deps/v8/src/base/platform/elapsed-timer.h

@@ -11,7 +11,7 @@
namespace v8 {
namespace base {
class ElapsedTimer FINAL {
class ElapsedTimer final {
public:
#ifdef DEBUG
ElapsedTimer() : started_(false) {}

6
deps/v8/src/base/platform/mutex.h

@@ -33,7 +33,7 @@ namespace base {
// |TryLock()|. The behavior of a program is undefined if a mutex is destroyed
// while still owned by some thread. The Mutex class is non-copyable.
class Mutex FINAL {
class Mutex final {
public:
Mutex();
~Mutex();
@@ -127,7 +127,7 @@ typedef LazyStaticInstance<Mutex, DefaultConstructTrait<Mutex>,
// The behavior of a program is undefined if a recursive mutex is destroyed
// while still owned by some thread. The RecursiveMutex class is non-copyable.
class RecursiveMutex FINAL {
class RecursiveMutex final {
public:
RecursiveMutex();
~RecursiveMutex();
@@ -199,7 +199,7 @@ typedef LazyStaticInstance<RecursiveMutex,
// The LockGuard class is non-copyable.
template <typename Mutex>
class LockGuard FINAL {
class LockGuard final {
public:
explicit LockGuard(Mutex* mutex) : mutex_(mutex) { mutex_->Lock(); }
~LockGuard() { mutex_->Unlock(); }
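LockGuard is the usual RAII guard: Lock() in the constructor, Unlock() in the destructor, so every exit path releases the mutex. A hypothetical user of the classes declared above:

class Counter {
 public:
  void Increment() {
    LockGuard<Mutex> lock_guard(&mutex_);  // acquires mutex_
    ++value_;
  }  // lock_guard destroyed here; mutex_ released even on early return

 private:
  Mutex mutex_;
  int value_ = 0;
};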

49
deps/v8/src/base/platform/platform-aix.cc

@@ -73,55 +73,6 @@ void* OS::Allocate(const size_t requested, size_t* allocated, bool executable) {
}
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
: file_(file), memory_(memory), size_(size) {}
virtual ~PosixMemoryMappedFile();
virtual void* memory() { return memory_; }
virtual int size() { return size_; }
private:
FILE* file_;
void* memory_;
int size_;
};
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
int size = ftell(file);
void* memory =
mmapHelper(size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
return new PosixMemoryMappedFile(file, memory, size);
}
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
FILE* file = fopen(name, "w+");
if (file == NULL) return NULL;
int result = fwrite(initial, size, 1, file);
if (result < 1) {
fclose(file);
return NULL;
}
void* memory =
mmapHelper(size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
return new PosixMemoryMappedFile(file, memory, size);
}
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
if (memory_) munmap(memory_, size_);
fclose(file_);
}
static unsigned StringToLong(char* buffer) {
return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
}

48
deps/v8/src/base/platform/platform-cygwin.cc

@@ -59,54 +59,6 @@ void* OS::Allocate(const size_t requested,
}
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
: file_(file), memory_(memory), size_(size) { }
virtual ~PosixMemoryMappedFile();
virtual void* memory() { return memory_; }
virtual int size() { return size_; }
private:
FILE* file_;
void* memory_;
int size_;
};
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
int size = ftell(file);
void* memory =
mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
return new PosixMemoryMappedFile(file, memory, size);
}
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
FILE* file = fopen(name, "w+");
if (file == NULL) return NULL;
int result = fwrite(initial, size, 1, file);
if (result < 1) {
fclose(file);
return NULL;
}
void* memory =
mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
return new PosixMemoryMappedFile(file, memory, size);
}
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
if (memory_) munmap(memory_, size_);
fclose(file_);
}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
// This function assumes that the layout of the file is as follows:

48
deps/v8/src/base/platform/platform-freebsd.cc

@@ -68,54 +68,6 @@ void* OS::Allocate(const size_t requested,
}
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
: file_(file), memory_(memory), size_(size) { }
virtual ~PosixMemoryMappedFile();
virtual void* memory() { return memory_; }
virtual int size() { return size_; }
private:
FILE* file_;
void* memory_;
int size_;
};
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
int size = ftell(file);
void* memory =
mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
return new PosixMemoryMappedFile(file, memory, size);
}
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
FILE* file = fopen(name, "w+");
if (file == NULL) return NULL;
int result = fwrite(initial, size, 1, file);
if (result < 1) {
fclose(file);
return NULL;
}
void* memory =
mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
return new PosixMemoryMappedFile(file, memory, size);
}
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
if (memory_) munmap(memory_, size_);
fclose(file_);
}
static unsigned StringToLong(char* buffer) {
return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
}

62
deps/v8/src/base/platform/platform-linux.cc

@@ -142,64 +142,6 @@ void* OS::Allocate(const size_t requested,
}
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
: file_(file), memory_(memory), size_(size) { }
virtual ~PosixMemoryMappedFile();
virtual void* memory() { return memory_; }
virtual int size() { return size_; }
private:
FILE* file_;
void* memory_;
int size_;
};
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
int size = ftell(file);
void* memory =
mmap(OS::GetRandomMmapAddr(),
size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
fileno(file),
0);
return new PosixMemoryMappedFile(file, memory, size);
}
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
FILE* file = fopen(name, "w+");
if (file == NULL) return NULL;
int result = fwrite(initial, size, 1, file);
if (result < 1) {
fclose(file);
return NULL;
}
void* memory =
mmap(OS::GetRandomMmapAddr(),
size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
fileno(file),
0);
return new PosixMemoryMappedFile(file, memory, size);
}
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
if (memory_) OS::Free(memory_, size_);
fclose(file_);
}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
// This function assumes that the layout of the file is as follows:
@@ -271,7 +213,7 @@ void OS::SignalCodeMovingGC() {
// it. This injects a GC marker into the stream of events generated
// by the kernel and allows us to synchronize V8 code log and the
// kernel log.
int size = sysconf(_SC_PAGESIZE);
long size = sysconf(_SC_PAGESIZE); // NOLINT(runtime/int)
FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+");
if (f == NULL) {
OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
@@ -286,7 +228,7 @@ void OS::SignalCodeMovingGC() {
PROT_READ | PROT_EXEC,
#endif
MAP_PRIVATE, fileno(f), 0);
DCHECK(addr != MAP_FAILED);
DCHECK_NE(MAP_FAILED, addr);
OS::Free(addr, size);
fclose(f);
}

58
deps/v8/src/base/platform/platform-macos.cc

@@ -68,64 +68,6 @@ void* OS::Allocate(const size_t requested,
}
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
: file_(file), memory_(memory), size_(size) { }
virtual ~PosixMemoryMappedFile();
virtual void* memory() { return memory_; }
virtual int size() { return size_; }
private:
FILE* file_;
void* memory_;
int size_;
};
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
int size = ftell(file);
void* memory =
mmap(OS::GetRandomMmapAddr(),
size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
fileno(file),
0);
return new PosixMemoryMappedFile(file, memory, size);
}
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
FILE* file = fopen(name, "w+");
if (file == NULL) return NULL;
int result = fwrite(initial, size, 1, file);
if (result < 1) {
fclose(file);
return NULL;
}
void* memory =
mmap(OS::GetRandomMmapAddr(),
size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
fileno(file),
0);
return new PosixMemoryMappedFile(file, memory, size);
}
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
if (memory_) OS::Free(memory_, size_);
fclose(file_);
}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
unsigned int images_count = _dyld_image_count();

48
deps/v8/src/base/platform/platform-openbsd.cc

@@ -66,54 +66,6 @@ void* OS::Allocate(const size_t requested,
}
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
: file_(file), memory_(memory), size_(size) { }
virtual ~PosixMemoryMappedFile();
virtual void* memory() { return memory_; }
virtual int size() { return size_; }
private:
FILE* file_;
void* memory_;
int size_;
};
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
int size = ftell(file);
void* memory =
mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
return new PosixMemoryMappedFile(file, memory, size);
}
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
FILE* file = fopen(name, "w+");
if (file == NULL) return NULL;
int result = fwrite(initial, size, 1, file);
if (result < 1) {
fclose(file);
return NULL;
}
void* memory =
mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
return new PosixMemoryMappedFile(file, memory, size);
}
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
if (memory_) OS::Free(memory_, size_);
fclose(file_);
}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
// This function assumes that the layout of the file is as follows:

81
deps/v8/src/base/platform/platform-posix.cc

@@ -219,9 +219,8 @@ size_t OS::AllocateAlignment() {
}
void OS::Sleep(int milliseconds) {
useconds_t ms = static_cast<useconds_t>(milliseconds);
usleep(1000 * ms);
void OS::Sleep(TimeDelta interval) {
usleep(static_cast<useconds_t>(interval.InMicroseconds()));
}
@@ -259,6 +258,65 @@ void OS::DebugBreak() {
}
class PosixMemoryMappedFile final : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, size_t size)
: file_(file), memory_(memory), size_(size) {}
~PosixMemoryMappedFile() final;
void* memory() const final { return memory_; }
size_t size() const final { return size_; }
private:
FILE* const file_;
void* const memory_;
size_t const size_;
};
// static
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
if (FILE* file = fopen(name, "r+")) {
if (fseek(file, 0, SEEK_END) == 0) {
long size = ftell(file); // NOLINT(runtime/int)
if (size >= 0) {
void* const memory =
mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_WRITE,
MAP_SHARED, fileno(file), 0);
if (memory != MAP_FAILED) {
return new PosixMemoryMappedFile(file, memory, size);
}
}
}
fclose(file);
}
return nullptr;
}
// static
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
size_t size, void* initial) {
if (FILE* file = fopen(name, "w+")) {
size_t result = fwrite(initial, 1, size, file);
if (result == size && !ferror(file)) {
void* memory = mmap(OS::GetRandomMmapAddr(), result,
PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
if (memory != MAP_FAILED) {
return new PosixMemoryMappedFile(file, memory, result);
}
}
fclose(file);
}
return nullptr;
}
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
if (memory_) OS::Free(memory_, size_);
fclose(file_);
}
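Unlike the per-platform copies it replaces, this open() validates every step (fopen, fseek, ftell, mmap) and hands back nullptr instead of wrapping a failed mapping. A hypothetical caller; the file name is illustrative:

if (OS::MemoryMappedFile* mapped = OS::MemoryMappedFile::open("snapshot.bin")) {
  char* data = static_cast<char*>(mapped->memory());
  if (mapped->size() > 0) {
    data[0] = data[0];  // reads/writes go through the shared mapping
  }
  delete mapped;  // ~PosixMemoryMappedFile munmaps and fcloses
}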
int OS::GetCurrentProcessId() {
return static_cast<int>(getpid());
}
@@ -285,7 +343,7 @@ int OS::GetCurrentThreadId() {
// POSIX date/time support.
//
int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
#if V8_OS_NACL
// Optionally used in Logger::ResourceEvent.
return -1;
@@ -293,8 +351,8 @@ int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
struct rusage usage;
if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
*secs = usage.ru_utime.tv_sec;
*usecs = usage.ru_utime.tv_usec;
*secs = static_cast<uint32_t>(usage.ru_utime.tv_sec);
*usecs = static_cast<uint32_t>(usage.ru_utime.tv_usec);
return 0;
#endif
}
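The added casts narrow getrusage()'s ru_utime fields, whose types (time_t, suseconds_t) are wider than uint32_t on common 64-bit ABIs. The same call in isolation:

#include <sys/resource.h>
#include <cstdint>
#include <cstdio>

int main() {
  struct rusage usage;
  if (getrusage(RUSAGE_SELF, &usage) < 0) return 1;
  // Narrow explicitly, as the patch does, to fit the uint32_t interface.
  uint32_t secs = static_cast<uint32_t>(usage.ru_utime.tv_sec);
  uint32_t usecs = static_cast<uint32_t>(usage.ru_utime.tv_usec);
  std::printf("user CPU time: %u.%06u s\n", secs, usecs);
  return 0;
}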
@@ -572,13 +630,6 @@ void Thread::Join() {
}
void Thread::YieldCPU() {
int result = sched_yield();
DCHECK_EQ(0, result);
USE(result);
}
static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
#if V8_OS_CYGWIN
// We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
@@ -702,5 +753,5 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
USE(result);
}
} } // namespace v8::base
} // namespace base
} // namespace v8

58
deps/v8/src/base/platform/platform-qnx.cc

@@ -117,64 +117,6 @@ void* OS::Allocate(const size_t requested,
}
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
: file_(file), memory_(memory), size_(size) { }
virtual ~PosixMemoryMappedFile();
virtual void* memory() { return memory_; }
virtual int size() { return size_; }
private:
FILE* file_;
void* memory_;
int size_;
};
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
int size = ftell(file);
void* memory =
mmap(OS::GetRandomMmapAddr(),
size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
fileno(file),
0);
return new PosixMemoryMappedFile(file, memory, size);
}
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
FILE* file = fopen(name, "w+");
if (file == NULL) return NULL;
int result = fwrite(initial, size, 1, file);
if (result < 1) {
fclose(file);
return NULL;
}
void* memory =
mmap(OS::GetRandomMmapAddr(),
size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
fileno(file),
0);
return new PosixMemoryMappedFile(file, memory, size);
}
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
if (memory_) OS::Free(memory_, size_);
fclose(file_);
}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
procfs_mapinfo *mapinfos = NULL, *mapinfo;

48
deps/v8/src/base/platform/platform-solaris.cc

@@ -63,54 +63,6 @@ void* OS::Allocate(const size_t requested,
}
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
: file_(file), memory_(memory), size_(size) { }
virtual ~PosixMemoryMappedFile();
virtual void* memory() { return memory_; }
virtual int size() { return size_; }
private:
FILE* file_;
void* memory_;
int size_;
};
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
int size = ftell(file);
void* memory =
mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
return new PosixMemoryMappedFile(file, memory, size);
}
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
FILE* file = fopen(name, "w+");
if (file == NULL) return NULL;
int result = fwrite(initial, size, 1, file);
if (result < 1) {
fclose(file);
return NULL;
}
void* memory =
mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
return new PosixMemoryMappedFile(file, memory, size);
}
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
if (memory_) munmap(memory_, size_);
fclose(file_);
}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return std::vector<SharedLibraryAddress>();
}

76
deps/v8/src/base/platform/platform-win32.cc

@@ -733,15 +733,17 @@ void* OS::GetRandomMmapAddr() {
// Note: This does not guarantee RWX regions will be within the
// range kAllocationRandomAddressMin to kAllocationRandomAddressMax
#ifdef V8_HOST_ARCH_64_BIT
static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
static const uintptr_t kAllocationRandomAddressMin = 0x0000000080000000;
static const uintptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
#else
static const intptr_t kAllocationRandomAddressMin = 0x04000000;
static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
static const uintptr_t kAllocationRandomAddressMin = 0x04000000;
static const uintptr_t kAllocationRandomAddressMax = 0x3FFF0000;
#endif
uintptr_t address =
(platform_random_number_generator.Pointer()->NextInt() << kPageSizeBits) |
kAllocationRandomAddressMin;
uintptr_t address;
platform_random_number_generator.Pointer()->NextBytes(&address,
sizeof(address));
address <<= kPageSizeBits;
address += kAllocationRandomAddressMin;
address &= kAllocationRandomAddressMax;
return reinterpret_cast<void *>(address);
}
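Where the old code randomized only the 32 bits NextInt() returned (and OR'ed in the minimum rather than adding it), the new code fills the whole word with entropy, page-aligns it, and folds it into the permitted span. The arithmetic on its own, with the 32-bit constants from the diff, an illustrative shift, and a stand-in byte source:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

const unsigned kPageSizeBits = 12;  // illustrative alignment shift
const uintptr_t kMin = 0x04000000;
const uintptr_t kMax = 0x3FFF0000;

uintptr_t RandomMmapAddress() {
  uintptr_t address = 0;
  // Stand-in for NextBytes(): fill every byte of the word with entropy.
  for (unsigned i = 0; i < sizeof(address); ++i)
    address = (address << 8) | (std::rand() & 0xFF);
  address <<= kPageSizeBits;  // clear low bits: page-aligned candidate
  address += kMin;            // bias away from reserved low addresses
  address &= kMax;            // best effort, per the "does not
  return address;             // guarantee" note above
}

int main() {
  std::printf("%p\n", reinterpret_cast<void*>(RandomMmapAddress()));
  return 0;
}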
@@ -810,8 +812,8 @@ void OS::Guard(void* address, const size_t size) {
}
void OS::Sleep(int milliseconds) {
::Sleep(milliseconds);
void OS::Sleep(TimeDelta interval) {
::Sleep(static_cast<DWORD>(interval.InMilliseconds()));
}
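Call sites change accordingly, trading an implicit unit for one named by the factory method:

OS::Sleep(50);                               // before: int, unit implicit
OS::Sleep(TimeDelta::FromMilliseconds(50));  // after: unit carried by the type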
@@ -836,38 +838,38 @@ void OS::DebugBreak() {
}
class Win32MemoryMappedFile : public OS::MemoryMappedFile {
class Win32MemoryMappedFile final : public OS::MemoryMappedFile {
public:
Win32MemoryMappedFile(HANDLE file,
HANDLE file_mapping,
void* memory,
int size)
Win32MemoryMappedFile(HANDLE file, HANDLE file_mapping, void* memory,
size_t size)
: file_(file),
file_mapping_(file_mapping),
memory_(memory),
size_(size) { }
virtual ~Win32MemoryMappedFile();
virtual void* memory() { return memory_; }
virtual int size() { return size_; }
size_(size) {}
~Win32MemoryMappedFile() final;
void* memory() const final { return memory_; }
size_t size() const final { return size_; }
private:
HANDLE file_;
HANDLE file_mapping_;
void* memory_;
int size_;
HANDLE const file_;
HANDLE const file_mapping_;
void* const memory_;
size_t const size_;
};
// static
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
// Open a physical file
HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
if (file == INVALID_HANDLE_VALUE) return NULL;
int size = static_cast<int>(GetFileSize(file, NULL));
DWORD size = GetFileSize(file, NULL);
// Create a file mapping for the physical file
HANDLE file_mapping = CreateFileMapping(file, NULL,
PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
HANDLE file_mapping =
CreateFileMapping(file, NULL, PAGE_READWRITE, 0, size, NULL);
if (file_mapping == NULL) return NULL;
// Map a view of the file into memory
@@ -876,15 +878,17 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
}
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
// static
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
size_t size, void* initial) {
// Open a physical file
HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, 0, NULL);
FILE_SHARE_READ | FILE_SHARE_WRITE, NULL,
OPEN_ALWAYS, 0, NULL);
if (file == NULL) return NULL;
// Create a file mapping for the physical file
HANDLE file_mapping = CreateFileMapping(file, NULL,
PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
HANDLE file_mapping = CreateFileMapping(file, NULL, PAGE_READWRITE, 0,
static_cast<DWORD>(size), NULL);
if (file_mapping == NULL) return NULL;
// Map a view of the file into memory
void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
@@ -894,8 +898,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
Win32MemoryMappedFile::~Win32MemoryMappedFile() {
if (memory_ != NULL)
UnmapViewOfFile(memory_);
if (memory_) UnmapViewOfFile(memory_);
CloseHandle(file_mapping_);
CloseHandle(file_);
}
@@ -1381,10 +1384,5 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
DCHECK(result);
}
void Thread::YieldCPU() {
Sleep(0);
}
} } // namespace v8::base
} // namespace base
} // namespace v8

18
deps/v8/src/base/platform/platform.h

@@ -190,8 +190,8 @@ class OS {
// Get the Alignment guaranteed by Allocate().
static size_t AllocateAlignment();
// Sleep for a number of milliseconds.
static void Sleep(const int milliseconds);
// Sleep for a specified time interval.
static void Sleep(TimeDelta interval);
// Abort the current process.
static void Abort();
@@ -210,11 +210,13 @@ class OS {
class MemoryMappedFile {
public:
virtual ~MemoryMappedFile() {}
virtual void* memory() const = 0;
virtual size_t size() const = 0;
static MemoryMappedFile* open(const char* name);
static MemoryMappedFile* create(const char* name, int size, void* initial);
virtual ~MemoryMappedFile() { }
virtual void* memory() = 0;
virtual int size() = 0;
static MemoryMappedFile* create(const char* name, size_t size,
void* initial);
};
// Safe formatting print. Ensures that str is always null-terminated.
@@ -444,10 +446,6 @@ class Thread {
}
#endif
// A hint to the scheduler to let another thread run.
static void YieldCPU();
// The thread name length is limited to 16 based on Linux's implementation of
// prctl().
static const int kMaxThreadNameLength = 16;

2
deps/v8/src/base/platform/semaphore.h

@@ -31,7 +31,7 @@ class TimeDelta;
count reaches zero, threads waiting for the semaphore block until the
// count becomes non-zero.
class Semaphore FINAL {
class Semaphore final {
public:
explicit Semaphore(int count);
~Semaphore();

33
deps/v8/src/base/platform/time.cc

@@ -133,7 +133,7 @@ TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
struct timespec TimeDelta::ToTimespec() const {
struct timespec ts;
ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond;
ts.tv_sec = static_cast<time_t>(delta_ / Time::kMicrosecondsPerSecond);
ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
Time::kNanosecondsPerMicrosecond;
return ts;
@@ -147,7 +147,7 @@ struct timespec TimeDelta::ToTimespec() const {
// We implement time using the high-resolution timers so that we can get
// timeouts which are smaller than 10-15ms. To avoid any drift, we
// periodically resync the internal clock to the system clock.
class Clock FINAL {
class Clock final {
public:
Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}
@@ -292,7 +292,7 @@ struct timespec Time::ToTimespec() const {
ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1); // NOLINT
return ts;
}
ts.tv_sec = us_ / kMicrosecondsPerSecond;
ts.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
return ts;
}
@@ -324,7 +324,7 @@ struct timeval Time::ToTimeval() const {
tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
return tv;
}
tv.tv_sec = us_ / kMicrosecondsPerSecond;
tv.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
tv.tv_usec = us_ % kMicrosecondsPerSecond;
return tv;
}
@@ -399,7 +399,7 @@ class TickClock {
// (3) System time. The system time provides a low-resolution (typically 10ms
// to 55 milliseconds) time stamp but is comparatively less expensive to
// retrieve and more reliable.
class HighResolutionTickClock FINAL : public TickClock {
class HighResolutionTickClock final : public TickClock {
public:
explicit HighResolutionTickClock(int64_t ticks_per_second)
: ticks_per_second_(ticks_per_second) {
@@ -407,7 +407,7 @@ class HighResolutionTickClock FINAL : public TickClock {
}
virtual ~HighResolutionTickClock() {}
int64_t Now() OVERRIDE {
int64_t Now() override {
LARGE_INTEGER now;
BOOL result = QueryPerformanceCounter(&now);
DCHECK(result);
@@ -425,21 +425,21 @@ class HighResolutionTickClock FINAL : public TickClock {
return ticks + 1;
}
bool IsHighResolution() OVERRIDE { return true; }
bool IsHighResolution() override { return true; }
private:
int64_t ticks_per_second_;
};
class RolloverProtectedTickClock FINAL : public TickClock {
class RolloverProtectedTickClock final : public TickClock {
public:
// We initialize rollover_ms_ to 1 to ensure that we will never
// return 0 from TimeTicks::HighResolutionNow() and TimeTicks::Now() below.
RolloverProtectedTickClock() : last_seen_now_(0), rollover_ms_(1) {}
virtual ~RolloverProtectedTickClock() {}
int64_t Now() OVERRIDE {
int64_t Now() override {
LockGuard<Mutex> lock_guard(&mutex_);
// We use timeGetTime() to implement TimeTicks::Now(), which rolls over
// every ~49.7 days. We try to track rollover ourselves, which works if
@@ -458,7 +458,7 @@ class RolloverProtectedTickClock FINAL : public TickClock {
return (now + rollover_ms_) * Time::kMicrosecondsPerMillisecond;
}
bool IsHighResolution() OVERRIDE { return false; }
bool IsHighResolution() override { return false; }
private:
Mutex mutex_;
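timeGetTime() is a 32-bit millisecond counter that wraps roughly every 49.7 days; the class compensates by crediting a full 2^32 ms whenever a fresh sample is smaller than the last one. A sketch of that compensation under those assumptions (not the elided V8 body verbatim):

#include <cstdint>

uint32_t last_seen_now = 0;
uint64_t rollover_ms = 1;  // starts at 1 so Now() can never return 0

// Assumes calls are serialized by a mutex, as in the class above.
uint64_t MillisecondsSinceBoot(uint32_t now /* = timeGetTime() */) {
  if (now < last_seen_now) rollover_ms += 1ULL << 32;  // counter wrapped
  last_seen_now = now;
  return now + rollover_ms;  // caller scales to microseconds, as above
}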
@@ -548,15 +548,6 @@ TimeTicks TimeTicks::HighResolutionNow() {
info.numer / info.denom);
#elif V8_OS_SOLARIS
ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
#elif V8_LIBRT_NOT_AVAILABLE
// TODO(bmeurer): This is a temporary hack to support cross-compiling
// Chrome for Android in AOSP. Remove this once AOSP is fixed, also
// cleanup the tools/gyp/v8.gyp file.
struct timeval tv;
int result = gettimeofday(&tv, NULL);
DCHECK_EQ(0, result);
USE(result);
ticks = (tv.tv_sec * Time::kMicrosecondsPerSecond + tv.tv_usec);
#elif V8_OS_POSIX
struct timespec ts;
int result = clock_gettime(CLOCK_MONOTONIC, &ts);
@@ -576,7 +567,7 @@ bool TimeTicks::IsHighResolutionClockWorking() {
}
#if V8_OS_LINUX && !V8_LIBRT_NOT_AVAILABLE
#if V8_OS_LINUX
class KernelTimestampClock {
public:
@@ -632,7 +623,7 @@ class KernelTimestampClock {
bool Available() { return false; }
};
#endif // V8_OS_LINUX && !V8_LIBRT_NOT_AVAILABLE
#endif // V8_OS_LINUX
static LazyStaticInstance<KernelTimestampClock,
DefaultConstructTrait<KernelTimestampClock>,

6
deps/v8/src/base/platform/time.h

@@ -31,7 +31,7 @@ class TimeTicks;
// This class represents a duration of time, internally represented in
// microseconds.
class TimeDelta FINAL {
class TimeDelta final {
public:
TimeDelta() : delta_(0) {}
@@ -159,7 +159,7 @@ class TimeDelta FINAL {
// This class represents an absolute point in time, internally represented as
// microseconds (s/1,000,000) since 00:00:00 UTC, January 1, 1970.
class Time FINAL {
class Time final {
public:
static const int64_t kMillisecondsPerSecond = 1000;
static const int64_t kMicrosecondsPerMillisecond = 1000;
@@ -298,7 +298,7 @@ inline Time operator+(const TimeDelta& delta, const Time& time) {
// Time::Now() may actually decrease or jump). But note that TimeTicks may
// "stand still", for example if the computer suspended.
class TimeTicks FINAL {
class TimeTicks final {
public:
TimeTicks() : ticks_(0) {}

2
deps/v8/src/base/sys-info.h

@@ -11,7 +11,7 @@
namespace v8 {
namespace base {
class SysInfo FINAL {
class SysInfo final {
public:
// Returns the number of logical processors/core on the current machine.
static int NumberOfProcessors();

2
deps/v8/src/base/utils/random-number-generator.h

@@ -25,7 +25,7 @@ namespace base {
// https://code.google.com/p/v8/issues/detail?id=2905
// This class is neither reentrant nor threadsafe.
class RandomNumberGenerator FINAL {
class RandomNumberGenerator final {
public:
// EntropySource is used as a callback function when V8 needs a source of
// entropy.

7
deps/v8/src/bit-vector.h

@@ -66,7 +66,7 @@ class BitVector : public ZoneObject {
: length_(length),
data_length_(SizeFor(length)),
data_(zone->NewArray<uintptr_t>(data_length_)) {
DCHECK(length > 0);
DCHECK_LE(0, length);
Clear();
}
@@ -77,7 +77,10 @@ class BitVector : public ZoneObject {
CopyFrom(other);
}
static int SizeFor(int length) { return 1 + ((length - 1) / kDataBits); }
static int SizeFor(int length) {
if (length == 0) return 1;
return 1 + ((length - 1) / kDataBits);
}
void CopyFrom(const BitVector& other) {
DCHECK(other.length() <= length());
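SizeFor() reports how many uintptr_t words back a vector of length bits; the rewrite spells out the zero-length case rather than leaning on (0 - 1) / kDataBits truncating toward zero, and the relaxed DCHECK now admits empty vectors. Worked values, assuming 64-bit backing words:

#include <cassert>

const int kDataBits = 64;  // bits per backing word (illustrative)

int SizeFor(int length) {
  if (length == 0) return 1;
  return 1 + ((length - 1) / kDataBits);
}

int main() {
  assert(SizeFor(0) == 1);   // explicit: one word even when empty
  assert(SizeFor(1) == 1);
  assert(SizeFor(64) == 1);  // exactly fills the first word
  assert(SizeFor(65) == 2);  // one bit spills into a second word
  return 0;
}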

590
deps/v8/src/bootstrapper.cc

File diff suppressed because it is too large

15
deps/v8/src/bootstrapper.h

@@ -15,7 +15,7 @@ namespace internal {
// (runtime.js, etc.) to precompiled functions. Instead of mapping
// names to functions it might make sense to let the JS2C tool
// generate an index for each native JS file.
class SourceCodeCache FINAL BASE_EMBEDDED {
class SourceCodeCache final BASE_EMBEDDED {
public:
explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { }
@@ -64,7 +64,7 @@ class SourceCodeCache FINAL BASE_EMBEDDED {
// The Bootstrapper is the public interface for creating a JavaScript global
// context.
class Bootstrapper FINAL {
class Bootstrapper final {
public:
static void InitializeOncePerProcess();
static void TearDownExtensions();
@@ -87,7 +87,8 @@ class Bootstrapper FINAL {
void Iterate(ObjectVisitor* v);
// Accessor for the native scripts source code.
Handle<String> NativesSourceLookup(int index);
template <class Source>
Handle<String> SourceLookup(int index);
// Tells whether bootstrapping is active.
bool IsActive() const { return nesting_ != 0; }
@@ -126,7 +127,7 @@ class Bootstrapper FINAL {
};
class BootstrapperActive FINAL BASE_EMBEDDED {
class BootstrapperActive final BASE_EMBEDDED {
public:
explicit BootstrapperActive(Bootstrapper* bootstrapper)
: bootstrapper_(bootstrapper) {
@@ -144,13 +145,13 @@ class BootstrapperActive FINAL BASE_EMBEDDED {
};
class NativesExternalStringResource FINAL
class NativesExternalStringResource final
: public v8::String::ExternalOneByteStringResource {
public:
NativesExternalStringResource(const char* source, size_t length)
: data_(source), length_(length) {}
const char* data() const OVERRIDE { return data_; }
size_t length() const OVERRIDE { return length_; }
const char* data() const override { return data_; }
size_t length() const override { return length_; }
private:
const char* data_;

Some files were not shown because too many files changed in this diff
