deps: upgrade v8 to 3.31.74.1

PR-URL: https://github.com/iojs/io.js/pull/243
Reviewed-By: Fedor Indutny <fedor@indutny.com>
Reviewed-By: Trevor Norris <trev.norris@gmail.com>
Tag: v1.8.0-commit
Author: Ben Noordhuis
Parent commit: dad73f645c
100 changed files (lines changed per file):
 113  deps/v8/.DEPS.git
   3  deps/v8/.gitignore
  73  deps/v8/BUILD.gn
 518  deps/v8/ChangeLog
  39  deps/v8/DEPS
  13  deps/v8/Makefile
  61  deps/v8/Makefile.android
   1  deps/v8/OWNERS
   2  deps/v8/PRESUBMIT.py
  11  deps/v8/README.md
  21  deps/v8/build/android.gypi
  18  deps/v8/build/features.gypi
  34  deps/v8/build/standalone.gypi
 500  deps/v8/build/toolchain.gypi
   3  deps/v8/codereview.settings
 394  deps/v8/include/v8.h
  11  deps/v8/include/v8config.h
 385  deps/v8/samples/lineprocessor.cc
  25  deps/v8/samples/process.cc
   6  deps/v8/samples/samples.gyp
   4  deps/v8/samples/shell.cc
  94  deps/v8/src/accessors.cc
   1  deps/v8/src/accessors.h
 396  deps/v8/src/api.cc
  47  deps/v8/src/arguments.h
 168  deps/v8/src/arm/assembler-arm.cc
  17  deps/v8/src/arm/assembler-arm.h
  16  deps/v8/src/arm/builtins-arm.cc
 199  deps/v8/src/arm/code-stubs-arm.cc
  50  deps/v8/src/arm/code-stubs-arm.h
   8  deps/v8/src/arm/codegen-arm.cc
   4  deps/v8/src/arm/constants-arm.h
   2  deps/v8/src/arm/cpu-arm.cc
   6  deps/v8/src/arm/deoptimizer-arm.cc
 160  deps/v8/src/arm/disasm-arm.cc
 112  deps/v8/src/arm/full-codegen-arm.cc
  28  deps/v8/src/arm/lithium-arm.cc
 162  deps/v8/src/arm/lithium-arm.h
 183  deps/v8/src/arm/lithium-codegen-arm.cc
  45  deps/v8/src/arm/macro-assembler-arm.cc
  24  deps/v8/src/arm/macro-assembler-arm.h
 198  deps/v8/src/arm/simulator-arm.cc
   4  deps/v8/src/arm64/assembler-arm64-inl.h
  30  deps/v8/src/arm64/assembler-arm64.cc
  16  deps/v8/src/arm64/builtins-arm64.cc
 163  deps/v8/src/arm64/code-stubs-arm64.cc
  12  deps/v8/src/arm64/code-stubs-arm64.h
   2  deps/v8/src/arm64/cpu-arm64.cc
   5  deps/v8/src/arm64/deoptimizer-arm64.cc
 112  deps/v8/src/arm64/full-codegen-arm64.cc
  18  deps/v8/src/arm64/lithium-arm64.cc
 163  deps/v8/src/arm64/lithium-arm64.h
 121  deps/v8/src/arm64/lithium-codegen-arm64.cc
  15  deps/v8/src/arm64/macro-assembler-arm64-inl.h
  47  deps/v8/src/arm64/macro-assembler-arm64.cc
  35  deps/v8/src/arm64/macro-assembler-arm64.h
   2  deps/v8/src/arm64/simulator-arm64.cc
   2  deps/v8/src/array-iterator.js
  59  deps/v8/src/array.js
 125  deps/v8/src/ast-numbering.cc
 239  deps/v8/src/ast-this-access-visitor.cc
  34  deps/v8/src/ast-this-access-visitor.h
  15  deps/v8/src/ast-value-factory.cc
  12  deps/v8/src/ast-value-factory.h
 149  deps/v8/src/ast.cc
 835  deps/v8/src/ast.h
  86  deps/v8/src/base/cpu.cc
   7  deps/v8/src/base/cpu.h
  56  deps/v8/src/base/iterator.h
   6  deps/v8/src/base/macros.h
   2  deps/v8/src/base/platform/platform-linux.cc
   3  deps/v8/src/base/platform/platform-posix.cc
  43  deps/v8/src/base/platform/platform-win32.cc
  12  deps/v8/src/base/platform/time.cc
   8  deps/v8/src/base/sys-info.cc
 212  deps/v8/src/bootstrapper.cc
   4  deps/v8/src/bootstrapper.h
  14  deps/v8/src/builtins.cc
   6  deps/v8/src/checks.h
 485  deps/v8/src/code-stubs-hydrogen.cc
   1  deps/v8/src/code-stubs.cc
 347  deps/v8/src/code-stubs.h
   2  deps/v8/src/collection-iterator.js
  26  deps/v8/src/collection.js
 205  deps/v8/src/compiler.cc
  23  deps/v8/src/compiler.h
  44  deps/v8/src/compiler/access-builder.cc
   3  deps/v8/src/compiler/access-builder.h
 275  deps/v8/src/compiler/arm/code-generator-arm.cc
   8  deps/v8/src/compiler/arm/instruction-codes-arm.h
 533  deps/v8/src/compiler/arm/instruction-selector-arm.cc
   4  deps/v8/src/compiler/arm/linkage-arm.cc
 374  deps/v8/src/compiler/arm64/code-generator-arm64.cc
   9  deps/v8/src/compiler/arm64/instruction-codes-arm64.h
 169  deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
   4  deps/v8/src/compiler/arm64/linkage-arm64.cc
 138  deps/v8/src/compiler/ast-graph-builder.cc
  40  deps/v8/src/compiler/ast-graph-builder.h
   2  deps/v8/src/compiler/ast-loop-assignment-analyzer.h
   5  deps/v8/src/compiler/basic-block-instrumentor.cc

deps/v8/.DEPS.git (113 changed lines)

@@ -1,113 +0,0 @@
# DO NOT EDIT EXCEPT FOR LOCAL TESTING.
# THIS IS A GENERATED FILE.
# ALL MANUAL CHANGES WILL BE OVERWRITTEN.
# SEE http://code.google.com/p/chromium/wiki/UsingGit
# FOR HOW TO ROLL DEPS
vars = {
'webkit_url':
'https://chromium.googlesource.com/chromium/blink.git',
'git_url':
'https://chromium.googlesource.com'
}
deps = {
'v8/build/gyp':
Var('git_url') + '/external/gyp.git@a3e2a5caf24a1e0a45401e09ad131210bf16b852',
'v8/buildtools':
Var('git_url') + '/chromium/buildtools.git@fb782d4369d5ae04f17a2fceef7de5a63e50f07b',
'v8/testing/gmock':
Var('git_url') + '/external/googlemock.git@896ba0e03f520fb9b6ed582bde2bd00847e3c3f2',
'v8/testing/gtest':
Var('git_url') + '/external/googletest.git@4650552ff637bb44ecf7784060091cbed3252211',
'v8/third_party/icu':
Var('git_url') + '/chromium/deps/icu52.git@26d8859357ac0bfb86b939bf21c087b8eae22494',
}
deps_os = {
'android':
{
'v8/third_party/android_tools':
Var('git_url') + '/android_tools.git@31869996507de16812bb53a3d0aaa15cd6194c16',
},
'win':
{
'v8/third_party/cygwin':
Var('git_url') + '/chromium/deps/cygwin.git@06a117a90c15174436bfa20ceebbfdf43b7eb820',
'v8/third_party/python_26':
Var('git_url') + '/chromium/deps/python_26.git@67d19f904470effe3122d27101cc5a8195abd157',
},
}
include_rules = [
'+include',
'+unicode',
'+third_party/fdlibm'
]
skip_child_includes = [
'build',
'third_party'
]
hooks = [
{
'action':
[
'download_from_google_storage',
'--no_resume',
'--platform=win32',
'--no_auth',
'--bucket',
'chromium-clang-format',
'-s',
'v8/buildtools/win/clang-format.exe.sha1'
],
'pattern':
'.',
'name':
'clang_format_win'
},
{
'action':
[
'download_from_google_storage',
'--no_resume',
'--platform=darwin',
'--no_auth',
'--bucket',
'chromium-clang-format',
'-s',
'v8/buildtools/mac/clang-format.sha1'
],
'pattern':
'.',
'name':
'clang_format_mac'
},
{
'action':
[
'download_from_google_storage',
'--no_resume',
'--platform=linux*',
'--no_auth',
'--bucket',
'chromium-clang-format',
'-s',
'v8/buildtools/linux64/clang-format.sha1'
],
'pattern':
'.',
'name':
'clang_format_linux'
},
{
'action':
[
'python',
'v8/build/gyp_v8'
],
'pattern':
'.'
}
]

deps/v8/.gitignore (3 changed lines)

@@ -66,8 +66,11 @@ shell_g
/test/test262-es6/tc39-test262-*
/testing/gmock
/testing/gtest
/third_party
/third_party/icu
/third_party/llvm
/third_party/llvm-build
/tools/clang
/tools/jsfunfuzz
/tools/jsfunfuzz.zip
/tools/oom_dump/oom_dump

deps/v8/BUILD.gn (73 changed lines)

@@ -2,9 +2,12 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Because standalone V8 builds are not supported, assume this is part of a
# Chromium build.
import("//build/module_args/v8.gni")
# TODO(jochen): These will need to be user-settable to support standalone V8
# builds.
v8_compress_startup_data = "off"
v8_deprecation_warnings = false
v8_enable_disassembler = false
v8_enable_gdbjit = false
@@ -15,7 +18,6 @@ v8_interpreted_regexp = false
v8_object_print = false
v8_postmortem_support = false
v8_use_snapshot = true
v8_use_external_startup_data = false
v8_enable_extra_checks = is_debug
v8_target_arch = cpu_arch
v8_random_seed = "314159265"
@@ -95,11 +97,6 @@ config("features") {
"V8_I18N_SUPPORT",
]
}
if (v8_compress_startup_data == "bz2") {
defines += [
"COMPRESS_STARTUP_DATA_BZ2",
]
}
if (v8_enable_extra_checks == true) {
defines += [
"ENABLE_EXTRA_CHECKS",
@@ -216,7 +213,6 @@ action("js2c") {
args = [
rebase_path("$target_gen_dir/libraries.cc", root_build_dir),
"CORE",
v8_compress_startup_data
] + rebase_path(sources, root_build_dir)
if (v8_use_external_startup_data) {
@@ -243,9 +239,12 @@ action("js2c_experimental") {
"src/generator.js",
"src/harmony-string.js",
"src/harmony-array.js",
"src/harmony-array-includes.js",
"src/harmony-typedarray.js",
"src/harmony-classes.js",
"src/harmony-tostring.js"
"src/harmony-tostring.js",
"src/harmony-templates.js",
"src/harmony-regexp.js"
]
outputs = [
@@ -255,7 +254,6 @@ action("js2c_experimental") {
args = [
rebase_path("$target_gen_dir/experimental-libraries.cc", root_build_dir),
"EXPERIMENTAL",
v8_compress_startup_data
] + rebase_path(sources, root_build_dir)
if (v8_use_external_startup_data) {
@@ -282,7 +280,7 @@ if (v8_use_external_startup_data) {
]
outputs = [
"$root_gen_dir/natives_blob.bin"
"$root_out_dir/natives_blob.bin"
]
script = "tools/concatenate-files.py"
@@ -335,10 +333,10 @@ action("run_mksnapshot") {
}
if (v8_use_external_startup_data) {
outputs += [ "$root_gen_dir/snapshot_blob.bin" ]
outputs += [ "$root_out_dir/snapshot_blob.bin" ]
args += [
"--startup_blob",
rebase_path("$root_gen_dir/snapshot_blob.bin", root_build_dir)
rebase_path("$root_out_dir/snapshot_blob.bin", root_build_dir)
]
}
}
@@ -361,7 +359,6 @@ source_set("v8_nosnapshot") {
"$target_gen_dir/libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"src/snapshot-empty.cc",
"src/snapshot-common.cc",
]
configs -= [ "//build/config/compiler:chromium_code" ]
@@ -383,7 +380,6 @@ source_set("v8_snapshot") {
"$target_gen_dir/libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/snapshot.cc",
"src/snapshot-common.cc",
]
configs -= [ "//build/config/compiler:chromium_code" ]
@@ -436,6 +432,8 @@ source_set("v8_base") {
"src/assert-scope.cc",
"src/ast-numbering.cc",
"src/ast-numbering.h",
"src/ast-this-access-visitor.cc",
"src/ast-this-access-visitor.h",
"src/ast-value-factory.cc",
"src/ast-value-factory.h",
"src/ast.cc",
@@ -491,22 +489,22 @@ source_set("v8_base") {
"src/compiler/code-generator-impl.h",
"src/compiler/code-generator.cc",
"src/compiler/code-generator.h",
"src/compiler/common-node-cache.cc",
"src/compiler/common-node-cache.h",
"src/compiler/common-operator-reducer.cc",
"src/compiler/common-operator-reducer.h",
"src/compiler/common-operator.cc",
"src/compiler/common-operator.h",
"src/compiler/control-builders.cc",
"src/compiler/control-builders.h",
"src/compiler/control-equivalence.h",
"src/compiler/control-reducer.cc",
"src/compiler/control-reducer.h",
"src/compiler/diamond.h",
"src/compiler/frame.h",
"src/compiler/gap-resolver.cc",
"src/compiler/gap-resolver.h",
"src/compiler/generic-algorithm-inl.h",
"src/compiler/generic-algorithm.h",
"src/compiler/generic-graph.h",
"src/compiler/generic-node-inl.h",
"src/compiler/generic-node.h",
"src/compiler/graph-builder.cc",
"src/compiler/graph-builder.h",
"src/compiler/graph-inl.h",
@@ -540,15 +538,23 @@ source_set("v8_base") {
"src/compiler/js-operator.h",
"src/compiler/js-typed-lowering.cc",
"src/compiler/js-typed-lowering.h",
"src/compiler/jump-threading.cc",
"src/compiler/jump-threading.h",
"src/compiler/linkage-impl.h",
"src/compiler/linkage.cc",
"src/compiler/linkage.h",
"src/compiler/load-elimination.cc",
"src/compiler/load-elimination.h",
"src/compiler/loop-analysis.cc",
"src/compiler/loop-analysis.h",
"src/compiler/machine-operator-reducer.cc",
"src/compiler/machine-operator-reducer.h",
"src/compiler/machine-operator.cc",
"src/compiler/machine-operator.h",
"src/compiler/machine-type.cc",
"src/compiler/machine-type.h",
"src/compiler/move-optimizer.cc",
"src/compiler/move-optimizer.h",
"src/compiler/node-aux-data-inl.h",
"src/compiler/node-aux-data.h",
"src/compiler/node-cache.cc",
@@ -558,12 +564,12 @@ source_set("v8_base") {
"src/compiler/node-properties.h",
"src/compiler/node.cc",
"src/compiler/node.h",
"src/compiler/opcodes.cc",
"src/compiler/opcodes.h",
"src/compiler/operator-properties-inl.h",
"src/compiler/operator-properties.cc",
"src/compiler/operator-properties.h",
"src/compiler/operator.cc",
"src/compiler/operator.h",
"src/compiler/phi-reducer.h",
"src/compiler/pipeline.cc",
"src/compiler/pipeline.h",
"src/compiler/pipeline-statistics.cc",
@@ -572,6 +578,8 @@ source_set("v8_base") {
"src/compiler/raw-machine-assembler.h",
"src/compiler/register-allocator.cc",
"src/compiler/register-allocator.h",
"src/compiler/register-allocator-verifier.cc",
"src/compiler/register-allocator-verifier.h",
"src/compiler/register-configuration.cc",
"src/compiler/register-configuration.h",
"src/compiler/representation-change.h",
@@ -780,6 +788,9 @@ source_set("v8_base") {
"src/jsregexp-inl.h",
"src/jsregexp.cc",
"src/jsregexp.h",
"src/layout-descriptor-inl.h",
"src/layout-descriptor.cc",
"src/layout-descriptor.h",
"src/list-inl.h",
"src/list.h",
"src/lithium-allocator-inl.h",
@@ -873,7 +884,6 @@ source_set("v8_base") {
"src/runtime/runtime-utils.h",
"src/runtime/runtime.cc",
"src/runtime/runtime.h",
"src/runtime/string-builder.h",
"src/safepoint-table.cc",
"src/safepoint-table.h",
"src/sampler.cc",
@@ -890,9 +900,12 @@ source_set("v8_base") {
"src/serialize.h",
"src/small-pointer-list.h",
"src/smart-pointers.h",
"src/snapshot-common.cc",
"src/snapshot-source-sink.cc",
"src/snapshot-source-sink.h",
"src/snapshot.h",
"src/string-builder.cc",
"src/string-builder.h",
"src/string-search.cc",
"src/string-search.h",
"src/string-stream.cc",
@@ -1210,11 +1223,6 @@ source_set("v8_base") {
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
cflags = [ "/wd4267" ]
}
if (is_linux) {
if (v8_compress_startup_data == "bz2") {
libs += [ "bz2" ]
}
}
if (v8_enable_i18n_support) {
deps += [ "//third_party/icu" ]
@@ -1260,6 +1268,7 @@ source_set("v8_libbase") {
"src/base/flags.h",
"src/base/functional.cc",
"src/base/functional.h",
"src/base/iterator.h",
"src/base/lazy-instance.h",
"src/base/logging.cc",
"src/base/logging.h",
@@ -1388,10 +1397,6 @@ if (current_toolchain == host_toolchain) {
":v8_nosnapshot",
"//build/config/sanitizers:deps",
]
if (v8_compress_startup_data == "bz2") {
libs = [ "bz2" ]
}
}
}
@@ -1406,7 +1411,7 @@ component("v8") {
"src/v8dll-main.cc",
]
if (v8_use_external_startup_data) {
if (v8_use_snapshot && v8_use_external_startup_data) {
deps = [
":v8_base",
":v8_external_snapshot",
@@ -1417,6 +1422,7 @@ component("v8") {
":v8_snapshot",
]
} else {
assert(!v8_use_external_startup_data)
deps = [
":v8_base",
":v8_nosnapshot",
@@ -1438,7 +1444,7 @@
} else {
group("v8") {
if (v8_use_external_startup_data) {
if (v8_use_snapshot && v8_use_external_startup_data) {
deps = [
":v8_base",
":v8_external_snapshot",
@@ -1449,6 +1455,7 @@ group("v8") {
":v8_snapshot",
]
} else {
assert(!v8_use_external_startup_data)
deps = [
":v8_base",
":v8_nosnapshot",

deps/v8/ChangeLog (518 changed lines)

@@ -1,3 +1,521 @@
2014-12-23: Version 3.31.74
[turbofan] Turn DCHECK for fixed slot index into a CHECK (Chromium issue
444681).
Performance and stability improvements on all platforms.
2014-12-23: Version 3.31.73
[turbofan] Fix missing ChangeUint32ToUint64 in lowering of LoadBuffer
(Chromium issue 444695).
Enable the embedder to specify what kind of context was disposed.
Performance and stability improvements on all platforms.
2014-12-22: Version 3.31.72
[turbofan] Correctify lowering of Uint8ClampedArray buffer access
(Chromium issue 444508).
Performance and stability improvements on all platforms.
2014-12-20: Version 3.31.71
Performance and stability improvements on all platforms.
2014-12-20: Version 3.31.70
Performance and stability improvements on all platforms.
2014-12-20: Version 3.31.69
Performance and stability improvements on all platforms.
2014-12-19: Version 3.31.68
[turbofan] Fix unsafe out-of-bounds check for checked loads/stores
(Chromium issue 443744).
Performance and stability improvements on all platforms.
2014-12-19: Version 3.31.67
Performance and stability improvements on all platforms.
2014-12-19: Version 3.31.66
Ship ES6 template literals (issue 3230).
Performance and stability improvements on all platforms.
2014-12-18: Version 3.31.65
ES6 template literals should not use legacy octal strings (issue 3736).
Performance and stability improvements on all platforms.
2014-12-18: Version 3.31.64
Fixed -fsanitize=float-cast-overflow problems (issue 3773).
Performance and stability improvements on all platforms.
2014-12-18: Version 3.31.63
ES6 computed property names (issue 3754).
Performance and stability improvements on all platforms.
2014-12-17: Version 3.31.62
Performance and stability improvements on all platforms.
2014-12-17: Version 3.31.61
ES6: Update unscopables to match spec (issue 3632).
ES6 computed property names (issue 3754).
More -fsanitize=vptr fixes (Chromium issue 441099).
[turbofan] Cache conversions inserted during typed lowering (issue
3763).
Performance and stability improvements on all platforms.
2014-12-16: Version 3.31.60
Performance and stability improvements on all platforms.
2014-12-16: Version 3.31.59
Performance and stability improvements on all platforms.
2014-12-16: Version 3.31.58
Ship ES6 classes (issue 3330).
ES6 computed property names (issue 3754).
Performance and stability improvements on all platforms.
2014-12-12: Version 3.31.57
Consistently use only one of virtual/OVERRIDE/FINAL (issue 3753).
Performance and stability improvements on all platforms.
2014-12-12: Version 3.31.56
Performance and stability improvements on all platforms.
2014-12-12: Version 3.31.55
Performance and stability improvements on all platforms.
2014-12-11: Version 3.31.54
Implement Array.from() (issue 3336).
move v8_use_external_startup_data to standalone.gypi (Chromium issue
421063).
Performance and stability improvements on all platforms.
2014-12-11: Version 3.31.53
Performance and stability improvements on all platforms.
2014-12-11: Version 3.31.52
Ship ES6 block scoping (issue 2198).
Optimize Object.seal and Object.preventExtensions (issue 3662, Chromium
issue 115960).
Add Array.prototype.includes (issue 3575).
Performance and stability improvements on all platforms.
2014-12-10: Version 3.31.51
[x64] Fix optimization for certain checked load/stores (Chromium issue
439743).
Performance and stability improvements on all platforms.
2014-12-09: Version 3.31.50
Temporarily restore make dependencies.
Performance and stability improvements on all platforms.
2014-12-09: Version 3.31.49
Performance and stability improvements on all platforms.
2014-12-09: Version 3.31.48
Performance and stability improvements on all platforms.
2014-12-09: Version 3.31.47
Temporarily restore make dependencies.
Performance and stability improvements on all platforms.
2014-12-08: Version 3.31.46
Performance and stability improvements on all platforms.
2014-12-08: Version 3.31.45
Update all DEPS to match chromium's DEPS at edb488e.
Turn on DCHECKs and other debugging code if dcheck_always_on is 1 (issue
3731).
Optimize GetPrototype.
Performance and stability improvements on all platforms.
2014-12-05: Version 3.31.44
Performance and stability improvements on all platforms.
2014-12-04: Version 3.31.43
ES6 template literals: Fix issue with template after rbrace (issue
3734).
Stage ES6 template literals (issue 3230).
Performance and stability improvements on all platforms.
2014-12-04: Version 3.31.42
Performance and stability improvements on all platforms.
2014-12-04: Version 3.31.41
Simplify template literal raw string creation (issue 3710).
Performance and stability improvements on all platforms.
2014-12-03: Version 3.31.40
Performance and stability improvements on all platforms.
2014-12-03: Version 3.31.39
Performance and stability improvements on all platforms.
2014-12-03: Version 3.31.38
Stage ES6 classes and object literal extensions (issue 3330).
Fixed environment handling for LFlooringDivI on ARM (Chromium issue
437765).
Add GetIdentityHash to v8::Name object API (Chromium issue 437416).
Set V8_CC_GNU or V8_CC_MSVC for clang in gcc / cl mode (Chromium issue
82385).
Performance and stability improvements on all platforms.
2014-12-02: Version 3.31.37
Performance and stability improvements on all platforms.
2014-12-02: Version 3.31.36
Set V8_CC_GNU or V8_CC_MSVC for clang in gcc / cl mode (Chromium issue
82385).
Performance and stability improvements on all platforms.
2014-12-02: Version 3.31.35
Performance and stability improvements on all platforms.
2014-12-01: Version 3.31.34
Performance and stability improvements on all platforms.
2014-12-01: Version 3.31.33
Performance and stability improvements on all platforms.
2014-12-01: Version 3.31.32
Performance and stability improvements on all platforms.
2014-12-01: Version 3.31.31
Performance and stability improvements on all platforms.
2014-11-29: Version 3.31.30
Performance and stability improvements on all platforms.
2014-11-28: Version 3.31.29
Stage @@toStringTag (--harmony-tostring).
Performance and stability improvements on all platforms.
2014-11-28: Version 3.31.28
Performance and stability improvements on all platforms.
2014-11-28: Version 3.31.27
Ship harmony-strings.
Performance and stability improvements on all platforms.
2014-11-28: Version 3.31.26
Abort optimization in corner case (Chromium issue 436893).
Performance and stability improvements on all platforms.
2014-11-26: Version 3.31.25
Stage ES6 block scoping (issue 2198).
Introduce legacy const slots in correct context (Chromium issue 410030).
Performance and stability improvements on all platforms.
2014-11-26: Version 3.31.24
Performance and stability improvements on all platforms.
2014-11-25: Version 3.31.23
Performance and stability improvements on all platforms.
2014-11-25: Version 3.31.22
Performance and stability improvements on all platforms.
2014-11-24: Version 3.31.21
Performance and stability improvements on all platforms.
2014-11-24: Version 3.31.20
Performance and stability improvements on all platforms.
2014-11-22: Version 3.31.19
Performance and stability improvements on all platforms.
2014-11-21: Version 3.31.18
Performance and stability improvements on all platforms.
2014-11-21: Version 3.31.17
Performance and stability improvements on all platforms.
2014-11-21: Version 3.31.16
Cache template literal callSiteObj (issue 3230).
Rename String.prototype.contains to 'includes'.
Reserve code range block for evacuation (Chromium issue 430118).
Performance and stability improvements on all platforms.
2014-11-20: Version 3.31.15
Rename String.prototype.contains to 'includes'.
Performance and stability improvements on all platforms.
2014-11-19: Version 3.31.14
Remove Weak{Map,Set}.prototype.clear.
Performance and stability improvements on all platforms.
2014-11-19: Version 3.31.13
Performance and stability improvements on all platforms.
2014-11-19: Version 3.31.12
Classes: Expand test to cover strict runtime behavior (issue 3330).
v8::String::Concat must not throw (Chromium issue 420240).
Fix disabling all break points from within the debug event callback
(Chromium issue 432493).
Performance and stability improvements on all platforms.
2014-11-18: Version 3.31.11
Performance and stability improvements on all platforms.
2014-11-17: Version 3.31.10
Performance and stability improvements on all platforms.
2014-11-17: Version 3.31.9
Expose internal properties of map/set iterators via mirrors.
Performance and stability improvements on all platforms.
2014-11-17: Version 3.31.8
Performance and stability improvements on all platforms.
2014-11-15: Version 3.31.7
Classes: Add support for stepping through default constructors (issue
3674).
Performance and stability improvements on all platforms.
2014-11-14: Version 3.31.6
Fix desugaring of let bindings in for loops to handle continue properly
(issue 3683).
Performance and stability improvements on all platforms.
2014-11-14: Version 3.31.5
Classes: Implement correct name binding (issue 3330).
Performance and stability improvements on all platforms.
2014-11-14: Version 3.31.4
Performance and stability improvements on all platforms.
2014-11-14: Version 3.31.3
Classes: Cleanup default constructor flag.
Soft fail for invalid cache data.
Implement .of() on typed arrays (issue 3578).
Performance and stability improvements on all platforms.
2014-11-13: Version 3.31.2
MIPS: Leaving a generator via an exception causes it to close (issue
3096).
MIPS: ES6: Add support for super in object literals (issue 3571).
Increase the target new space size to the max new space size (issue
3626).
Leaving a generator via an exception causes it to close (issue 3096).
Correctly compute line numbers in functions from the function
constructor (Chromium issue 109362).
Rename v8::Exception::GetMessage to CreateMessage.
Classes: Add support for arguments in default constructor (issue 3672).
ES6: Add support for super in object literals (issue 3571).
Performance and stability improvements on all platforms.
2014-11-12: Version 3.31.1
Fix has_constant_parameter_count() confusion in LReturn (Chromium issue
431602).
Performance and stability improvements on all platforms.
2014-11-05: Version 3.30.33
`1..isPrototypeOf.call(null)` should return false, not throw TypeError

deps/v8/DEPS (39 changed lines)

@@ -3,44 +3,32 @@
# all paths in here must match this assumption.
vars = {
"chromium_git": "https://chromium.googlesource.com",
"chromium_trunk": "https://src.chromium.org/svn/trunk",
"buildtools_revision": "fb782d4369d5ae04f17a2fceef7de5a63e50f07b",
"git_url": "https://chromium.googlesource.com",
}
deps = {
# Remember to keep the revision in sync with the Makefile.
"v8/build/gyp":
"http://gyp.googlecode.com/svn/trunk@1831",
Var("git_url") + "/external/gyp.git" + "@" + "fe00999dfaee449d3465a9316778434884da4fa7", # from svn revision 2010
"v8/third_party/icu":
Var("chromium_trunk") + "/deps/third_party/icu52@277999",
Var("git_url") + "/chromium/deps/icu.git" + "@" + "51c1a4ce5f362676aa1f1cfdb5b7e52edabfa5aa",
"v8/buildtools":
"https://chromium.googlesource.com/chromium/buildtools.git@" +
Var("buildtools_revision"),
Var("git_url") + "/chromium/buildtools.git" + "@" + "23a4e2f545c7b6340d7e5a2b74801941b0a86535",
"v8/testing/gtest":
"http://googletest.googlecode.com/svn/trunk@692",
Var("git_url") + "/external/googletest.git" + "@" + "8245545b6dc9c4703e6496d1efd19e975ad2b038", # from svn revision 700
"v8/testing/gmock":
"http://googlemock.googlecode.com/svn/trunk@485",
Var("git_url") + "/external/googlemock.git" + "@" + "29763965ab52f24565299976b936d1265cb6a271", # from svn revision 501
"v8/tools/clang":
Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "90fb65e7a9a5c9d6d9613dfb0e78921c52ca9cfc",
}
deps_os = {
"android": {
"v8/third_party/android_tools":
Var("chromium_git") + "/android_tools.git" + "@" +
"31869996507de16812bb53a3d0aaa15cd6194c16",
Var("git_url") + "/android_tools.git" + "@" + "4f723e2a5fa5b7b8a198072ac19b92344be2b271",
},
"win": {
"v8/third_party/cygwin":
Var("chromium_trunk") + "/deps/third_party/cygwin@66844",
"v8/third_party/python_26":
Var("chromium_trunk") + "/tools/third_party/python_26@89111",
Var("git_url") + "/chromium/deps/cygwin.git" + "@" + "c89e446b273697fadf3a10ff1007a97c0b7de6df",
}
}
@@ -92,6 +80,13 @@ hooks = [
"-s", "v8/buildtools/linux64/clang-format.sha1",
],
},
{
# Pull clang if needed or requested via GYP_DEFINES.
# Note: On Win, this should run after win_toolchain, as it may use it.
'name': 'clang',
'pattern': '.',
'action': ['python', 'v8/tools/clang/scripts/update.py', '--if-needed'],
},
{
# A change to a .gyp, .gypi, or to GYP itself should run the generator.
"pattern": ".",

deps/v8/Makefile (13 changed lines)

@@ -64,6 +64,10 @@ endif
ifeq ($(verifyheap), on)
GYPFLAGS += -Dv8_enable_verify_heap=1
endif
# tracemaps=on
ifeq ($(tracemaps), on)
GYPFLAGS += -Dv8_trace_maps=1
endif
# backtrace=off
ifeq ($(backtrace), off)
GYPFLAGS += -Dv8_enable_backtrace=0
@@ -78,6 +82,9 @@ endif
ifeq ($(snapshot), off)
GYPFLAGS += -Dv8_use_snapshot='false'
endif
ifeq ($(snapshot), external)
GYPFLAGS += -Dv8_use_external_startup_data=1
endif
# extrachecks=on/off
ifeq ($(extrachecks), on)
GYPFLAGS += -Dv8_enable_extra_checks=1 -Dv8_enable_handle_zapping=1
@@ -486,7 +493,7 @@ gtags.clean:
# "dependencies" includes also dependencies required for development.
# Remember to keep these in sync with the DEPS file.
builddeps:
svn checkout --force http://gyp.googlecode.com/svn/trunk build/gyp \
svn checkout --force https://gyp.googlecode.com/svn/trunk build/gyp \
--revision 1831
if svn info third_party/icu 2>&1 | grep -q icu46 ; then \
svn switch --force \
@@ -497,9 +504,9 @@ builddeps:
https://src.chromium.org/chrome/trunk/deps/third_party/icu52 \
third_party/icu --revision 277999 ; \
fi
svn checkout --force http://googletest.googlecode.com/svn/trunk \
svn checkout --force https://googletest.googlecode.com/svn/trunk \
testing/gtest --revision 692
svn checkout --force http://googlemock.googlecode.com/svn/trunk \
svn checkout --force https://googlemock.googlecode.com/svn/trunk \
testing/gmock --revision 485
dependencies: builddeps

deps/v8/Makefile.android (61 changed lines)

@@ -38,12 +38,10 @@ HOST_OS = $(shell uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/')
ANDROID_NDK_HOST_ARCH ?= $(shell uname -m | sed -e 's/i[3456]86/x86/')
ifeq ($(HOST_OS), linux)
TOOLCHAIN_DIR = linux-$(ANDROID_NDK_HOST_ARCH)
else ifeq ($(HOST_OS), mac)
TOOLCHAIN_DIR = darwin-$(ANDROID_NDK_HOST_ARCH)
else
ifeq ($(HOST_OS), mac)
TOOLCHAIN_DIR = darwin-$(ANDROID_NDK_HOST_ARCH)
else
$(error Host platform "${HOST_OS}" is not supported)
endif
$(error Host platform "${HOST_OS}" is not supported)
endif
ifeq ($(ARCH), android_arm)
@@ -52,38 +50,29 @@ ifeq ($(ARCH), android_arm)
TOOLCHAIN_ARCH = arm-linux-androideabi
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.8
else ifeq ($(ARCH), android_arm64)
DEFINES = target_arch=arm64 v8_target_arch=arm64 android_target_arch=arm64 android_target_platform=21
TOOLCHAIN_ARCH = aarch64-linux-android
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.9
else ifeq ($(ARCH), android_mipsel)
DEFINES = target_arch=mipsel v8_target_arch=mipsel android_target_platform=14
DEFINES += android_target_arch=mips mips_arch_variant=mips32r2
TOOLCHAIN_ARCH = mipsel-linux-android
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.8
else ifeq ($(ARCH), android_ia32)
DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86 android_target_platform=14
TOOLCHAIN_ARCH = x86
TOOLCHAIN_PREFIX = i686-linux-android
TOOLCHAIN_VER = 4.8
else ifeq ($(ARCH), android_x87)
DEFINES = target_arch=x87 v8_target_arch=x87 android_target_arch=x86 android_target_platform=14
TOOLCHAIN_ARCH = x86
TOOLCHAIN_PREFIX = i686-linux-android
TOOLCHAIN_VER = 4.8
else
ifeq ($(ARCH), android_arm64)
DEFINES = target_arch=arm64 v8_target_arch=arm64 android_target_arch=arm64 android_target_platform=L
TOOLCHAIN_ARCH = aarch64-linux-android
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.9
else
ifeq ($(ARCH), android_mipsel)
DEFINES = target_arch=mipsel v8_target_arch=mipsel android_target_platform=14
DEFINES += android_target_arch=mips mips_arch_variant=mips32r2
TOOLCHAIN_ARCH = mipsel-linux-android
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.8
else
ifeq ($(ARCH), android_ia32)
DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86 android_target_platform=14
TOOLCHAIN_ARCH = x86
TOOLCHAIN_PREFIX = i686-linux-android
TOOLCHAIN_VER = 4.8
else
ifeq ($(ARCH), android_x87)
DEFINES = target_arch=x87 v8_target_arch=x87 android_target_arch=x86 android_target_platform=14
TOOLCHAIN_ARCH = x86
TOOLCHAIN_PREFIX = i686-linux-android
TOOLCHAIN_VER = 4.8
else
$(error Target architecture "${ARCH}" is not supported)
endif
endif
endif
endif
$(error Target architecture "${ARCH}" is not supported)
endif
TOOLCHAIN_PATH = \

deps/v8/OWNERS (1 changed line)

@@ -1,3 +1,4 @@
adamk@chromium.org
bmeurer@chromium.org
danno@chromium.org
dcarney@chromium.org

deps/v8/PRESUBMIT.py (2 changed lines)

@@ -244,11 +244,11 @@ def GetPreferredTryMasters(project, change):
'v8_linux_rel': set(['defaulttests']),
'v8_linux_dbg': set(['defaulttests']),
'v8_linux_nosnap_rel': set(['defaulttests']),
'v8_linux_nosnap_dbg': set(['defaulttests']),
'v8_linux64_rel': set(['defaulttests']),
'v8_linux_arm_dbg': set(['defaulttests']),
'v8_linux_arm64_rel': set(['defaulttests']),
'v8_linux_layout_dbg': set(['defaulttests']),
'v8_linux_chromium_gn_rel': set(['defaulttests']),
'v8_mac_rel': set(['defaulttests']),
'v8_win_rel': set(['defaulttests']),
'v8_win64_compile_rel': set(['defaulttests']),

deps/v8/README.md (11 changed lines)

@@ -16,8 +16,15 @@ V8 Project page: https://code.google.com/p/v8/
Getting the Code
=============
V8 Git repository: https://chromium.googlesource.com/v8/v8.git
GitHub mirror: https://github.com/v8/v8-git-mirror
Checkout [depot tools](http://www.chromium.org/developers/how-tos/install-depot-tools), and run
> `fetch v8`
This will checkout V8 into the directory `v8` and fetch all of its dependencies.
To stay up to date, run
> `git pull origin`
> `gclient sync`
For fetching all branches, add the following into your remote
configuration in `.git/config`:

deps/v8/build/android.gypi (21 changed lines)

@@ -74,13 +74,13 @@
],
}, # Release
}, # configurations
'cflags': [ '-Wno-abi', '-Wall', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-fno-rtti', '-fno-exceptions',
# Note: Using -std=c++0x will define __STRICT_ANSI__, which in
# turn will leave out some template stuff for 'long long'. What
# we want is -std=c++11, but this is not supported by GCC 4.6 or
# Xcode 4.2
'-std=gnu++0x' ],
'cflags': [ '-Wno-abi', '-Wall', '-W', '-Wno-unused-parameter'],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-fno-exceptions',
# Note: Using -std=c++0x will define __STRICT_ANSI__, which
# in turn will leave out some template stuff for 'long
# long'. What we want is -std=c++11, but this is not
# supported by GCC 4.6 or Xcode 4.2
'-std=gnu++0x' ],
'target_conditions': [
['_toolset=="target"', {
'cflags!': [
@@ -93,11 +93,13 @@
'-fno-short-enums',
'-finline-limit=64',
'-Wa,--noexecstack',
'-Wno-error=non-virtual-dtor', # TODO(michaelbai): Fix warnings.
# Note: This include is in cflags to ensure that it comes after
# all of the includes.
'-I<(android_include)',
],
'cflags_cc': [
'-Wno-error=non-virtual-dtor', # TODO(michaelbai): Fix warnings.
],
'defines': [
'ANDROID',
#'__GNU_SOURCE=1', # Necessary for clone()
@@ -213,8 +215,7 @@
'-fno-stack-protector',
],
}],
['target_arch=="arm64" or target_arch=="x64"', {
# TODO(ulan): Enable PIE for other architectures (crbug.com/373219).
['(target_arch=="arm" or target_arch=="arm64" or target_arch=="x64") and component!="shared_library"', {
'cflags': [
'-fPIE',
],

deps/v8/build/features.gypi (18 changed lines)

@@ -29,8 +29,6 @@
{
'variables': {
'v8_compress_startup_data%': 'off',
'v8_enable_disassembler%': 0,
'v8_enable_gdbjit%': 0,
@@ -39,6 +37,8 @@
'v8_enable_verify_heap%': 0,
'v8_trace_maps%': 0,
'v8_use_snapshot%': 'true',
'v8_enable_verify_predictable%': 0,
@@ -59,9 +59,8 @@
# Enable compiler warnings when using V8_DEPRECATED apis.
'v8_deprecation_warnings%': 0,
# Use external files for startup data blobs:
# the JS builtins sources and the start snapshot.
'v8_use_external_startup_data%': 0,
# Set to 1 to enable DCHECKs in release builds.
'dcheck_always_on%': 0,
},
'target_defaults': {
'conditions': [
@@ -77,6 +76,9 @@
['v8_enable_verify_heap==1', {
'defines': ['VERIFY_HEAP',],
}],
['v8_trace_maps==1', {
'defines': ['TRACE_MAPS',],
}],
['v8_enable_verify_predictable==1', {
'defines': ['VERIFY_PREDICTABLE',],
}],
@@ -89,12 +91,12 @@
['v8_enable_i18n_support==1', {
'defines': ['V8_I18N_SUPPORT',],
}],
['v8_compress_startup_data=="bz2"', {
'defines': ['COMPRESS_STARTUP_DATA_BZ2',],
}],
['v8_use_external_startup_data==1', {
'defines': ['V8_USE_EXTERNAL_STARTUP_DATA',],
}],
['dcheck_always_on!=0', {
'defines': ['DEBUG',],
}],
], # conditions
'configurations': {
'DebugBaseCommon': {

deps/v8/build/standalone.gypi (34 changed lines)

@@ -33,6 +33,8 @@
'includes': ['toolchain.gypi'],
'variables': {
'component%': 'static_library',
'make_clang_dir%': '../third_party/llvm-build/Release+Asserts',
'clang_xcode%': 0,
'asan%': 0,
'tsan%': 0,
'visibility%': 'hidden',
@@ -91,6 +93,12 @@
# near-release speeds.
'v8_optimized_debug%': 0,
# Use external files for startup data blobs:
# the JS builtins sources and the start snapshot.
# Embedders that don't use standalone.gypi will need to add
# their own default value.
'v8_use_external_startup_data%': 0,
# Relative path to icu.gyp from this file.
'icu_gyp_path': '../third_party/icu/icu.gyp',
@@ -127,6 +135,16 @@
'arm_fpu%': 'vfpv3',
'arm_float_abi%': 'default',
'arm_thumb': 'default',
# Default MIPS variable settings.
'mips_arch_variant%': 'r2',
# Possible values fp32, fp64, fpxx.
# fp32 - 32 32-bit FPU registers are available, doubles are placed in
# register pairs.
# fp64 - 32 64-bit FPU registers are available.
# fpxx - compatibility mode, it chooses fp32 or fp64 depending on runtime
# detection
'mips_fpu_mode%': 'fp32',
},
'target_defaults': {
'variables': {
@@ -368,6 +386,7 @@
}], # OS=="win"
['OS=="mac"', {
'xcode_settings': {
'SDKROOT': 'macosx',
'SYMROOT': '<(DEPTH)/xcodebuild',
},
'target_defaults': {
@@ -422,5 +441,20 @@
], # target_conditions
}, # target_defaults
}], # OS=="mac"
['clang==1 and ((OS!="mac" and OS!="ios") or clang_xcode==0) '
'and OS!="win"', {
'make_global_settings': [
['CC', '<(make_clang_dir)/bin/clang'],
['CXX', '<(make_clang_dir)/bin/clang++'],
['CC.host', '$(CC)'],
['CXX.host', '$(CXX)'],
],
}],
['clang==1 and OS=="win"', {
'make_global_settings': [
# On Windows, gyp's ninja generator only looks at CC.
['CC', '<(make_clang_dir)/bin/clang-cl'],
],
}],
],
}

deps/v8/build/toolchain.gypi (500 changed lines)

@@ -30,7 +30,6 @@
{
'variables': {
'msvs_use_common_release': 0,
'gcc_version%': 'unknown',
'clang%': 0,
'v8_target_arch%': '<(target_arch)',
# Native Client builds currently use the V8 ARM JIT and
@@ -55,17 +54,6 @@
# Similar to the ARM hard float ABI but on MIPS.
'v8_use_mips_abi_hardfloat%': 'true',
# Default arch variant for MIPS.
'mips_arch_variant%': 'r2',
# Possible values fp32, fp64, fpxx.
# fp32 - 32 32-bit FPU registers are available, doubles are placed in
# register pairs.
# fp64 - 32 64-bit FPU registers are available.
# fpxx - compatibility mode, it chooses fp32 or fp64 depending on runtime
# detection
'mips_fpu_mode%': 'fp32',
'v8_enable_backtrace%': 0,
# Enable profiling support. Only required on Windows.
@@ -278,10 +266,27 @@
'V8_TARGET_ARCH_MIPS',
],
'conditions': [
['v8_target_arch==target_arch and android_webview_build==0', {
# Target built with a Mips CXX compiler.
'target_conditions': [
['_toolset=="target"', {
[ 'v8_can_use_fpu_instructions=="true"', {
'defines': [
'CAN_USE_FPU_INSTRUCTIONS',
],
}],
[ 'v8_use_mips_abi_hardfloat=="true"', {
'defines': [
'__mips_hard_float=1',
'CAN_USE_FPU_INSTRUCTIONS',
],
}, {
'defines': [
'__mips_soft_float=1'
]
}],
],
'target_conditions': [
['_toolset=="target"', {
'conditions': [
['v8_target_arch==target_arch and android_webview_build==0', {
# Target built with a Mips CXX compiler.
'cflags': ['-EB'],
'ldflags': ['-EB'],
'conditions': [
@@ -292,16 +297,11 @@
'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'],
}],
['mips_fpu_mode=="fp64"', {
'cflags': ['-mfp64'],
}],
['mips_fpu_mode=="fpxx"', {
'cflags': ['-mfpxx'],
}],
['mips_fpu_mode=="fp32"', {
'cflags': ['-mfp32'],
}],
['mips_arch_variant=="r6"', {
'defines': [
'_MIPS_ARCH_MIPS32R6',
'FPU_MODE_FP64',
],
'cflags!': ['-mfp32', '-mfpxx'],
'cflags': ['-mips32r6', '-Wa,-mips32r6'],
'ldflags': [
@@ -311,23 +311,145 @@
],
}],
['mips_arch_variant=="r2"', {
'conditions': [
[ 'mips_fpu_mode=="fp64"', {
'defines': [
'_MIPS_ARCH_MIPS32R2',
'FPU_MODE_FP64',
],
'cflags': ['-mfp64'],
}],
['mips_fpu_mode=="fpxx"', {
'defines': [
'_MIPS_ARCH_MIPS32R2',
'FPU_MODE_FPXX',
],
'cflags': ['-mfpxx'],
}],
['mips_fpu_mode=="fp32"', {
'defines': [
'_MIPS_ARCH_MIPS32R2',
'FPU_MODE_FP32',
],
'cflags': ['-mfp32'],
}],
],
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
'ldflags': ['-mips32r2'],
}],
['mips_arch_variant=="r1"', {
'defines': [
'FPU_MODE_FP32',
],
'cflags!': ['-mfp64', '-mfpxx'],
'cflags': ['-mips32', '-Wa,-mips32'],
'ldflags': ['-mips32'],
}],
['mips_arch_variant=="rx"', {
'defines': [
'_MIPS_ARCH_MIPS32RX',
'FPU_MODE_FPXX',
],
'cflags!': ['-mfp64', '-mfp32'],
'cflags': ['-mips32', '-Wa,-mips32', '-mfpxx'],
'ldflags': ['-mips32'],
}],
],
}, {
# 'v8_target_arch!=target_arch'
# Target not built with an MIPS CXX compiler (simulator build).
'conditions': [
['mips_arch_variant=="r6"', {
'defines': [
'_MIPS_ARCH_MIPS32R6',
'FPU_MODE_FP64',
],
}],
['mips_arch_variant=="r2"', {
'conditions': [
[ 'mips_fpu_mode=="fp64"', {
'defines': [
'_MIPS_ARCH_MIPS32R2',
'FPU_MODE_FP64',
],
}],
['mips_fpu_mode=="fpxx"', {
'defines': [
'_MIPS_ARCH_MIPS32R2',
'FPU_MODE_FPXX',
],
}],
['mips_fpu_mode=="fp32"', {
'defines': [
'_MIPS_ARCH_MIPS32R2',
'FPU_MODE_FP32',
],
}],
],
}],
['mips_arch_variant=="r1"', {
'defines': [
'FPU_MODE_FP32',
],
}],
['mips_arch_variant=="rx"', {
'defines': [
'_MIPS_ARCH_MIPS32RX',
'FPU_MODE_FPXX',
],
}],
],
}],
],
}],
}], #_toolset=="target"
['_toolset=="host"', {
'conditions': [
['mips_arch_variant=="rx"', {
'defines': [
'_MIPS_ARCH_MIPS32RX',
'FPU_MODE_FPXX',
],
}],
['mips_arch_variant=="r6"', {
'defines': [
'_MIPS_ARCH_MIPS32R6',
'FPU_MODE_FP64',
],
}],
['mips_arch_variant=="r2"', {
'conditions': [
['mips_fpu_mode=="fp64"', {
'defines': [
'_MIPS_ARCH_MIPS32R2',
'FPU_MODE_FP64',
],
}],
['mips_fpu_mode=="fpxx"', {
'defines': [
'_MIPS_ARCH_MIPS32R2',
'FPU_MODE_FPXX',
],
}],
['mips_fpu_mode=="fp32"', {
'defines': [
'_MIPS_ARCH_MIPS32R2',
'FPU_MODE_FP32'
],
}],
],
}],
['mips_arch_variant=="r1"', {
'defines': ['FPU_MODE_FP32',],
}],
]
}], #_toolset=="host"
],
}], # v8_target_arch=="mips"
['v8_target_arch=="mipsel"', {
'defines': [
'V8_TARGET_ARCH_MIPS',
],
'conditions': [
[ 'v8_can_use_fpu_instructions=="true"', {
'defines': [
'CAN_USE_FPU_INSTRUCTIONS',
@@ -343,46 +465,12 @@
'__mips_soft_float=1'
],
}],
['mips_arch_variant=="rx"', {
'defines': [
'_MIPS_ARCH_MIPS32RX',
'FPU_MODE_FPXX',
],
}],
['mips_arch_variant=="r6"', {
'defines': [
'_MIPS_ARCH_MIPS32R6',
'FPU_MODE_FP64',
],
}],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS32R2',],
'conditions': [
['mips_fpu_mode=="fp64"', {
'defines': ['FPU_MODE_FP64',],
}],
['mips_fpu_mode=="fpxx"', {
'defines': ['FPU_MODE_FPXX',],
}],
['mips_fpu_mode=="fp32"', {
'defines': ['FPU_MODE_FP32',],
}],
],
}],
['mips_arch_variant=="r1"', {
'defines': ['FPU_MODE_FP32',],
}],
],
}], # v8_target_arch=="mips"
['v8_target_arch=="mipsel"', {
'defines': [
'V8_TARGET_ARCH_MIPS',
],
'conditions': [
['v8_target_arch==target_arch and android_webview_build==0', {
# Target built with a Mips CXX compiler.
'target_conditions': [
['_toolset=="target"', {
'target_conditions': [
['_toolset=="target"', {
'conditions': [
['v8_target_arch==target_arch and android_webview_build==0', {
# Target built with a Mips CXX compiler.
'cflags': ['-EL'],
'ldflags': ['-EL'],
'conditions': [
@@ -393,16 +481,11 @@
'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'],
}],
['mips_fpu_mode=="fp64"', {
'cflags': ['-mfp64'],
}],
['mips_fpu_mode=="fpxx"', {
'cflags': ['-mfpxx'],
}],
['mips_fpu_mode=="fp32"', {
'cflags': ['-mfp32'],
}],
['mips_arch_variant=="r6"', {
'defines': [
'_MIPS_ARCH_MIPS32R6',
'FPU_MODE_FP64',
],
'cflags!': ['-mfp32', '-mfpxx'],
'cflags': ['-mips32r6', '-Wa,-mips32r6'],
'ldflags': [
@@ -412,6 +495,29 @@
],
}],
['mips_arch_variant=="r2"', {
'conditions': [
[ 'mips_fpu_mode=="fp64"', {
'defines': [
'_MIPS_ARCH_MIPS32R2',
'FPU_MODE_FP64',
],
'cflags': ['-mfp64'],
}],
['mips_fpu_mode=="fpxx"', {
'defines': [
'_MIPS_ARCH_MIPS32R2',
'FPU_MODE_FPXX',
],
'cflags': ['-mfpxx'],
}],
['mips_fpu_mode=="fp32"', {
'defines': [
'_MIPS_ARCH_MIPS32R2',
'FPU_MODE_FP32',
],
'cflags': ['-mfp32'],
}],
],
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
'ldflags': ['-mips32r2'],
}],
@@ -421,18 +527,130 @@
'ldflags': ['-mips32'],
}],
['mips_arch_variant=="rx"', {
'defines': [
'_MIPS_ARCH_MIPS32RX',
'FPU_MODE_FPXX',
],
'cflags!': ['-mfp64', '-mfp32'],
'cflags': ['-mips32', '-Wa,-mips32', '-mfpxx'],
'ldflags': ['-mips32'],
}],
['mips_arch_variant=="loongson"', {
'defines': [
'_MIPS_ARCH_LOONGSON',
'FPU_MODE_FP32',
],
'cflags!': ['-mfp64', '-mfp32', '-mfpxx'],
'cflags': ['-mips3', '-Wa,-mips3'],
}],
],
}, {
# 'v8_target_arch!=target_arch'
# Target not built with an MIPS CXX compiler (simulator build).
'conditions': [
['mips_arch_variant=="r6"', {
'defines': [
'_MIPS_ARCH_MIPS32R6',
'FPU_MODE_FP64',
],
}],
['mips_arch_variant=="r2"', {
'conditions': [
[ 'mips_fpu_mode=="fp64"', {
'defines': [
'_MIPS_ARCH_MIPS32R2',
'FPU_MODE_FP64',
],
}],
['mips_fpu_mode=="fpxx"', {
'defines': [
'_MIPS_ARCH_MIPS32R2',
'FPU_MODE_FPXX',
],
}],
['mips_fpu_mode=="fp32"', {
'defines': [
'_MIPS_ARCH_MIPS32R2',
'FPU_MODE_FP32',
],
}],
],
}],
['mips_arch_variant=="r1"', {
'defines': [
'FPU_MODE_FP32',
],
}],
['mips_arch_variant=="rx"', {
'defines': [
'_MIPS_ARCH_MIPS32RX',
'FPU_MODE_FPXX',
],
}],
['mips_arch_variant=="loongson"', {
'defines': [
'_MIPS_ARCH_LOONGSON',
'FPU_MODE_FP32',
],
}],
],
}],
],
}], #_toolset=="target
['_toolset=="host"', {
'conditions': [
['mips_arch_variant=="rx"', {
'defines': [
'_MIPS_ARCH_MIPS32RX',
'FPU_MODE_FPXX',
],
}],
['mips_arch_variant=="r6"', {
'defines': [
'_MIPS_ARCH_MIPS32R6',
'FPU_MODE_FP64',
],
}],
['mips_arch_variant=="r2"', {
'conditions': [
['mips_fpu_mode=="fp64"', {
'defines': [
'_MIPS_ARCH_MIPS32R2',
'FPU_MODE_FP64',
],
}],
['mips_fpu_mode=="fpxx"', {
'defines': [
'_MIPS_ARCH_MIPS32R2',
'FPU_MODE_FPXX',
],
}],
['mips_fpu_mode=="fp32"', {
'defines': [
'_MIPS_ARCH_MIPS32R2',
'FPU_MODE_FP32'
],
}],
],
}],
['mips_arch_variant=="r1"', {
'defines': ['FPU_MODE_FP32',],
}],
['mips_arch_variant=="loongson"', {
'defines': [
'_MIPS_ARCH_LOONGSON',
'FPU_MODE_FP32',
],
}],
]
}],
],
}], # v8_target_arch=="mipsel"
['v8_target_arch=="mips64el"', {
'defines': [
'V8_TARGET_ARCH_MIPS64',
],
'conditions': [
[ 'v8_can_use_fpu_instructions=="true"', {
'defines': [
'CAN_USE_FPU_INSTRUCTIONS',
@@ -448,52 +666,11 @@
'__mips_soft_float=1'
],
}],
['mips_arch_variant=="rx"', {
'defines': [
'_MIPS_ARCH_MIPS32RX',
'FPU_MODE_FPXX',
],
}],
['mips_arch_variant=="r6"', {
'defines': [
'_MIPS_ARCH_MIPS32R6',
'FPU_MODE_FP64',
],
}],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS32R2',],
],
'target_conditions': [
['_toolset=="target"', {
'conditions': [
['mips_fpu_mode=="fp64"', {
'defines': ['FPU_MODE_FP64',],
}],
['mips_fpu_mode=="fpxx"', {
'defines': ['FPU_MODE_FPXX',],
}],
['mips_fpu_mode=="fp32"', {
'defines': ['FPU_MODE_FP32',],
}],
],
}],
['mips_arch_variant=="r1"', {
'defines': ['FPU_MODE_FP32',],
}],
['mips_arch_variant=="loongson"', {
'defines': [
'_MIPS_ARCH_LOONGSON',
'FPU_MODE_FP32',
],
}],
],
}], # v8_target_arch=="mipsel"
['v8_target_arch=="mips64el"', {
'defines': [
'V8_TARGET_ARCH_MIPS64',
],
'conditions': [
['v8_target_arch==target_arch and android_webview_build==0', {
# Target built with a Mips CXX compiler.
'target_conditions': [
['_toolset=="target"', {
['v8_target_arch==target_arch and android_webview_build==0', {
'cflags': ['-EL'],
'ldflags': ['-EL'],
'conditions': [
@@ -505,6 +682,7 @@
'ldflags': ['-msoft-float'],
}],
['mips_arch_variant=="r6"', {
'defines': ['_MIPS_ARCH_MIPS64R6',],
'cflags': ['-mips64r6', '-mabi=64', '-Wa,-mips64r6'],
'ldflags': [
'-mips64r6', '-mabi=64',
@@ -513,6 +691,7 @@
],
}],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS64R2',],
'cflags': ['-mips64r2', '-mabi=64', '-Wa,-mips64r2'],
'ldflags': [
'-mips64r2', '-mabi=64',
@@ -521,30 +700,30 @@
],
}],
],
}, {
# 'v8_target_arch!=target_arch'
# Target not built with an MIPS CXX compiler (simulator build).
'conditions': [
['mips_arch_variant=="r6"', {
'defines': ['_MIPS_ARCH_MIPS64R6',],
}],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS64R2',],
}],
],
}],
],
}],
[ 'v8_can_use_fpu_instructions=="true"', {
'defines': [
'CAN_USE_FPU_INSTRUCTIONS',
],
}],
[ 'v8_use_mips_abi_hardfloat=="true"', {
'defines': [
'__mips_hard_float=1',
'CAN_USE_FPU_INSTRUCTIONS',
],
}, {
'defines': [
'__mips_soft_float=1'
}], #'_toolset=="target"
['_toolset=="host"', {
'conditions': [
['mips_arch_variant=="r6"', {
'defines': ['_MIPS_ARCH_MIPS64R6',],
}],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS64R2',],
}],
],
}],
['mips_arch_variant=="r6"', {
'defines': ['_MIPS_ARCH_MIPS64R6',],
}],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS64R2',],
}],
}], #'_toolset=="host"
],
}], # v8_target_arch=="mips64el"
['v8_target_arch=="x64"', {
@@ -724,6 +903,9 @@
},
}],
],
'defines': [
'ENABLE_SLOW_DCHECKS',
],
}, # DebugBase0
# Abstract configuration for v8_optimized_debug == 1.
'DebugBase1': {
@@ -748,6 +930,9 @@
'LinkIncremental': '2',
},
},
'defines': [
'ENABLE_SLOW_DCHECKS',
],
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \
OS=="qnx"', {
@@ -762,14 +947,6 @@
'-ffunction-sections',
'-O1', # TODO(2807) should be -O3.
],
'conditions': [
['gcc_version==44 and clang==0', {
'cflags': [
# Avoid crashes with gcc 4.4 in the v8 test suite.
'-fno-tree-vrp',
],
}],
],
}],
['OS=="mac"', {
'xcode_settings': {
@@ -816,9 +993,6 @@
'-fdata-sections',
'-ffunction-sections',
],
'defines': [
'OPTIMIZED_DEBUG'
],
'conditions': [
# TODO(crbug.com/272548): Avoid -O3 in NaCl
['nacl_target_arch=="none"', {
@@ -828,12 +1002,6 @@
'cflags': ['-O2'],
'cflags!': ['-O3'],
}],
['gcc_version==44 and clang==0', {
'cflags': [
# Avoid crashes with gcc 4.4 in the v8 test suite.
'-fno-tree-vrp',
],
}],
],
}],
['OS=="mac"', {
@@ -852,7 +1020,8 @@
'V8_ENABLE_CHECKS',
'OBJECT_PRINT',
'VERIFY_HEAP',
'DEBUG'
'DEBUG',
'TRACE_MAPS'
],
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \
@@ -873,6 +1042,7 @@
# TODO(2304): pass DISABLE_DEBUG_ASSERT instead of hiding DEBUG.
'defines!': [
'DEBUG',
'ENABLE_SLOW_DCHECKS',
],
}],
],
@@ -905,12 +1075,6 @@
'<(wno_array_bounds)',
],
'conditions': [
[ 'gcc_version==44 and clang==0', {
'cflags': [
# Avoid crashes with gcc 4.4 in the v8 test suite.
'-fno-tree-vrp',
],
}],
# TODO(crbug.com/272548): Avoid -O3 in NaCl
['nacl_target_arch=="none"', {
'cflags': ['-O3'],
@ -931,14 +1095,6 @@
'-ffunction-sections',
'-O2',
],
'conditions': [
[ 'gcc_version==44 and clang==0', {
'cflags': [
# Avoid crashes with gcc 4.4 in the v8 test suite.
'-fno-tree-vrp',
],
}],
],
}],
['OS=="mac"', {
'xcode_settings': {

deps/v8/codereview.settings (3 changed lines)

@@ -1,8 +1,9 @@
CODE_REVIEW_SERVER: https://codereview.chromium.org
CC_LIST: v8-dev@googlegroups.com
VIEW_VC: https://code.google.com/p/v8/source/detail?r=
VIEW_VC: https://chromium.googlesource.com/v8/v8/+/
STATUS: http://v8-status.appspot.com/status
TRY_ON_UPLOAD: False
TRYSERVER_SVN_URL: svn://svn.chromium.org/chrome-try-v8
TRYSERVER_ROOT: v8
PROJECT: v8
PENDING_REF_PREFIX: refs/pending/

deps/v8/include/v8.h (394 changed lines)

@@ -140,6 +140,17 @@ template<typename T> class CustomArguments;
class PropertyCallbackArguments;
class FunctionCallbackArguments;
class GlobalHandles;
class CallbackData {
public:
V8_INLINE v8::Isolate* GetIsolate() const { return isolate_; }
protected:
explicit CallbackData(v8::Isolate* isolate) : isolate_(isolate) {}
private:
v8::Isolate* isolate_;
};
}
@@ -418,22 +429,53 @@ template <class T> class Eternal {
};
template<class T, class P>
class WeakCallbackData {
template <typename T>
class PhantomCallbackData : public internal::CallbackData {
public:
typedef void (*Callback)(const PhantomCallbackData<T>& data);
V8_INLINE T* GetParameter() const { return parameter_; }
PhantomCallbackData<T>(Isolate* isolate, T* parameter)
: internal::CallbackData(isolate), parameter_(parameter) {}
private:
T* parameter_;
};
template <class T, class P>
class WeakCallbackData : public PhantomCallbackData<P> {
public:
typedef void (*Callback)(const WeakCallbackData<T, P>& data);
V8_INLINE Isolate* GetIsolate() const { return isolate_; }
V8_INLINE Local<T> GetValue() const { return handle_; }
V8_INLINE P* GetParameter() const { return parameter_; }
private:
friend class internal::GlobalHandles;
WeakCallbackData(Isolate* isolate, Local<T> handle, P* parameter)
: isolate_(isolate), handle_(handle), parameter_(parameter) { }
Isolate* isolate_;
WeakCallbackData(Isolate* isolate, P* parameter, Local<T> handle)
: PhantomCallbackData<P>(isolate, parameter), handle_(handle) {}
Local<T> handle_;
P* parameter_;
};
template <typename T, typename U>
class InternalFieldsCallbackData : public internal::CallbackData {
public:
typedef void (*Callback)(const InternalFieldsCallbackData<T, U>& data);
InternalFieldsCallbackData(Isolate* isolate, T* internalField1,
U* internalField2)
: internal::CallbackData(isolate),
internal_field1_(internalField1),
internal_field2_(internalField2) {}
V8_INLINE T* GetInternalField1() const { return internal_field1_; }
V8_INLINE U* GetInternalField2() const { return internal_field2_; }
private:
T* internal_field1_;
U* internal_field2_;
};
@@ -471,22 +513,23 @@ template <class T> class PersistentBase {
template <class S>
V8_INLINE void Reset(Isolate* isolate, const PersistentBase<S>& other);
V8_INLINE bool IsEmpty() const { return val_ == 0; }
V8_INLINE bool IsEmpty() const { return val_ == NULL; }
V8_INLINE void Empty() { val_ = 0; }
template <class S>
V8_INLINE bool operator==(const PersistentBase<S>& that) const {
internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
internal::Object** b = reinterpret_cast<internal::Object**>(that.val_);
if (a == 0) return b == 0;
if (b == 0) return false;
if (a == NULL) return b == NULL;
if (b == NULL) return false;
return *a == *b;
}
template <class S> V8_INLINE bool operator==(const Handle<S>& that) const {
internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
internal::Object** b = reinterpret_cast<internal::Object**>(that.val_);
if (a == 0) return b == 0;
if (b == 0) return false;
if (a == NULL) return b == NULL;
if (b == NULL) return false;
return *a == *b;
}
@@ -519,14 +562,17 @@ template <class T> class PersistentBase {
// Phantom persistents work like weak persistents, except that the pointer to
// the object being collected is not available in the finalization callback.
// This enables the garbage collector to collect the object and any objects
// it references transitively in one GC cycle.
// it references transitively in one GC cycle. At the moment you can either
// specify a parameter for the callback or the location of two internal
// fields in the dying object.
template <typename P>
V8_INLINE void SetPhantom(P* parameter,
typename WeakCallbackData<T, P>::Callback callback);
typename PhantomCallbackData<P>::Callback callback);
template <typename S, typename P>
V8_INLINE void SetPhantom(P* parameter,
typename WeakCallbackData<S, P>::Callback callback);
template <typename P, typename Q>
V8_INLINE void SetPhantom(
void (*callback)(const InternalFieldsCallbackData<P, Q>&),
int internal_field_index1, int internal_field_index2);
template<typename P>
V8_INLINE P* ClearWeak();
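
For context, a minimal sketch of how an embedder might use the two SetPhantom flavors declared above. Only the signatures shown in this hunk are assumed; the NativeState type, its field layout, and the helper function are hypothetical.

#include <v8.h>

struct NativeState { int resource_id; };  // hypothetical native payload

// Parameter flavor, matching PhantomCallbackData<P>::Callback. The JS object
// is already unreachable when this runs, so unlike WeakCallbackData there is
// no GetValue(); only GetParameter() and GetIsolate() are available.
static void OnPhantom(const v8::PhantomCallbackData<NativeState>& data) {
  delete data.GetParameter();  // release the native side
}

// Internal-fields flavor: V8 hands back the pointers it found in two
// internal fields of the dying wrapper object.
static void OnWrapperPhantom(
    const v8::InternalFieldsCallbackData<NativeState, void>& data) {
  delete data.GetInternalField1();
}

void MakePhantom(v8::Isolate* isolate, v8::Local<v8::Object> obj,
                 v8::Persistent<v8::Object>* handle) {
  handle->Reset(isolate, obj);
  handle->SetPhantom(new NativeState{1}, OnPhantom);
  // Or, for a wrapper holding native pointers in internal fields 0 and 1:
  // handle->SetPhantom(OnWrapperPhantom, 0, 1);
}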
@@ -1011,7 +1057,7 @@ class V8_EXPORT Script {
/**
* Runs the script returning the resulting value. It will be run in the
* context in which it was created (ScriptCompiler::CompileBound or
* UnboundScript::BindToGlobalContext()).
* UnboundScript::BindToCurrentContext()).
*/
Local<Value> Run();
@@ -1045,7 +1091,11 @@ class V8_EXPORT ScriptCompiler {
BufferOwned
};
CachedData() : data(NULL), length(0), buffer_policy(BufferNotOwned) {}
CachedData()
: data(NULL),
length(0),
rejected(false),
buffer_policy(BufferNotOwned) {}
// If buffer_policy is BufferNotOwned, the caller keeps the ownership of
// data and guarantees that it stays alive until the CachedData object is
@@ -1058,6 +1108,7 @@ class V8_EXPORT ScriptCompiler {
// which will be called when V8 no longer needs the data.
const uint8_t* data;
int length;
bool rejected;
BufferPolicy buffer_policy;
private:
@@ -1238,6 +1289,26 @@ class V8_EXPORT ScriptCompiler {
static Local<Script> Compile(Isolate* isolate, StreamedSource* source,
Handle<String> full_source_string,
const ScriptOrigin& origin);
/**
* Return a version tag for CachedData for the current V8 version & flags.
*
* This value is meant only for determining whether a previously generated
* CachedData instance is still valid; the tag has no other meaning.
*
* Background: The data carried by CachedData may depend on the exact
* V8 version number or current compiler flags. This means when
* persisting CachedData, the embedder must take care to not pass in
* data from another V8 version, or the same version with different
* features enabled.
*
* The easiest way to do so is to clear the embedder's cache on any
* such change.
*
* Alternatively, this tag can be stored alongside the cached data and
* compared when it is being used.
*/
static uint32_t CachedDataVersionTag();
};
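
A hedged sketch of the tag-alongside-cache pattern the comment above recommends. Only CachedDataVersionTag and the new rejected field come from this hunk; the packing scheme and the (data, length) CachedData constructor with its BufferNotOwned default are assumptions.

#include <v8.h>
#include <cstring>
#include <vector>

// Hypothetical storage format: a 4-byte version tag followed by cache bytes.
std::vector<uint8_t> PackCache(const v8::ScriptCompiler::CachedData* cache) {
  uint32_t tag = v8::ScriptCompiler::CachedDataVersionTag();
  std::vector<uint8_t> blob(sizeof(tag) + cache->length);
  std::memcpy(blob.data(), &tag, sizeof(tag));
  std::memcpy(blob.data() + sizeof(tag), cache->data, cache->length);
  return blob;
}

// Returns NULL when the stored tag does not match the running V8; the
// embedder should then recompile and refresh its cache entry.
v8::ScriptCompiler::CachedData* UnpackCache(const std::vector<uint8_t>& blob) {
  uint32_t tag;
  std::memcpy(&tag, blob.data(), sizeof(tag));
  if (tag != v8::ScriptCompiler::CachedDataVersionTag()) return NULL;
  // BufferNotOwned: the caller must keep `blob` alive while V8 uses the data.
  return new v8::ScriptCompiler::CachedData(
      blob.data() + sizeof(tag), static_cast<int>(blob.size() - sizeof(tag)));
}

Even when the tag matches, V8 may still discard the blob at compile time; the rejected field introduced above is how the embedder learns that and knows to drop the stale cache entry.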
@@ -1797,6 +1868,15 @@ class V8_EXPORT Boolean : public Primitive {
*/
class V8_EXPORT Name : public Primitive {
public:
/**
* Returns the identity hash for this object. The current implementation
* uses an inline property on the object to store the identity hash.
*
* The return value will never be 0. Also, it is not guaranteed to be
* unique.
*/
int GetIdentityHash();
V8_INLINE static Name* Cast(v8::Value* obj);
private:
static void CheckCast(v8::Value* obj);
@ -2458,6 +2538,8 @@ class V8_EXPORT Object : public Value {
/** Gets the number of internal fields for this Object. */
int InternalFieldCount();
static const int kNoInternalFieldIndex = -1;
/** Same as above, but works for Persistents */
V8_INLINE static int InternalFieldCount(
const PersistentBase<Object>& object) {
@ -3520,6 +3602,51 @@ typedef void (*NamedPropertyEnumeratorCallback)(
const PropertyCallbackInfo<Array>& info);
// TODO(dcarney): Deprecate and remove previous typedefs, and replace
// GenericNamedPropertyFooCallback with just NamedPropertyFooCallback.
/**
* GenericNamedProperty[Getter|Setter] are used as interceptors on objects.
* See ObjectTemplate::SetNamedPropertyHandler.
*/
typedef void (*GenericNamedPropertyGetterCallback)(
Local<Name> property, const PropertyCallbackInfo<Value>& info);
/**
* Returns the value if the setter intercepts the request.
* Otherwise, returns an empty handle.
*/
typedef void (*GenericNamedPropertySetterCallback)(
Local<Name> property, Local<Value> value,
const PropertyCallbackInfo<Value>& info);
/**
* Returns a non-empty handle if the interceptor intercepts the request.
* The result is an integer encoding property attributes (like v8::None,
* v8::DontEnum, etc.)
*/
typedef void (*GenericNamedPropertyQueryCallback)(
Local<Name> property, const PropertyCallbackInfo<Integer>& info);
/**
* Returns a non-empty handle if the deleter intercepts the request.
* The return value is true if the property could be deleted and false
* otherwise.
*/
typedef void (*GenericNamedPropertyDeleterCallback)(
Local<Name> property, const PropertyCallbackInfo<Boolean>& info);
/**
* Returns an array containing the names of the properties the named
* property getter intercepts.
*/
typedef void (*GenericNamedPropertyEnumeratorCallback)(
const PropertyCallbackInfo<Array>& info);
/**
* Returns the value of the property if the getter intercepts the
* request. Otherwise, returns an empty handle.
@ -3772,6 +3899,56 @@ class V8_EXPORT FunctionTemplate : public Template {
};
struct NamedPropertyHandlerConfiguration {
NamedPropertyHandlerConfiguration(
/** Note: getter is required **/
GenericNamedPropertyGetterCallback getter = 0,
GenericNamedPropertySetterCallback setter = 0,
GenericNamedPropertyQueryCallback query = 0,
GenericNamedPropertyDeleterCallback deleter = 0,
GenericNamedPropertyEnumeratorCallback enumerator = 0,
Handle<Value> data = Handle<Value>())
: getter(getter),
setter(setter),
query(query),
deleter(deleter),
enumerator(enumerator),
data(data) {}
GenericNamedPropertyGetterCallback getter;
GenericNamedPropertySetterCallback setter;
GenericNamedPropertyQueryCallback query;
GenericNamedPropertyDeleterCallback deleter;
GenericNamedPropertyEnumeratorCallback enumerator;
Handle<Value> data;
};
struct IndexedPropertyHandlerConfiguration {
IndexedPropertyHandlerConfiguration(
/** Note: getter is required **/
IndexedPropertyGetterCallback getter = 0,
IndexedPropertySetterCallback setter = 0,
IndexedPropertyQueryCallback query = 0,
IndexedPropertyDeleterCallback deleter = 0,
IndexedPropertyEnumeratorCallback enumerator = 0,
Handle<Value> data = Handle<Value>())
: getter(getter),
setter(setter),
query(query),
deleter(deleter),
enumerator(enumerator),
data(data) {}
IndexedPropertyGetterCallback getter;
IndexedPropertySetterCallback setter;
IndexedPropertyQueryCallback query;
IndexedPropertyDeleterCallback deleter;
IndexedPropertyEnumeratorCallback enumerator;
Handle<Value> data;
};
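A sketch of wiring the new configuration structs up via SetHandler (declared
further down in this diff); InterceptedGetter and Install are hypothetical
embedder code, only the getter is supplied, and the remaining callbacks keep
their 0 defaults:

void InterceptedGetter(v8::Local<v8::Name> name,
                       const v8::PropertyCallbackInfo<v8::Value>& info) {
  // Unlike the old string-only handlers, the name may also be a Symbol.
  if (name->IsSymbol()) return;  // not intercepted; fall through
  info.GetReturnValue().Set(
      v8::String::NewFromUtf8(info.GetIsolate(), "intercepted"));
}

void Install(v8::Isolate* isolate, v8::Local<v8::ObjectTemplate> templ) {
  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(InterceptedGetter));
}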
/**
* An ObjectTemplate is used to create objects at runtime.
*
@ -3841,6 +4018,9 @@ class V8_EXPORT ObjectTemplate : public Template {
* from this object template, the provided callback is invoked instead of
* accessing the property directly on the JavaScript object.
*
* Note that new code should use the second version that can intercept
* symbol-named properties as well as string-named properties.
*
* \param getter The callback to invoke when getting a property.
* \param setter The callback to invoke when setting a property.
* \param query The callback to invoke to check if a property is present,
@ -3851,6 +4031,7 @@ class V8_EXPORT ObjectTemplate : public Template {
* \param data A piece of data that will be passed to the callbacks
* whenever they are invoked.
*/
// TODO(dcarney): deprecate
void SetNamedPropertyHandler(
NamedPropertyGetterCallback getter,
NamedPropertySetterCallback setter = 0,
@ -3858,6 +4039,7 @@ class V8_EXPORT ObjectTemplate : public Template {
NamedPropertyDeleterCallback deleter = 0,
NamedPropertyEnumeratorCallback enumerator = 0,
Handle<Value> data = Handle<Value>());
void SetHandler(const NamedPropertyHandlerConfiguration& configuration);
/**
* Sets an indexed property handler on the object template.
@ -3875,14 +4057,18 @@ class V8_EXPORT ObjectTemplate : public Template {
* \param data A piece of data that will be passed to the callbacks
* whenever they are invoked.
*/
void SetHandler(const IndexedPropertyHandlerConfiguration& configuration);
// TODO(dcarney): deprecate
void SetIndexedPropertyHandler(
IndexedPropertyGetterCallback getter,
IndexedPropertySetterCallback setter = 0,
IndexedPropertyQueryCallback query = 0,
IndexedPropertyDeleterCallback deleter = 0,
IndexedPropertyEnumeratorCallback enumerator = 0,
Handle<Value> data = Handle<Value>());
Handle<Value> data = Handle<Value>()) {
SetHandler(IndexedPropertyHandlerConfiguration(getter, setter, query,
deleter, enumerator, data));
}
/**
* Sets the callback to be used when calling instances created from
* this template as a function. If no callback is set, instances
@ -4188,9 +4374,17 @@ class V8_EXPORT Exception {
static Local<Value> TypeError(Handle<String> message);
static Local<Value> Error(Handle<String> message);
static Local<Message> GetMessage(Handle<Value> exception);
/**
* Creates an error message for the given exception.
* Will try to reconstruct the original stack trace from the exception value,
* or capture the current stack trace if not available.
*/
static Local<Message> CreateMessage(Handle<Value> exception);
// DEPRECATED. Use GetMessage()->GetStackTrace()
/**
* Returns the original stack trace that was captured at the creation time
* of a given exception, or an empty handle if not available.
*/
static Local<StackTrace> GetStackTrace(Handle<Value> exception);
};
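A sketch of the suggested replacement flow for the deprecated
Exception::GetStackTrace(), assuming a TryCatch-based error path in the
embedder and <cstdio> for the output:

void Report(v8::TryCatch* try_catch) {
  v8::Local<v8::Message> message =
      v8::Exception::CreateMessage(try_catch->Exception());
  if (message.IsEmpty()) return;
  // message->GetStackTrace() replaces Exception::GetStackTrace(exception).
  v8::String::Utf8Value text(message->Get());
  fprintf(stderr, "line %d: %s\n", message->GetLineNumber(),
          *text ? *text : "<no message>");
}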
@ -4207,18 +4401,19 @@ typedef void* (*CreateHistogramCallback)(const char* name,
typedef void (*AddHistogramSampleCallback)(void* histogram, int sample);
// --- Memory Allocation Callback ---
enum ObjectSpace {
kObjectSpaceNewSpace = 1 << 0,
kObjectSpaceOldPointerSpace = 1 << 1,
kObjectSpaceOldDataSpace = 1 << 2,
kObjectSpaceCodeSpace = 1 << 3,
kObjectSpaceMapSpace = 1 << 4,
kObjectSpaceLoSpace = 1 << 5,
kObjectSpaceAll = kObjectSpaceNewSpace | kObjectSpaceOldPointerSpace |
kObjectSpaceOldDataSpace | kObjectSpaceCodeSpace | kObjectSpaceMapSpace |
kObjectSpaceLoSpace
};
enum ObjectSpace {
kObjectSpaceNewSpace = 1 << 0,
kObjectSpaceOldPointerSpace = 1 << 1,
kObjectSpaceOldDataSpace = 1 << 2,
kObjectSpaceCodeSpace = 1 << 3,
kObjectSpaceMapSpace = 1 << 4,
kObjectSpaceCellSpace = 1 << 5,
kObjectSpacePropertyCellSpace = 1 << 6,
kObjectSpaceLoSpace = 1 << 7,
kObjectSpaceAll = kObjectSpaceNewSpace | kObjectSpaceOldPointerSpace |
kObjectSpaceOldDataSpace | kObjectSpaceCodeSpace |
kObjectSpaceMapSpace | kObjectSpaceLoSpace
};
enum AllocationAction {
kAllocationActionAllocate = 1 << 0,
@ -4252,7 +4447,7 @@ class PromiseRejectMessage {
V8_INLINE PromiseRejectEvent GetEvent() const { return event_; }
V8_INLINE Handle<Value> GetValue() const { return value_; }
// DEPRECATED. Use v8::Exception::GetMessage(GetValue())->GetStackTrace()
// DEPRECATED. Use v8::Exception::CreateMessage(GetValue())->GetStackTrace()
V8_INLINE Handle<StackTrace> GetStackTrace() const { return stack_trace_; }
private:
@ -4617,6 +4812,8 @@ class V8_EXPORT Isolate {
/**
* Returns the entered isolate for the current thread or NULL in
* case there is no current isolate.
*
* This method must not be invoked before V8::Initialize() has been invoked.
*/
static Isolate* GetCurrent();
@ -4854,8 +5051,7 @@ class V8_EXPORT Isolate {
* Request V8 to interrupt long running JavaScript code and invoke
* the given |callback| passing the given |data| to it. After |callback|
* returns control will be returned to the JavaScript code.
* At any given moment V8 can remember only a single callback for the very
* last interrupt request.
* There may be a number of interrupt requests in flight.
* Can be called from another thread without acquiring a |Locker|.
* Registered |callback| must not reenter interrupted Isolate.
*/
@ -4865,7 +5061,8 @@ class V8_EXPORT Isolate {
* Clear interrupt request created by |RequestInterrupt|.
* Can be called from another thread without acquiring a |Locker|.
*/
void ClearInterrupt();
V8_DEPRECATED("There's no way to clear interrupts in flight.",
void ClearInterrupt());
/**
* Request garbage collection in this Isolate. It is only valid to call this
@ -4954,17 +5151,23 @@ class V8_EXPORT Isolate {
/**
* Optional notification that the embedder is idle.
* V8 uses the notification to reduce memory footprint.
* V8 uses the notification to perform garbage collection.
* This call can be used repeatedly if the embedder remains idle.
* Returns true if the embedder should stop calling IdleNotification
* until real work has been done. This indicates that V8 has done
* as much cleanup as it will be able to do.
*
* The idle_time_in_ms argument specifies the time V8 has to reduce
* the memory footprint. There is no guarantee that the actual work will be
* The idle_time_in_ms argument specifies the time V8 has to perform
* garbage collection. There is no guarantee that the actual work will be
* done within the time limit.
* The deadline_in_seconds argument specifies the deadline V8 has to finish
* garbage collection work. deadline_in_seconds is compared with
* MonotonicallyIncreasingTime() and should be based on the same timebase as
* that function. There is no guarantee that the actual work will be done
* within the time limit.
*/
bool IdleNotification(int idle_time_in_ms);
bool IdleNotificationDeadline(double deadline_in_seconds);
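A sketch of an idle-time tick using the new deadline variant; `platform` is
assumed to be the embedder's v8::Platform, whose MonotonicallyIncreasingTime()
supplies the required timebase:

// Offer V8 up to 10ms of idle work per tick.
bool DoIdleWork(v8::Isolate* isolate, v8::Platform* platform) {
  double deadline_in_seconds =
      platform->MonotonicallyIncreasingTime() + 10.0 / 1000.0;
  // Returns true once V8 has done all the cleanup it can; stop calling
  // until real work has been done again.
  return isolate->IdleNotificationDeadline(deadline_in_seconds);
}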
/**
* Optional notification that the system is running low on memory.
@ -4977,8 +5180,11 @@ class V8_EXPORT Isolate {
* these notifications to guide the GC heuristic. Returns the number
* of context disposals - including this one - since the last time
* V8 had a chance to clean up.
*
* The optional parameter |dependant_context| specifies whether the disposed
* context depended on state from other contexts or not.
*/
int ContextDisposedNotification();
int ContextDisposedNotification(bool dependant_context = true);
/**
* Allows the host application to provide the address of a function that is
@ -5127,42 +5333,11 @@ class V8_EXPORT Isolate {
class V8_EXPORT StartupData {
public:
enum CompressionAlgorithm {
kUncompressed,
kBZip2
};
const char* data;
int compressed_size;
int raw_size;
};
/**
* A helper class for driving V8 startup data decompression. It is based on
* "CompressedStartupData" API functions from the V8 class. It isn't mandatory
* for an embedder to use this class; instead, the API functions can be used
* directly.
*
* For an example of the class usage, see the "shell.cc" sample application.
*/
class V8_EXPORT StartupDataDecompressor { // NOLINT
public:
StartupDataDecompressor();
virtual ~StartupDataDecompressor();
int Decompress();
protected:
virtual int DecompressData(char* raw_data,
int* raw_data_size,
const char* compressed_data,
int compressed_data_size) = 0;
private:
char** raw_data;
};
/**
* EntropySource is used as a callback function when v8 needs a source
* of entropy.
@ -5219,30 +5394,6 @@ class V8_EXPORT V8 {
// TODO(dcarney): deprecate this.
V8_INLINE static bool IsDead();
/**
* The following 4 functions are to be used when V8 is built with
* the 'compress_startup_data' flag enabled. In this case, the
* embedder must decompress startup data prior to initializing V8.
*
* This is how the interaction with V8 should look:
* int compressed_data_count = v8::V8::GetCompressedStartupDataCount();
* v8::StartupData* compressed_data =
* new v8::StartupData[compressed_data_count];
* v8::V8::GetCompressedStartupData(compressed_data);
* ... decompress data (compressed_data can be updated in-place) ...
* v8::V8::SetDecompressedStartupData(compressed_data);
* ... now V8 can be initialized
* ... make sure the decompressed data stays valid until V8 shutdown
*
* A helper class StartupDataDecompressor is provided. It implements
* the protocol of the interaction described above, and can be used in
* most cases instead of calling these API functions directly.
*/
static StartupData::CompressionAlgorithm GetCompressedStartupDataAlgorithm();
static int GetCompressedStartupDataCount();
static void GetCompressedStartupData(StartupData* compressed_data);
static void SetDecompressedStartupData(StartupData* decompressed_data);
/**
* Hand startup data to V8, in case the embedder has chosen to build
* V8 with external startup data.
@ -5261,6 +5412,13 @@ class V8_EXPORT V8 {
static void SetNativesDataBlob(StartupData* startup_blob);
static void SetSnapshotDataBlob(StartupData* startup_blob);
/**
* Create a new isolate and context for the purpose of capturing a snapshot
* Returns { NULL, 0 } on failure.
* The caller owns the data array in the return value.
*/
static StartupData CreateSnapshotDataBlob();
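A sketch of the snapshot round trip under those ownership rules;
RunWithSnapshot is a hypothetical driver, and the blob must stay alive until
V8 shuts down:

int RunWithSnapshot() {
  v8::StartupData blob = v8::V8::CreateSnapshotDataBlob();
  if (blob.data == NULL) return 1;  // { NULL, 0 } signals failure
  v8::V8::SetSnapshotDataBlob(&blob);
  // ... initialize V8, run, then Dispose()/ShutdownPlatform() ...
  delete[] blob.data;  // the caller owns the data array
  return 0;
}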
/**
* Adds a message listener.
*
@ -5509,7 +5667,14 @@ class V8_EXPORT V8 {
static void DisposeGlobal(internal::Object** global_handle);
typedef WeakCallbackData<Value, void>::Callback WeakCallback;
static void MakeWeak(internal::Object** global_handle, void* data,
WeakCallback weak_callback, WeakHandleType phantom);
WeakCallback weak_callback);
static void MakePhantom(internal::Object** global_handle, void* data,
PhantomCallbackData<void>::Callback weak_callback);
static void MakePhantom(
internal::Object** global_handle,
InternalFieldsCallbackData<void, void>::Callback weak_callback,
int internal_field_index1,
int internal_field_index2 = Object::kNoInternalFieldIndex);
static void* ClearWeak(internal::Object** global_handle);
static void Eternalize(Isolate* isolate,
Value* handle,
@ -6118,12 +6283,12 @@ class Internals {
static const int kNodeClassIdOffset = 1 * kApiPointerSize;
static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
static const int kNodeStateMask = 0xf;
static const int kNodeStateMask = 0x7;
static const int kNodeStateIsWeakValue = 2;
static const int kNodeStateIsPendingValue = 3;
static const int kNodeStateIsNearDeathValue = 4;
static const int kNodeIsIndependentShift = 4;
static const int kNodeIsPartiallyDependentShift = 5;
static const int kNodeIsIndependentShift = 3;
static const int kNodeIsPartiallyDependentShift = 4;
static const int kJSObjectType = 0xbd;
static const int kFirstNonstringType = 0x80;
@ -6381,7 +6546,7 @@ void PersistentBase<T>::SetWeak(
TYPE_CHECK(S, T);
typedef typename WeakCallbackData<Value, void>::Callback Callback;
V8::MakeWeak(reinterpret_cast<internal::Object**>(this->val_), parameter,
reinterpret_cast<Callback>(callback), V8::NonphantomHandle);
reinterpret_cast<Callback>(callback));
}
@ -6395,21 +6560,24 @@ void PersistentBase<T>::SetWeak(
template <class T>
template <typename S, typename P>
template <typename P>
void PersistentBase<T>::SetPhantom(
P* parameter, typename WeakCallbackData<S, P>::Callback callback) {
TYPE_CHECK(S, T);
typedef typename WeakCallbackData<Value, void>::Callback Callback;
V8::MakeWeak(reinterpret_cast<internal::Object**>(this->val_), parameter,
reinterpret_cast<Callback>(callback), V8::PhantomHandle);
P* parameter, typename PhantomCallbackData<P>::Callback callback) {
typedef typename PhantomCallbackData<void>::Callback Callback;
V8::MakePhantom(reinterpret_cast<internal::Object**>(this->val_), parameter,
reinterpret_cast<Callback>(callback));
}
template <class T>
template <typename P>
template <typename U, typename V>
void PersistentBase<T>::SetPhantom(
P* parameter, typename WeakCallbackData<T, P>::Callback callback) {
SetPhantom<T, P>(parameter, callback);
void (*callback)(const InternalFieldsCallbackData<U, V>&),
int internal_field_index1, int internal_field_index2) {
typedef typename InternalFieldsCallbackData<void, void>::Callback Callback;
V8::MakePhantom(reinterpret_cast<internal::Object**>(this->val_),
reinterpret_cast<Callback>(callback), internal_field_index1,
internal_field_index2);
}

11
deps/v8/include/v8config.h

@ -142,13 +142,12 @@
// -----------------------------------------------------------------------------
// Compiler detection
//
// V8_CC_CLANG - Clang
// V8_CC_GNU - GNU C++
// V8_CC_GNU - GCC, or clang in gcc mode
// V8_CC_INTEL - Intel C++
// V8_CC_MINGW - Minimalist GNU for Windows
// V8_CC_MINGW32 - Minimalist GNU for Windows (mingw32)
// V8_CC_MINGW64 - Minimalist GNU for Windows (mingw-w64)
// V8_CC_MSVC - Microsoft Visual C/C++
// V8_CC_MSVC - Microsoft Visual C/C++, or clang in cl.exe mode
//
// C++11 feature detection
//
@ -193,7 +192,11 @@
#if defined(__clang__)
# define V8_CC_CLANG 1
#if defined(__GNUC__) // Clang in gcc mode.
# define V8_CC_GNU 1
#elif defined(_MSC_VER) // Clang in cl mode.
# define V8_CC_MSVC 1
#endif
// Clang defines __alignof__ as alias for __alignof
# define V8_HAS___ALIGNOF 1
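A sketch of how these macros are meant to be consumed: with this change,
clang takes the V8_CC_GNU or V8_CC_MSVC path of whichever compiler it
impersonates (MY_NOINLINE is a hypothetical embedder macro, not V8 API):

#if V8_CC_MSVC
# define MY_NOINLINE __declspec(noinline)       // MSVC, or clang in cl mode
#elif V8_CC_GNU
# define MY_NOINLINE __attribute__((noinline))  // GCC, or clang in gcc mode
#else
# define MY_NOINLINE
#endif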

385
deps/v8/samples/lineprocessor.cc

@ -1,385 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <include/v8.h>
#include <include/libplatform/libplatform.h>
#include <include/v8-debug.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/**
* This sample program demonstrates certain aspects of debugging a
* standalone V8-based application.
*
* The program reads the input stream, processes it line by line, and prints
* the result to the output. The actual processing is done by a custom
* JavaScript script, which is specified with command line parameters.
*
* The main cycle of the program sequentially reads lines from standard
* input, processes them, and prints the results to standard output until
* the input closes. There are two possible configurations for the main
* cycle.
*
* 1. The main cycle is on the C++ side. The program should be run with the
* --main-cycle-in-cpp option. The script must declare a function named
* "ProcessLine". The C++ main cycle reads lines and calls this function
* to process each one. This is a sample script:
function ProcessLine(input_line) {
return ">>>" + input_line + "<<<";
}
*
* 2. The main cycle is in JavaScript. The program should be run with the
* --main-cycle-in-js option. The script is run only once and is given an
* API of two global functions: "read_line" and "print". It should read the
* input and print the converted lines to the output itself. This is a
* sample script:
while (true) {
var line = read_line();
if (!line) {
break;
}
var res = line + " | " + line;
print(res);
}
*/
enum MainCycleType {
CycleInCpp,
CycleInJs
};
const char* ToCString(const v8::String::Utf8Value& value);
void ReportException(v8::Isolate* isolate, v8::TryCatch* handler);
v8::Handle<v8::String> ReadFile(v8::Isolate* isolate, const char* name);
v8::Handle<v8::String> ReadLine();
void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
void ReadLine(const v8::FunctionCallbackInfo<v8::Value>& args);
bool RunCppCycle(v8::Handle<v8::Script> script,
v8::Local<v8::Context> context,
bool report_exceptions);
v8::Persistent<v8::Context> debug_message_context;
int RunMain(int argc, char* argv[]) {
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::Isolate* isolate = v8::Isolate::New();
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::String> script_source;
v8::Handle<v8::Value> script_name;
int script_param_counter = 0;
MainCycleType cycle_type = CycleInCpp;
for (int i = 1; i < argc; i++) {
const char* str = argv[i];
if (strcmp(str, "-f") == 0) {
// Ignore any -f flags for compatibility with the other stand-
// alone JavaScript engines.
continue;
} else if (strcmp(str, "--main-cycle-in-cpp") == 0) {
cycle_type = CycleInCpp;
} else if (strcmp(str, "--main-cycle-in-js") == 0) {
cycle_type = CycleInJs;
} else if (strncmp(str, "--", 2) == 0) {
printf("Warning: unknown flag %s.\nTry --help for options\n", str);
} else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
script_source = v8::String::NewFromUtf8(isolate, argv[i + 1]);
script_name = v8::String::NewFromUtf8(isolate, "unnamed");
i++;
script_param_counter++;
} else {
// Use argument as a name of file to load.
script_source = ReadFile(isolate, str);
script_name = v8::String::NewFromUtf8(isolate, str);
if (script_source.IsEmpty()) {
printf("Error reading '%s'\n", str);
return 1;
}
script_param_counter++;
}
}
if (script_param_counter == 0) {
printf("Script is not specified\n");
return 1;
}
if (script_param_counter != 1) {
printf("Only one script may be specified\n");
return 1;
}
// Create a template for the global object.
v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
// Bind the global 'print' function to the C++ Print callback.
global->Set(v8::String::NewFromUtf8(isolate, "print"),
v8::FunctionTemplate::New(isolate, Print));
if (cycle_type == CycleInJs) {
// Bind the global 'read_line' function to the C++ ReadLine callback.
global->Set(v8::String::NewFromUtf8(isolate, "read_line"),
v8::FunctionTemplate::New(isolate, ReadLine));
}
// Create a new execution environment containing the built-in
// functions
v8::Handle<v8::Context> context = v8::Context::New(isolate, NULL, global);
// Enter the newly created execution environment.
v8::Context::Scope context_scope(context);
debug_message_context.Reset(isolate, context);
bool report_exceptions = true;
v8::Handle<v8::Script> script;
{
// Compile script in try/catch context.
v8::TryCatch try_catch;
v8::ScriptOrigin origin(script_name);
script = v8::Script::Compile(script_source, &origin);
if (script.IsEmpty()) {
// Print errors that happened during compilation.
if (report_exceptions)
ReportException(isolate, &try_catch);
return 1;
}
}
{
v8::TryCatch try_catch;
script->Run();
if (try_catch.HasCaught()) {
if (report_exceptions)
ReportException(isolate, &try_catch);
return 1;
}
}
if (cycle_type == CycleInCpp) {
bool res = RunCppCycle(script,
isolate->GetCurrentContext(),
report_exceptions);
return !res;
} else {
// All is already done.
}
return 0;
}
bool RunCppCycle(v8::Handle<v8::Script> script,
v8::Local<v8::Context> context,
bool report_exceptions) {
v8::Isolate* isolate = context->GetIsolate();
v8::Handle<v8::String> fun_name =
v8::String::NewFromUtf8(isolate, "ProcessLine");
v8::Handle<v8::Value> process_val = context->Global()->Get(fun_name);
// If there is no ProcessLine function, or if it is not a function,
// bail out.
if (!process_val->IsFunction()) {
printf("Error: Script does not declare 'ProcessLine' global function.\n");
return false;
}
// It is a function; cast it to a Function
v8::Handle<v8::Function> process_fun =
v8::Handle<v8::Function>::Cast(process_val);
while (!feof(stdin)) {
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::String> input_line = ReadLine();
if (input_line == v8::Undefined(isolate)) {
continue;
}
const int argc = 1;
v8::Handle<v8::Value> argv[argc] = { input_line };
v8::Handle<v8::Value> result;
{
v8::TryCatch try_catch;
result = process_fun->Call(isolate->GetCurrentContext()->Global(),
argc, argv);
if (try_catch.HasCaught()) {
if (report_exceptions)
ReportException(isolate, &try_catch);
return false;
}
}
v8::String::Utf8Value str(result);
const char* cstr = ToCString(str);
printf("%s\n", cstr);
}
return true;
}
int main(int argc, char* argv[]) {
v8::V8::InitializeICU();
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
v8::V8::Initialize();
int result = RunMain(argc, argv);
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
delete platform;
return result;
}
// Extracts a C string from a V8 Utf8Value.
const char* ToCString(const v8::String::Utf8Value& value) {
return *value ? *value : "<string conversion failed>";
}
// Reads a file into a v8 string.
v8::Handle<v8::String> ReadFile(v8::Isolate* isolate, const char* name) {
FILE* file = fopen(name, "rb");
if (file == NULL) return v8::Handle<v8::String>();
fseek(file, 0, SEEK_END);
int size = ftell(file);
rewind(file);
char* chars = new char[size + 1];
chars[size] = '\0';
for (int i = 0; i < size;) {
int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
i += read;
}
fclose(file);
v8::Handle<v8::String> result =
v8::String::NewFromUtf8(isolate, chars, v8::String::kNormalString, size);
delete[] chars;
return result;
}
void ReportException(v8::Isolate* isolate, v8::TryCatch* try_catch) {
v8::HandleScope handle_scope(isolate);
v8::String::Utf8Value exception(try_catch->Exception());
const char* exception_string = ToCString(exception);
v8::Handle<v8::Message> message = try_catch->Message();
if (message.IsEmpty()) {
// V8 didn't provide any extra information about this error; just
// print the exception.
printf("%s\n", exception_string);
} else {
// Print (filename):(line number): (message).
v8::String::Utf8Value filename(message->GetScriptOrigin().ResourceName());
const char* filename_string = ToCString(filename);
int linenum = message->GetLineNumber();
printf("%s:%i: %s\n", filename_string, linenum, exception_string);
// Print line of source code.
v8::String::Utf8Value sourceline(message->GetSourceLine());
const char* sourceline_string = ToCString(sourceline);
printf("%s\n", sourceline_string);
// Print wavy underline (GetUnderline is deprecated).
int start = message->GetStartColumn();
for (int i = 0; i < start; i++) {
printf(" ");
}
int end = message->GetEndColumn();
for (int i = start; i < end; i++) {
printf("^");
}
printf("\n");
}
}
// The callback that is invoked by v8 whenever the JavaScript 'print'
// function is called. Prints its arguments on stdout separated by
// spaces and ending with a newline.
void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
bool first = true;
for (int i = 0; i < args.Length(); i++) {
v8::HandleScope handle_scope(args.GetIsolate());
if (first) {
first = false;
} else {
printf(" ");
}
v8::String::Utf8Value str(args[i]);
const char* cstr = ToCString(str);
printf("%s", cstr);
}
printf("\n");
fflush(stdout);
}
// The callback that is invoked by v8 whenever the JavaScript 'read_line'
// function is called. Reads a string from standard input and returns it.
void ReadLine(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() > 0) {
args.GetIsolate()->ThrowException(
v8::String::NewFromUtf8(args.GetIsolate(), "Unexpected arguments"));
return;
}
args.GetReturnValue().Set(ReadLine());
}
v8::Handle<v8::String> ReadLine() {
const int kBufferSize = 1024 + 1;
char buffer[kBufferSize];
char* res;
{
res = fgets(buffer, kBufferSize, stdin);
}
v8::Isolate* isolate = v8::Isolate::GetCurrent();
if (res == NULL) {
v8::Handle<v8::Primitive> t = v8::Undefined(isolate);
return v8::Handle<v8::String>::Cast(t);
}
// Remove newline char
for (char* pos = buffer; *pos != '\0'; pos++) {
if (*pos == '\n') {
*pos = '\0';
break;
}
}
return v8::String::NewFromUtf8(isolate, buffer);
}

25
deps/v8/samples/process.cc

@ -32,10 +32,6 @@
#include <map>
#include <string>
#ifdef COMPRESS_STARTUP_DATA_BZ2
#error Using compressed startup data is not supported for this sample
#endif
using namespace std;
using namespace v8;
@ -116,10 +112,8 @@ class JsHttpRequestProcessor : public HttpRequestProcessor {
const PropertyCallbackInfo<Value>& info);
// Callbacks that access maps
static void MapGet(Local<String> name,
const PropertyCallbackInfo<Value>& info);
static void MapSet(Local<String> name,
Local<Value> value,
static void MapGet(Local<Name> name, const PropertyCallbackInfo<Value>& info);
static void MapSet(Local<Name> name, Local<Value> value,
const PropertyCallbackInfo<Value>& info);
// Utility methods for wrapping C++ objects as JavaScript objects,
@ -359,13 +353,15 @@ string ObjectToString(Local<Value> value) {
}
void JsHttpRequestProcessor::MapGet(Local<String> name,
void JsHttpRequestProcessor::MapGet(Local<Name> name,
const PropertyCallbackInfo<Value>& info) {
if (name->IsSymbol()) return;
// Fetch the map wrapped by this object.
map<string, string>* obj = UnwrapMap(info.Holder());
// Convert the JavaScript string to a std::string.
string key = ObjectToString(name);
string key = ObjectToString(Local<String>::Cast(name));
// Look up the value if it exists using the standard STL idiom.
map<string, string>::iterator iter = obj->find(key);
@ -381,14 +377,15 @@ void JsHttpRequestProcessor::MapGet(Local<String> name,
}
void JsHttpRequestProcessor::MapSet(Local<String> name,
Local<Value> value_obj,
void JsHttpRequestProcessor::MapSet(Local<Name> name, Local<Value> value_obj,
const PropertyCallbackInfo<Value>& info) {
if (name->IsSymbol()) return;
// Fetch the map wrapped by this object.
map<string, string>* obj = UnwrapMap(info.Holder());
// Convert the key and value to std::strings.
string key = ObjectToString(name);
string key = ObjectToString(Local<String>::Cast(name));
string value = ObjectToString(value_obj);
// Update the map.
@ -405,7 +402,7 @@ Handle<ObjectTemplate> JsHttpRequestProcessor::MakeMapTemplate(
Local<ObjectTemplate> result = ObjectTemplate::New(isolate);
result->SetInternalFieldCount(1);
result->SetNamedPropertyHandler(MapGet, MapSet);
result->SetHandler(NamedPropertyHandlerConfiguration(MapGet, MapSet));
// Again, return the result through the current handle scope.
return handle_scope.Escape(result);

6
deps/v8/samples/samples.gyp

@ -67,11 +67,5 @@
'process.cc',
],
},
{
'target_name': 'lineprocessor',
'sources': [
'lineprocessor.cc',
],
}
],
}

4
deps/v8/samples/shell.cc

@ -35,10 +35,6 @@
#include <stdlib.h>
#include <string.h>
#ifdef COMPRESS_STARTUP_DATA_BZ2
#error Using compressed startup data is not supported for this sample
#endif
/**
* This sample program shows how to implement a simple javascript shell
* based on V8. This includes initializing V8 with command line options,

94
deps/v8/src/accessors.cc

@ -181,7 +181,7 @@ void Accessors::ArgumentsIteratorSetter(
Handle<AccessorInfo> Accessors::ArgumentsIteratorInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<Name> name(isolate->native_context()->iterator_symbol(), isolate);
Handle<Name> name = isolate->factory()->iterator_symbol();
return MakeAccessor(isolate, name, &ArgumentsIteratorGetter,
&ArgumentsIteratorSetter, attributes);
}
@ -322,6 +322,98 @@ Handle<AccessorInfo> Accessors::StringLengthInfo(
}
template <typename Char>
inline int CountRequiredEscapes(Handle<String> source) {
DisallowHeapAllocation no_gc;
int escapes = 0;
Vector<const Char> src = source->GetCharVector<Char>();
for (int i = 0; i < src.length(); i++) {
if (src[i] == '/' && (i == 0 || src[i - 1] != '\\')) escapes++;
}
return escapes;
}
template <typename Char, typename StringType>
inline Handle<StringType> WriteEscapedRegExpSource(Handle<String> source,
Handle<StringType> result) {
DisallowHeapAllocation no_gc;
Vector<const Char> src = source->GetCharVector<Char>();
Vector<Char> dst(result->GetChars(), result->length());
int s = 0;
int d = 0;
while (s < src.length()) {
if (src[s] == '/' && (s == 0 || src[s - 1] != '\\')) dst[d++] = '\\';
dst[d++] = src[s++];
}
DCHECK_EQ(result->length(), d);
return result;
}
MaybeHandle<String> EscapeRegExpSource(Isolate* isolate,
Handle<String> source) {
String::Flatten(source);
if (source->length() == 0) return isolate->factory()->query_colon_string();
bool one_byte = source->IsOneByteRepresentationUnderneath();
int escapes = one_byte ? CountRequiredEscapes<uint8_t>(source)
: CountRequiredEscapes<uc16>(source);
if (escapes == 0) return source;
int length = source->length() + escapes;
if (one_byte) {
Handle<SeqOneByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
isolate->factory()->NewRawOneByteString(length),
String);
return WriteEscapedRegExpSource<uint8_t>(source, result);
} else {
Handle<SeqTwoByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
isolate->factory()->NewRawTwoByteString(length),
String);
return WriteEscapedRegExpSource<uc16>(source, result);
}
}
// Implements ECMA262 ES6 draft 21.2.5.9
void Accessors::RegExpSourceGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> receiver =
Utils::OpenHandle(*v8::Local<v8::Value>(info.This()));
Handle<JSRegExp> regexp = Handle<JSRegExp>::cast(receiver);
Handle<String> result;
if (regexp->TypeTag() == JSRegExp::NOT_COMPILED) {
result = isolate->factory()->empty_string();
} else {
Handle<String> pattern(regexp->Pattern(), isolate);
MaybeHandle<String> maybe = EscapeRegExpSource(isolate, pattern);
if (!maybe.ToHandle(&result)) {
isolate->OptionalRescheduleException(false);
return;
}
}
info.GetReturnValue().Set(Utils::ToLocal(result));
}
void Accessors::RegExpSourceSetter(v8::Local<v8::Name> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE();
}
Handle<AccessorInfo> Accessors::RegExpSourceInfo(
Isolate* isolate, PropertyAttributes attributes) {
return MakeAccessor(isolate, isolate->factory()->source_string(),
&RegExpSourceGetter, &RegExpSourceSetter, attributes);
}
//
// Accessors::ScriptColumnOffset
//

1
deps/v8/src/accessors.h

@ -21,6 +21,7 @@ namespace internal {
V(FunctionName) \
V(FunctionLength) \
V(FunctionPrototype) \
V(RegExpSource) \
V(ScriptColumnOffset) \
V(ScriptCompilationType) \
V(ScriptContextData) \

396
deps/v8/src/api.cc

@ -14,6 +14,7 @@
#include "include/v8-testing.h"
#include "src/assert-scope.h"
#include "src/background-parsing-task.h"
#include "src/base/functional.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
#include "src/base/utils/random-number-generator.h"
@ -195,149 +196,60 @@ static inline bool IsExecutionTerminatingCheck(i::Isolate* isolate) {
}
StartupDataDecompressor::StartupDataDecompressor()
: raw_data(i::NewArray<char*>(V8::GetCompressedStartupDataCount())) {
for (int i = 0; i < V8::GetCompressedStartupDataCount(); ++i) {
raw_data[i] = NULL;
}
void V8::SetNativesDataBlob(StartupData* natives_blob) {
i::V8::SetNativesBlob(natives_blob);
}
StartupDataDecompressor::~StartupDataDecompressor() {
for (int i = 0; i < V8::GetCompressedStartupDataCount(); ++i) {
i::DeleteArray(raw_data[i]);
}
i::DeleteArray(raw_data);
void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) {
i::V8::SetSnapshotBlob(snapshot_blob);
}
int StartupDataDecompressor::Decompress() {
int compressed_data_count = V8::GetCompressedStartupDataCount();
StartupData* compressed_data =
i::NewArray<StartupData>(compressed_data_count);
V8::GetCompressedStartupData(compressed_data);
for (int i = 0; i < compressed_data_count; ++i) {
char* decompressed = raw_data[i] =
i::NewArray<char>(compressed_data[i].raw_size);
if (compressed_data[i].compressed_size != 0) {
int result = DecompressData(decompressed,
&compressed_data[i].raw_size,
compressed_data[i].data,
compressed_data[i].compressed_size);
if (result != 0) return result;
} else {
DCHECK_EQ(0, compressed_data[i].raw_size);
StartupData V8::CreateSnapshotDataBlob() {
Isolate::CreateParams params;
params.enable_serializer = true;
Isolate* isolate = v8::Isolate::New(params);
StartupData result = {NULL, 0};
{
Isolate::Scope isolate_scope(isolate);
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
Persistent<Context> context;
{
HandleScope handle_scope(isolate);
context.Reset(isolate, Context::New(isolate));
}
compressed_data[i].data = decompressed;
}
V8::SetDecompressedStartupData(compressed_data);
i::DeleteArray(compressed_data);
return 0;
}
StartupData::CompressionAlgorithm V8::GetCompressedStartupDataAlgorithm() {
#ifdef COMPRESS_STARTUP_DATA_BZ2
return StartupData::kBZip2;
#else
return StartupData::kUncompressed;
#endif
}
enum CompressedStartupDataItems {
kSnapshot = 0,
kSnapshotContext,
kLibraries,
kExperimentalLibraries,
kCompressedStartupDataCount
};
int V8::GetCompressedStartupDataCount() {
#ifdef COMPRESS_STARTUP_DATA_BZ2
return kCompressedStartupDataCount;
#else
return 0;
#endif
}
void V8::GetCompressedStartupData(StartupData* compressed_data) {
#ifdef COMPRESS_STARTUP_DATA_BZ2
compressed_data[kSnapshot].data =
reinterpret_cast<const char*>(i::Snapshot::data());
compressed_data[kSnapshot].compressed_size = i::Snapshot::size();
compressed_data[kSnapshot].raw_size = i::Snapshot::raw_size();
compressed_data[kSnapshotContext].data =
reinterpret_cast<const char*>(i::Snapshot::context_data());
compressed_data[kSnapshotContext].compressed_size =
i::Snapshot::context_size();
compressed_data[kSnapshotContext].raw_size = i::Snapshot::context_raw_size();
i::Vector<const i::byte> libraries_source = i::Natives::GetScriptsSource();
compressed_data[kLibraries].data =
reinterpret_cast<const char*>(libraries_source.start());
compressed_data[kLibraries].compressed_size = libraries_source.length();
compressed_data[kLibraries].raw_size = i::Natives::GetRawScriptsSize();
i::Vector<const i::byte> exp_libraries_source =
i::ExperimentalNatives::GetScriptsSource();
compressed_data[kExperimentalLibraries].data =
reinterpret_cast<const char*>(exp_libraries_source.start());
compressed_data[kExperimentalLibraries].compressed_size =
exp_libraries_source.length();
compressed_data[kExperimentalLibraries].raw_size =
i::ExperimentalNatives::GetRawScriptsSize();
#endif
}
void V8::SetDecompressedStartupData(StartupData* decompressed_data) {
#ifdef COMPRESS_STARTUP_DATA_BZ2
DCHECK_EQ(i::Snapshot::raw_size(), decompressed_data[kSnapshot].raw_size);
i::Snapshot::set_raw_data(
reinterpret_cast<const i::byte*>(decompressed_data[kSnapshot].data));
DCHECK_EQ(i::Snapshot::context_raw_size(),
decompressed_data[kSnapshotContext].raw_size);
i::Snapshot::set_context_raw_data(
reinterpret_cast<const i::byte*>(
decompressed_data[kSnapshotContext].data));
DCHECK_EQ(i::Natives::GetRawScriptsSize(),
decompressed_data[kLibraries].raw_size);
i::Vector<const char> libraries_source(
decompressed_data[kLibraries].data,
decompressed_data[kLibraries].raw_size);
i::Natives::SetRawScriptsSource(libraries_source);
DCHECK_EQ(i::ExperimentalNatives::GetRawScriptsSize(),
decompressed_data[kExperimentalLibraries].raw_size);
i::Vector<const char> exp_libraries_source(
decompressed_data[kExperimentalLibraries].data,
decompressed_data[kExperimentalLibraries].raw_size);
i::ExperimentalNatives::SetRawScriptsSource(exp_libraries_source);
#endif
}
if (!context.IsEmpty()) {
// Make sure all builtin scripts are cached.
{
HandleScope scope(isolate);
for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
internal_isolate->bootstrapper()->NativesSourceLookup(i);
}
}
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of the context.
internal_isolate->heap()->CollectAllAvailableGarbage("mksnapshot");
i::Object* raw_context = *v8::Utils::OpenPersistent(context);
context.Reset();
i::SnapshotByteSink snapshot_sink;
i::StartupSerializer ser(internal_isolate, &snapshot_sink);
ser.SerializeStrongReferences();
void V8::SetNativesDataBlob(StartupData* natives_blob) {
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
i::SetNativesFromFile(natives_blob);
#else
CHECK(false);
#endif
}
i::SnapshotByteSink context_sink;
i::PartialSerializer context_ser(internal_isolate, &ser, &context_sink);
context_ser.Serialize(&raw_context);
ser.SerializeWeakReferences();
i::SnapshotData sd(snapshot_sink, ser);
i::SnapshotData csd(context_sink, context_ser);
void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) {
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
i::SetSnapshotFromFile(snapshot_blob);
#else
CHECK(false);
#endif
result = i::Snapshot::CreateSnapshotBlob(sd.RawData(), csd.RawData());
}
}
isolate->Dispose();
return result;
}
@ -474,28 +386,40 @@ void SetResourceConstraints(i::Isolate* isolate,
i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
LOG_API(isolate, "Persistent::New");
i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
#ifdef DEBUG
#ifdef VERIFY_HEAP
(*obj)->ObjectVerify();
#endif // DEBUG
#endif // VERIFY_HEAP
return result.location();
}
i::Object** V8::CopyPersistent(i::Object** obj) {
i::Handle<i::Object> result = i::GlobalHandles::CopyGlobal(obj);
#ifdef DEBUG
#ifdef VERIFY_HEAP
(*obj)->ObjectVerify();
#endif // DEBUG
#endif // VERIFY_HEAP
return result.location();
}
void V8::MakeWeak(i::Object** object, void* parameters,
WeakCallback weak_callback, V8::WeakHandleType weak_type) {
i::GlobalHandles::PhantomState phantom;
phantom = weak_type == V8::PhantomHandle ? i::GlobalHandles::Phantom
: i::GlobalHandles::Nonphantom;
i::GlobalHandles::MakeWeak(object, parameters, weak_callback, phantom);
void V8::MakeWeak(i::Object** object, void* parameter,
WeakCallback weak_callback) {
i::GlobalHandles::MakeWeak(object, parameter, weak_callback);
}
void V8::MakePhantom(i::Object** object, void* parameter,
PhantomCallbackData<void>::Callback weak_callback) {
i::GlobalHandles::MakePhantom(object, parameter, weak_callback);
}
void V8::MakePhantom(
i::Object** object,
InternalFieldsCallbackData<void, void>::Callback weak_callback,
int internal_field_index1, int internal_field_index2) {
i::GlobalHandles::MakePhantom(object, weak_callback, internal_field_index1,
internal_field_index2);
}
@ -890,6 +814,9 @@ Local<FunctionTemplate> FunctionTemplate::New(
v8::Handle<Signature> signature,
int length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
// Changes to the environment cannot be captured in the snapshot. Expect no
// function templates when the isolate is created for serialization.
DCHECK(!i_isolate->serializer_enabled());
LOG_API(i_isolate, "FunctionTemplate::New");
ENTER_V8(i_isolate);
return FunctionTemplateNew(
@ -1229,6 +1156,9 @@ Local<ObjectTemplate> ObjectTemplate::New() {
Local<ObjectTemplate> ObjectTemplate::New(
i::Isolate* isolate,
v8::Handle<FunctionTemplate> constructor) {
// Changes to the environment cannot be captured in the snapshot. Expect no
// object templates when the isolate is created for serialization.
DCHECK(!isolate->serializer_enabled());
LOG_API(isolate, "ObjectTemplate::New");
ENTER_V8(isolate);
i::Handle<i::Struct> struct_obj =
@ -1374,19 +1304,20 @@ void ObjectTemplate::SetAccessor(v8::Handle<Name> name,
}
void ObjectTemplate::SetNamedPropertyHandler(
NamedPropertyGetterCallback getter,
NamedPropertySetterCallback setter,
NamedPropertyQueryCallback query,
NamedPropertyDeleterCallback remover,
NamedPropertyEnumeratorCallback enumerator,
Handle<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
template <typename Getter, typename Setter, typename Query, typename Deleter,
typename Enumerator>
static void ObjectTemplateSetNamedPropertyHandler(ObjectTemplate* templ,
Getter getter, Setter setter,
Query query, Deleter remover,
Enumerator enumerator,
Handle<Value> data,
bool can_intercept_symbols) {
i::Isolate* isolate = Utils::OpenHandle(templ)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
EnsureConstructor(isolate, this);
i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
Utils::OpenHandle(this)->constructor());
EnsureConstructor(isolate, templ);
i::FunctionTemplateInfo* constructor =
i::FunctionTemplateInfo::cast(Utils::OpenHandle(templ)->constructor());
i::Handle<i::FunctionTemplateInfo> cons(constructor);
i::Handle<i::Struct> struct_obj =
isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
@ -1398,6 +1329,8 @@ void ObjectTemplate::SetNamedPropertyHandler(
if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
obj->set_flags(0);
obj->set_can_intercept_symbols(can_intercept_symbols);
if (data.IsEmpty()) {
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
@ -1407,6 +1340,23 @@ void ObjectTemplate::SetNamedPropertyHandler(
}
void ObjectTemplate::SetNamedPropertyHandler(
NamedPropertyGetterCallback getter, NamedPropertySetterCallback setter,
NamedPropertyQueryCallback query, NamedPropertyDeleterCallback remover,
NamedPropertyEnumeratorCallback enumerator, Handle<Value> data) {
ObjectTemplateSetNamedPropertyHandler(this, getter, setter, query, remover,
enumerator, data, false);
}
void ObjectTemplate::SetHandler(
const NamedPropertyHandlerConfiguration& config) {
ObjectTemplateSetNamedPropertyHandler(this, config.getter, config.setter,
config.query, config.deleter,
config.enumerator, config.data, true);
}
void ObjectTemplate::MarkAsUndetectable() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
@ -1450,13 +1400,8 @@ void ObjectTemplate::SetAccessCheckCallbacks(
}
void ObjectTemplate::SetIndexedPropertyHandler(
IndexedPropertyGetterCallback getter,
IndexedPropertySetterCallback setter,
IndexedPropertyQueryCallback query,
IndexedPropertyDeleterCallback remover,
IndexedPropertyEnumeratorCallback enumerator,
Handle<Value> data) {
void ObjectTemplate::SetHandler(
const IndexedPropertyHandlerConfiguration& config) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@ -1469,12 +1414,16 @@ void ObjectTemplate::SetIndexedPropertyHandler(
i::Handle<i::InterceptorInfo> obj =
i::Handle<i::InterceptorInfo>::cast(struct_obj);
if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
if (config.getter != 0) SET_FIELD_WRAPPED(obj, set_getter, config.getter);
if (config.setter != 0) SET_FIELD_WRAPPED(obj, set_setter, config.setter);
if (config.query != 0) SET_FIELD_WRAPPED(obj, set_query, config.query);
if (config.deleter != 0) SET_FIELD_WRAPPED(obj, set_deleter, config.deleter);
if (config.enumerator != 0) {
SET_FIELD_WRAPPED(obj, set_enumerator, config.enumerator);
}
obj->set_flags(0);
v8::Local<v8::Value> data = config.data;
if (data.IsEmpty()) {
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
@ -1536,7 +1485,10 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
ScriptCompiler::CachedData::CachedData(const uint8_t* data_, int length_,
BufferPolicy buffer_policy_)
: data(data_), length(length_), buffer_policy(buffer_policy_) {}
: data(data_),
length(length_),
rejected(false),
buffer_policy(buffer_policy_) {}
ScriptCompiler::CachedData::~CachedData() {
@ -1567,7 +1519,7 @@ Local<Script> UnboundScript::BindToCurrentContext() {
function_info(i::SharedFunctionInfo::cast(*obj), obj->GetIsolate());
i::Handle<i::JSFunction> function =
obj->GetIsolate()->factory()->NewFunctionFromSharedFunctionInfo(
function_info, obj->GetIsolate()->global_context());
function_info, obj->GetIsolate()->native_context());
return ToApiHandle<Script>(function);
}
@ -1697,6 +1649,12 @@ Local<UnboundScript> ScriptCompiler::CompileUnbound(
options = kConsumeParserCache;
}
// Don't try to produce any kind of cache when the debugger is loaded.
if (isolate->debug()->is_loaded() &&
(options == kProduceParserCache || options == kProduceCodeCache)) {
options = kNoCompileOptions;
}
i::ScriptData* script_data = NULL;
if (options == kConsumeParserCache || options == kConsumeCodeCache) {
DCHECK(source->cached_data);
@ -1732,7 +1690,7 @@ Local<UnboundScript> ScriptCompiler::CompileUnbound(
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::SharedFunctionInfo> result = i::Compiler::CompileScript(
str, name_obj, line_offset, column_offset, is_shared_cross_origin,
isolate->global_context(), NULL, &script_data, options,
isolate->native_context(), NULL, &script_data, options,
i::NOT_NATIVES_CODE);
has_pending_exception = result.is_null();
if (has_pending_exception && script_data != NULL) {
@ -1752,6 +1710,8 @@ Local<UnboundScript> ScriptCompiler::CompileUnbound(
source->cached_data = new CachedData(
script_data->data(), script_data->length(), CachedData::BufferOwned);
script_data->ReleaseDataOwnership();
} else if (options == kConsumeParserCache || options == kConsumeCodeCache) {
source->cached_data->rejected = script_data->rejected();
}
delete script_data;
}
@ -1777,17 +1737,6 @@ Local<Script> ScriptCompiler::Compile(
ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript(
Isolate* v8_isolate, StreamedSource* source, CompileOptions options) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
if (!isolate->global_context().is_null() &&
!isolate->global_context()->IsNativeContext()) {
// The context chain is non-trivial, and constructing the corresponding
// non-trivial Scope chain outside the V8 heap is not implemented. Don't
// stream the script. This will only occur if Harmony scoping is enabled and
// a previous script has introduced "let" or "const" variables. TODO(marja):
// Implement externalizing ScopeInfos and constructing non-trivial Scope
// chains independent of the V8 heap so that we can stream also in this
// case.
return NULL;
}
return new i::BackgroundParsingTask(source->impl(), options,
i::FLAG_stack_size, isolate);
}
@ -1824,7 +1773,7 @@ Local<Script> ScriptCompiler::Compile(Isolate* v8_isolate,
v8::True(v8_isolate));
}
source->info->set_script(script);
source->info->SetContext(isolate->global_context());
source->info->SetContext(isolate->native_context());
EXCEPTION_PREAMBLE(isolate);
@ -1857,6 +1806,13 @@ Local<Script> ScriptCompiler::Compile(Isolate* v8_isolate,
}
uint32_t ScriptCompiler::CachedDataVersionTag() {
return static_cast<uint32_t>(base::hash_combine(
internal::Version::Hash(), internal::FlagList::Hash(),
static_cast<uint32_t>(internal::CpuFeatures::SupportedFeatures())));
}
Local<Script> Script::Compile(v8::Handle<String> source,
v8::ScriptOrigin* origin) {
i::Handle<i::String> str = Utils::OpenHandle(*source);
@ -3022,8 +2978,13 @@ int32_t Value::Int32Value() const {
bool Value::Equals(Handle<Value> that) const {
i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::Object> obj = Utils::OpenHandle(this, true);
i::Handle<i::Object> other = Utils::OpenHandle(*that);
if (obj->IsSmi() && other->IsSmi()) {
return obj->Number() == other->Number();
}
i::Object* ho = obj->IsSmi() ? *other : *obj;
i::Isolate* isolate = i::HeapObject::cast(ho)->GetIsolate();
if (!Utils::ApiCheck(!obj.is_null() && !that.IsEmpty(),
"v8::Value::Equals()",
"Reading from empty handle")) {
@ -3031,7 +2992,6 @@ bool Value::Equals(Handle<Value> that) const {
}
LOG_API(isolate, "Equals");
ENTER_V8(isolate);
i::Handle<i::Object> other = Utils::OpenHandle(*that);
// If both obj and other are JSObjects, we'd better compare by identity
// immediately when going into JS builtin. The reason is Invoke
// would overwrite global object receiver with global proxy.
@ -3050,15 +3010,18 @@ bool Value::Equals(Handle<Value> that) const {
bool Value::StrictEquals(Handle<Value> that) const {
i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::Object> obj = Utils::OpenHandle(this, true);
i::Handle<i::Object> other = Utils::OpenHandle(*that);
if (obj->IsSmi()) {
return other->IsNumber() && obj->Number() == other->Number();
}
i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
if (!Utils::ApiCheck(!obj.is_null() && !that.IsEmpty(),
"v8::Value::StrictEquals()",
"Reading from empty handle")) {
return false;
}
LOG_API(isolate, "StrictEquals");
i::Handle<i::Object> other = Utils::OpenHandle(*that);
// Must check HeapNumber first, since NaN !== NaN.
if (obj->IsHeapNumber()) {
if (!other->IsNumber()) return false;
@ -3636,7 +3599,9 @@ static inline bool ObjectSetAccessor(Object* obj,
i::JSObject::SetAccessor(Utils::OpenHandle(obj), info),
false);
if (result->IsUndefined()) return false;
if (fast) i::JSObject::MigrateSlowToFast(Utils::OpenHandle(obj), 0);
if (fast) {
i::JSObject::MigrateSlowToFast(Utils::OpenHandle(obj), 0, "APISetAccessor");
}
return true;
}
@ -3822,7 +3787,8 @@ void v8::Object::TurnOnAccessCheck() {
// as optimized code does not always handle access checks.
i::Deoptimizer::DeoptimizeGlobalObject(*obj);
i::Handle<i::Map> new_map = i::Map::Copy(i::Handle<i::Map>(obj->map()));
i::Handle<i::Map> new_map =
i::Map::Copy(i::Handle<i::Map>(obj->map()), "APITurnOnAccessCheck");
new_map->set_is_access_check_needed(true);
i::JSObject::MigrateToMap(obj, new_map);
}
@ -4334,6 +4300,16 @@ Local<v8::Value> Function::GetBoundFunction() const {
}
int Name::GetIdentityHash() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Name::GetIdentityHash()", return 0);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::Name> self = Utils::OpenHandle(this);
return static_cast<int>(self->Hash());
}
int String::Length() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
return str->length();
@ -5545,7 +5521,11 @@ Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
LOG_API(isolate, "String::New(char)");
ENTER_V8(isolate);
i::Handle<i::String> right_string = Utils::OpenHandle(*right);
// We do not expect this to fail. Change this if it does.
// If we are steering towards a range error, do not wait for the error to be
// thrown, and return the null handle instead.
if (left_string->length() + right_string->length() > i::String::kMaxLength) {
return Local<String>();
}
i::Handle<i::String> result = isolate->factory()->NewConsString(
left_string, right_string).ToHandleChecked();
return Utils::ToLocal(result);
@ -5651,7 +5631,6 @@ bool v8::String::MakeExternal(
bool v8::String::CanMakeExternal() {
if (!internal::FLAG_clever_optimizations) return false;
i::Handle<i::String> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
@ -6235,27 +6214,21 @@ Local<Symbol> v8::Symbol::ForApi(Isolate* isolate, Local<String> name) {
}
static Local<Symbol> GetWellKnownSymbol(Isolate* isolate, const char* name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::String> i_name =
Utils::OpenHandle(*String::NewFromUtf8(isolate, name));
i::Handle<i::String> part = i_isolate->factory()->for_intern_string();
return Utils::ToLocal(SymbolFor(i_isolate, i_name, part));
}
Local<Symbol> v8::Symbol::GetIterator(Isolate* isolate) {
return GetWellKnownSymbol(isolate, "Symbol.iterator");
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
return Utils::ToLocal(i_isolate->factory()->iterator_symbol());
}
Local<Symbol> v8::Symbol::GetUnscopables(Isolate* isolate) {
return GetWellKnownSymbol(isolate, "Symbol.unscopables");
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
return Utils::ToLocal(i_isolate->factory()->unscopables_symbol());
}
Local<Symbol> v8::Symbol::GetToStringTag(Isolate* isolate) {
return GetWellKnownSymbol(isolate, "Symbol.toStringTag");
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
return Utils::ToLocal(i_isolate->factory()->to_string_tag_symbol());
}
@ -6503,17 +6476,11 @@ void Isolate::CancelTerminateExecution() {
void Isolate::RequestInterrupt(InterruptCallback callback, void* data) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->set_api_interrupt_callback(callback);
isolate->set_api_interrupt_callback_data(data);
isolate->stack_guard()->RequestApiInterrupt();
isolate->RequestInterrupt(callback, data);
}
void Isolate::ClearInterrupt() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->stack_guard()->ClearApiInterrupt();
isolate->set_api_interrupt_callback(NULL);
isolate->set_api_interrupt_callback_data(NULL);
}
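// Embedder-side sketch of the delegated interrupt API (the callback body is
// hypothetical; the signature matches v8::InterruptCallback). Bookkeeping now
// lives in i::Isolate, which presumably lets requests accumulate rather than
// occupy the single callback slot the cleared setters used to hold.
static void OnInterrupt(v8::Isolate* isolate, void* data) {
  // Runs on the JS thread at a safe point while script executes.
}
// ... elsewhere, from any thread:
//   isolate->RequestInterrupt(OnInterrupt, nullptr);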
@ -6756,6 +6723,15 @@ bool Isolate::IdleNotification(int idle_time_in_ms) {
}
bool Isolate::IdleNotificationDeadline(double deadline_in_seconds) {
// Returning true tells the caller that it need not
// continue to call IdleNotificationDeadline.
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
if (!i::FLAG_use_idle_notification) return true;
return isolate->heap()->IdleNotification(deadline_in_seconds);
}
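// Illustrative call of the new deadline-based idle hook ('now_seconds' is a
// hypothetical monotonic timestamp in seconds). A true return tells the
// embedder it can stop sending idle notifications for now:
//   bool done = isolate->IdleNotificationDeadline(now_seconds + 0.010);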
void Isolate::LowMemoryNotification() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
{
@ -6766,9 +6742,9 @@ void Isolate::LowMemoryNotification() {
}
int Isolate::ContextDisposedNotification() {
int Isolate::ContextDisposedNotification(bool dependant_context) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
return isolate->heap()->NotifyContextDisposed();
return isolate->heap()->NotifyContextDisposed(dependant_context);
}
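// Illustrative call of the extended notification; the flag appears to tell
// the GC whether the disposed context depended on another live context
// (semantics hedged, inferred from the parameter name only):
//   isolate->ContextDisposedNotification(false);  // top-level context gone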
@ -6983,7 +6959,7 @@ DEFINE_ERROR(Error)
#undef DEFINE_ERROR
Local<Message> Exception::GetMessage(Handle<Value> exception) {
Local<Message> Exception::CreateMessage(Handle<Value> exception) {
i::Handle<i::Object> obj = Utils::OpenHandle(*exception);
if (!obj->IsHeapObject()) return Local<Message>();
i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
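// The rename keeps the call shape; hypothetical usage of the new name:
//   v8::Local<v8::Message> msg = v8::Exception::CreateMessage(exception);
//   // msg is empty when 'exception' is not a heap object (see check above).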

47
deps/v8/src/arguments.h

@ -67,39 +67,24 @@ class Arguments BASE_EMBEDDED {
// For each type of callback, we have a list of arguments.
// They are used to generate the Call() functions below.
// These aren't included in the list as they have duplicate signatures:
// F(NamedPropertyEnumeratorCallback, ...)
// F(GenericNamedPropertyEnumeratorCallback, ...)
// F(GenericNamedPropertyGetterCallback, ...)
#define FOR_EACH_CALLBACK_TABLE_MAPPING_0(F) \
F(IndexedPropertyEnumeratorCallback, v8::Array) \
#define FOR_EACH_CALLBACK_TABLE_MAPPING_1(F) \
F(NamedPropertyGetterCallback, v8::Value, v8::Local<v8::String>) \
F(AccessorNameGetterCallback, v8::Value, v8::Local<v8::Name>) \
F(NamedPropertyQueryCallback, \
v8::Integer, \
v8::Local<v8::String>) \
F(NamedPropertyDeleterCallback, \
v8::Boolean, \
v8::Local<v8::String>) \
F(IndexedPropertyGetterCallback, \
v8::Value, \
uint32_t) \
F(IndexedPropertyQueryCallback, \
v8::Integer, \
uint32_t) \
F(IndexedPropertyDeleterCallback, \
v8::Boolean, \
uint32_t) \
#define FOR_EACH_CALLBACK_TABLE_MAPPING_2(F) \
F(NamedPropertySetterCallback, \
v8::Value, \
v8::Local<v8::String>, \
v8::Local<v8::Value>) \
F(IndexedPropertySetterCallback, \
v8::Value, \
uint32_t, \
v8::Local<v8::Value>) \
F(IndexedPropertyEnumeratorCallback, v8::Array)
#define FOR_EACH_CALLBACK_TABLE_MAPPING_1(F) \
F(AccessorNameGetterCallback, v8::Value, v8::Local<v8::Name>) \
F(GenericNamedPropertyQueryCallback, v8::Integer, v8::Local<v8::Name>) \
F(GenericNamedPropertyDeleterCallback, v8::Boolean, v8::Local<v8::Name>) \
F(IndexedPropertyGetterCallback, v8::Value, uint32_t) \
F(IndexedPropertyQueryCallback, v8::Integer, uint32_t) \
F(IndexedPropertyDeleterCallback, v8::Boolean, uint32_t)
#define FOR_EACH_CALLBACK_TABLE_MAPPING_2(F) \
F(GenericNamedPropertySetterCallback, v8::Value, v8::Local<v8::Name>, \
v8::Local<v8::Value>) \
F(IndexedPropertySetterCallback, v8::Value, uint32_t, v8::Local<v8::Value>)
#define FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(F) \
F(AccessorNameSetterCallback, \

168
deps/v8/src/arm/assembler-arm.cc

@ -127,6 +127,11 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
}
if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS;
if (cpu.implementer() == base::CPU::NVIDIA &&
cpu.variant() == base::CPU::NVIDIA_DENVER) {
supported_ |= 1u << COHERENT_CACHE;
}
#endif
DCHECK(!IsSupported(VFP3) || IsSupported(ARMv7));
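// The COHERENT_CACHE bit set above for Denver cores is consumed in the
// cpu-arm.cc hunk further down, where FlushICache returns early -- on cores
// with coherent instruction/data caches the explicit flush can be skipped.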
@ -188,14 +193,15 @@ void CpuFeatures::PrintTarget() {
void CpuFeatures::PrintFeatures() {
printf(
"ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
"MOVW_MOVT_IMMEDIATE_LOADS=%d",
"MOVW_MOVT_IMMEDIATE_LOADS=%d COHERENT_CACHE=%d",
CpuFeatures::IsSupported(ARMv7),
CpuFeatures::IsSupported(VFP3),
CpuFeatures::IsSupported(VFP32DREGS),
CpuFeatures::IsSupported(NEON),
CpuFeatures::IsSupported(SUDIV),
CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS),
CpuFeatures::IsSupported(COHERENT_CACHE));
#ifdef __arm__
bool eabi_hardfloat = base::OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
@ -1338,7 +1344,7 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
void Assembler::b(int branch_offset, Condition cond) {
DCHECK((branch_offset & 3) == 0);
int imm24 = branch_offset >> 2;
DCHECK(is_int24(imm24));
CHECK(is_int24(imm24));
emit(cond | B27 | B25 | (imm24 & kImm24Mask));
if (cond == al) {
@ -1352,7 +1358,7 @@ void Assembler::bl(int branch_offset, Condition cond) {
positions_recorder()->WriteRecordedPositions();
DCHECK((branch_offset & 3) == 0);
int imm24 = branch_offset >> 2;
DCHECK(is_int24(imm24));
CHECK(is_int24(imm24));
emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
}
@ -1362,7 +1368,7 @@ void Assembler::blx(int branch_offset) { // v5 and above
DCHECK((branch_offset & 1) == 0);
int h = ((branch_offset & 2) >> 1)*B24;
int imm24 = branch_offset >> 2;
DCHECK(is_int24(imm24));
CHECK(is_int24(imm24));
emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
}
@ -1504,7 +1510,7 @@ void Assembler::mov_label_offset(Register dst, Label* label) {
//
// When the label gets bound: target_at extracts the link and target_at_put
// patches the instructions.
DCHECK(is_uint24(link));
CHECK(is_uint24(link));
BlockConstPoolScope block_const_pool(this);
emit(link);
nop(dst.code());
@ -1798,71 +1804,119 @@ void Assembler::pkhtb(Register dst,
}
void Assembler::uxtb(Register dst,
const Operand& src,
Condition cond) {
void Assembler::sxtb(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.233.
// cond(31-28) | 01101010(27-20) | 1111(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src.is(pc));
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6A * B20 | 0xF * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
}
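// Worked example of the rotate encoding shared by the sxt*/uxt* emitters in
// this block (informal arithmetic): for rotate == 24, (24 >> 1) & 0xC == 0xC,
// and 0xC * B8 == 3 << 10, so the rotate field bits(11:10) encode 0b11 --
// the ror #24 form required by ARM DDI 0406C.b.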
void Assembler::sxtab(Register dst, Register src1, Register src2, int rotate,
Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.233.
// cond(31-28) | 01101010(27-20) | Rn(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src1.is(pc));
DCHECK(!src2.is(pc));
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6A * B20 | src1.code() * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
}
void Assembler::sxth(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.235.
// cond(31-28) | 01101011(27-20) | 1111(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src.is(pc));
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6B * B20 | 0xF * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
}
void Assembler::sxtah(Register dst, Register src1, Register src2, int rotate,
Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.235.
// cond(31-28) | 01101011(27-20) | Rn(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src1.is(pc));
DCHECK(!src2.is(pc));
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6B * B20 | src1.code() * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
}
void Assembler::uxtb(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.274.
// cond(31-28) | 01101110(27-20) | 1111(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src.rm().is(pc));
DCHECK(!src.rm().is(no_reg));
DCHECK(src.rs().is(no_reg));
DCHECK((src.shift_imm_ == 0) ||
(src.shift_imm_ == 8) ||
(src.shift_imm_ == 16) ||
(src.shift_imm_ == 24));
// Operand maps ROR #0 to LSL #0.
DCHECK((src.shift_op() == ROR) ||
((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
void Assembler::uxtab(Register dst,
Register src1,
const Operand& src2,
DCHECK(!src.is(pc));
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6E * B20 | 0xF * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
}
void Assembler::uxtab(Register dst, Register src1, Register src2, int rotate,
Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.271.
// cond(31-28) | 01101110(27-20) | Rn(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src1.is(pc));
DCHECK(!src2.rm().is(pc));
DCHECK(!src2.rm().is(no_reg));
DCHECK(src2.rs().is(no_reg));
DCHECK((src2.shift_imm_ == 0) ||
(src2.shift_imm_ == 8) ||
(src2.shift_imm_ == 16) ||
(src2.shift_imm_ == 24));
// Operand maps ROR #0 to LSL #0.
DCHECK((src2.shift_op() == ROR) ||
((src2.shift_op() == LSL) && (src2.shift_imm_ == 0)));
emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
DCHECK(!src2.is(pc));
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6E * B20 | src1.code() * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
}
void Assembler::uxtb16(Register dst,
const Operand& src,
Condition cond) {
void Assembler::uxtb16(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.275.
// cond(31-28) | 01101100(27-20) | 1111(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src.rm().is(pc));
DCHECK(!src.rm().is(no_reg));
DCHECK(src.rs().is(no_reg));
DCHECK((src.shift_imm_ == 0) ||
(src.shift_imm_ == 8) ||
(src.shift_imm_ == 16) ||
(src.shift_imm_ == 24));
// Operand maps ROR #0 to LSL #0.
DCHECK((src.shift_op() == ROR) ||
((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
DCHECK(!src.is(pc));
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6C * B20 | 0xF * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
}
void Assembler::uxth(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.276.
// cond(31-28) | 01101111(27-20) | 1111(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src.is(pc));
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6F * B20 | 0xF * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
}
void Assembler::uxtah(Register dst, Register src1, Register src2, int rotate,
Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.273.
// cond(31-28) | 01101111(27-20) | Rn(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
DCHECK(!dst.is(pc));
DCHECK(!src1.is(pc));
DCHECK(!src2.is(pc));
DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
emit(cond | 0x6F * B20 | src1.code() * B16 | dst.code() * B12 |
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
}
@ -2437,6 +2491,12 @@ void Assembler::vstm(BlockAddrMode am,
}
void Assembler::vmov(const SwVfpRegister dst, float imm) {
mov(ip, Operand(bit_cast<int32_t>(imm)));
vmov(dst, ip);
}
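// Informal sketch of what the new single-precision overload emits for
// vmov(s0, 1.0f) -- ip is clobbered as scratch:
//   mov  ip, #0x3F800000   ; bit_cast<int32_t>(1.0f)
//   vmov s0, ip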
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
memcpy(&i, &d, 8);

17
deps/v8/src/arm/assembler-arm.h

@ -1034,12 +1034,20 @@ class Assembler : public AssemblerBase {
void pkhtb(Register dst, Register src1, const Operand& src2,
Condition cond = al);
void uxtb(Register dst, const Operand& src, Condition cond = al);
void uxtab(Register dst, Register src1, const Operand& src2,
void sxtb(Register dst, Register src, int rotate = 0, Condition cond = al);
void sxtab(Register dst, Register src1, Register src2, int rotate = 0,
Condition cond = al);
void sxth(Register dst, Register src, int rotate = 0, Condition cond = al);
void sxtah(Register dst, Register src1, Register src2, int rotate = 0,
Condition cond = al);
void uxtb16(Register dst, const Operand& src, Condition cond = al);
void uxtb(Register dst, Register src, int rotate = 0, Condition cond = al);
void uxtab(Register dst, Register src1, Register src2, int rotate = 0,
Condition cond = al);
void uxtb16(Register dst, Register src, int rotate = 0, Condition cond = al);
void uxth(Register dst, Register src, int rotate = 0, Condition cond = al);
void uxtah(Register dst, Register src1, Register src2, int rotate = 0,
Condition cond = al);
// Status register access instructions
@ -1172,6 +1180,7 @@ class Assembler : public AssemblerBase {
SwVfpRegister last,
Condition cond = al);
void vmov(const SwVfpRegister dst, float imm);
void vmov(const DwVfpRegister dst,
double imm,
const Register scratch = no_reg);

16
deps/v8/src/arm/builtins-arm.cc

@ -372,13 +372,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
MemOperand bit_field3 = FieldMemOperand(r2, Map::kBitField3Offset);
// Check if slack tracking is enabled.
__ ldr(r4, bit_field3);
__ DecodeField<Map::ConstructionCount>(r3, r4);
__ cmp(r3, Operand(JSFunction::kNoSlackTracking));
__ b(eq, &allocate);
__ DecodeField<Map::Counter>(r3, r4);
__ cmp(r3, Operand(Map::kSlackTrackingCounterEnd));
__ b(lt, &allocate);
// Decrease generous allocation count.
__ sub(r4, r4, Operand(1 << Map::ConstructionCount::kShift));
__ sub(r4, r4, Operand(1 << Map::Counter::kShift));
__ str(r4, bit_field3);
__ cmp(r3, Operand(JSFunction::kFinishSlackTracking));
__ cmp(r3, Operand(Map::kSlackTrackingCounterEnd));
__ b(ne, &allocate);
__ push(r1);
@ -431,9 +431,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Check if slack tracking is enabled.
__ ldr(ip, FieldMemOperand(r2, Map::kBitField3Offset));
__ DecodeField<Map::ConstructionCount>(ip);
__ cmp(ip, Operand(JSFunction::kNoSlackTracking));
__ b(eq, &no_inobject_slack_tracking);
__ DecodeField<Map::Counter>(ip);
__ cmp(ip, Operand(Map::kSlackTrackingCounterEnd));
__ b(lt, &no_inobject_slack_tracking);
// Allocate object with a slack.
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));

199
deps/v8/src/arm/code-stubs-arm.cc

@ -234,61 +234,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
WriteInt32ToHeapNumberStub stub1(isolate, r1, r0, r2);
WriteInt32ToHeapNumberStub stub2(isolate, r2, r0, r3);
stub1.GetCode();
stub2.GetCode();
}
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
// the_int_ has the answer, which is a signed int32 but not a Smi.
// We test for the special value that has a different exponent. This test
// has the neat side effect of setting the flags according to the sign.
STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
__ cmp(the_int(), Operand(0x80000000u));
__ b(eq, &max_negative_int);
// Set up the correct exponent in scratch_. All non-Smi int32s have the same exponent.
// A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
uint32_t non_smi_exponent =
(HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
__ mov(scratch(), Operand(non_smi_exponent));
// Set the sign bit in scratch_ if the value was negative.
__ orr(scratch(), scratch(), Operand(HeapNumber::kSignMask), LeaveCC, cs);
// Subtract from 0 if the value was negative.
__ rsb(the_int(), the_int(), Operand::Zero(), LeaveCC, cs);
// We should be masking the implicit first digit of the mantissa away here,
// but it just ends up combining harmlessly with the last digit of the
// exponent that happens to be 1. The sign bit is 0, so we shift by 10 to get
// the most significant 1 to hit the last bit of the 12-bit sign and exponent.
DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
__ orr(scratch(), scratch(), Operand(the_int(), LSR, shift_distance));
__ str(scratch(),
FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
__ mov(scratch(), Operand(the_int(), LSL, 32 - shift_distance));
__ str(scratch(),
FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
__ Ret();
__ bind(&max_negative_int);
// The max negative int32 is stored as a positive number in the mantissa of
// a double because it uses a sign bit instead of using two's complement.
// The actual mantissa bits stored are all 0 because the implicit most
// significant 1 bit is not stored.
non_smi_exponent += 1 << HeapNumber::kExponentShift;
__ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
__ str(ip, FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
__ mov(ip, Operand::Zero());
__ str(ip, FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
__ Ret();
}
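// For the record, since the stub above is deleted: the "special exponent" it
// built works out (informally) to (kExponentBias + 30) << kExponentShift ==
// (1023 + 30) << 20 == 0x41D00000, the high word of a double with unbiased
// exponent 30.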
// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
@ -967,7 +912,6 @@ bool CEntryStub::NeedsImmovableCode() {
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
@ -1494,9 +1438,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3,
r4, &miss);
// Ensure that the vector and slot registers won't be clobbered before
// calling the miss handler.
DCHECK(!FLAG_vector_ics ||
!AreAliased(r4, r5, VectorLoadICDescriptor::VectorRegister(),
VectorLoadICDescriptor::SlotRegister()));
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r4,
r5, &miss);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
@ -1509,10 +1458,16 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
Register scratch = r3;
Register scratch = r5;
Register result = r0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
DCHECK(!FLAG_vector_ics ||
(!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
result.is(VectorLoadICDescriptor::SlotRegister())));
// StringCharAtGenerator doesn't use the result register until it has passed
// all of the miss cases. If it did, we would have a register conflict
// when FLAG_vector_ics is true.
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
@ -2686,6 +2641,10 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
void CallICStub::Generate(MacroAssembler* masm) {
// r1 - function
// r3 - slot id (Smi)
const int with_types_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
Label extra_checks_or_miss, slow_start;
Label slow, non_function, wrap, cont;
Label have_js_function;
@ -2724,37 +2683,70 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
__ bind(&extra_checks_or_miss);
Label miss;
Label uninitialized, miss;
__ CompareRoot(r4, Heap::kmegamorphic_symbolRootIndex);
__ b(eq, &slow_start);
// The following cases attempt to handle MISS cases without going to the
// runtime.
if (FLAG_trace_ic) {
__ jmp(&miss);
}
__ CompareRoot(r4, Heap::kuninitialized_symbolRootIndex);
__ b(eq, &uninitialized);
// We are going megamorphic. If the feedback is a JSFunction, it is fine
// to handle it here. More complex cases are dealt with in the runtime.
__ AssertNotSmi(r4);
__ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
__ b(ne, &miss);
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
__ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
// We have to update statistics for runtime profiling.
__ ldr(r4, FieldMemOperand(r2, with_types_offset));
__ sub(r4, r4, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(r2, with_types_offset));
__ ldr(r4, FieldMemOperand(r2, generic_offset));
__ add(r4, r4, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(r2, generic_offset));
__ jmp(&slow_start);
__ bind(&uninitialized);
// We are going monomorphic, provided we actually have a JSFunction.
__ JumpIfSmi(r1, &miss);
// Go to the miss case if we do not have a function.
__ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
__ b(ne, &miss);
// Make sure the function is not the Array() function, which requires special
// behavior on MISS.
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
__ cmp(r1, r4);
__ b(eq, &miss);
if (!FLAG_trace_ic) {
// We are going megamorphic. If the feedback is a JSFunction, it is fine
// to handle it here. More complex cases are dealt with in the runtime.
__ AssertNotSmi(r4);
__ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
__ b(ne, &miss);
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
__ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
// We have to update statistics for runtime profiling.
const int with_types_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
__ ldr(r4, FieldMemOperand(r2, with_types_offset));
__ sub(r4, r4, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(r2, with_types_offset));
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
__ ldr(r4, FieldMemOperand(r2, generic_offset));
__ add(r4, r4, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(r2, generic_offset));
__ jmp(&slow_start);
}
// Update stats.
__ ldr(r4, FieldMemOperand(r2, with_types_offset));
__ add(r4, r4, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(r2, with_types_offset));
// Store the function.
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
__ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ str(r1, MemOperand(r4, 0));
// We are here because tracing is on or we are going monomorphic.
// Update the write barrier.
__ mov(r5, r1);
__ RecordWrite(r2, r4, r5, kLRHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ jmp(&have_js_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
__ bind(&miss);
GenerateMiss(masm);
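// Net effect of the rewrite above (summary, not part of the diff): besides
// the megamorphic transition, the stub now also performs the uninitialized ->
// monomorphic transition inline, storing the JSFunction into the feedback
// slot with a write barrier, and it falls through to the runtime only when
// tracing is on or the feedback is too complex to handle here.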
@ -3189,18 +3181,43 @@ void SubStringStub::Generate(MacroAssembler* masm) {
void ToNumberStub::Generate(MacroAssembler* masm) {
// The ToNumber stub takes one argument in r0.
Label check_heap_number, call_builtin;
__ JumpIfNotSmi(r0, &check_heap_number);
Label not_smi;
__ JumpIfNotSmi(r0, &not_smi);
__ Ret();
__ bind(&not_smi);
__ bind(&check_heap_number);
Label not_heap_number;
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ CompareRoot(r1, Heap::kHeapNumberMapRootIndex);
__ b(ne, &call_builtin);
__ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
// r0: object
// r1: instance type.
__ cmp(r1, Operand(HEAP_NUMBER_TYPE));
__ b(ne, &not_heap_number);
__ Ret();
__ bind(&not_heap_number);
Label not_string, slow_string;
__ cmp(r1, Operand(FIRST_NONSTRING_TYPE));
__ b(hs, &not_string);
// Check if string has a cached array index.
__ ldr(r2, FieldMemOperand(r0, String::kHashFieldOffset));
__ tst(r2, Operand(String::kContainsCachedArrayIndexMask));
__ b(ne, &slow_string);
__ IndexFromHash(r2, r0);
__ Ret();
__ bind(&slow_string);
__ push(r0); // Push argument.
__ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
__ bind(&not_string);
Label not_oddball;
__ cmp(r1, Operand(ODDBALL_TYPE));
__ b(ne, &not_oddball);
__ ldr(r0, FieldMemOperand(r0, Oddball::kToNumberOffset));
__ Ret();
__ bind(&not_oddball);
__ bind(&call_builtin);
__ push(r0);
__ push(r0); // Push argument.
__ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}
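// Dispatch order the rewritten stub now implements (summary):
//   smi                              -> returned as-is
//   heap number                     -> returned as-is
//   string with cached array index  -> IndexFromHash fast path
//   other string                    -> Runtime::kStringToNumber
//   oddball                         -> preconverted kToNumberOffset value
//   anything else                   -> Builtins::TO_NUMBER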

50
deps/v8/src/arm/code-stubs-arm.h

@ -46,44 +46,6 @@ class StringHelper : public AllStatic {
};
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
public:
WriteInt32ToHeapNumberStub(Isolate* isolate, Register the_int,
Register the_heap_number, Register scratch)
: PlatformCodeStub(isolate) {
minor_key_ = IntRegisterBits::encode(the_int.code()) |
HeapNumberRegisterBits::encode(the_heap_number.code()) |
ScratchRegisterBits::encode(scratch.code());
}
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
Register the_int() const {
return Register::from_code(IntRegisterBits::decode(minor_key_));
}
Register the_heap_number() const {
return Register::from_code(HeapNumberRegisterBits::decode(minor_key_));
}
Register scratch() const {
return Register::from_code(ScratchRegisterBits::decode(minor_key_));
}
// Minor key encoding in 16 bits.
class IntRegisterBits: public BitField<int, 0, 4> {};
class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
class ScratchRegisterBits: public BitField<int, 8, 4> {};
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(WriteInt32ToHeapNumber, PlatformCodeStub);
};
class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Isolate* isolate,
@ -112,7 +74,7 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
virtual bool SometimesSetsUpAFrame() { return false; }
bool SometimesSetsUpAFrame() OVERRIDE { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
@ -235,9 +197,9 @@ class RecordWriteStub: public PlatformCodeStub {
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
inline Major MajorKey() const FINAL { return RecordWrite; }
virtual void Generate(MacroAssembler* masm) OVERRIDE;
void Generate(MacroAssembler* masm) OVERRIDE;
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
@ -245,7 +207,7 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
void Activate(Code* code) {
void Activate(Code* code) OVERRIDE {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
@ -293,7 +255,7 @@ class DirectCEntryStub: public PlatformCodeStub {
void GenerateCall(MacroAssembler* masm, Register target);
private:
bool NeedsImmovableCode() { return true; }
bool NeedsImmovableCode() OVERRIDE { return true; }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
@ -325,7 +287,7 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
Register r0,
Register r1);
virtual bool SometimesSetsUpAFrame() { return false; }
bool SometimesSetsUpAFrame() OVERRIDE { return false; }
private:
static const int kInlinedProbes = 4;

8
deps/v8/src/arm/codegen-arm.cc

@ -288,8 +288,8 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
__ bind(&loop);
__ ldr(temp1, MemOperand(src, 4, PostIndex));
__ uxtb16(temp3, Operand(temp1, ROR, 0));
__ uxtb16(temp4, Operand(temp1, ROR, 8));
__ uxtb16(temp3, temp1);
__ uxtb16(temp4, temp1, 8);
__ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
__ str(temp1, MemOperand(dest));
__ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
@ -301,9 +301,9 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
__ mov(chars, Operand(chars, LSL, 31), SetCC); // bit0 => ne, bit1 => cs
__ b(&not_two, cc);
__ ldrh(temp1, MemOperand(src, 2, PostIndex));
__ uxtb(temp3, Operand(temp1, ROR, 8));
__ uxtb(temp3, temp1, 8);
__ mov(temp3, Operand(temp3, LSL, 16));
__ uxtab(temp3, temp3, Operand(temp1, ROR, 0));
__ uxtab(temp3, temp3, temp1);
__ str(temp3, MemOperand(dest, 4, PostIndex));
__ bind(&not_two);
__ ldrb(temp1, MemOperand(src), ne);
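// Informal mapping from the old operand-based forms to the new
// register+rotate signatures used above:
//   uxtb16(rd, Operand(rm, ROR, 0))    =>  uxtb16(rd, rm)
//   uxtb16(rd, Operand(rm, ROR, 8))    =>  uxtb16(rd, rm, 8)
//   uxtab(rd, rn, Operand(rm, ROR, 0)) =>  uxtab(rd, rn, rm)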

4
deps/v8/src/arm/constants-arm.h

@ -332,9 +332,9 @@ enum NeonSize {
// standard SoftwareInterruptCode. Bit 23 is reserved for the stop feature.
enum SoftwareInterruptCodes {
// transition to C code
kCallRtRedirected= 0x10,
kCallRtRedirected = 0x10,
// break point
kBreakpoint= 0x20,
kBreakpoint = 0x20,
// stop
kStopCode = 1 << 23
};

2
deps/v8/src/arm/cpu-arm.cc

@ -27,6 +27,8 @@ namespace internal {
void CpuFeatures::FlushICache(void* start, size_t size) {
if (size == 0) return;
if (CpuFeatures::IsSupported(COHERENT_CACHE)) return;
#if defined(USE_SIMULATOR)
// Not generating ARM instructions for C-code. This means that we are
// building an ARM emulator based target. We should notify the simulator

6
deps/v8/src/arm/deoptimizer-arm.cc

@ -21,6 +21,12 @@ int Deoptimizer::patch_size() {
}
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
// Empty because there is no need for relocation information for the code
// patching in Deoptimizer::PatchCodeForDeoptimization below.
}
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
// Invalidate the relocation information, as it will become invalid by the

160
deps/v8/src/arm/disasm-arm.cc

@ -1027,7 +1027,75 @@ void Decoder::DecodeType3(Instruction* instr) {
UNREACHABLE();
break;
case 1:
UNREACHABLE();
if (instr->Bits(9, 6) == 1) {
if (instr->Bit(20) == 0) {
if (instr->Bits(19, 16) == 0xF) {
switch (instr->Bits(11, 10)) {
case 0:
Format(instr, "sxtb'cond 'rd, 'rm");
break;
case 1:
Format(instr, "sxtb'cond 'rd, 'rm, ror #8");
break;
case 2:
Format(instr, "sxtb'cond 'rd, 'rm, ror #16");
break;
case 3:
Format(instr, "sxtb'cond 'rd, 'rm, ror #24");
break;
}
} else {
switch (instr->Bits(11, 10)) {
case 0:
Format(instr, "sxtab'cond 'rd, 'rn, 'rm");
break;
case 1:
Format(instr, "sxtab'cond 'rd, 'rn, 'rm, ror #8");
break;
case 2:
Format(instr, "sxtab'cond 'rd, 'rn, 'rm, ror #16");
break;
case 3:
Format(instr, "sxtab'cond 'rd, 'rn, 'rm, ror #24");
break;
}
}
} else {
if (instr->Bits(19, 16) == 0xF) {
switch (instr->Bits(11, 10)) {
case 0:
Format(instr, "sxth'cond 'rd, 'rm");
break;
case 1:
Format(instr, "sxth'cond 'rd, 'rm, ror #8");
break;
case 2:
Format(instr, "sxth'cond 'rd, 'rm, ror #16");
break;
case 3:
Format(instr, "sxth'cond 'rd, 'rm, ror #24");
break;
}
} else {
switch (instr->Bits(11, 10)) {
case 0:
Format(instr, "sxtah'cond 'rd, 'rn, 'rm");
break;
case 1:
Format(instr, "sxtah'cond 'rd, 'rn, 'rm, ror #8");
break;
case 2:
Format(instr, "sxtah'cond 'rd, 'rn, 'rm, ror #16");
break;
case 3:
Format(instr, "sxtah'cond 'rd, 'rn, 'rm, ror #24");
break;
}
}
}
} else {
UNREACHABLE();
}
break;
case 2:
if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
@ -1054,36 +1122,70 @@ void Decoder::DecodeType3(Instruction* instr) {
}
break;
case 3:
if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
if (instr->Bits(19, 16) == 0xF) {
switch (instr->Bits(11, 10)) {
case 0:
Format(instr, "uxtb'cond 'rd, 'rm");
break;
case 1:
Format(instr, "uxtb'cond 'rd, 'rm, ror #8");
break;
case 2:
Format(instr, "uxtb'cond 'rd, 'rm, ror #16");
break;
case 3:
Format(instr, "uxtb'cond 'rd, 'rm, ror #24");
break;
if ((instr->Bits(9, 6) == 1)) {
if ((instr->Bit(20) == 0)) {
if (instr->Bits(19, 16) == 0xF) {
switch (instr->Bits(11, 10)) {
case 0:
Format(instr, "uxtb'cond 'rd, 'rm");
break;
case 1:
Format(instr, "uxtb'cond 'rd, 'rm, ror #8");
break;
case 2:
Format(instr, "uxtb'cond 'rd, 'rm, ror #16");
break;
case 3:
Format(instr, "uxtb'cond 'rd, 'rm, ror #24");
break;
}
} else {
switch (instr->Bits(11, 10)) {
case 0:
Format(instr, "uxtab'cond 'rd, 'rn, 'rm");
break;
case 1:
Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #8");
break;
case 2:
Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #16");
break;
case 3:
Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #24");
break;
}
}
} else {
switch (instr->Bits(11, 10)) {
case 0:
Format(instr, "uxtab'cond 'rd, 'rn, 'rm");
break;
case 1:
Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #8");
break;
case 2:
Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #16");
break;
case 3:
Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #24");
break;
if (instr->Bits(19, 16) == 0xF) {
switch (instr->Bits(11, 10)) {
case 0:
Format(instr, "uxth'cond 'rd, 'rm");
break;
case 1:
Format(instr, "uxth'cond 'rd, 'rm, ror #8");
break;
case 2:
Format(instr, "uxth'cond 'rd, 'rm, ror #16");
break;
case 3:
Format(instr, "uxth'cond 'rd, 'rm, ror #24");
break;
}
} else {
switch (instr->Bits(11, 10)) {
case 0:
Format(instr, "uxtah'cond 'rd, 'rn, 'rm");
break;
case 1:
Format(instr, "uxtah'cond 'rd, 'rn, 'rm, ror #8");
break;
case 2:
Format(instr, "uxtah'cond 'rd, 'rn, 'rm, ror #16");
break;
case 3:
Format(instr, "uxtah'cond 'rd, 'rn, 'rm, ror #24");
break;
}
}
}
} else {

112
deps/v8/src/arm/full-codegen-arm.cc

@ -195,10 +195,10 @@ void FullCodeGenerator::Generate() {
// Argument to NewContext is the function, which is still in r1.
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
__ push(r1);
__ Push(info->scope()->GetScopeInfo());
__ CallRuntime(Runtime::kNewGlobalContext, 2);
__ CallRuntime(Runtime::kNewScriptContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
@ -937,7 +937,7 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
EmitDebugCheckDeclarationContext(variable);
// Load instance object.
__ LoadContext(r1, scope_->ContextChainLength(scope_->GlobalScope()));
__ LoadContext(r1, scope_->ContextChainLength(scope_->ScriptScope()));
__ ldr(r1, ContextOperand(r1, variable->interface()->Index()));
__ ldr(r1, ContextOperand(r1, Context::EXTENSION_INDEX));
@ -1111,6 +1111,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the object to enumerate over. If the object is null or undefined, skip
// over the loop. See ECMA-262 version 5, section 12.6.4.
SetExpressionPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
@ -1214,6 +1215,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
SetExpressionPosition(stmt->each());
// Load the current count to r0, load the length to r1.
__ Ldrd(r0, r1, MemOperand(sp, 0 * kPointerSize));
__ cmp(r0, r1); // Compare to the array length.
@ -1283,48 +1286,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
Comment cmnt(masm_, "[ ForOfStatement");
SetStatementPosition(stmt);
Iteration loop_statement(this, stmt);
increment_loop_depth();
// var iterator = iterable[Symbol.iterator]();
VisitForEffect(stmt->assign_iterator());
// Loop entry.
__ bind(loop_statement.continue_label());
// result = iterator.next()
VisitForEffect(stmt->next_result());
// if (result.done) break;
Label result_not_done;
VisitForControl(stmt->result_done(),
loop_statement.break_label(),
&result_not_done,
&result_not_done);
__ bind(&result_not_done);
// each = result.value
VisitForEffect(stmt->assign_each());
// Generate code for the body of the loop.
Visit(stmt->body());
// Check stack before looping.
PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
__ jmp(loop_statement.continue_label());
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
__ bind(loop_statement.break_label());
decrement_loop_depth();
}
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
@ -1383,6 +1344,19 @@ void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
}
void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
int offset) {
if (NeedsHomeObject(initializer)) {
__ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
__ mov(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ ldr(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
CallStoreIC();
}
}
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
TypeofState typeof_state,
Label* slow) {
@ -1739,6 +1713,14 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
__ Move(StoreDescriptor::ReceiverRegister(), r0);
__ mov(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ ldr(StoreDescriptor::ValueRegister(), MemOperand(sp));
CallStoreIC();
}
} else {
VisitForEffect(value);
}
@ -1750,6 +1732,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
EmitSetHomeObjectIfNeeded(value, 2);
__ mov(r0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes
__ push(r0);
__ CallRuntime(Runtime::kSetProperty, 4);
@ -1787,7 +1770,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(r0);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
EmitSetHomeObjectIfNeeded(it->second->getter, 2);
EmitAccessor(it->second->setter);
EmitSetHomeObjectIfNeeded(it->second->setter, 3);
__ mov(r0, Operand(Smi::FromInt(NONE)));
__ push(r0);
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
@ -2210,15 +2195,6 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
VisitForAccumulatorValue(value);
__ pop(r1);
// Check generator state.
Label wrong_state, closed_state, done;
__ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
__ cmp(r3, Operand(Smi::FromInt(0)));
__ b(eq, &closed_state);
__ b(lt, &wrong_state);
// Load suspended function and context.
__ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset));
__ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
@ -2241,7 +2217,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// Enter a new JavaScript frame, and initialize its slots as they were when
// the generator was suspended.
Label resume_frame;
Label resume_frame, done;
__ bind(&push_frame);
__ bl(&resume_frame);
__ jmp(&done);
@ -2301,26 +2277,6 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
// Reach here when generator is closed.
__ bind(&closed_state);
if (resume_mode == JSGeneratorObject::NEXT) {
// Return completed iterator result when generator is closed.
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ push(r2);
// Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(true);
} else {
// Throw the provided value.
__ push(r0);
__ CallRuntime(Runtime::kThrow, 1);
}
__ jmp(&done);
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
__ push(r1);
__ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
__ bind(&done);
context()->Plug(result_register());
}
@ -2534,6 +2490,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
__ push(scratch);
VisitForStackValue(key);
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@ -2728,8 +2685,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
} else if (IsSignallingAssignmentToConst(var, op, strict_mode())) {
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
}
// Non-initializing assignments to consts are ignored.
}
@ -5085,7 +5043,7 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
Scope* declaration_scope = scope()->DeclarationScope();
if (declaration_scope->is_global_scope() ||
if (declaration_scope->is_script_scope() ||
declaration_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global

28
deps/v8/src/arm/lithium-arm.cc

@ -1098,9 +1098,17 @@ LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
LOperand* slot = NULL;
LOperand* vector = NULL;
if (FLAG_vector_ics) {
slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
vector =
UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
}
// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
context, receiver_register, name_register);
context, receiver_register, name_register, slot, vector);
}
@ -1397,8 +1405,16 @@ LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
LOperand* divisor = UseRegister(instr->right());
LOperand* temp =
CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister();
LFlooringDivI* div = new(zone()) LFlooringDivI(dividend, divisor, temp);
return AssignEnvironment(DefineAsRegister(div));
LInstruction* result =
DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor, temp));
if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
(instr->CheckFlag(HValue::kCanOverflow) &&
(!CpuFeatures::IsSupported(SUDIV) ||
!instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)))) {
result = AssignEnvironment(result);
}
return result;
}
@ -2111,7 +2127,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* global_object =
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
@ -2170,7 +2186,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
@ -2237,7 +2253,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}

162
deps/v8/src/arm/lithium-arm.h

@ -166,17 +166,13 @@ class LCodeGen;
V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
virtual Opcode opcode() const FINAL OVERRIDE { \
return LInstruction::k##type; \
} \
virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE; \
virtual const char* Mnemonic() const FINAL OVERRIDE { \
return mnemonic; \
} \
static L##type* cast(LInstruction* instr) { \
DCHECK(instr->Is##type()); \
return reinterpret_cast<L##type*>(instr); \
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
Opcode opcode() const FINAL { return LInstruction::k##type; } \
void CompileToNative(LCodeGen* generator) FINAL; \
const char* Mnemonic() const FINAL { return mnemonic; } \
static L##type* cast(LInstruction* instr) { \
DCHECK(instr->Is##type()); \
return reinterpret_cast<L##type*>(instr); \
}
@ -291,11 +287,9 @@ class LTemplateResultInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
virtual bool HasResult() const FINAL OVERRIDE {
return R != 0 && result() != NULL;
}
bool HasResult() const FINAL { return R != 0 && result() != NULL; }
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() const { return results_[0]; }
LOperand* result() const OVERRIDE { return results_[0]; }
protected:
EmbeddedContainer<LOperand*, R> results_;
@ -313,11 +307,11 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {
private:
// Iterator support.
virtual int InputCount() FINAL OVERRIDE { return I; }
virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
int InputCount() FINAL { return I; }
LOperand* InputAt(int i) FINAL { return inputs_[i]; }
virtual int TempCount() FINAL OVERRIDE { return T; }
virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
int TempCount() FINAL { return T; }
LOperand* TempAt(int i) FINAL { return temps_[i]; }
};
@ -332,8 +326,8 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
virtual bool IsGap() const OVERRIDE { return true; }
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
bool IsGap() const OVERRIDE { return true; }
void PrintDataTo(StringStream* stream) OVERRIDE;
static LGap* cast(LInstruction* instr) {
DCHECK(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@ -373,7 +367,7 @@ class LInstructionGap FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return !IsRedundant();
}
@ -385,10 +379,10 @@ class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGoto(HBasicBlock* block) : block_(block) { }
virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
virtual bool IsControl() const OVERRIDE { return true; }
void PrintDataTo(StringStream* stream) OVERRIDE;
bool IsControl() const OVERRIDE { return true; }
int block_id() const { return block_->block_id(); }
@ -431,7 +425,7 @@ class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
public:
virtual bool IsControl() const OVERRIDE { return true; }
bool IsControl() const OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
@ -442,12 +436,10 @@ class LLabel FINAL : public LGap {
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@ -465,7 +457,7 @@ class LLabel FINAL : public LGap {
class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
public:
virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
@ -484,30 +476,33 @@ class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
class LTailCallThroughMegamorphicCache FINAL
: public LTemplateInstruction<0, 3, 0> {
: public LTemplateInstruction<0, 5, 0> {
public:
explicit LTailCallThroughMegamorphicCache(LOperand* context,
LOperand* receiver,
LOperand* name) {
LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
LOperand* name, LOperand* slot,
LOperand* vector) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
inputs_[3] = slot;
inputs_[4] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
LOperand* slot() { return inputs_[3]; }
LOperand* vector() { return inputs_[4]; }
DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")
DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
};
class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
public:
virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
@ -517,7 +512,7 @@ class LControlInstruction : public LTemplateInstruction<0, I, T> {
public:
LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
virtual bool IsControl() const FINAL OVERRIDE { return true; }
bool IsControl() const FINAL { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@ -606,7 +601,7 @@ class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
LOperand* length() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -869,7 +864,7 @@ class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
return hydrogen()->representation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -1053,7 +1048,7 @@ class LIsObjectAndBranch FINAL : public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -1070,7 +1065,7 @@ class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -1085,7 +1080,7 @@ class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -1103,7 +1098,7 @@ class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -1125,7 +1120,7 @@ class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
Token::Value op() const { return hydrogen()->token(); }
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -1141,7 +1136,7 @@ class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 0> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -1171,7 +1166,7 @@ class LHasCachedArrayIndexAndBranch FINAL
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -1189,7 +1184,7 @@ class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 1> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -1401,7 +1396,7 @@ class LBranch FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -1546,11 +1541,9 @@ class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
virtual Opcode opcode() const OVERRIDE {
return LInstruction::kArithmeticD;
}
virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
virtual const char* Mnemonic() const OVERRIDE;
Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticD; }
void CompileToNative(LCodeGen* generator) OVERRIDE;
const char* Mnemonic() const OVERRIDE;
private:
Token::Value op_;
@ -1574,11 +1567,9 @@ class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
LOperand* right() { return inputs_[2]; }
Token::Value op() const { return op_; }
virtual Opcode opcode() const OVERRIDE {
return LInstruction::kArithmeticT;
}
virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
virtual const char* Mnemonic() const OVERRIDE;
Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticT; }
void CompileToNative(LCodeGen* generator) OVERRIDE;
const char* Mnemonic() const OVERRIDE;
private:
Token::Value op_;
@ -1687,7 +1678,7 @@ class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
uint32_t base_offset() const { return hydrogen()->base_offset(); }
};
@ -1768,7 +1759,7 @@ class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -1787,7 +1778,7 @@ class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 0> {
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -1826,7 +1817,7 @@ class LStoreCodeEntry FINAL: public LTemplateInstruction<0, 2, 0> {
LOperand* function() { return inputs_[0]; }
LOperand* code_object() { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream);
void PrintDataTo(StringStream* stream) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
@ -1843,7 +1834,7 @@ class LInnerAllocatedObject FINAL: public LTemplateInstruction<1, 2, 0> {
LOperand* base_object() const { return inputs_[0]; }
LOperand* offset() const { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
};
@ -1887,7 +1878,7 @@ class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
@ -1907,11 +1898,12 @@ class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
const CallInterfaceDescriptor descriptor() { return descriptor_; }
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
@ -1919,11 +1911,11 @@ class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
ZoneList<LOperand*> inputs_;
// Iterator support.
virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
int InputCount() FINAL { return inputs_.length(); }
LOperand* InputAt(int i) FINAL { return inputs_[i]; }
virtual int TempCount() FINAL OVERRIDE { return 0; }
virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
int TempCount() FINAL { return 0; }
LOperand* TempAt(int i) FINAL { return NULL; }
};
@ -1940,7 +1932,7 @@ class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
@ -1976,7 +1968,7 @@ class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
@ -1995,7 +1987,7 @@ class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
@ -2012,7 +2004,7 @@ class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
@ -2206,7 +2198,7 @@ class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 1> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
Representation representation() const {
return hydrogen()->field_representation();
@ -2229,7 +2221,7 @@ class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
@ -2261,7 +2253,7 @@ class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
bool NeedsCanonicalization() {
if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
@ -2293,7 +2285,7 @@ class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@ -2317,7 +2309,7 @@ class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 1> {
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
Handle<Map> transitioned_map() {
@ -2611,7 +2603,7 @@ class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
Handle<String> type_literal() { return hydrogen()->type_literal(); }
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -2632,9 +2624,7 @@ class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
@ -2839,7 +2829,7 @@ class LChunkBuilder FINAL : public LChunkBuilderBase {
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();

183
deps/v8/src/arm/lithium-codegen-arm.cc

@ -27,9 +27,9 @@ class SafepointGenerator FINAL : public CallWrapper {
deopt_mode_(mode) { }
virtual ~SafepointGenerator() {}
virtual void BeforeCall(int call_size) const OVERRIDE {}
void BeforeCall(int call_size) const OVERRIDE {}
virtual void AfterCall() const OVERRIDE {
void AfterCall() const OVERRIDE {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
@ -2785,11 +2785,11 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() OVERRIDE {
void Generate() OVERRIDE {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
&load_bool_);
}
virtual LInstruction* instr() OVERRIDE { return instr_; }
LInstruction* instr() OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
Label* load_bool() { return &load_bool_; }
@ -2964,6 +2964,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ add(sp, sp, Operand(sp_delta));
}
} else {
DCHECK(info()->IsStub()); // Functions would need to drop one more value.
Register reg = ToRegister(instr->parameter_count());
// The argument count parameter is a smi
__ SmiUntag(reg);
@ -2995,13 +2996,17 @@ template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
Register slot_register = VectorLoadICDescriptor::SlotRegister();
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
DCHECK(slot_register.is(r0));
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
// No need to allocate this register.
DCHECK(VectorLoadICDescriptor::SlotRegister().is(r0));
int index = vector->GetIndex(instr->hydrogen()->slot());
__ mov(VectorLoadICDescriptor::SlotRegister(), Operand(Smi::FromInt(index)));
FeedbackVectorICSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ mov(slot_register, Operand(Smi::FromInt(index)));
}
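The slot index is passed as a Smi, i.e. a tagged small integer. A hedged restatement of the 32-bit tagging scheme Smi::FromInt relies on here:

#include <cstdint>

// 32-bit Smi tagging: shift left one bit, leaving the low bit 0 so a tagged
// integer can be told apart from a heap pointer (whose low bit is 1).
inline int32_t SmiFromInt(int32_t value) { return value << 1; }
inline int32_t SmiToInt(int32_t smi) { return smi >> 1; }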
@ -3760,10 +3765,11 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() OVERRIDE {
void Generate() OVERRIDE {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
virtual LInstruction* instr() OVERRIDE { return instr_; }
LInstruction* instr() OVERRIDE { return instr_; }
private:
LMathAbs* instr_;
};
@ -3982,54 +3988,91 @@ void LCodeGen::DoTailCallThroughMegamorphicCache(
DCHECK(name.is(LoadDescriptor::NameRegister()));
DCHECK(receiver.is(r1));
DCHECK(name.is(r2));
Register scratch = r4;
Register extra = r5;
Register extra2 = r6;
Register extra3 = r9;
Register scratch = r3;
Register extra = r4;
Register extra2 = r5;
Register extra3 = r6;
#ifdef DEBUG
Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;
DCHECK(!FLAG_vector_ics ||
!AreAliased(slot, vector, scratch, extra, extra2, extra3));
#endif
// Important for the tail-call.
bool must_teardown_frame = NeedsEagerFrame();
// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
must_teardown_frame, receiver, name,
scratch, extra, extra2, extra3);
if (!instr->hydrogen()->is_just_miss()) {
DCHECK(!instr->hydrogen()->is_keyed_load());
// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(
masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
receiver, name, scratch, extra, extra2, extra3);
}
// Tail call to miss if we ended up here.
if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
LoadIC::GenerateMiss(masm());
if (instr->hydrogen()->is_keyed_load()) {
KeyedLoadIC::GenerateMiss(masm());
} else {
LoadIC::GenerateMiss(masm());
}
}
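The rewritten emitter distinguishes three cases: a forced miss skips the probe entirely (keyed loads are only allowed on that path, per the DCHECK), otherwise the stub cache probe tail-calls a handler on a hit, and the fallthrough dispatches to the keyed or named miss handler. The control flow, restated as plain C++ (every function name here is an illustrative stand-in for emitted code, not V8 API):

#include <cstdio>

static void ProbeStubCache()  { std::puts("probe; tail-call handler on hit"); }
static void LeaveFrame()      { std::puts("teardown frame"); }
static void KeyedLoadMiss()   { std::puts("KeyedLoadIC miss"); }
static void LoadMiss()        { std::puts("LoadIC miss"); }

void TailCallThroughMegamorphicCache(bool is_just_miss, bool is_keyed_load,
                                     bool must_teardown_frame) {
  if (!is_just_miss) {
    ProbeStubCache();  // on a miss the probe falls through to the code below
  }
  // Tail call to the miss handler; an eager frame must be torn down first.
  if (must_teardown_frame) LeaveFrame();
  if (is_keyed_load) {
    KeyedLoadMiss();
  } else {
    LoadMiss();
  }
}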
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(r0));
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
PlatformInterfaceDescriptor* call_descriptor =
instr->descriptor().platform_specific_descriptor();
__ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
call_descriptor->storage_mode());
if (instr->hydrogen()->IsTailCall()) {
if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
__ Jump(code, RelocInfo::CODE_TARGET);
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
// Make sure we don't emit any additional entries in the constant pool
// before the call to ensure that the CallCodeSize() calculated the correct
// number of instructions for the constant pool load.
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
__ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
}
__ Jump(target);
}
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
// Make sure we don't emit any additional entries in the constant pool
// before the call to ensure that the CallCodeSize() calculated the correct
// number of instructions for the constant pool load.
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
__ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
PlatformInterfaceDescriptor* call_descriptor =
instr->descriptor().platform_specific_descriptor();
__ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
call_descriptor->storage_mode());
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
// Make sure we don't emit any additional entries in the constant pool
// before the call to ensure that the CallCodeSize() calculated the correct
// number of instructions for the constant pool load.
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
__ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
}
__ Call(target);
}
__ Call(target);
generator.AfterCall();
}
generator.AfterCall();
}
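DoCallWithDescriptor now splits on IsTailCall(): a tail call leaves the frame (if one was built) and jumps, never returning, while a regular call keeps the safepoint bookkeeping around the call site. The ConstantPoolUnavailableScope in both register paths keeps the add/jump (or add/call) pair at exactly the size CallCodeSize() predicted. A structural sketch, with illustrative stand-ins:

#include <cstdio>

static void LeaveFrame()      { std::puts("leave frame"); }
static void JumpToTarget()    { std::puts("jump (no return)"); }
static void CallTarget()      { std::puts("call"); }
static void RecordSafepoint() { std::puts("record safepoint"); }

void CallWithDescriptor(bool is_tail_call, bool needs_eager_frame) {
  if (is_tail_call) {
    // No safepoint: execution never comes back to this frame.
    if (needs_eager_frame) LeaveFrame();
    JumpToTarget();
  } else {
    CallTarget();
    RecordSafepoint();  // lazy-deopt bookkeeping for the return address
  }
}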
@ -4529,10 +4572,9 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() OVERRIDE {
codegen()->DoDeferredStringCharCodeAt(instr_);
}
virtual LInstruction* instr() OVERRIDE { return instr_; }
void Generate() OVERRIDE { codegen()->DoDeferredStringCharCodeAt(instr_); }
LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@ -4585,10 +4627,11 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() OVERRIDE {
void Generate() OVERRIDE {
codegen()->DoDeferredStringCharFromCode(instr_);
}
virtual LInstruction* instr() OVERRIDE { return instr_; }
LInstruction* instr() OVERRIDE { return instr_; }
private:
LStringCharFromCode* instr_;
};
@ -4662,14 +4705,15 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() OVERRIDE {
void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagIU(instr_,
instr_->value(),
instr_->temp1(),
instr_->temp2(),
SIGNED_INT32);
}
virtual LInstruction* instr() OVERRIDE { return instr_; }
LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagI* instr_;
};
@ -4689,14 +4733,15 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() OVERRIDE {
void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagIU(instr_,
instr_->value(),
instr_->temp1(),
instr_->temp2(),
UNSIGNED_INT32);
}
virtual LInstruction* instr() OVERRIDE { return instr_; }
LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagU* instr_;
};
@ -4783,10 +4828,9 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() OVERRIDE {
codegen()->DoDeferredNumberTagD(instr_);
}
virtual LInstruction* instr() OVERRIDE { return instr_; }
void Generate() OVERRIDE { codegen()->DoDeferredNumberTagD(instr_); }
LInstruction* instr() OVERRIDE { return instr_; }
private:
LNumberTagD* instr_;
};
@ -5002,10 +5046,9 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() OVERRIDE {
codegen()->DoDeferredTaggedToI(instr_);
}
virtual LInstruction* instr() OVERRIDE { return instr_; }
void Generate() OVERRIDE { codegen()->DoDeferredTaggedToI(instr_); }
LInstruction* instr() OVERRIDE { return instr_; }
private:
LTaggedToI* instr_;
};
@ -5199,11 +5242,12 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
: LDeferredCode(codegen), instr_(instr), object_(object) {
SetExit(check_maps());
}
virtual void Generate() OVERRIDE {
void Generate() OVERRIDE {
codegen()->DoDeferredInstanceMigration(instr_, object_);
}
Label* check_maps() { return &check_maps_; }
virtual LInstruction* instr() OVERRIDE { return instr_; }
LInstruction* instr() OVERRIDE { return instr_; }
private:
LCheckMaps* instr_;
Label check_maps_;
@ -5327,10 +5371,9 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() OVERRIDE {
codegen()->DoDeferredAllocate(instr_);
}
virtual LInstruction* instr() OVERRIDE { return instr_; }
void Generate() OVERRIDE { codegen()->DoDeferredAllocate(instr_); }
LInstruction* instr() OVERRIDE { return instr_; }
private:
LAllocate* instr_;
};
@ -5692,10 +5735,9 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() OVERRIDE {
codegen()->DoDeferredStackCheck(instr_);
}
virtual LInstruction* instr() OVERRIDE { return instr_; }
void Generate() OVERRIDE { codegen()->DoDeferredStackCheck(instr_); }
LInstruction* instr() OVERRIDE { return instr_; }
private:
LStackCheck* instr_;
};
@ -5848,10 +5890,11 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
object_(object),
index_(index) {
}
virtual void Generate() OVERRIDE {
void Generate() OVERRIDE {
codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
}
virtual LInstruction* instr() OVERRIDE { return instr_; }
LInstruction* instr() OVERRIDE { return instr_; }
private:
LLoadFieldByIndex* instr_;
Register result_;

45
deps/v8/src/arm/macro-assembler-arm.cc

@ -1699,11 +1699,12 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
}
bind(&done);
// Check that the value is a normal property.
// Check that the value is a field property.
// t2: elements + (index * kPointerSize)
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
ldr(t1, FieldMemOperand(t2, kDetailsOffset));
DCHECK_EQ(FIELD, 0);
tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
b(ne, miss);
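The new check piggybacks on the FIELD property type having enum value 0, which is exactly what the DCHECK_EQ pins down: masking the details word with the type-field mask yields zero only for field properties, so a single tst/b(ne) rejects everything else. As a sketch (the mask width is an assumption):

#include <cstdint>

constexpr uint32_t kTypeFieldMask = 0x3;  // assumed width of the type field

// Valid only because FIELD == 0, as asserted by DCHECK_EQ(FIELD, 0) above.
bool IsFieldProperty(uint32_t details) {
  return (details & kTypeFieldMask) == 0;
}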
@ -2251,23 +2252,37 @@ void MacroAssembler::CheckMap(Register obj,
}
void MacroAssembler::DispatchMap(Register obj,
Register scratch,
Handle<Map> map,
Handle<Code> success,
SmiCheckType smi_check_type) {
void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
Register scratch2, Handle<WeakCell> cell,
Handle<Code> success,
SmiCheckType smi_check_type) {
Label fail;
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, &fail);
}
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
mov(ip, Operand(map));
cmp(scratch, ip);
ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
CmpWeakValue(scratch1, cell, scratch2);
Jump(success, RelocInfo::CODE_TARGET, eq);
bind(&fail);
}
void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
Register scratch) {
mov(scratch, Operand(cell));
ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
cmp(value, scratch);
}
void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
Label* miss) {
mov(value, Operand(cell));
ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
JumpIfSmi(value, miss);
}
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
@ -3653,18 +3668,6 @@ void MacroAssembler::CheckPageFlag(
}
void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
Register scratch,
Label* if_deprecated) {
if (map->CanBeDeprecated()) {
mov(scratch, Operand(map));
ldr(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
tst(scratch, Operand(Map::Deprecated::kMask));
b(ne, if_deprecated);
}
}
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,

24
deps/v8/src/arm/macro-assembler-arm.h

@ -200,10 +200,6 @@ class MacroAssembler: public Assembler {
Condition cc,
Label* condition_met);
void CheckMapDeprecated(Handle<Map> map,
Register scratch,
Label* if_deprecated);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
@ -918,15 +914,19 @@ class MacroAssembler: public Assembler {
SmiCheckType smi_check_type);
// Check if the map of an object is equal to a specified map and branch to a
// specified target if equal. Skip the smi check if not required (object is
// known to be a heap object)
void DispatchMap(Register obj,
Register scratch,
Handle<Map> map,
Handle<Code> success,
SmiCheckType smi_check_type);
// Check if the map of an object is equal to a specified weak map and branch
// to a specified target if equal. Skip the smi check if not required
// (object is known to be a heap object)
void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
Handle<WeakCell> cell, Handle<Code> success,
SmiCheckType smi_check_type);
// Compare the given value and the value of weak cell.
void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
// Load the value of the weak cell in the value register. Branch to the given
// miss label if the weak cell was cleared.
void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
// Compare the object in a register to a value from the root list.
// Uses the ip register as scratch.
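DispatchMap compared against a strong map handle; DispatchWeakMap goes through a WeakCell, so the map stays collectable. CmpWeakValue loads the cell's value slot and compares; LoadWeakValue additionally treats a Smi in that slot as a cleared cell and branches to miss. A plain-C++ sketch of the pattern (the low-bit tagging is the usual V8 convention; the struct is invented):

#include <cstdint>

struct WeakCell { uintptr_t value; };  // heap pointers carry low bit 1

bool CmpWeakValue(uintptr_t candidate, const WeakCell& cell) {
  return candidate == cell.value;
}

// Returns false ("miss") when the cell was cleared, i.e. now holds a Smi.
bool LoadWeakValue(const WeakCell& cell, uintptr_t* out) {
  if ((cell.value & 1) == 0) return false;  // Smi tag => referent collected
  *out = cell.value;
  return true;
}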

198
deps/v8/src/arm/simulator-arm.cc

@ -2629,7 +2629,89 @@ void Simulator::DecodeType3(Instruction* instr) {
UNIMPLEMENTED();
break;
case 1:
UNIMPLEMENTED();
if (instr->Bits(9, 6) == 1) {
if (instr->Bit(20) == 0) {
if (instr->Bits(19, 16) == 0xF) {
// Sxtb.
int32_t rm_val = get_register(instr->RmValue());
int32_t rotate = instr->Bits(11, 10);
switch (rotate) {
case 0:
break;
case 1:
rm_val = (rm_val >> 8) | (rm_val << 24);
break;
case 2:
rm_val = (rm_val >> 16) | (rm_val << 16);
break;
case 3:
rm_val = (rm_val >> 24) | (rm_val << 8);
break;
}
set_register(rd, static_cast<int8_t>(rm_val));
} else {
// Sxtab.
int32_t rn_val = get_register(rn);
int32_t rm_val = get_register(instr->RmValue());
int32_t rotate = instr->Bits(11, 10);
switch (rotate) {
case 0:
break;
case 1:
rm_val = (rm_val >> 8) | (rm_val << 24);
break;
case 2:
rm_val = (rm_val >> 16) | (rm_val << 16);
break;
case 3:
rm_val = (rm_val >> 24) | (rm_val << 8);
break;
}
set_register(rd, rn_val + static_cast<int8_t>(rm_val));
}
} else {
if (instr->Bits(19, 16) == 0xF) {
// Sxth.
int32_t rm_val = get_register(instr->RmValue());
int32_t rotate = instr->Bits(11, 10);
switch (rotate) {
case 0:
break;
case 1:
rm_val = (rm_val >> 8) | (rm_val << 24);
break;
case 2:
rm_val = (rm_val >> 16) | (rm_val << 16);
break;
case 3:
rm_val = (rm_val >> 24) | (rm_val << 8);
break;
}
set_register(rd, static_cast<int16_t>(rm_val));
} else {
// Sxtah.
int32_t rn_val = get_register(rn);
int32_t rm_val = get_register(instr->RmValue());
int32_t rotate = instr->Bits(11, 10);
switch (rotate) {
case 0:
break;
case 1:
rm_val = (rm_val >> 8) | (rm_val << 24);
break;
case 2:
rm_val = (rm_val >> 16) | (rm_val << 16);
break;
case 3:
rm_val = (rm_val >> 24) | (rm_val << 8);
break;
}
set_register(rd, rn_val + static_cast<int16_t>(rm_val));
}
}
} else {
UNREACHABLE();
}
break;
case 2:
if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
@ -2650,8 +2732,7 @@ void Simulator::DecodeType3(Instruction* instr) {
rm_val = (rm_val >> 24) | (rm_val << 8);
break;
}
set_register(rd,
(rm_val & 0xFF) | (rm_val & 0xFF0000));
set_register(rd, (rm_val & 0xFF) | (rm_val & 0xFF0000));
} else {
UNIMPLEMENTED();
}
@ -2660,44 +2741,85 @@ void Simulator::DecodeType3(Instruction* instr) {
}
break;
case 3:
if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
if (instr->Bits(19, 16) == 0xF) {
// Uxtb.
uint32_t rm_val = get_register(instr->RmValue());
int32_t rotate = instr->Bits(11, 10);
switch (rotate) {
case 0:
break;
case 1:
rm_val = (rm_val >> 8) | (rm_val << 24);
break;
case 2:
rm_val = (rm_val >> 16) | (rm_val << 16);
break;
case 3:
rm_val = (rm_val >> 24) | (rm_val << 8);
break;
if ((instr->Bits(9, 6) == 1)) {
if (instr->Bit(20) == 0) {
if (instr->Bits(19, 16) == 0xF) {
// Uxtb.
uint32_t rm_val = get_register(instr->RmValue());
int32_t rotate = instr->Bits(11, 10);
switch (rotate) {
case 0:
break;
case 1:
rm_val = (rm_val >> 8) | (rm_val << 24);
break;
case 2:
rm_val = (rm_val >> 16) | (rm_val << 16);
break;
case 3:
rm_val = (rm_val >> 24) | (rm_val << 8);
break;
}
set_register(rd, (rm_val & 0xFF));
} else {
// Uxtab.
uint32_t rn_val = get_register(rn);
uint32_t rm_val = get_register(instr->RmValue());
int32_t rotate = instr->Bits(11, 10);
switch (rotate) {
case 0:
break;
case 1:
rm_val = (rm_val >> 8) | (rm_val << 24);
break;
case 2:
rm_val = (rm_val >> 16) | (rm_val << 16);
break;
case 3:
rm_val = (rm_val >> 24) | (rm_val << 8);
break;
}
set_register(rd, rn_val + (rm_val & 0xFF));
}
set_register(rd, (rm_val & 0xFF));
} else {
// Uxtab.
uint32_t rn_val = get_register(rn);
uint32_t rm_val = get_register(instr->RmValue());
int32_t rotate = instr->Bits(11, 10);
switch (rotate) {
case 0:
break;
case 1:
rm_val = (rm_val >> 8) | (rm_val << 24);
break;
case 2:
rm_val = (rm_val >> 16) | (rm_val << 16);
break;
case 3:
rm_val = (rm_val >> 24) | (rm_val << 8);
break;
if (instr->Bits(19, 16) == 0xF) {
// Uxth.
uint32_t rm_val = get_register(instr->RmValue());
int32_t rotate = instr->Bits(11, 10);
switch (rotate) {
case 0:
break;
case 1:
rm_val = (rm_val >> 8) | (rm_val << 24);
break;
case 2:
rm_val = (rm_val >> 16) | (rm_val << 16);
break;
case 3:
rm_val = (rm_val >> 24) | (rm_val << 8);
break;
}
set_register(rd, (rm_val & 0xFFFF));
} else {
// Uxtah.
uint32_t rn_val = get_register(rn);
uint32_t rm_val = get_register(instr->RmValue());
int32_t rotate = instr->Bits(11, 10);
switch (rotate) {
case 0:
break;
case 1:
rm_val = (rm_val >> 8) | (rm_val << 24);
break;
case 2:
rm_val = (rm_val >> 16) | (rm_val << 16);
break;
case 3:
rm_val = (rm_val >> 24) | (rm_val << 8);
break;
}
set_register(rd, rn_val + (rm_val & 0xFFFF));
}
set_register(rd, rn_val + (rm_val & 0xFF));
}
} else {
UNIMPLEMENTED();

4
deps/v8/src/arm64/assembler-arm64-inl.h

@ -503,7 +503,7 @@ MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
DCHECK(addrmode == Offset);
regoffset_ = offset.reg();
shift_= offset.shift();
shift_ = offset.shift();
shift_amount_ = offset.shift_amount();
extend_ = NO_EXTEND;
@ -520,7 +520,7 @@ MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
extend_ = offset.extend();
shift_amount_ = offset.shift_amount();
shift_= NO_SHIFT;
shift_ = NO_SHIFT;
offset_ = 0;
// These assertions match those in the extended-register constructor.

30
deps/v8/src/arm64/assembler-arm64.cc

@ -44,22 +44,27 @@ namespace internal {
// CpuFeatures implementation.
void CpuFeatures::ProbeImpl(bool cross_compile) {
if (cross_compile) {
// Always align csp in cross compiled code - this is safe and ensures that
// csp will always be aligned if it is enabled by probing at runtime.
if (FLAG_enable_always_align_csp) supported_ |= 1u << ALWAYS_ALIGN_CSP;
} else {
base::CPU cpu;
if (FLAG_enable_always_align_csp &&
(cpu.implementer() == base::CPU::NVIDIA || FLAG_debug_code)) {
supported_ |= 1u << ALWAYS_ALIGN_CSP;
}
// AArch64 has no configuration options, no further probing is required.
supported_ = 0;
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
// Probe for runtime features
base::CPU cpu;
if (cpu.implementer() == base::CPU::NVIDIA &&
cpu.variant() == base::CPU::NVIDIA_DENVER) {
supported_ |= 1u << COHERENT_CACHE;
}
}
void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() { }
void CpuFeatures::PrintFeatures() {
printf("COHERENT_CACHE=%d\n", CpuFeatures::IsSupported(COHERENT_CACHE));
}
// -----------------------------------------------------------------------------
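The arm64 probe now starts from an empty feature set, returns early for cross-compiles so snapshots depend only on statically known features, and sets COHERENT_CACHE for NVIDIA Denver cores. As the "1u << COHERENT_CACHE" and the printf above suggest, the representation is one bit per feature; a sketch with an assumed bit position:

enum CpuFeature { COHERENT_CACHE = 0 };  // bit position assumed

static unsigned supported_ = 0;

static void MarkSupported(CpuFeature f) { supported_ |= 1u << f; }
static bool IsSupported(CpuFeature f) { return (supported_ & (1u << f)) != 0; }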
@ -612,9 +617,12 @@ void Assembler::Align(int m) {
void Assembler::CheckLabelLinkChain(Label const * label) {
#ifdef DEBUG
if (label->is_linked()) {
static const int kMaxLinksToCheck = 64; // Avoid O(n^2) behaviour.
int links_checked = 0;
int linkoffset = label->pos();
bool end_of_chain = false;
while (!end_of_chain) {
if (++links_checked > kMaxLinksToCheck) break;
Instruction * link = InstructionAt(linkoffset);
int linkpcoffset = link->ImmPCOffset();
int prevlinkoffset = linkoffset + linkpcoffset;
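CheckLabelLinkChain walks every link of a label's use chain in debug builds; when many branches target one label, re-verifying the whole chain on each new link is quadratic overall, so the new kMaxLinksToCheck cap bounds each walk. The pattern, reduced to a sketch:

struct Link { const Link* prev; };

bool ChainLooksValid(const Link* head) {
  static const int kMaxLinksToCheck = 64;  // matches the diff
  int checked = 0;
  for (const Link* l = head; l != nullptr; l = l->prev) {
    if (++checked > kMaxLinksToCheck) break;  // long chain: stop early
    // ... per-link invariants would be asserted here ...
  }
  return true;
}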

16
deps/v8/src/arm64/builtins-arm64.cc

@ -367,13 +367,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
FieldMemOperand(init_map, Map::kBitField3Offset);
// Check if slack tracking is enabled.
__ Ldr(x4, bit_field3);
__ DecodeField<Map::ConstructionCount>(constructon_count, x4);
__ Cmp(constructon_count, Operand(JSFunction::kNoSlackTracking));
__ B(eq, &allocate);
__ DecodeField<Map::Counter>(constructon_count, x4);
__ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
__ B(lt, &allocate);
// Decrease generous allocation count.
__ Subs(x4, x4, Operand(1 << Map::ConstructionCount::kShift));
__ Subs(x4, x4, Operand(1 << Map::Counter::kShift));
__ Str(x4, bit_field3);
__ Cmp(constructon_count, Operand(JSFunction::kFinishSlackTracking));
__ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
__ B(ne, &allocate);
// Push the constructor and map to the stack, and the constructor again
@ -381,7 +381,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Push(constructor, init_map, constructor);
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
__ Pop(init_map, constructor);
__ Mov(constructon_count, Operand(JSFunction::kNoSlackTracking));
__ Mov(constructon_count, Operand(Map::kSlackTrackingCounterEnd - 1));
__ Bind(&allocate);
}
@ -434,8 +434,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Label no_inobject_slack_tracking;
// Check if slack tracking is enabled.
__ Cmp(constructon_count, Operand(JSFunction::kNoSlackTracking));
__ B(eq, &no_inobject_slack_tracking);
__ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
__ B(lt, &no_inobject_slack_tracking);
constructon_count = NoReg;
// Fill the pre-allocated fields with undef.
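Slack tracking moved from a dedicated ConstructionCount field to a counter in the map's bit field: each construction decrements it by one counter step, tracking stays active while the counter is at or above Map::kSlackTrackingCounterEnd, and reaching the end value triggers FinalizeInstanceSize. Roughly this state machine (the concrete bounds are assumptions):

static const int kSlackTrackingCounterStart = 14;  // assumed value
static const int kSlackTrackingCounterEnd = 8;     // assumed value

bool IsTrackingInProgress(int counter) {
  return counter >= kSlackTrackingCounterEnd;
}

int OnConstruction(int counter) {
  if (!IsTrackingInProgress(counter)) return counter;
  --counter;
  if (counter == kSlackTrackingCounterEnd) {
    // FinalizeInstanceSize() would run here; tracking then switches off.
    counter = kSlackTrackingCounterEnd - 1;
  }
  return counter;
}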

163
deps/v8/src/arm64/code-stubs-arm64.cc

@ -1412,6 +1412,11 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
// Ensure that the vector and slot registers won't be clobbered before
// calling the miss handler.
DCHECK(!FLAG_vector_ics ||
!AreAliased(x10, x11, VectorLoadICDescriptor::VectorRegister(),
VectorLoadICDescriptor::SlotRegister()));
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
x11, &miss);
@ -1429,9 +1434,15 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
Register result = x0;
Register scratch = x3;
Register scratch = x10;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
DCHECK(!FLAG_vector_ics ||
(!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
result.is(VectorLoadICDescriptor::SlotRegister())));
// StringCharAtGenerator doesn't use the result register until it's passed
// the different miss possibilities. If it did, we would have a conflict
// when FLAG_vector_ics is true.
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
@ -3016,6 +3027,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
// x1 - function
// x3 - slot id (Smi)
const int with_types_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
Label extra_checks_or_miss, slow_start;
Label slow, non_function, wrap, cont;
Label have_js_function;
@ -3064,35 +3079,72 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
__ bind(&extra_checks_or_miss);
Label miss;
Label uninitialized, miss;
__ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &slow_start);
__ JumpIfRoot(x4, Heap::kuninitialized_symbolRootIndex, &miss);
if (!FLAG_trace_ic) {
// We are going megamorphic. If the feedback is a JSFunction, it is fine
// to handle it here. More complex cases are dealt with in the runtime.
__ AssertNotSmi(x4);
__ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
__ Add(x4, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
__ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
// We have to update statistics for runtime profiling.
const int with_types_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
__ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
__ Subs(x4, x4, Operand(Smi::FromInt(1)));
__ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
__ Ldr(x4, FieldMemOperand(feedback_vector, generic_offset));
__ Adds(x4, x4, Operand(Smi::FromInt(1)));
__ Str(x4, FieldMemOperand(feedback_vector, generic_offset));
__ B(&slow_start);
// The following cases attempt to handle MISS cases without going to the
// runtime.
if (FLAG_trace_ic) {
__ jmp(&miss);
}
// We are here because tracing is on or we are going monomorphic.
__ JumpIfRoot(x4, Heap::kuninitialized_symbolRootIndex, &miss);
// We are going megamorphic. If the feedback is a JSFunction, it is fine
// to handle it here. More complex cases are dealt with in the runtime.
__ AssertNotSmi(x4);
__ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
__ Add(x4, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
__ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
// We have to update statistics for runtime profiling.
__ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
__ Subs(x4, x4, Operand(Smi::FromInt(1)));
__ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
__ Ldr(x4, FieldMemOperand(feedback_vector, generic_offset));
__ Adds(x4, x4, Operand(Smi::FromInt(1)));
__ Str(x4, FieldMemOperand(feedback_vector, generic_offset));
__ B(&slow_start);
__ bind(&uninitialized);
// We are going monomorphic, provided we actually have a JSFunction.
__ JumpIfSmi(function, &miss);
// Goto miss case if we do not have a function.
__ JumpIfNotObjectType(function, x5, x5, JS_FUNCTION_TYPE, &miss);
// Make sure the function is not the Array() function, which requires special
// behavior on MISS.
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, x5);
__ Cmp(function, x5);
__ B(eq, &miss);
// Update stats.
__ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
__ Adds(x4, x4, Operand(Smi::FromInt(1)));
__ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
// Store the function.
__ Add(x4, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ Str(function, FieldMemOperand(x4, FixedArray::kHeaderSize));
__ Add(x4, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ Add(x4, x4, FixedArray::kHeaderSize - kHeapObjectTag);
__ Str(function, MemOperand(x4, 0));
// Update the write barrier.
__ Mov(x5, function);
__ RecordWrite(feedback_vector, x4, x5, kLRHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ B(&have_js_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
__ bind(&miss);
GenerateMiss(masm);
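Both new transitions keep the profiler's aggregate counters in sync: going megamorphic decrements the with-types count and increments the generic count, while the new uninitialized-to-monomorphic path increments with-types only (and still bails to miss for the Array function, which needs special feedback). The bookkeeping as a sketch (index names from the diff; the struct is invented):

struct FeedbackCounters { int with_types; int generic; };

void OnTransitionToMegamorphic(FeedbackCounters* c) {
  c->with_types -= 1;  // this IC no longer contributes type info
  c->generic += 1;
}

void OnTransitionToMonomorphic(FeedbackCounters* c) {
  c->with_types += 1;  // one more IC with useful type feedback
}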
@ -3835,16 +3887,43 @@ void SubStringStub::Generate(MacroAssembler* masm) {
void ToNumberStub::Generate(MacroAssembler* masm) {
// The ToNumber stub takes one argument in x0.
Label check_heap_number, call_builtin;
__ JumpIfNotSmi(x0, &check_heap_number);
Label not_smi;
__ JumpIfNotSmi(x0, &not_smi);
__ Ret();
__ bind(&check_heap_number);
__ JumpIfNotHeapNumber(x0, &call_builtin);
__ Bind(&not_smi);
Label not_heap_number;
__ Ldr(x1, FieldMemOperand(x0, HeapObject::kMapOffset));
__ Ldrb(x1, FieldMemOperand(x1, Map::kInstanceTypeOffset));
// x0: object
// x1: instance type
__ Cmp(x1, HEAP_NUMBER_TYPE);
__ B(ne, &not_heap_number);
__ Ret();
__ Bind(&not_heap_number);
Label not_string, slow_string;
__ Cmp(x1, FIRST_NONSTRING_TYPE);
__ B(hs, &not_string);
// Check if string has a cached array index.
__ Ldr(x2, FieldMemOperand(x0, String::kHashFieldOffset));
__ Tst(x2, Operand(String::kContainsCachedArrayIndexMask));
__ B(ne, &slow_string);
__ IndexFromHash(x2, x0);
__ Ret();
__ Bind(&slow_string);
__ Push(x0); // Push argument.
__ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
__ Bind(&not_string);
Label not_oddball;
__ Cmp(x1, ODDBALL_TYPE);
__ B(ne, &not_oddball);
__ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset));
__ Ret();
__ Bind(&not_oddball);
__ bind(&call_builtin);
__ push(x0);
__ Push(x0); // Push argument.
__ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}
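ToNumberStub now handles four fast paths inline before falling back to the builtin: numbers return unchanged, strings with a cached array index decode it straight from the hash field, other strings tail-call Runtime::kStringToNumber, and oddballs (true/false/null/undefined) load their precomputed to_number value. The dispatch order as a runnable sketch, with an enum standing in for the instance-type checks:

#include <cstdio>

enum Kind { kSmi, kHeapNumber, kCachedIndexString, kString, kOddball, kOther };

const char* ToNumberPath(Kind kind) {
  switch (kind) {
    case kSmi:
    case kHeapNumber:        return "return argument unchanged";
    case kCachedIndexString: return "IndexFromHash (no call)";
    case kString:            return "Runtime::kStringToNumber";
    case kOddball:           return "load Oddball::to_number";
    default:                 return "Builtins::TO_NUMBER";
  }
}

int main() {
  std::printf("%s\n", ToNumberPath(kOddball));  // load Oddball::to_number
  return 0;
}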
@ -4293,18 +4372,10 @@ void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
}
static unsigned int GetProfileEntryHookCallSize(MacroAssembler* masm) {
// The entry hook is a "BumpSystemStackPointer" instruction (sub),
// followed by a "Push lr" instruction, followed by a call.
unsigned int size =
Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
// If ALWAYS_ALIGN_CSP then there will be an extra bic instruction in
// "BumpSystemStackPointer".
size += kInstructionSize;
}
return size;
}
// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
// a "Push lr" instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
@ -4317,7 +4388,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
__ Push(lr);
__ CallStub(&stub);
DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
GetProfileEntryHookCallSize(masm));
kProfileEntryHookCallSize);
__ Pop(lr);
}
@ -4335,7 +4406,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
const int kNumSavedRegs = kCallerSaved.Count();
// Compute the function's address as the first argument.
__ Sub(x0, lr, GetProfileEntryHookCallSize(masm));
__ Sub(x0, lr, kProfileEntryHookCallSize);
#if V8_HOST_ARCH_ARM64
uintptr_t entry_hook =

12
deps/v8/src/arm64/code-stubs-arm64.h

@ -97,7 +97,7 @@ class RecordWriteStub: public PlatformCodeStub {
INCREMENTAL_COMPACTION
};
virtual bool SometimesSetsUpAFrame() { return false; }
bool SometimesSetsUpAFrame() OVERRIDE { return false; }
static Mode GetMode(Code* stub) {
// Find the mode depending on the first two instructions.
@ -275,9 +275,9 @@ class RecordWriteStub: public PlatformCodeStub {
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
inline Major MajorKey() const FINAL { return RecordWrite; }
virtual void Generate(MacroAssembler* masm) OVERRIDE;
void Generate(MacroAssembler* masm) OVERRIDE;
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
@ -285,7 +285,7 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
void Activate(Code* code) {
void Activate(Code* code) OVERRIDE {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
@ -328,7 +328,7 @@ class DirectCEntryStub: public PlatformCodeStub {
void GenerateCall(MacroAssembler* masm, Register target);
private:
bool NeedsImmovableCode() { return true; }
bool NeedsImmovableCode() OVERRIDE { return true; }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
@ -360,7 +360,7 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
Register scratch1,
Register scratch2);
virtual bool SometimesSetsUpAFrame() { return false; }
bool SometimesSetsUpAFrame() OVERRIDE { return false; }
private:
static const int kInlinedProbes = 4;

2
deps/v8/src/arm64/cpu-arm64.cc

@ -43,6 +43,8 @@ class CacheLineSizes {
void CpuFeatures::FlushICache(void* address, size_t length) {
if (length == 0) return;
if (CpuFeatures::IsSupported(COHERENT_CACHE)) return;
#ifdef USE_SIMULATOR
// TODO(all): consider doing some cache simulation to ensure every address
// run has been synced.

5
deps/v8/src/arm64/deoptimizer-arm64.cc

@ -21,6 +21,11 @@ int Deoptimizer::patch_size() {
}
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
// Empty because there is no need for relocation information for the code
// patching in Deoptimizer::PatchCodeForDeoptimization below.
}
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// Invalidate the relocation information, as it will become invalid by the

112
deps/v8/src/arm64/full-codegen-arm64.cc

@ -196,10 +196,10 @@ void FullCodeGenerator::Generate() {
// Argument to NewContext is the function, which is still in x1.
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
__ Mov(x10, Operand(info->scope()->GetScopeInfo()));
__ Push(x1, x10);
__ CallRuntime(Runtime::kNewGlobalContext, 2);
__ CallRuntime(Runtime::kNewScriptContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
@ -934,7 +934,7 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
EmitDebugCheckDeclarationContext(variable);
// Load instance object.
__ LoadContext(x1, scope_->ContextChainLength(scope_->GlobalScope()));
__ LoadContext(x1, scope_->ContextChainLength(scope_->ScriptScope()));
__ Ldr(x1, ContextMemOperand(x1, variable->interface()->Index()));
__ Ldr(x1, ContextMemOperand(x1, Context::EXTENSION_INDEX));
@ -1109,6 +1109,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the object to enumerate over. If the object is null or undefined, skip
// over the loop. See ECMA-262 version 5, section 12.6.4.
SetExpressionPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
__ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &exit);
Register null_value = x15;
@ -1202,6 +1203,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ Bind(&loop);
SetExpressionPosition(stmt->each());
// Load the current count to x0, load the length to x1.
__ PeekPair(x0, x1, 0);
__ Cmp(x0, x1); // Compare to the array length.
@ -1271,48 +1274,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
Comment cmnt(masm_, "[ ForOfStatement");
SetStatementPosition(stmt);
Iteration loop_statement(this, stmt);
increment_loop_depth();
// var iterator = iterable[Symbol.iterator]();
VisitForEffect(stmt->assign_iterator());
// Loop entry.
__ Bind(loop_statement.continue_label());
// result = iterator.next()
VisitForEffect(stmt->next_result());
// if (result.done) break;
Label result_not_done;
VisitForControl(stmt->result_done(),
loop_statement.break_label(),
&result_not_done,
&result_not_done);
__ Bind(&result_not_done);
// each = result.value
VisitForEffect(stmt->assign_each());
// Generate code for the body of the loop.
Visit(stmt->body());
// Check stack before looping.
PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
__ B(loop_statement.continue_label());
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
__ Bind(loop_statement.break_label());
decrement_loop_depth();
}
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new space for
@ -1372,6 +1333,18 @@ void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
}
void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
int offset) {
if (NeedsHomeObject(initializer)) {
__ Peek(StoreDescriptor::ReceiverRegister(), 0);
__ Mov(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ Peek(StoreDescriptor::ValueRegister(), offset * kPointerSize);
CallStoreIC();
}
}
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
TypeofState typeof_state,
Label* slow) {
@ -1721,6 +1694,14 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Peek(StoreDescriptor::ReceiverRegister(), 0);
CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
__ Mov(StoreDescriptor::ReceiverRegister(), x0);
__ Mov(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ Peek(StoreDescriptor::ValueRegister(), 0);
CallStoreIC();
}
} else {
VisitForEffect(value);
}
@ -1732,6 +1713,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(x0);
VisitForStackValue(key);
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
__ Mov(x0, Smi::FromInt(SLOPPY)); // Strict mode
__ Push(x0);
__ CallRuntime(Runtime::kSetProperty, 4);
@ -1769,7 +1751,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(x10);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
EmitSetHomeObjectIfNeeded(it->second->getter, 2);
EmitAccessor(it->second->setter);
EmitSetHomeObjectIfNeeded(it->second->setter, 3);
__ Mov(x10, Smi::FromInt(NONE));
__ Push(x10);
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
@ -2203,6 +2187,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
__ Push(scratch);
VisitForStackValue(key);
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@ -2388,8 +2373,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
} else if (IsSignallingAssignmentToConst(var, op, strict_mode())) {
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
}
// Non-initializing assignments to consts are ignored.
}
@ -4909,7 +4895,6 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
ASM_LOCATION("FullCodeGenerator::EmitGeneratorResume");
Register value_reg = x0;
Register generator_object = x1;
Register the_hole = x2;
Register operand_stack_size = w3;
@ -4923,15 +4908,6 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
VisitForAccumulatorValue(value);
__ Pop(generator_object);
// Check generator state.
Label wrong_state, closed_state, done;
__ Ldr(x10, FieldMemOperand(generator_object,
JSGeneratorObject::kContinuationOffset));
STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
__ CompareAndBranch(x10, Smi::FromInt(0), eq, &closed_state);
__ CompareAndBranch(x10, Smi::FromInt(0), lt, &wrong_state);
// Load suspended function and context.
__ Ldr(cp, FieldMemOperand(generator_object,
JSGeneratorObject::kContextOffset));
@ -4957,7 +4933,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// Enter a new JavaScript frame, and initialize its slots as they were when
// the generator was suspended.
Label resume_frame;
Label resume_frame, done;
__ Bl(&resume_frame);
__ B(&done);
@ -5002,26 +4978,6 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// Not reached: the runtime call returns elsewhere.
__ Unreachable();
// Reach here when generator is closed.
__ Bind(&closed_state);
if (resume_mode == JSGeneratorObject::NEXT) {
// Return completed iterator result when generator is closed.
__ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
__ Push(x10);
// Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(true);
} else {
// Throw the provided value.
__ Push(value_reg);
__ CallRuntime(Runtime::kThrow, 1);
}
__ B(&done);
// Throw error if we attempt to operate on a running generator.
__ Bind(&wrong_state);
__ Push(generator_object);
__ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
__ Bind(&done);
context()->Plug(result_register());
}
@ -5111,7 +5067,7 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
Scope* declaration_scope = scope()->DeclarationScope();
if (declaration_scope->is_global_scope() ||
if (declaration_scope->is_script_scope() ||
declaration_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global

18
deps/v8/src/arm64/lithium-arm64.cc

@ -1564,9 +1564,17 @@ LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
LOperand* slot = NULL;
LOperand* vector = NULL;
if (FLAG_vector_ics) {
slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
vector =
UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
}
// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
context, receiver_register, name_register);
context, receiver_register, name_register, slot, vector);
}
@ -1675,7 +1683,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* global_object =
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
@ -1738,7 +1746,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
@ -1760,7 +1768,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (FLAG_vector_ics) {
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
@ -2400,7 +2408,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LOperand* temp1 = NULL;
if (instr->access().IsExternalMemory() ||
instr->field_representation().IsDouble()) {
(!FLAG_unbox_double_fields && instr->field_representation().IsDouble())) {
value = UseRegister(instr->value());
} else if (instr->NeedsWriteBarrier()) {
value = UseRegisterAndClobber(instr->value());

163
deps/v8/src/arm64/lithium-arm64.h

@ -178,17 +178,13 @@ class LCodeGen;
V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
virtual Opcode opcode() const FINAL OVERRIDE { \
return LInstruction::k##type; \
} \
virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE; \
virtual const char* Mnemonic() const FINAL OVERRIDE { \
return mnemonic; \
} \
static L##type* cast(LInstruction* instr) { \
DCHECK(instr->Is##type()); \
return reinterpret_cast<L##type*>(instr); \
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
Opcode opcode() const FINAL { return LInstruction::k##type; } \
void CompileToNative(LCodeGen* generator) FINAL; \
const char* Mnemonic() const FINAL { return mnemonic; } \
static L##type* cast(LInstruction* instr) { \
DCHECK(instr->Is##type()); \
return reinterpret_cast<L##type*>(instr); \
}
@ -295,11 +291,9 @@ class LTemplateResultInstruction : public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
virtual bool HasResult() const FINAL OVERRIDE {
return (R != 0) && (result() != NULL);
}
bool HasResult() const FINAL { return (R != 0) && (result() != NULL); }
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() const { return results_[0]; }
LOperand* result() const OVERRIDE { return results_[0]; }
protected:
EmbeddedContainer<LOperand*, R> results_;
@ -317,28 +311,32 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {
private:
// Iterator support.
virtual int InputCount() FINAL OVERRIDE { return I; }
virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
int InputCount() FINAL { return I; }
LOperand* InputAt(int i) FINAL { return inputs_[i]; }
virtual int TempCount() FINAL OVERRIDE { return T; }
virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
int TempCount() FINAL { return T; }
LOperand* TempAt(int i) FINAL { return temps_[i]; }
};
class LTailCallThroughMegamorphicCache FINAL
: public LTemplateInstruction<0, 3, 0> {
: public LTemplateInstruction<0, 5, 0> {
public:
explicit LTailCallThroughMegamorphicCache(LOperand* context,
LOperand* receiver,
LOperand* name) {
LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
LOperand* name, LOperand* slot,
LOperand* vector) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
inputs_[3] = slot;
inputs_[4] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
LOperand* slot() { return inputs_[3]; }
LOperand* vector() { return inputs_[4]; }
DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")
@ -348,9 +346,7 @@ class LTailCallThroughMegamorphicCache FINAL
class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
public:
virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
@ -360,7 +356,7 @@ class LControlInstruction : public LTemplateInstruction<0, I, T> {
public:
LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
virtual bool IsControl() const FINAL OVERRIDE { return true; }
bool IsControl() const FINAL { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@ -410,8 +406,8 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
}
// Can't use the DECLARE-macro here because of sub-classes.
virtual bool IsGap() const OVERRIDE { return true; }
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
bool IsGap() const OVERRIDE { return true; }
void PrintDataTo(StringStream* stream) OVERRIDE;
static LGap* cast(LInstruction* instr) {
DCHECK(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
@ -451,7 +447,7 @@ class LInstructionGap FINAL : public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return !IsRedundant();
}
@ -492,10 +488,10 @@ class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
public:
explicit LGoto(HBasicBlock* block) : block_(block) { }
virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
virtual bool IsControl() const OVERRIDE { return true; }
void PrintDataTo(StringStream* stream) OVERRIDE;
bool IsControl() const OVERRIDE { return true; }
int block_id() const { return block_->block_id(); }
@ -525,12 +521,10 @@ class LLabel FINAL : public LGap {
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
@ -550,9 +544,7 @@ class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
return false;
}
bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
};
@ -573,7 +565,7 @@ class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
LOperand* length() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -721,11 +713,9 @@ class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
virtual Opcode opcode() const OVERRIDE {
return LInstruction::kArithmeticD;
}
virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
virtual const char* Mnemonic() const OVERRIDE;
Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticD; }
void CompileToNative(LCodeGen* generator) OVERRIDE;
const char* Mnemonic() const OVERRIDE;
private:
Token::Value op_;
@ -749,11 +739,9 @@ class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
LOperand* right() { return inputs_[2]; }
Token::Value op() const { return op_; }
virtual Opcode opcode() const OVERRIDE {
return LInstruction::kArithmeticT;
}
virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
virtual const char* Mnemonic() const OVERRIDE;
Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticT; }
void CompileToNative(LCodeGen* generator) OVERRIDE;
const char* Mnemonic() const OVERRIDE;
private:
Token::Value op_;
@ -838,7 +826,7 @@ class LBranch FINAL : public LControlInstruction<1, 2> {
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -853,7 +841,7 @@ class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
@ -889,7 +877,7 @@ class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
@ -908,7 +896,7 @@ class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
@ -925,7 +913,7 @@ class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
@ -1097,7 +1085,7 @@ class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 2> {
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -1215,7 +1203,7 @@ class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
return hydrogen()->representation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -1313,7 +1301,7 @@ class LDeclareGlobals FINAL : public LTemplateInstruction<0, 1, 0> {
class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
public:
virtual bool IsControl() const OVERRIDE { return true; }
bool IsControl() const OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
@ -1447,7 +1435,7 @@ class LHasCachedArrayIndexAndBranch FINAL
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -1465,7 +1453,7 @@ class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 1> {
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -1479,7 +1467,7 @@ class LInnerAllocatedObject FINAL : public LTemplateInstruction<1, 2, 0> {
LOperand* base_object() const { return inputs_[0]; }
LOperand* offset() const { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
};
@ -1555,11 +1543,12 @@ class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
CallInterfaceDescriptor descriptor() { return descriptor_; }
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
@ -1567,11 +1556,11 @@ class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
ZoneList<LOperand*> inputs_;
// Iterator support.
virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
int InputCount() FINAL { return inputs_.length(); }
LOperand* InputAt(int i) FINAL { return inputs_[i]; }
virtual int TempCount() FINAL OVERRIDE { return 0; }
virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
int TempCount() FINAL { return 0; }
LOperand* TempAt(int i) FINAL { return NULL; }
};
@ -1588,7 +1577,7 @@ class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
int arity() const { return hydrogen()->argument_count() - 1; }
};
@ -1624,7 +1613,7 @@ class LIsObjectAndBranch FINAL : public LControlInstruction<1, 2> {
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -1641,7 +1630,7 @@ class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -1656,7 +1645,7 @@ class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -1674,7 +1663,7 @@ class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -1691,7 +1680,7 @@ class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
int slot_index() const { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -2272,7 +2261,7 @@ class LNumberUntagD FINAL : public LTemplateInstruction<1, 1, 1> {
class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
public:
virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
@ -2331,11 +2320,11 @@ class LPushArguments FINAL : public LTemplateResultInstruction<0> {
private:
// Iterator support.
virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
int InputCount() FINAL { return inputs_.length(); }
LOperand* InputAt(int i) FINAL { return inputs_[i]; }
virtual int TempCount() FINAL OVERRIDE { return 0; }
virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
int TempCount() FINAL { return 0; }
LOperand* TempAt(int i) FINAL { return NULL; }
};
@ -2585,7 +2574,7 @@ class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@ -2609,7 +2598,7 @@ class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 2> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
Representation representation() const {
return hydrogen()->field_representation();
@ -2632,7 +2621,7 @@ class LStoreNamedGeneric FINAL: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
StrictMode strict_mode() { return hydrogen()->strict_mode(); }
@ -2707,7 +2696,7 @@ class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
Token::Value op() const { return hydrogen()->token(); }
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -2786,7 +2775,7 @@ class LStoreCodeEntry FINAL: public LTemplateInstruction<0, 2, 1> {
LOperand* code_object() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
@ -2810,7 +2799,7 @@ class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 1> {
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
@ -2916,7 +2905,7 @@ class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 2> {
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
Handle<Map> transitioned_map() {
@ -2991,7 +2980,7 @@ class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 2> {
Handle<String> type_literal() const { return hydrogen()->type_literal(); }
virtual void PrintDataTo(StringStream* stream) OVERRIDE;
void PrintDataTo(StringStream* stream) OVERRIDE;
};
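Note on the pattern running through this header: V8's OVERRIDE and FINAL macros expand to the C++11 `override` and `final` keywords, and a member marked `override` is implicitly virtual, so the explicit `virtual` keyword the hunks above delete was redundant. A minimal sketch of the idiom, using the plain keywords instead of V8's macros:

```cpp
#include <iostream>

struct LInstructionBase {
  virtual ~LInstructionBase() = default;
  virtual bool IsControl() const { return false; }
};

struct LGotoLike final : LInstructionBase {
  // `override` already implies `virtual`; spelling out both is redundant,
  // which is exactly the repetition the hunks above remove.
  bool IsControl() const override { return true; }
};

int main() {
  LGotoLike g;
  LInstructionBase* base = &g;
  std::cout << base->IsControl() << "\n";  // prints 1: dispatch is still virtual
}
```

The meaning of the code is unchanged; the cleanup only drops the keyword that `override`/`final` already imply.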

121
deps/v8/src/arm64/lithium-codegen-arm64.cc

@ -557,11 +557,6 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
if (kind & Safepoint::kWithRegisters) {
// Register cp always contains a pointer to the context.
safepoint.DefinePointerRegister(cp, zone());
}
}
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
@ -2047,23 +2042,33 @@ void LCodeGen::DoTailCallThroughMegamorphicCache(
DCHECK(name.is(LoadDescriptor::NameRegister()));
DCHECK(receiver.is(x1));
DCHECK(name.is(x2));
Register scratch = x3;
Register extra = x4;
Register extra2 = x5;
Register extra3 = x6;
Register scratch = x4;
Register extra = x5;
Register extra2 = x6;
Register extra3 = x7;
DCHECK(!FLAG_vector_ics ||
!AreAliased(ToRegister(instr->slot()), ToRegister(instr->vector()),
scratch, extra, extra2, extra3));
// Important for the tail-call.
bool must_teardown_frame = NeedsEagerFrame();
// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
must_teardown_frame, receiver, name,
scratch, extra, extra2, extra3);
if (!instr->hydrogen()->is_just_miss()) {
DCHECK(!instr->hydrogen()->is_keyed_load());
// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(
masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
receiver, name, scratch, extra, extra2, extra3);
}
// Tail call to miss if we ended up here.
if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
LoadIC::GenerateMiss(masm());
if (instr->hydrogen()->is_keyed_load()) {
KeyedLoadIC::GenerateMiss(masm());
} else {
LoadIC::GenerateMiss(masm());
}
}
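Note on the register shuffle above: the probe's scratch registers move from x3..x6 up to x4..x7 so they cannot alias the slot/vector registers that the new DCHECK inspects when --vector-ics is enabled. A toy model of that aliasing check, with registers modeled as small integers (the concrete slot/vector numbers below are hypothetical):

```cpp
#include <cstdio>
#include <initializer_list>

// A set bit in `seen` means that register number already appeared,
// i.e. two operands alias the same physical register.
static bool AreAliased(std::initializer_list<int> regs) {
  unsigned seen = 0;
  for (int r : regs) {
    if (seen & (1u << r)) return true;
    seen |= 1u << r;
  }
  return false;
}

int main() {
  int slot = 3, vector = 2;  // hypothetical vector-ICs register assignment
  // x4..x7 as scratches: disjoint from slot/vector, so the check passes.
  std::printf("%d\n", AreAliased({slot, vector, 4, 5, 6, 7}));  // prints 0
}
```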
@ -2071,25 +2076,44 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(ToRegister(instr->result()).Is(x0));
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
// TODO(all): on ARM we use a call descriptor to specify a storage mode
// but on ARM64 we only have one storage mode so it isn't necessary. Check
// this understanding is correct.
__ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
if (instr->hydrogen()->IsTailCall()) {
if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
// TODO(all): on ARM we use a call descriptor to specify a storage mode
// but on ARM64 we only have one storage mode so it isn't necessary. Check
// this understanding is correct.
__ Jump(code, RelocInfo::CODE_TARGET);
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
__ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
__ Br(target);
}
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
__ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
__ Call(target);
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
// TODO(all): on ARM we use a call descriptor to specify a storage mode
// but on ARM64 we only have one storage mode so it isn't necessary. Check
// this understanding is correct.
__ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
__ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
__ Call(target);
}
generator.AfterCall();
}
generator.AfterCall();
after_push_argument_ = false;
}
@ -3372,13 +3396,17 @@ template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
Register slot_register = VectorLoadICDescriptor::SlotRegister();
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
DCHECK(slot_register.is(x0));
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Mov(vector_register, vector);
// No need to allocate this register.
DCHECK(VectorLoadICDescriptor::SlotRegister().is(x0));
int index = vector->GetIndex(instr->hydrogen()->slot());
__ Mov(VectorLoadICDescriptor::SlotRegister(), Smi::FromInt(index));
FeedbackVectorICSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ Mov(slot_register, Smi::FromInt(index));
}
@ -3665,6 +3693,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
if (instr->hydrogen()->representation().IsDouble()) {
DCHECK(access.IsInobject());
FPRegister result = ToDoubleRegister(instr->result());
__ Ldr(result, FieldMemOperand(object, offset));
return;
@ -4771,6 +4800,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
int parameter_count = ToInteger32(instr->constant_parameter_count());
__ Drop(parameter_count + 1);
} else {
DCHECK(info()->IsStub()); // Functions would need to drop one more value.
Register parameter_count = ToRegister(instr->parameter_count());
__ DropBySMI(parameter_count);
}
@ -5022,7 +5052,6 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
Register scratch2 = x6;
DCHECK(instr->IsMarkedAsCall());
ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
// TODO(all): if Mov could handle object in new space then it could be used
// here.
__ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
@ -5354,7 +5383,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ AssertNotSmi(object);
if (representation.IsDouble()) {
if (!FLAG_unbox_double_fields && representation.IsDouble()) {
DCHECK(access.IsInobject());
DCHECK(!instr->hydrogen()->has_transition());
DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
@ -5363,8 +5392,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
return;
}
Register value = ToRegister(instr->value());
DCHECK(!representation.IsSmi() ||
!instr->value()->IsConstantOperand() ||
IsInteger32Constant(LConstantOperand::cast(instr->value())));
@ -5396,8 +5423,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
destination = temp0;
}
if (representation.IsSmi() &&
instr->hydrogen()->value()->representation().IsInteger32()) {
if (FLAG_unbox_double_fields && representation.IsDouble()) {
DCHECK(access.IsInobject());
FPRegister value = ToDoubleRegister(instr->value());
__ Str(value, FieldMemOperand(object, offset));
} else if (representation.IsSmi() &&
instr->hydrogen()->value()->representation().IsInteger32()) {
DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
#ifdef DEBUG
Register temp0 = ToRegister(instr->temp0());
@ -5412,12 +5443,15 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
#endif
STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
STATIC_ASSERT(kSmiTag == 0);
Register value = ToRegister(instr->value());
__ Store(value, UntagSmiFieldMemOperand(destination, offset),
Representation::Integer32());
} else {
Register value = ToRegister(instr->value());
__ Store(value, FieldMemOperand(destination, offset), representation);
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
Register value = ToRegister(instr->value());
__ RecordWriteField(destination,
offset,
value, // Clobbered.
@ -5985,10 +6019,11 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
object_(object),
index_(index) {
}
virtual void Generate() OVERRIDE {
void Generate() OVERRIDE {
codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
}
virtual LInstruction* instr() OVERRIDE { return instr_; }
LInstruction* instr() OVERRIDE { return instr_; }
private:
LLoadFieldByIndex* instr_;
Register result_;
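The main behavioural change in DoCallWithDescriptor above is the new tail-call path: when the hydrogen instruction is marked as a tail call, the frame is torn down first and the target is entered with a jump rather than a call, so no pointer map or lazy-deopt safepoint is recorded. A stubbed outline of that control flow, with invented function names standing in for the real LCodeGen interface:

```cpp
#include <cstdio>

static void LeaveFrame()   { std::puts("leave frame"); }
static void JumpToTarget() { std::puts("jump (tail call, never returns)"); }
static void CallWithSafepoint() {
  std::puts("call + record lazy-deopt safepoint");
}

void EmitCall(bool is_tail_call, bool needs_eager_frame) {
  if (is_tail_call) {
    // Tear the frame down first, then branch; control never comes back,
    // so no safepoint is recorded for this site.
    if (needs_eager_frame) LeaveFrame();
    JumpToTarget();
  } else {
    // Regular path: keep the frame and record a safepoint for lazy deopt.
    CallWithSafepoint();
  }
}

int main() {
  EmitCall(true, true);
  EmitCall(false, false);
}
```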

15
deps/v8/src/arm64/macro-assembler-arm64-inl.h

@ -1244,14 +1244,7 @@ void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
DCHECK(!csp.Is(sp_));
if (!TmpList()->IsEmpty()) {
if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
Sub(temp, StackPointer(), space);
Bic(csp, temp, 0xf);
} else {
Sub(csp, StackPointer(), space);
}
Sub(csp, StackPointer(), space);
} else {
// TODO(jbramley): Several callers rely on this not using scratch
// registers, so we use the assembler directly here. However, this means
@ -1288,11 +1281,7 @@ void MacroAssembler::SyncSystemStackPointer() {
DCHECK(emit_debug_code());
DCHECK(!csp.Is(sp_));
{ InstructionAccurateScope scope(this);
if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
bic(csp, StackPointer(), 0xf);
} else {
mov(csp, StackPointer());
}
mov(csp, StackPointer());
}
AssertStackConsistency();
}
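These hunks delete the ALWAYS_ALIGN_CSP special case: csp is now derived from StackPointer() directly instead of being rounded down to a 16-byte boundary with Bic. A small sketch of the arithmetic difference, assuming illustrative addresses:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t sp = 0x7ffd1238, space = 0x20;  // illustrative values
  // Old behaviour under ALWAYS_ALIGN_CSP: Bic(csp, temp, 0xf) rounded the
  // new csp down to a 16-byte boundary.
  uint64_t aligned = (sp - space) & ~uint64_t{0xf};
  // New behaviour: csp tracks StackPointer() - space exactly.
  uint64_t direct = sp - space;
  std::printf("aligned=%#llx direct=%#llx\n",
              (unsigned long long)aligned, (unsigned long long)direct);
}
```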

47
deps/v8/src/arm64/macro-assembler-arm64.cc

@ -1308,7 +1308,7 @@ void MacroAssembler::AssertStackConsistency() {
// Avoid emitting code when !use_real_abort() since non-real aborts cause too
// much code to be generated.
if (emit_debug_code() && use_real_aborts()) {
if (csp.Is(StackPointer()) || CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
if (csp.Is(StackPointer())) {
// Always check the alignment of csp if ALWAYS_ALIGN_CSP is true. We
// can't check the alignment of csp without using a scratch register (or
// clobbering the flags), but the processor (or simulator) will abort if
@ -3788,23 +3788,38 @@ void MacroAssembler::CheckMap(Register obj_map,
}
void MacroAssembler::DispatchMap(Register obj,
Register scratch,
Handle<Map> map,
Handle<Code> success,
SmiCheckType smi_check_type) {
void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
Register scratch2, Handle<WeakCell> cell,
Handle<Code> success,
SmiCheckType smi_check_type) {
Label fail;
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, &fail);
}
Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
Cmp(scratch, Operand(map));
Ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
CmpWeakValue(scratch1, cell, scratch2);
B(ne, &fail);
Jump(success, RelocInfo::CODE_TARGET);
Bind(&fail);
}
void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
Register scratch) {
Mov(scratch, Operand(cell));
Ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
Cmp(value, scratch);
}
void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
Label* miss) {
Mov(value, Operand(cell));
Ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
JumpIfSmi(value, miss);
}
void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
@ -4087,7 +4102,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Check the context is a native context.
if (emit_debug_code()) {
// Read the first word and compare to the global_context_map.
// Read the first word and compare to the native_context_map.
Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
Check(eq, kExpectedNativeContext);
@ -4213,10 +4228,11 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
}
Bind(&done);
// Check that the value is a normal property.
// Check that the value is a field property.
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
DCHECK_EQ(FIELD, 0);
TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
// Get the value at the masked, scaled index and return.
@ -4633,17 +4649,6 @@ void MacroAssembler::HasColor(Register object,
}
void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
Register scratch,
Label* if_deprecated) {
if (map->CanBeDeprecated()) {
Mov(scratch, Operand(map));
Ldrsw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated);
}
}
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
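The DispatchMap-to-DispatchWeakMap change above swaps a strong Handle&lt;Map&gt; comparison for a comparison against the payload of a WeakCell, which the GC may clear, so the cached map no longer keeps itself alive. A rough model of the CmpWeakValue/LoadWeakValue contract (types are stand-ins; a null pointer models a cleared cell):

```cpp
#include <cstdio>

struct WeakCell {
  const void* value;  // nullptr models a cell the GC has cleared
};

// Mirrors CmpWeakValue above: load the cell's payload and compare it with
// the map just read from the object.
static bool MapMatchesWeakCell(const void* object_map, const WeakCell& cell) {
  return cell.value == object_map;
}

int main() {
  int some_map = 0;  // stand-in for a Map object
  WeakCell live{&some_map}, cleared{nullptr};
  std::printf("%d %d\n",
              MapMatchesWeakCell(&some_map, live),      // 1: dispatch hit
              MapMatchesWeakCell(&some_map, cleared));  // 0: fall through to miss
}
```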

35
deps/v8/src/arm64/macro-assembler-arm64.h

@ -761,9 +761,9 @@ class MacroAssembler : public Assembler {
// it can be evidence of a potential bug because the ABI forbids accesses
// below csp.
//
// If StackPointer() is the system stack pointer (csp) or ALWAYS_ALIGN_CSP is
// enabled, then csp will be dereferenced to cause the processor
// (or simulator) to abort if it is not properly aligned.
// If StackPointer() is the system stack pointer (csp), then csp will be
// dereferenced to cause the processor (or simulator) to abort if it is not
// properly aligned.
//
// If emit_debug_code() is false, this emits no code.
void AssertStackConsistency();
@ -831,9 +831,7 @@ class MacroAssembler : public Assembler {
inline void BumpSystemStackPointer(const Operand& space);
// Re-synchronizes the system stack pointer (csp) with the current stack
// pointer (according to StackPointer()). This function will ensure the
// new value of the system stack pointer is remains aligned to 16 bytes, and
// is lower than or equal to the value of the current stack pointer.
// pointer (according to StackPointer()).
//
// This method asserts that StackPointer() is not csp, since the call does
// not make sense in that context.
@ -1480,14 +1478,19 @@ class MacroAssembler : public Assembler {
Label* fail,
SmiCheckType smi_check_type);
// Check if the map of an object is equal to a specified map and branch to a
// specified target if equal. Skip the smi check if not required (object is
// known to be a heap object)
void DispatchMap(Register obj,
Register scratch,
Handle<Map> map,
Handle<Code> success,
SmiCheckType smi_check_type);
// Check if the map of an object is equal to a specified weak map and branch
// to a specified target if equal. Skip the smi check if not required
// (object is known to be a heap object)
void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
Handle<WeakCell> cell, Handle<Code> success,
SmiCheckType smi_check_type);
// Compare the given value and the value of weak cell.
void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
// Load the value of the weak cell in the value register. Branch to the given
// miss label if the weak cell was cleared.
void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
// Test the bitfield of the heap object map with mask and set the condition
// flags. The object register is preserved.
@ -1777,10 +1780,6 @@ class MacroAssembler : public Assembler {
int mask,
Label* if_all_clear);
void CheckMapDeprecated(Handle<Map> map,
Register scratch,
Label* if_deprecated);
// Check if object is in new space and jump accordingly.
// Register 'object' is preserved.
void JumpIfNotInNewSpace(Register object,

2
deps/v8/src/arm64/simulator-arm64.cc

@ -413,7 +413,7 @@ void Simulator::ResetState() {
// Reset debug helpers.
breakpoints_.empty();
break_on_next_= false;
break_on_next_ = false;
}

2
deps/v8/src/array-iterator.js

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
'use strict';
"use strict";
// This file relies on the fact that the following declaration has been made

59
deps/v8/src/array.js

@ -238,7 +238,10 @@ function SparseMove(array, start_i, del_count, len, num_additional_args) {
// Bail out if no moving is necessary.
if (num_additional_args === del_count) return;
// Move data to new array.
var new_array = new InternalArray(len - del_count + num_additional_args);
var new_array = new InternalArray(
// Clamp array length to 2^32-1 to avoid early RangeError.
MathMin(len - del_count + num_additional_args, 0xffffffff));
var big_indices;
var indices = %GetArrayKeys(array, len);
if (IS_NUMBER(indices)) {
var limit = indices;
@ -267,7 +270,12 @@ function SparseMove(array, start_i, del_count, len, num_additional_args) {
} else if (key >= start_i + del_count) {
var current = array[key];
if (!IS_UNDEFINED(current) || key in array) {
new_array[key - del_count + num_additional_args] = current;
var new_key = key - del_count + num_additional_args;
new_array[new_key] = current;
if (new_key > 0xffffffff) {
big_indices = big_indices || new InternalArray();
big_indices.push(new_key);
}
}
}
}
@ -275,6 +283,14 @@ function SparseMove(array, start_i, del_count, len, num_additional_args) {
}
// Move contents of new_array into this array
%MoveArrayContents(new_array, array);
// Add any moved values that aren't elements anymore.
if (!IS_UNDEFINED(big_indices)) {
var length = big_indices.length;
for (var i = 0; i < length; ++i) {
var key = big_indices[i];
array[key] = new_array[key];
}
}
}
@ -282,9 +298,10 @@ function SparseMove(array, start_i, del_count, len, num_additional_args) {
// because the receiver is not an array (so we have no choice) or because we
// know we are not deleting or moving a lot of elements.
function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
var is_array = IS_ARRAY(array);
for (var i = 0; i < del_count; i++) {
var index = start_i + i;
if (index in array) {
if (HAS_INDEX(array, index, is_array)) {
var current = array[index];
// The spec requires [[DefineOwnProperty]] here, %AddElement is close
// enough (in that it ignores the prototype).
@ -295,6 +312,7 @@ function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
function SimpleMove(array, start_i, del_count, len, num_additional_args) {
var is_array = IS_ARRAY(array);
if (num_additional_args !== del_count) {
// Move the existing elements after the elements to be deleted
// to the right position in the resulting array.
@ -302,7 +320,7 @@ function SimpleMove(array, start_i, del_count, len, num_additional_args) {
for (var i = len - del_count; i > start_i; i--) {
var from_index = i + del_count - 1;
var to_index = i + num_additional_args - 1;
if (from_index in array) {
if (HAS_INDEX(array, from_index, is_array)) {
array[to_index] = array[from_index];
} else {
delete array[to_index];
@ -312,7 +330,7 @@ function SimpleMove(array, start_i, del_count, len, num_additional_args) {
for (var i = start_i; i < len - del_count; i++) {
var from_index = i + del_count;
var to_index = i + num_additional_args;
if (from_index in array) {
if (HAS_INDEX(array, from_index, is_array)) {
array[to_index] = array[from_index];
} else {
delete array[to_index];
@ -966,7 +984,7 @@ function ArraySort(comparefn) {
// of a prototype property.
var CopyFromPrototype = function CopyFromPrototype(obj, length) {
var max = 0;
for (var proto = %GetPrototype(obj); proto; proto = %GetPrototype(proto)) {
for (var proto = %_GetPrototype(obj); proto; proto = %_GetPrototype(proto)) {
var indices = %GetArrayKeys(proto, length);
if (IS_NUMBER(indices)) {
// It's an interval.
@ -995,7 +1013,7 @@ function ArraySort(comparefn) {
// where a prototype of obj has an element. I.e., shadow all prototype
// elements in that range.
var ShadowPrototypeElements = function(obj, from, to) {
for (var proto = %GetPrototype(obj); proto; proto = %GetPrototype(proto)) {
for (var proto = %_GetPrototype(obj); proto; proto = %_GetPrototype(proto)) {
var indices = %GetArrayKeys(proto, to);
if (IS_NUMBER(indices)) {
// It's an interval.
@ -1063,7 +1081,7 @@ function ArraySort(comparefn) {
}
for (i = length - num_holes; i < length; i++) {
// For compatibility with Webkit, do not expose elements in the prototype.
if (i in %GetPrototype(obj)) {
if (i in %_GetPrototype(obj)) {
obj[i] = UNDEFINED;
} else {
delete obj[i];
@ -1137,9 +1155,10 @@ function ArrayFilter(f, receiver) {
var result = new $Array();
var accumulator = new InternalArray();
var accumulator_length = 0;
var is_array = IS_ARRAY(array);
var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
for (var i = 0; i < length; i++) {
if (i in array) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
@ -1172,9 +1191,10 @@ function ArrayForEach(f, receiver) {
needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
}
var is_array = IS_ARRAY(array);
var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
for (var i = 0; i < length; i++) {
if (i in array) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
@ -1205,9 +1225,10 @@ function ArraySome(f, receiver) {
needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
}
var is_array = IS_ARRAY(array);
var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
for (var i = 0; i < length; i++) {
if (i in array) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
@ -1237,9 +1258,10 @@ function ArrayEvery(f, receiver) {
needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
}
var is_array = IS_ARRAY(array);
var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
for (var i = 0; i < length; i++) {
if (i in array) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
@ -1270,9 +1292,10 @@ function ArrayMap(f, receiver) {
var result = new $Array();
var accumulator = new InternalArray(length);
var is_array = IS_ARRAY(array);
var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
for (var i = 0; i < length; i++) {
if (i in array) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
@ -1407,10 +1430,11 @@ function ArrayReduce(callback, current) {
throw MakeTypeError('called_non_callable', [callback]);
}
var is_array = IS_ARRAY(array);
var i = 0;
find_initial: if (%_ArgumentsLength() < 2) {
for (; i < length; i++) {
if (i in array) {
if (HAS_INDEX(array, i, is_array)) {
current = array[i++];
break find_initial;
}
@ -1421,7 +1445,7 @@ function ArrayReduce(callback, current) {
var receiver = %GetDefaultReceiver(callback);
var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(callback);
for (; i < length; i++) {
if (i in array) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(callback);
@ -1443,10 +1467,11 @@ function ArrayReduceRight(callback, current) {
throw MakeTypeError('called_non_callable', [callback]);
}
var is_array = IS_ARRAY(array);
var i = length - 1;
find_initial: if (%_ArgumentsLength() < 2) {
for (; i >= 0; i--) {
if (i in array) {
if (HAS_INDEX(array, i, is_array)) {
current = array[i--];
break find_initial;
}
@ -1457,7 +1482,7 @@ function ArrayReduceRight(callback, current) {
var receiver = %GetDefaultReceiver(callback);
var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(callback);
for (; i >= 0; i--) {
if (i in array) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(callback);
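The array.js changes have two themes: SparseMove now clamps its scratch InternalArray's length to 2^32-1 (indices past the clamp are collected in big_indices and written back afterwards), and the iteration builtins switch from bare `in` checks to the HAS_INDEX macro, which can take a fast path when the receiver is a genuine array. The clamping arithmetic, sketched in C++ with illustrative values:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // Illustrative values: a maximum-length array gaining five elements.
  uint64_t len = 0x100000000ULL, del_count = 0, num_additional_args = 5;
  uint64_t wanted = len - del_count + num_additional_args;
  // SparseMove clamps so the InternalArray constructor cannot throw an
  // early RangeError; indices past the clamp travel via big_indices.
  uint64_t clamped = std::min<uint64_t>(wanted, 0xffffffffULL);
  std::printf("wanted=%llu clamped=%llu\n",
              (unsigned long long)wanted, (unsigned long long)clamped);
}
```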

125
deps/v8/src/ast-numbering.cc

@ -16,11 +16,14 @@ namespace internal {
class AstNumberingVisitor FINAL : public AstVisitor {
public:
explicit AstNumberingVisitor(Zone* zone)
: AstVisitor(), next_id_(BailoutId::FirstUsable().ToInt()) {
: AstVisitor(),
next_id_(BailoutId::FirstUsable().ToInt()),
dont_crankshaft_reason_(kNoReason),
dont_turbofan_reason_(kNoReason) {
InitializeAstVisitor(zone);
}
void Renumber(FunctionLiteral* node);
bool Renumber(FunctionLiteral* node);
private:
// AST node visitor interface.
@ -28,6 +31,8 @@ class AstNumberingVisitor FINAL : public AstVisitor {
AST_NODE_LIST(DEFINE_VISIT)
#undef DEFINE_VISIT
bool Finish(FunctionLiteral* node);
void VisitStatements(ZoneList<Statement*>* statements) OVERRIDE;
void VisitDeclarations(ZoneList<Declaration*>* declarations) OVERRIDE;
void VisitArguments(ZoneList<Expression*>* arguments);
@ -40,9 +45,56 @@ class AstNumberingVisitor FINAL : public AstVisitor {
}
void IncrementNodeCount() { properties_.add_node_count(1); }
void DisableCrankshaft(BailoutReason reason) {
dont_crankshaft_reason_ = reason;
properties_.flags()->Add(kDontSelfOptimize);
}
// TODO(turbofan): Remove the dont_turbofan_reason once no nodes are
// DontTurbofanNode. That set of nodes must be kept in sync with
// Pipeline::GenerateCode.
void DisableTurbofan(BailoutReason reason) {
dont_crankshaft_reason_ = reason;
dont_turbofan_reason_ = reason;
DisableSelfOptimization();
}
void DisableSelfOptimization() {
properties_.flags()->Add(kDontSelfOptimize);
}
void DisableCaching(BailoutReason reason) {
dont_crankshaft_reason_ = reason;
DisableSelfOptimization();
properties_.flags()->Add(kDontCache);
}
template <typename Node>
void ReserveFeedbackSlots(Node* node) {
FeedbackVectorRequirements reqs =
node->ComputeFeedbackRequirements(isolate());
if (reqs.slots() > 0) {
node->SetFirstFeedbackSlot(FeedbackVectorSlot(properties_.slots()));
properties_.increase_slots(reqs.slots());
}
if (reqs.ic_slots() > 0) {
int ic_slots = properties_.ic_slots();
node->SetFirstFeedbackICSlot(FeedbackVectorICSlot(ic_slots));
properties_.increase_ic_slots(reqs.ic_slots());
if (FLAG_vector_ics) {
for (int i = 0; i < reqs.ic_slots(); i++) {
properties_.SetKind(ic_slots + i, node->FeedbackICSlotKind(i));
}
}
}
}
BailoutReason dont_optimize_reason() const {
return (dont_turbofan_reason_ != kNoReason) ? dont_turbofan_reason_
: dont_crankshaft_reason_;
}
int next_id_;
AstProperties properties_;
BailoutReason dont_crankshaft_reason_;
BailoutReason dont_turbofan_reason_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(AstNumberingVisitor);
@ -57,12 +109,14 @@ void AstNumberingVisitor::VisitVariableDeclaration(VariableDeclaration* node) {
void AstNumberingVisitor::VisitExportDeclaration(ExportDeclaration* node) {
IncrementNodeCount();
DisableCrankshaft(kExportDeclaration);
VisitVariableProxy(node->proxy());
}
void AstNumberingVisitor::VisitModuleUrl(ModuleUrl* node) {
IncrementNodeCount();
DisableCrankshaft(kModuleUrl);
}
@ -83,6 +137,7 @@ void AstNumberingVisitor::VisitBreakStatement(BreakStatement* node) {
void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
IncrementNodeCount();
DisableCrankshaft(kDebuggerStatement);
node->set_base_id(ReserveIdRange(DebuggerStatement::num_ids()));
}
@ -90,6 +145,7 @@ void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
void AstNumberingVisitor::VisitNativeFunctionLiteral(
NativeFunctionLiteral* node) {
IncrementNodeCount();
DisableCrankshaft(kNativeFunctionLiteral);
node->set_base_id(ReserveIdRange(NativeFunctionLiteral::num_ids()));
}
@ -108,6 +164,10 @@ void AstNumberingVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node) {
IncrementNodeCount();
if (node->var()->IsLookupSlot()) {
DisableCrankshaft(kReferenceToAVariableWhichRequiresDynamicLookup);
}
ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(VariableProxy::num_ids()));
}
@ -120,6 +180,8 @@ void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
void AstNumberingVisitor::VisitSuperReference(SuperReference* node) {
IncrementNodeCount();
DisableTurbofan(kSuperReference);
ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(SuperReference::num_ids()));
Visit(node->this_var());
}
@ -127,6 +189,7 @@ void AstNumberingVisitor::VisitSuperReference(SuperReference* node) {
void AstNumberingVisitor::VisitModuleDeclaration(ModuleDeclaration* node) {
IncrementNodeCount();
DisableCrankshaft(kModuleDeclaration);
VisitVariableProxy(node->proxy());
Visit(node->module());
}
@ -134,6 +197,7 @@ void AstNumberingVisitor::VisitModuleDeclaration(ModuleDeclaration* node) {
void AstNumberingVisitor::VisitImportDeclaration(ImportDeclaration* node) {
IncrementNodeCount();
DisableCrankshaft(kImportDeclaration);
VisitVariableProxy(node->proxy());
Visit(node->module());
}
@ -141,18 +205,21 @@ void AstNumberingVisitor::VisitImportDeclaration(ImportDeclaration* node) {
void AstNumberingVisitor::VisitModuleVariable(ModuleVariable* node) {
IncrementNodeCount();
DisableCrankshaft(kModuleVariable);
Visit(node->proxy());
}
void AstNumberingVisitor::VisitModulePath(ModulePath* node) {
IncrementNodeCount();
DisableCrankshaft(kModulePath);
Visit(node->module());
}
void AstNumberingVisitor::VisitModuleStatement(ModuleStatement* node) {
IncrementNodeCount();
DisableCrankshaft(kModuleStatement);
Visit(node->body());
}
@ -171,6 +238,8 @@ void AstNumberingVisitor::VisitReturnStatement(ReturnStatement* node) {
void AstNumberingVisitor::VisitYield(Yield* node) {
IncrementNodeCount();
DisableCrankshaft(kYield);
ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(Yield::num_ids()));
Visit(node->generator_object());
Visit(node->expression());
@ -215,12 +284,18 @@ void AstNumberingVisitor::VisitFunctionDeclaration(FunctionDeclaration* node) {
void AstNumberingVisitor::VisitModuleLiteral(ModuleLiteral* node) {
IncrementNodeCount();
DisableCaching(kModuleLiteral);
VisitBlock(node->body());
}
void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
IncrementNodeCount();
ReserveFeedbackSlots(node);
if (node->is_jsruntime()) {
// Don't try to optimize JS runtime calls because we bailout on them.
DisableCrankshaft(kCallToAJavaScriptRuntimeFunction);
}
node->set_base_id(ReserveIdRange(CallRuntime::num_ids()));
VisitArguments(node->arguments());
}
@ -228,6 +303,7 @@ void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
IncrementNodeCount();
DisableCrankshaft(kWithStatement);
Visit(node->expression());
Visit(node->statement());
}
@ -235,6 +311,7 @@ void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
node->set_base_id(ReserveIdRange(DoWhileStatement::num_ids()));
Visit(node->body());
Visit(node->cond());
@ -243,6 +320,7 @@ void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
node->set_base_id(ReserveIdRange(WhileStatement::num_ids()));
Visit(node->cond());
Visit(node->body());
@ -251,6 +329,7 @@ void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
IncrementNodeCount();
DisableTurbofan(kTryCatchStatement);
Visit(node->try_block());
Visit(node->catch_block());
}
@ -258,6 +337,7 @@ void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
IncrementNodeCount();
DisableTurbofan(kTryFinallyStatement);
Visit(node->try_block());
Visit(node->finally_block());
}
@ -265,6 +345,7 @@ void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
void AstNumberingVisitor::VisitProperty(Property* node) {
IncrementNodeCount();
ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(Property::num_ids()));
Visit(node->key());
Visit(node->obj());
@ -298,6 +379,8 @@ void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(ForInStatement::num_ids()));
Visit(node->each());
Visit(node->enumerable());
@ -307,6 +390,7 @@ void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
IncrementNodeCount();
DisableTurbofan(kForOfStatement);
node->set_base_id(ReserveIdRange(ForOfStatement::num_ids()));
Visit(node->assign_iterator());
Visit(node->next_result());
@ -357,6 +441,7 @@ void AstNumberingVisitor::VisitCaseClause(CaseClause* node) {
void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
node->set_base_id(ReserveIdRange(ForStatement::num_ids()));
if (node->init() != NULL) Visit(node->init());
if (node->cond() != NULL) Visit(node->cond());
@ -367,9 +452,13 @@ void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
IncrementNodeCount();
DisableTurbofan(kClassLiteral);
node->set_base_id(ReserveIdRange(ClassLiteral::num_ids()));
if (node->extends()) Visit(node->extends());
if (node->constructor()) Visit(node->constructor());
if (node->class_variable_proxy()) {
VisitVariableProxy(node->class_variable_proxy());
}
for (int i = 0; i < node->properties()->length(); i++) {
VisitObjectLiteralProperty(node->properties()->at(i));
}
@ -403,6 +492,7 @@ void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
void AstNumberingVisitor::VisitCall(Call* node) {
IncrementNodeCount();
ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(Call::num_ids()));
Visit(node->expression());
VisitArguments(node->arguments());
@ -411,6 +501,7 @@ void AstNumberingVisitor::VisitCall(Call* node) {
void AstNumberingVisitor::VisitCallNew(CallNew* node) {
IncrementNodeCount();
ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(CallNew::num_ids()));
Visit(node->expression());
VisitArguments(node->arguments());
@ -448,17 +539,26 @@ void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
}
void AstNumberingVisitor::Renumber(FunctionLiteral* node) {
properties_.flags()->Add(*node->flags());
properties_.increase_feedback_slots(node->slot_count());
properties_.increase_ic_feedback_slots(node->ic_slot_count());
bool AstNumberingVisitor::Finish(FunctionLiteral* node) {
node->set_ast_properties(&properties_);
node->set_dont_optimize_reason(dont_optimize_reason());
return !HasStackOverflow();
}
if (node->scope()->HasIllegalRedeclaration()) {
node->scope()->VisitIllegalRedeclaration(this);
return;
}
bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
Scope* scope = node->scope();
if (scope->HasIllegalRedeclaration()) {
scope->VisitIllegalRedeclaration(this);
DisableCrankshaft(kFunctionWithIllegalRedeclaration);
return Finish(node);
}
if (scope->calls_eval()) DisableCrankshaft(kFunctionCallsEval);
if (scope->arguments() != NULL && !scope->arguments()->IsStackAllocated()) {
DisableCrankshaft(kContextAllocatedArguments);
}
VisitDeclarations(scope->declarations());
if (scope->is_function_scope() && scope->function() != NULL) {
// Visit the name of the named function expression.
@ -466,14 +566,13 @@ void AstNumberingVisitor::Renumber(FunctionLiteral* node) {
}
VisitStatements(node->body());
node->set_ast_properties(&properties_);
return Finish(node);
}
bool AstNumbering::Renumber(FunctionLiteral* function, Zone* zone) {
AstNumberingVisitor visitor(zone);
visitor.Renumber(function);
return !visitor.HasStackOverflow();
return visitor.Renumber(function);
}
}
} // namespace v8::internal
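ast-numbering.cc now owns the bookkeeping that the deleted AstConstructionVisitor macros in ast.cc used to generate: per-node bailout reasons (DisableCrankshaft/DisableTurbofan, with the turbofan reason taking precedence) and feedback-vector slot reservation. A toy model of the ReserveFeedbackSlots counter above (names are illustrative, not the real visitor API):

```cpp
#include <cstdio>

// Each node requests a number of slots and receives the index of the
// first one; the running total later sizes the feedback vector.
struct SlotAllocator {
  int next_slot = 0;
  int Reserve(int count) {
    int first = next_slot;
    next_slot += count;
    return first;
  }
};

int main() {
  SlotAllocator ic_slots;
  int call_slot = ic_slots.Reserve(1);    // e.g. a Call node wants one slot
  int for_in_slot = ic_slots.Reserve(2);  // a ForIn node might want two
  std::printf("call=%d for_in=%d total=%d\n", call_slot, for_in_slot,
              ic_slots.next_slot);
}
```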

239
deps/v8/src/ast-this-access-visitor.cc

@ -0,0 +1,239 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/ast-this-access-visitor.h"
#include "src/parser.h"
namespace v8 {
namespace internal {
typedef class AstThisAccessVisitor ATAV; // for code brevity.
ATAV::AstThisAccessVisitor(Zone* zone) : uses_this_(false) {
InitializeAstVisitor(zone);
}
void ATAV::VisitVariableProxy(VariableProxy* proxy) {
if (proxy->is_this()) {
uses_this_ = true;
}
}
void ATAV::VisitSuperReference(SuperReference* leaf) {
// disallow super.method() and super(...).
uses_this_ = true;
}
void ATAV::VisitCallNew(CallNew* e) {
// new super(..) does not use 'this'.
if (!e->expression()->IsSuperReference()) {
Visit(e->expression());
}
VisitExpressions(e->arguments());
}
// ---------------------------------------------------------------------------
// -- Leaf nodes -------------------------------------------------------------
// ---------------------------------------------------------------------------
void ATAV::VisitVariableDeclaration(VariableDeclaration* leaf) {}
void ATAV::VisitFunctionDeclaration(FunctionDeclaration* leaf) {}
void ATAV::VisitModuleDeclaration(ModuleDeclaration* leaf) {}
void ATAV::VisitImportDeclaration(ImportDeclaration* leaf) {}
void ATAV::VisitExportDeclaration(ExportDeclaration* leaf) {}
void ATAV::VisitModuleVariable(ModuleVariable* leaf) {}
void ATAV::VisitModulePath(ModulePath* leaf) {}
void ATAV::VisitModuleUrl(ModuleUrl* leaf) {}
void ATAV::VisitEmptyStatement(EmptyStatement* leaf) {}
void ATAV::VisitContinueStatement(ContinueStatement* leaf) {}
void ATAV::VisitBreakStatement(BreakStatement* leaf) {}
void ATAV::VisitDebuggerStatement(DebuggerStatement* leaf) {}
void ATAV::VisitFunctionLiteral(FunctionLiteral* leaf) {}
void ATAV::VisitNativeFunctionLiteral(NativeFunctionLiteral* leaf) {}
void ATAV::VisitLiteral(Literal* leaf) {}
void ATAV::VisitRegExpLiteral(RegExpLiteral* leaf) {}
void ATAV::VisitThisFunction(ThisFunction* leaf) {}
// ---------------------------------------------------------------------------
// -- Pass-through nodes -----------------------------------------------------
// ---------------------------------------------------------------------------
void ATAV::VisitModuleLiteral(ModuleLiteral* e) { Visit(e->body()); }
void ATAV::VisitBlock(Block* stmt) { VisitStatements(stmt->statements()); }
void ATAV::VisitExpressionStatement(ExpressionStatement* stmt) {
Visit(stmt->expression());
}
void ATAV::VisitIfStatement(IfStatement* stmt) {
Visit(stmt->condition());
Visit(stmt->then_statement());
Visit(stmt->else_statement());
}
void ATAV::VisitReturnStatement(ReturnStatement* stmt) {
Visit(stmt->expression());
}
void ATAV::VisitWithStatement(WithStatement* stmt) {
Visit(stmt->expression());
Visit(stmt->statement());
}
void ATAV::VisitSwitchStatement(SwitchStatement* stmt) {
Visit(stmt->tag());
ZoneList<CaseClause*>* clauses = stmt->cases();
for (int i = 0; i < clauses->length(); i++) {
Visit(clauses->at(i));
}
}
void ATAV::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
Visit(stmt->try_block());
Visit(stmt->finally_block());
}
void ATAV::VisitClassLiteral(ClassLiteral* e) {
VisitIfNotNull(e->extends());
Visit(e->constructor());
ZoneList<ObjectLiteralProperty*>* properties = e->properties();
for (int i = 0; i < properties->length(); i++) {
Visit(properties->at(i)->value());
}
}
void ATAV::VisitConditional(Conditional* e) {
Visit(e->condition());
Visit(e->then_expression());
Visit(e->else_expression());
}
void ATAV::VisitObjectLiteral(ObjectLiteral* e) {
ZoneList<ObjectLiteralProperty*>* properties = e->properties();
for (int i = 0; i < properties->length(); i++) {
Visit(properties->at(i)->value());
}
}
void ATAV::VisitArrayLiteral(ArrayLiteral* e) { VisitExpressions(e->values()); }
void ATAV::VisitYield(Yield* stmt) {
Visit(stmt->generator_object());
Visit(stmt->expression());
}
void ATAV::VisitThrow(Throw* stmt) { Visit(stmt->exception()); }
void ATAV::VisitProperty(Property* e) {
Visit(e->obj());
Visit(e->key());
}
void ATAV::VisitCall(Call* e) {
Visit(e->expression());
VisitExpressions(e->arguments());
}
void ATAV::VisitCallRuntime(CallRuntime* e) {
VisitExpressions(e->arguments());
}
void ATAV::VisitUnaryOperation(UnaryOperation* e) { Visit(e->expression()); }
void ATAV::VisitBinaryOperation(BinaryOperation* e) {
Visit(e->left());
Visit(e->right());
}
void ATAV::VisitCompareOperation(CompareOperation* e) {
Visit(e->left());
Visit(e->right());
}
void ATAV::VisitCaseClause(CaseClause* cc) {
if (!cc->is_default()) Visit(cc->label());
VisitStatements(cc->statements());
}
void ATAV::VisitModuleStatement(ModuleStatement* stmt) { Visit(stmt->body()); }
void ATAV::VisitTryCatchStatement(TryCatchStatement* stmt) {
Visit(stmt->try_block());
Visit(stmt->catch_block());
}
void ATAV::VisitDoWhileStatement(DoWhileStatement* loop) {
Visit(loop->body());
Visit(loop->cond());
}
void ATAV::VisitWhileStatement(WhileStatement* loop) {
Visit(loop->cond());
Visit(loop->body());
}
void ATAV::VisitForStatement(ForStatement* loop) {
VisitIfNotNull(loop->init());
VisitIfNotNull(loop->cond());
Visit(loop->body());
VisitIfNotNull(loop->next());
}
void ATAV::VisitForInStatement(ForInStatement* loop) {
Visit(loop->each());
Visit(loop->subject());
Visit(loop->body());
}
void ATAV::VisitForOfStatement(ForOfStatement* loop) {
Visit(loop->each());
Visit(loop->subject());
Visit(loop->body());
}
void ATAV::VisitAssignment(Assignment* stmt) {
Expression* l = stmt->target();
Visit(l);
Visit(stmt->value());
}
void ATAV::VisitCountOperation(CountOperation* e) {
Expression* l = e->expression();
Visit(l);
}
}
} // namespace v8::internal
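The new visitor answers a single question: does a function body touch `this`, including via super references, which need the receiver? Note that VisitFunctionLiteral is deliberately a leaf, so nested functions do not count toward the enclosing one. A minimal sketch of the same latch-a-flag tree walk over hypothetical node types:

```cpp
#include <cstdio>
#include <memory>
#include <vector>

struct Node {
  bool is_this = false;  // models a VariableProxy with is_this()
  std::vector<std::unique_ptr<Node>> children;
};

// Latch a flag as soon as any node in the tree references 'this'.
static bool UsesThis(const Node& n) {
  if (n.is_this) return true;
  for (const auto& child : n.children) {
    if (UsesThis(*child)) return true;
  }
  return false;
}

int main() {
  Node root;
  root.children.push_back(std::make_unique<Node>());
  root.children[0]->is_this = true;
  std::printf("%d\n", UsesThis(root));  // prints 1
}
```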

34
deps/v8/src/ast-this-access-visitor.h

@ -0,0 +1,34 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_AST_THIS_ACCESS_VISITOR_H_
#define V8_AST_THIS_ACCESS_VISITOR_H_
#include "src/ast.h"
namespace v8 {
namespace internal {
class AstThisAccessVisitor : public AstVisitor {
public:
explicit AstThisAccessVisitor(Zone* zone);
bool UsesThis() { return uses_this_; }
#define DECLARE_VISIT(type) void Visit##type(type* node) OVERRIDE;
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
private:
bool uses_this_;
void VisitIfNotNull(AstNode* node) {
if (node != NULL) Visit(node);
}
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(AstThisAccessVisitor);
};
}
} // namespace v8::internal
#endif // V8_AST_THIS_ACCESS_VISITOR_H_

15
deps/v8/src/ast-value-factory.cc

@ -56,22 +56,20 @@ class AstRawStringInternalizationKey : public HashTableKey {
explicit AstRawStringInternalizationKey(const AstRawString* string)
: string_(string) {}
virtual bool IsMatch(Object* other) OVERRIDE {
bool IsMatch(Object* other) OVERRIDE {
if (string_->is_one_byte_)
return String::cast(other)->IsOneByteEqualTo(string_->literal_bytes_);
return String::cast(other)->IsTwoByteEqualTo(
Vector<const uint16_t>::cast(string_->literal_bytes_));
}
virtual uint32_t Hash() OVERRIDE {
return string_->hash() >> Name::kHashShift;
}
uint32_t Hash() OVERRIDE { return string_->hash() >> Name::kHashShift; }
virtual uint32_t HashForObject(Object* key) OVERRIDE {
uint32_t HashForObject(Object* key) OVERRIDE {
return String::cast(key)->Hash();
}
virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
if (string_->is_one_byte_)
return isolate->factory()->NewOneByteInternalizedString(
string_->literal_bytes_, string_->hash());
@ -182,9 +180,8 @@ void AstValue::Internalize(Isolate* isolate) {
DCHECK(!string_->string().is_null());
break;
case SYMBOL:
value_ = Object::GetProperty(
isolate, handle(isolate->native_context()->builtins()),
symbol_name_).ToHandleChecked();
DCHECK_EQ(0, strcmp(symbol_name_, "iterator_symbol"));
value_ = isolate->factory()->iterator_symbol();
break;
case NUMBER:
value_ = isolate->factory()->NewNumber(number_, TENURED);
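Internalization keys like AstRawStringInternalizationKey above implement a small contract: match a candidate table entry, hash the key itself, and hash stored entries symmetrically. A stripped-down model of that contract over plain C strings (stand-ins for V8's HashTableKey interface, with an assumed 31-multiplier hash):

```cpp
#include <cstdio>
#include <cstring>

struct StringKey {
  const char* s;
  // IsMatch: does this key equal an entry already stored in the table?
  bool IsMatch(const char* entry) const { return std::strcmp(s, entry) == 0; }
  // Hash: hash of the key itself; HashForObject would hash stored entries
  // the same way so probing stays consistent.
  unsigned Hash() const {
    unsigned h = 0;
    for (const char* p = s; *p; ++p) h = h * 31 + (unsigned char)*p;
    return h;
  }
};

int main() {
  StringKey key{"iterator_symbol"};
  std::printf("%u %d\n", key.Hash(), key.IsMatch("iterator_symbol"));
}
```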

12
deps/v8/src/ast-value-factory.h

@ -64,13 +64,13 @@ class AstString : public ZoneObject {
class AstRawString : public AstString {
public:
virtual int length() const OVERRIDE {
int length() const OVERRIDE {
if (is_one_byte_)
return literal_bytes_.length();
return literal_bytes_.length() / 2;
}
virtual void Internalize(Isolate* isolate) OVERRIDE;
void Internalize(Isolate* isolate) OVERRIDE;
bool AsArrayIndex(uint32_t* index) const;
@ -124,11 +124,9 @@ class AstConsString : public AstString {
: left_(left),
right_(right) {}
virtual int length() const OVERRIDE {
return left_->length() + right_->length();
}
int length() const OVERRIDE { return left_->length() + right_->length(); }
virtual void Internalize(Isolate* isolate) OVERRIDE;
void Internalize(Isolate* isolate) OVERRIDE;
private:
friend class AstValueFactory;
@ -248,8 +246,10 @@ class AstValue : public ZoneObject {
F(dot_result, ".result") \
F(empty, "") \
F(eval, "eval") \
F(get_template_callsite, "GetTemplateCallSite") \
F(initialize_const_global, "initializeConstGlobal") \
F(initialize_var_global, "initializeVarGlobal") \
F(let, "let") \
F(make_reference_error, "MakeReferenceErrorEmbedded") \
F(make_syntax_error, "MakeSyntaxErrorEmbedded") \
F(make_type_error, "MakeTypeErrorEmbedded") \

149
deps/v8/src/ast.cc

@ -151,6 +151,21 @@ StrictMode FunctionLiteral::strict_mode() const {
}
bool FunctionLiteral::uses_super_property() const {
DCHECK_NOT_NULL(scope());
return scope()->uses_super_property() || scope()->inner_uses_super_property();
}
bool FunctionLiteral::uses_super_constructor_call() const {
DCHECK_NOT_NULL(scope());
return scope()->uses_super_constructor_call() ||
scope()->inner_uses_super_constructor_call();
}
// Helper to find an existing shared function info in the baseline code for the
// given function literal. Used to canonicalize SharedFunctionInfo objects.
void FunctionLiteral::InitializeSharedInfo(
Handle<Code> unoptimized_code) {
for (RelocIterator it(*unoptimized_code); !it.done(); it.next()) {
@ -567,6 +582,12 @@ bool Call::IsUsingCallFeedbackSlot(Isolate* isolate) const {
}
FeedbackVectorRequirements Call::ComputeFeedbackRequirements(Isolate* isolate) {
int ic_slots = IsUsingCallFeedbackSlot(isolate) ? 1 : 0;
return FeedbackVectorRequirements(0, ic_slots);
}
Call::CallType Call::GetCallType(Isolate* isolate) const {
VariableProxy* proxy = expression()->AsVariableProxy();
if (proxy != NULL) {
@ -995,134 +1016,6 @@ CaseClause::CaseClause(Zone* zone, Expression* label,
compare_type_(Type::None(zone)) {}
#define REGULAR_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
}
#define REGULAR_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
add_slot_node(node); \
}
#define DONT_OPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
set_dont_crankshaft_reason(k##NodeType); \
add_flag(kDontSelfOptimize); \
}
#define DONT_OPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
add_slot_node(node); \
set_dont_crankshaft_reason(k##NodeType); \
add_flag(kDontSelfOptimize); \
}
#define DONT_TURBOFAN_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
set_dont_crankshaft_reason(k##NodeType); \
set_dont_turbofan_reason(k##NodeType); \
add_flag(kDontSelfOptimize); \
}
#define DONT_TURBOFAN_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
add_slot_node(node); \
set_dont_crankshaft_reason(k##NodeType); \
set_dont_turbofan_reason(k##NodeType); \
add_flag(kDontSelfOptimize); \
}
#define DONT_SELFOPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
add_flag(kDontSelfOptimize); \
}
#define DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
add_slot_node(node); \
add_flag(kDontSelfOptimize); \
}
#define DONT_CACHE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
set_dont_crankshaft_reason(k##NodeType); \
add_flag(kDontSelfOptimize); \
add_flag(kDontCache); \
}
REGULAR_NODE(VariableDeclaration)
REGULAR_NODE(FunctionDeclaration)
REGULAR_NODE(Block)
REGULAR_NODE(ExpressionStatement)
REGULAR_NODE(EmptyStatement)
REGULAR_NODE(IfStatement)
REGULAR_NODE(ContinueStatement)
REGULAR_NODE(BreakStatement)
REGULAR_NODE(ReturnStatement)
REGULAR_NODE(SwitchStatement)
REGULAR_NODE(CaseClause)
REGULAR_NODE(Conditional)
REGULAR_NODE(Literal)
REGULAR_NODE(ArrayLiteral)
REGULAR_NODE(ObjectLiteral)
REGULAR_NODE(RegExpLiteral)
REGULAR_NODE(FunctionLiteral)
REGULAR_NODE(Assignment)
REGULAR_NODE(Throw)
REGULAR_NODE(UnaryOperation)
REGULAR_NODE(CountOperation)
REGULAR_NODE(BinaryOperation)
REGULAR_NODE(CompareOperation)
REGULAR_NODE(ThisFunction)
REGULAR_NODE_WITH_FEEDBACK_SLOTS(Call)
REGULAR_NODE_WITH_FEEDBACK_SLOTS(CallNew)
REGULAR_NODE_WITH_FEEDBACK_SLOTS(Property)
// In theory, for VariableProxy we'd have to add:
// if (node->var()->IsLookupSlot())
// set_dont_optimize_reason(kReferenceToAVariableWhichRequiresDynamicLookup);
// But node->var() is usually not bound yet at VariableProxy creation time, and
// LOOKUP variables only result from constructs that cannot be inlined anyway.
REGULAR_NODE_WITH_FEEDBACK_SLOTS(VariableProxy)
// We currently do not optimize any modules.
DONT_OPTIMIZE_NODE(ModuleDeclaration)
DONT_OPTIMIZE_NODE(ImportDeclaration)
DONT_OPTIMIZE_NODE(ExportDeclaration)
DONT_OPTIMIZE_NODE(ModuleVariable)
DONT_OPTIMIZE_NODE(ModulePath)
DONT_OPTIMIZE_NODE(ModuleUrl)
DONT_OPTIMIZE_NODE(ModuleStatement)
DONT_OPTIMIZE_NODE(WithStatement)
DONT_OPTIMIZE_NODE(DebuggerStatement)
DONT_OPTIMIZE_NODE(NativeFunctionLiteral)
DONT_OPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(Yield)
// TODO(turbofan): Remove the dont_turbofan_reason once this list is empty.
// This list must be kept in sync with Pipeline::GenerateCode.
DONT_TURBOFAN_NODE(ForOfStatement)
DONT_TURBOFAN_NODE(TryCatchStatement)
DONT_TURBOFAN_NODE(TryFinallyStatement)
DONT_TURBOFAN_NODE(ClassLiteral)
DONT_TURBOFAN_NODE_WITH_FEEDBACK_SLOTS(SuperReference)
DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)
DONT_SELFOPTIMIZE_NODE(ForStatement)
DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(ForInStatement)
DONT_CACHE_NODE(ModuleLiteral)
void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
add_slot_node(node);
if (node->is_jsruntime()) {
// Don't try to optimize JS runtime calls because we bailout on them.
set_dont_crankshaft_reason(kCallToAJavaScriptRuntimeFunction);
}
}
#undef REGULAR_NODE
#undef DONT_OPTIMIZE_NODE
#undef DONT_SELFOPTIMIZE_NODE
#undef DONT_CACHE_NODE
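For clarity, one expansion of the table above: REGULAR_NODE_WITH_FEEDBACK_SLOTS(Call) emits a visitor method whose only job is to record the node as needing feedback vector slots.

// Expansion of REGULAR_NODE_WITH_FEEDBACK_SLOTS(Call); shown for
// illustration, this is exactly what the macro above generates.
void AstConstructionVisitor::VisitCall(Call* node) {
  add_slot_node(node);
}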
uint32_t Literal::Hash() {
return raw_value()->IsString()
? raw_value()->AsString()->hash()

835
deps/v8/src/ast.h

File diff suppressed because it is too large

86
deps/v8/src/base/cpu.cc

@@ -291,32 +291,36 @@ static bool HasListItem(const char* list, const char* item) {
#endif // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
CPU::CPU() : stepping_(0),
model_(0),
ext_model_(0),
family_(0),
ext_family_(0),
type_(0),
implementer_(0),
architecture_(0),
part_(0),
has_fpu_(false),
has_cmov_(false),
has_sahf_(false),
has_mmx_(false),
has_sse_(false),
has_sse2_(false),
has_sse3_(false),
has_ssse3_(false),
has_sse41_(false),
has_sse42_(false),
has_idiva_(false),
has_neon_(false),
has_thumb2_(false),
has_vfp_(false),
has_vfp3_(false),
has_vfp3_d32_(false),
is_fp64_mode_(false) {
CPU::CPU()
: stepping_(0),
model_(0),
ext_model_(0),
family_(0),
ext_family_(0),
type_(0),
implementer_(0),
architecture_(0),
variant_(-1),
part_(0),
has_fpu_(false),
has_cmov_(false),
has_sahf_(false),
has_mmx_(false),
has_sse_(false),
has_sse2_(false),
has_sse3_(false),
has_ssse3_(false),
has_sse41_(false),
has_sse42_(false),
has_avx_(false),
has_fma3_(false),
has_idiva_(false),
has_neon_(false),
has_thumb2_(false),
has_vfp_(false),
has_vfp3_(false),
has_vfp3_d32_(false),
is_fp64_mode_(false) {
memcpy(vendor_, "Unknown", 8);
#if V8_OS_NACL
// Portable host shouldn't do feature detection.
@@ -356,6 +360,8 @@ CPU::CPU() : stepping_(0),
has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
has_avx_ = (cpu_info[2] & 0x10000000) != 0;
if (has_avx_) has_fma3_ = (cpu_info[2] & 0x00001000) != 0;
}
#if V8_HOST_ARCH_IA32
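The masks above test CPUID leaf 1 feature bits in ECX: bit 28 (0x10000000) reports AVX and bit 12 (0x00001000) reports FMA3, which is only meaningful when AVX is present, hence the nested check. A standalone sketch of the same tests on a raw ECX value:

#include <cstdint>
#include <cstdio>

// Hypothetical helpers mirroring the masks above; not part of the diff.
static bool HasAvx(uint32_t ecx) { return (ecx & 0x10000000u) != 0; }   // bit 28
static bool HasFma3(uint32_t ecx) { return (ecx & 0x00001000u) != 0; }  // bit 12

int main() {
  uint32_t ecx = 0x10001000u;       // Example value with both bits set.
  bool avx = HasAvx(ecx);
  bool fma3 = avx && HasFma3(ecx);  // Same gating as the constructor above.
  std::printf("avx=%d fma3=%d\n", avx, fma3);
  return 0;
}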
@@ -383,7 +389,7 @@ CPU::CPU() : stepping_(0),
// Extract implementor from the "CPU implementer" field.
char* implementer = cpu_info.ExtractField("CPU implementer");
if (implementer != NULL) {
char* end ;
char* end;
implementer_ = strtol(implementer, &end, 0);
if (end == implementer) {
implementer_ = 0;
@@ -391,10 +397,20 @@ CPU::CPU() : stepping_(0),
delete[] implementer;
}
char* variant = cpu_info.ExtractField("CPU variant");
if (variant != NULL) {
char* end;
variant_ = strtol(variant, &end, 0);
if (end == variant) {
variant_ = -1;
}
delete[] variant;
}
// Extract part number from the "CPU part" field.
char* part = cpu_info.ExtractField("CPU part");
if (part != NULL) {
char* end ;
char* end;
part_ = strtol(part, &end, 0);
if (end == part) {
part_ = 0;
@@ -535,7 +551,7 @@ CPU::CPU() : stepping_(0),
// Extract implementor from the "CPU implementer" field.
char* implementer = cpu_info.ExtractField("CPU implementer");
if (implementer != NULL) {
char* end ;
char* end;
implementer_ = strtol(implementer, &end, 0);
if (end == implementer) {
implementer_ = 0;
@@ -543,10 +559,20 @@ CPU::CPU() : stepping_(0),
delete[] implementer;
}
char* variant = cpu_info.ExtractField("CPU variant");
if (variant != NULL) {
char* end;
variant_ = strtol(variant, &end, 0);
if (end == variant) {
variant_ = -1;
}
delete[] variant;
}
// Extract part number from the "CPU part" field.
char* part = cpu_info.ExtractField("CPU part");
if (part != NULL) {
char* end ;
char* end;
part_ = strtol(part, &end, 0);
if (end == part) {
part_ = 0;

7
deps/v8/src/base/cpu.h

@@ -47,6 +47,8 @@ class CPU FINAL {
static const int NVIDIA = 0x4e;
static const int QUALCOMM = 0x51;
int architecture() const { return architecture_; }
int variant() const { return variant_; }
static const int NVIDIA_DENVER = 0x0;
int part() const { return part_; }
static const int ARM_CORTEX_A5 = 0xc05;
static const int ARM_CORTEX_A7 = 0xc07;
@@ -68,6 +70,8 @@ class CPU FINAL {
bool has_ssse3() const { return has_ssse3_; }
bool has_sse41() const { return has_sse41_; }
bool has_sse42() const { return has_sse42_; }
bool has_avx() const { return has_avx_; }
bool has_fma3() const { return has_fma3_; }
// arm features
bool has_idiva() const { return has_idiva_; }
@@ -90,6 +94,7 @@ class CPU FINAL {
int type_;
int implementer_;
int architecture_;
int variant_;
int part_;
bool has_fpu_;
bool has_cmov_;
@@ -101,6 +106,8 @@ class CPU FINAL {
bool has_ssse3_;
bool has_sse41_;
bool has_sse42_;
bool has_avx_;
bool has_fma3_;
bool has_idiva_;
bool has_neon_;
bool has_thumb2_;

56
deps/v8/src/base/iterator.h

@@ -0,0 +1,56 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASE_ITERATOR_H_
#define V8_BASE_ITERATOR_H_
#include <iterator>
#include "src/base/macros.h"
namespace v8 {
namespace base {
// The intention of the base::iterator_range class is to encapsulate two
// iterators so that the range defined by the iterators can be used like
// a regular STL container (in practice only a subset of the full container
// functionality is available).
template <typename ForwardIterator>
class iterator_range {
public:
typedef ForwardIterator iterator;
typedef ForwardIterator const_iterator;
typedef typename std::iterator_traits<iterator>::pointer pointer;
typedef typename std::iterator_traits<iterator>::reference reference;
typedef typename std::iterator_traits<iterator>::value_type value_type;
typedef
typename std::iterator_traits<iterator>::difference_type difference_type;
iterator_range() : begin_(), end_() {}
template <typename ForwardIterator2>
iterator_range(ForwardIterator2 const& begin, ForwardIterator2 const& end)
: begin_(begin), end_(end) {}
iterator begin() { return begin_; }
iterator end() { return end_; }
const_iterator begin() const { return begin_; }
const_iterator end() const { return end_; }
const_iterator cbegin() const { return begin_; }
const_iterator cend() const { return end_; }
bool empty() const { return cbegin() == cend(); }
// Random Access iterators only.
reference operator[](difference_type n) { return begin()[n]; }
difference_type size() const { return cend() - cbegin(); }
private:
const_iterator const begin_;
const_iterator const end_;
};
} // namespace base
} // namespace v8
#endif // V8_BASE_ITERATOR_H_
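A minimal usage sketch for the new header (assuming src/base/iterator.h is on the include path; illustrative, not part of this diff):

#include <cstdio>
#include <vector>

#include "src/base/iterator.h"

int main() {
  std::vector<int> v = {1, 2, 3, 4};
  v8::base::iterator_range<std::vector<int>::iterator> range(v.begin(),
                                                             v.end());
  for (int x : range) std::printf("%d ", x);  // Iterates like a container.
  // size() and operator[] work here because vector iterators are
  // random access.
  std::printf("\nsize=%d first=%d\n", static_cast<int>(range.size()),
              range[0]);
  return 0;
}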

6
deps/v8/src/base/macros.h

@@ -20,9 +20,9 @@
// corresponds to 'offsetof' (in stddef.h), except that it doesn't
// use 0 or NULL, which causes a problem with the compiler warnings
// we have enabled (which is also why 'offsetof' doesn't seem to work).
// Here we simply use the non-zero value 4, which seems to work.
#define OFFSET_OF(type, field) \
(reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4)
// Here we simply use the aligned, non-zero value 16.
#define OFFSET_OF(type, field) \
(reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(16)->field)) - 16)
#if V8_OS_NACL
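The macro fakes an object at a small non-zero address and subtracts that base back out; bumping the base from 4 to 16 keeps the fake pointer aligned for fields whose types need 8- or 16-byte alignment. A self-contained illustration with a hypothetical struct:

#include <cstdint>
#include <cstdio>

#define OFFSET_OF(type, field) \
  (reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(16)->field)) - 16)

struct Sample {
  int32_t a;
  double b;  // 8-byte alignment: a base of 4 would misalign this field.
};

int main() {
  // Prints 8 on common ABIs: b follows a plus 4 bytes of padding.
  std::printf("%d\n", static_cast<int>(OFFSET_OF(Sample, b)));
  return 0;
}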

2
deps/v8/src/base/platform/platform-linux.cc

@@ -8,10 +8,10 @@
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <sys/types.h>
// Ubuntu Dapper requires memory pages to be marked as
// executable. Otherwise, OS raises an exception when executing code

3
deps/v8/src/base/platform/platform-posix.cc

@@ -13,6 +13,7 @@
#include <pthread_np.h> // for pthread_set_name_np
#endif
#include <sched.h> // for sched_yield
#include <stdio.h>
#include <time.h>
#include <unistd.h>
@@ -253,7 +254,7 @@ int OS::GetCurrentProcessId() {
int OS::GetCurrentThreadId() {
#if V8_OS_MACOSX
#if V8_OS_MACOSX || (V8_OS_ANDROID && defined(__APPLE__))
return static_cast<int>(pthread_mach_thread_np(pthread_self()));
#elif V8_OS_LINUX
return static_cast<int>(syscall(__NR_gettid));

43
deps/v8/src/base/platform/platform-win32.cc

@@ -346,26 +346,41 @@ void Win32Time::SetToCurrentTime() {
}
int64_t FileTimeToInt64(FILETIME ft) {
ULARGE_INTEGER result;
result.LowPart = ft.dwLowDateTime;
result.HighPart = ft.dwHighDateTime;
return static_cast<int64_t>(result.QuadPart);
}
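FileTimeToInt64 uses ULARGE_INTEGER to splice the two 32-bit halves of a FILETIME into one 64-bit tick count; the same result falls out of plain shifts (portable sketch, not Windows-specific):

#include <cstdint>
#include <cstdio>

static int64_t Combine(uint32_t low, uint32_t high) {
  return static_cast<int64_t>((static_cast<uint64_t>(high) << 32) | low);
}

int main() {
  // high = 1, low = 0 is exactly 2^32 = 4294967296 ticks.
  std::printf("%lld\n", static_cast<long long>(Combine(0, 1)));
  return 0;
}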
// Return the local timezone offset in milliseconds east of UTC. This
// takes into account whether daylight saving is in effect at the time.
// Only times in the 32-bit Unix range may be passed to this function.
// Also, adding the time-zone offset to the input must not overflow.
// The function EquivalentTime() in date.js guarantees this.
int64_t Win32Time::LocalOffset(TimezoneCache* cache) {
FILETIME local;
SYSTEMTIME system_utc, system_local;
FileTimeToSystemTime(&time_.ft_, &system_utc);
SystemTimeToTzSpecificLocalTime(NULL, &system_utc, &system_local);
SystemTimeToFileTime(&system_local, &local);
return (FileTimeToInt64(local) - FileTimeToInt64(time_.ft_)) / kTimeScaler;
cache->InitializeIfNeeded();
Win32Time rounded_to_second(*this);
rounded_to_second.t() =
rounded_to_second.t() / 1000 / kTimeScaler * 1000 * kTimeScaler;
// Convert to local time using POSIX localtime function.
// Windows XP Service Pack 3 made SystemTimeToTzSpecificLocalTime()
// very slow. Other browsers use localtime().
// Convert from JavaScript milliseconds past 1/1/1970 0:00:00 to
// POSIX seconds past 1/1/1970 0:00:00.
double unchecked_posix_time = rounded_to_second.ToJSTime() / 1000;
if (unchecked_posix_time > INT_MAX || unchecked_posix_time < 0) {
return 0;
}
// Because _USE_32BIT_TIME_T is defined, time_t is a 32-bit int.
time_t posix_time = static_cast<time_t>(unchecked_posix_time);
// Convert to local time, as struct with fields for day, hour, year, etc.
tm posix_local_time_struct;
if (localtime_s(&posix_local_time_struct, &posix_time)) return 0;
if (posix_local_time_struct.tm_isdst > 0) {
return (cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * -kMsPerMinute;
} else if (posix_local_time_struct.tm_isdst == 0) {
return (cache->tzinfo_.Bias + cache->tzinfo_.StandardBias) * -kMsPerMinute;
} else {
return cache->tzinfo_.Bias * -kMsPerMinute;
}
}
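The bias arithmetic at the end of LocalOffset is easy to check by hand: Windows reports Bias values in minutes west of UTC, so the sum must be negated to get milliseconds east of UTC. A sketch with assumed Pacific-time values:

#include <cstdio>

int main() {
  const long kMsPerMinute = 60 * 1000;
  long bias = 480;           // Assumed: Pacific time, 480 minutes west of UTC.
  long daylight_bias = -60;  // DST moves local time one hour forward.
  int tm_isdst = 1;          // As reported by localtime_s() above.
  long offset_ms =
      (tm_isdst > 0 ? bias + daylight_bias : bias) * -kMsPerMinute;
  std::printf("%ld\n", offset_ms);  // -25200000 ms, i.e. UTC-7 during DST.
  return 0;
}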

12
deps/v8/src/base/platform/time.cc

@@ -401,7 +401,7 @@ class HighResolutionTickClock FINAL : public TickClock {
}
virtual ~HighResolutionTickClock() {}
virtual int64_t Now() OVERRIDE {
int64_t Now() OVERRIDE {
LARGE_INTEGER now;
BOOL result = QueryPerformanceCounter(&now);
DCHECK(result);
@@ -419,9 +419,7 @@ class HighResolutionTickClock FINAL : public TickClock {
return ticks + 1;
}
virtual bool IsHighResolution() OVERRIDE {
return true;
}
bool IsHighResolution() OVERRIDE { return true; }
private:
int64_t ticks_per_second_;
@@ -435,7 +433,7 @@ class RolloverProtectedTickClock FINAL : public TickClock {
RolloverProtectedTickClock() : last_seen_now_(0), rollover_ms_(1) {}
virtual ~RolloverProtectedTickClock() {}
virtual int64_t Now() OVERRIDE {
int64_t Now() OVERRIDE {
LockGuard<Mutex> lock_guard(&mutex_);
// We use timeGetTime() to implement TimeTicks::Now(), which rolls over
// every ~49.7 days. We try to track rollover ourselves, which works if
@@ -454,9 +452,7 @@ class RolloverProtectedTickClock FINAL : public TickClock {
return (now + rollover_ms_) * Time::kMicrosecondsPerMillisecond;
}
virtual bool IsHighResolution() OVERRIDE {
return false;
}
bool IsHighResolution() OVERRIDE { return false; }
private:
Mutex mutex_;
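The rollover handling above reduces to a small invariant: a 32-bit millisecond counter wraps every 2^32 ms (about 49.7 days), so as long as Now() is called at least once per wrap period, each observed wrap can add 2^32 ms to an accumulator. A standalone model of just that bookkeeping (without the production lock):

#include <cstdint>
#include <cstdio>

class RolloverTracker {
 public:
  // `raw_ms` wraps at 2^32 ms; callers must sample at least once per wrap.
  int64_t Now(uint32_t raw_ms) {
    if (raw_ms < last_raw_ms_) rollover_ms_ += int64_t{1} << 32;  // Wrapped.
    last_raw_ms_ = raw_ms;
    return rollover_ms_ + raw_ms;
  }

 private:
  uint32_t last_raw_ms_ = 0;
  int64_t rollover_ms_ = 0;
};

int main() {
  RolloverTracker clock;
  clock.Now(0xFFFFFFF0u);            // Just before the counter wraps.
  int64_t after = clock.Now(0x10u);  // Counter wrapped around zero.
  std::printf("%lld\n", static_cast<long long>(after));  // 4294967312
  return 0;
}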

8
deps/v8/src/base/sys-info.cc

@@ -34,14 +34,12 @@ int SysInfo::NumberOfProcessors() {
int ncpu = 0;
size_t len = sizeof(ncpu);
if (sysctl(mib, arraysize(mib), &ncpu, &len, NULL, 0) != 0) {
UNREACHABLE();
return 1;
}
return ncpu;
#elif V8_OS_POSIX
long result = sysconf(_SC_NPROCESSORS_ONLN); // NOLINT(runtime/int)
if (result == -1) {
UNREACHABLE();
return 1;
}
return static_cast<int>(result);
@@ -60,7 +58,6 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
int64_t memsize = 0;
size_t len = sizeof(memsize);
if (sysctl(mib, arraysize(mib), &memsize, &len, NULL, 0) != 0) {
UNREACHABLE();
return 0;
}
return memsize;
@@ -70,7 +67,6 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, NULL, 0);
sysctlbyname("vm.stats.vm.v_page_size", &page_size, &size, NULL, 0);
if (pages == -1 || page_size == -1) {
UNREACHABLE();
return 0;
}
return static_cast<int64_t>(pages) * page_size;
@@ -78,7 +74,6 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
MEMORYSTATUSEX memory_info;
memory_info.dwLength = sizeof(memory_info);
if (!GlobalMemoryStatusEx(&memory_info)) {
UNREACHABLE();
return 0;
}
int64_t result = static_cast<int64_t>(memory_info.ullTotalPhys);
@@ -87,7 +82,6 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
#elif V8_OS_QNX
struct stat stat_buf;
if (stat("/proc", &stat_buf) != 0) {
UNREACHABLE();
return 0;
}
return static_cast<int64_t>(stat_buf.st_size);
@@ -98,7 +92,6 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
long pages = sysconf(_SC_PHYS_PAGES); // NOLINT(runtime/int)
long page_size = sysconf(_SC_PAGESIZE); // NOLINT(runtime/int)
if (pages == -1 || page_size == -1) {
UNREACHABLE();
return 0;
}
return static_cast<int64_t>(pages) * page_size;
@@ -114,7 +107,6 @@ int64_t SysInfo::AmountOfVirtualMemory() {
struct rlimit rlim;
int result = getrlimit(RLIMIT_DATA, &rlim);
if (result != 0) {
UNREACHABLE();
return 0;
}
return (rlim.rlim_cur == RLIM_INFINITY) ? 0 : rlim.rlim_cur;

212
deps/v8/src/bootstrapper.cc

@@ -48,7 +48,7 @@ Handle<String> Bootstrapper::NativesSourceLookup(int index) {
Heap* heap = isolate_->heap();
if (heap->natives_source_cache()->get(index)->IsUndefined()) {
// We can use external strings for the natives.
Vector<const char> source = Natives::GetRawScriptSource(index);
Vector<const char> source = Natives::GetScriptSource(index);
NativesExternalStringResource* resource =
new NativesExternalStringResource(this,
source.start(),
@@ -361,8 +361,8 @@ Handle<Context> Bootstrapper::CreateEnvironment(
static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
// object.__proto__ = proto;
Handle<Map> old_map = Handle<Map>(object->map());
Handle<Map> new_map = Map::Copy(old_map);
new_map->set_prototype(*proto);
Handle<Map> new_map = Map::Copy(old_map, "SetObjectPrototype");
new_map->SetPrototype(proto, FAST_PROTOTYPE);
JSObject::MigrateToMap(object, new_map);
}
@@ -493,6 +493,8 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
Handle<String> object_name = factory->Object_string();
Handle<JSObject> object_function_prototype;
{ // --- O b j e c t ---
Handle<JSFunction> object_fun = factory->NewFunction(object_name);
int unused = JSObject::kInitialGlobalObjectUnusedPropertiesCount;
@@ -507,19 +509,20 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
native_context()->set_object_function(*object_fun);
// Allocate a new prototype for the object function.
Handle<JSObject> prototype = factory->NewJSObject(
isolate->object_function(),
TENURED);
Handle<Map> map = Map::Copy(handle(prototype->map()));
object_function_prototype =
factory->NewJSObject(isolate->object_function(), TENURED);
Handle<Map> map = Map::Copy(handle(object_function_prototype->map()),
"EmptyObjectPrototype");
map->set_is_prototype_map(true);
prototype->set_map(*map);
object_function_prototype->set_map(*map);
native_context()->set_initial_object_prototype(*prototype);
native_context()->set_initial_object_prototype(*object_function_prototype);
// For bootstrapping set the array prototype to be the same as the object
// prototype, otherwise the missing initial_array_prototype will cause
// assertions during startup.
native_context()->set_initial_array_prototype(*prototype);
Accessors::FunctionSetPrototype(object_fun, prototype).Assert();
native_context()->set_initial_array_prototype(*object_function_prototype);
Accessors::FunctionSetPrototype(object_fun, object_function_prototype)
.Assert();
}
// Allocate the empty function as the prototype for function ECMAScript
@@ -534,8 +537,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
Handle<Map> empty_function_map =
CreateFunctionMap(FUNCTION_WITHOUT_PROTOTYPE);
DCHECK(!empty_function_map->is_dictionary_map());
empty_function_map->set_prototype(
native_context()->object_function()->prototype());
empty_function_map->SetPrototype(object_function_prototype);
empty_function_map->set_is_prototype_map(true);
empty_function->set_map(*empty_function_map);
@@ -549,10 +551,10 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
empty_function->shared()->DontAdaptArguments();
// Set prototypes for the function maps.
native_context()->sloppy_function_map()->set_prototype(*empty_function);
native_context()->sloppy_function_without_prototype_map()->
set_prototype(*empty_function);
sloppy_function_map_writable_prototype_->set_prototype(*empty_function);
native_context()->sloppy_function_map()->SetPrototype(empty_function);
native_context()->sloppy_function_without_prototype_map()->SetPrototype(
empty_function);
sloppy_function_map_writable_prototype_->SetPrototype(empty_function);
return empty_function;
}
@@ -654,7 +656,7 @@ Handle<Map> Genesis::CreateStrictFunctionMap(
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
SetStrictFunctionInstanceDescriptor(map, function_mode);
map->set_function_with_prototype(IsFunctionModeWithPrototype(function_mode));
map->set_prototype(*empty_function);
map->SetPrototype(empty_function);
return map;
}
@@ -863,7 +865,6 @@ void Genesis::HookUpGlobalProxy(Handle<GlobalObject> global_object,
Handle<JSGlobalProxy> global_proxy) {
// Set the native context for the global object.
global_object->set_native_context(*native_context());
global_object->set_global_context(*native_context());
global_object->set_global_proxy(*global_proxy);
global_proxy->set_native_context(*native_context());
native_context()->set_global_proxy(*global_proxy);
@@ -908,6 +909,10 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
Handle<ScriptContextTable> script_context_table =
factory->NewScriptContextTable();
native_context()->set_script_context_table(*script_context_table);
Handle<String> object_name = factory->Object_string();
JSObject::AddProperty(
global_object, object_name, isolate->object_function(), DONT_ENUM);
@@ -946,7 +951,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
CallbacksDescriptor d(
Handle<Name>(Name::cast(array_length->name())),
array_length, attribs);
array_function->initial_map()->AppendDescriptor(&d);
initial_map->AppendDescriptor(&d);
}
// array_function is used internally. JS code creating array object should
@@ -1040,11 +1045,10 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
{
// ECMA-262, section 15.10.7.1.
FieldDescriptor field(factory->source_string(),
JSRegExp::kSourceFieldIndex,
final,
Representation::Tagged());
initial_map->AppendDescriptor(&field);
Handle<AccessorInfo> regexp_source(
Accessors::RegExpSourceInfo(isolate, final));
CallbacksDescriptor d(factory->source_string(), regexp_source, final);
initial_map->AppendDescriptor(&d);
}
{
// ECMA-262, section 15.10.7.2.
@@ -1081,19 +1085,17 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
initial_map->AppendDescriptor(&field);
}
initial_map->set_inobject_properties(5);
initial_map->set_pre_allocated_property_fields(5);
static const int num_fields = JSRegExp::kInObjectFieldCount;
initial_map->set_inobject_properties(num_fields);
initial_map->set_pre_allocated_property_fields(num_fields);
initial_map->set_unused_property_fields(0);
initial_map->set_instance_size(
initial_map->instance_size() + 5 * kPointerSize);
initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map));
initial_map->set_instance_size(initial_map->instance_size() +
num_fields * kPointerSize);
// RegExp prototype object is itself a RegExp.
Handle<Map> proto_map = Map::Copy(initial_map);
proto_map->set_prototype(native_context()->initial_object_prototype());
Handle<Map> proto_map = Map::Copy(initial_map, "RegExpPrototype");
DCHECK(proto_map->prototype() == *isolate->initial_object_prototype());
Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
heap->query_colon_string());
proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
heap->false_value());
proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex,
@@ -1104,7 +1106,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
Smi::FromInt(0),
SKIP_WRITE_BARRIER); // It's a Smi.
proto_map->set_is_prototype_map(true);
initial_map->set_prototype(*proto);
initial_map->SetPrototype(proto);
factory->SetRegExpIrregexpData(Handle<JSRegExp>::cast(proto),
JSRegExp::IRREGEXP, factory->empty_string(),
JSRegExp::Flags(0), 0);
@@ -1244,7 +1246,8 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
}
{ // --- aliased arguments map
Handle<Map> map = Map::Copy(isolate->sloppy_arguments_map());
Handle<Map> map =
Map::Copy(isolate->sloppy_arguments_map(), "AliasedArguments");
map->set_elements_kind(SLOPPY_ARGUMENTS_ELEMENTS);
DCHECK_EQ(2, map->pre_allocated_property_fields());
native_context()->set_aliased_arguments_map(*map);
@@ -1288,7 +1291,9 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
// @@iterator method is added later.
map->set_function_with_prototype(true);
map->set_prototype(native_context()->object_function()->prototype());
DCHECK_EQ(native_context()->object_function()->prototype(),
*isolate->initial_object_prototype());
map->SetPrototype(isolate->initial_object_prototype());
map->set_pre_allocated_property_fields(1);
map->set_inobject_properties(1);
@@ -1340,11 +1345,6 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
delegate->shared()->DontAdaptArguments();
}
#define FEATURE_INITIALIZE_GLOBAL(id, descr) InitializeGlobal_##id();
HARMONY_SHIPPING(FEATURE_INITIALIZE_GLOBAL)
#undef FEATURE_INITIALIZE_GLOBAL
// Initialize the embedder data slot.
Handle<FixedArray> embedder_data = factory->NewFixedArray(3);
native_context()->set_embedder_data(*embedder_data);
@@ -1379,6 +1379,7 @@ void Genesis::InitializeExperimentalGlobal() {
HARMONY_INPROGRESS(FEATURE_INITIALIZE_GLOBAL)
HARMONY_STAGED(FEATURE_INITIALIZE_GLOBAL)
HARMONY_SHIPPING(FEATURE_INITIALIZE_GLOBAL)
#undef FEATURE_INITIALIZE_GLOBAL
}
@@ -1396,8 +1397,8 @@ bool Genesis::CompileExperimentalBuiltin(Isolate* isolate, int index) {
Factory* factory = isolate->factory();
Handle<String> source_code;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, source_code, factory->NewStringFromAscii(
ExperimentalNatives::GetRawScriptSource(index)),
isolate, source_code,
factory->NewStringFromAscii(ExperimentalNatives::GetScriptSource(index)),
false);
return CompileNative(isolate, name, source_code);
}
@@ -1527,6 +1528,7 @@ void Genesis::InstallNativeFunctions() {
INSTALL_NATIVE(JSFunction, "ToInteger", to_integer_fun);
INSTALL_NATIVE(JSFunction, "ToUint32", to_uint32_fun);
INSTALL_NATIVE(JSFunction, "ToInt32", to_int32_fun);
INSTALL_NATIVE(JSFunction, "ToLength", to_length_fun);
INSTALL_NATIVE(JSFunction, "GlobalEval", global_eval_fun);
INSTALL_NATIVE(JSFunction, "Instantiate", instantiate_fun);
@@ -1557,14 +1559,7 @@ void Genesis::InstallNativeFunctions() {
native_object_get_notifier);
INSTALL_NATIVE(JSFunction, "NativeObjectNotifierPerformChange",
native_object_notifier_perform_change);
INSTALL_NATIVE(Symbol, "symbolIterator", iterator_symbol);
INSTALL_NATIVE(Symbol, "symbolUnscopables", unscopables_symbol);
INSTALL_NATIVE(JSFunction, "ArrayValues", array_values_iterator);
#define INSTALL_NATIVE_FUNCTIONS_FOR(id, descr) InstallNativeFunctions_##id();
HARMONY_SHIPPING(INSTALL_NATIVE_FUNCTIONS_FOR)
#undef INSTALL_NATIVE_FUNCTIONS_FOR
}
@@ -1579,6 +1574,7 @@ void Genesis::InstallExperimentalNativeFunctions() {
#define INSTALL_NATIVE_FUNCTIONS_FOR(id, descr) InstallNativeFunctions_##id();
HARMONY_INPROGRESS(INSTALL_NATIVE_FUNCTIONS_FOR)
HARMONY_STAGED(INSTALL_NATIVE_FUNCTIONS_FOR)
HARMONY_SHIPPING(INSTALL_NATIVE_FUNCTIONS_FOR)
#undef INSTALL_NATIVE_FUNCTIONS_FOR
}
@@ -1590,12 +1586,16 @@ EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_scoping)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_modules)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_strings)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_arrays)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_array_includes)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_classes)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_object_literals)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_regexps)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_arrow_functions)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_numeric_literals)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_tostring)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_templates)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_sloppy)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_unicode)
void Genesis::InstallNativeFunctions_harmony_proxies() {
@@ -1616,12 +1616,16 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_scoping)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_modules)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_strings)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_arrays)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_array_includes)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_classes)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_literals)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_arrow_functions)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_numeric_literals)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tostring)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_proxies)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_templates)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_unicode)
void Genesis::InitializeGlobal_harmony_regexps() {
Handle<JSObject> builtins(native_context()->builtins());
@@ -1657,7 +1661,7 @@ Handle<JSFunction> Genesis::InstallInternalArray(
array_function->shared()->DontAdaptArguments();
Handle<Map> original_map(array_function->initial_map());
Handle<Map> initial_map = Map::Copy(original_map);
Handle<Map> initial_map = Map::Copy(original_map, "InternalArray");
initial_map->set_elements_kind(elements_kind);
JSFunction::SetInitialMap(array_function, initial_map, prototype);
@@ -1672,7 +1676,7 @@ Handle<JSFunction> Genesis::InstallInternalArray(
{ // Add length.
CallbacksDescriptor d(
Handle<Name>(Name::cast(array_length->name())), array_length, attribs);
array_function->initial_map()->AppendDescriptor(&d);
initial_map->AppendDescriptor(&d);
}
return array_function;
@@ -1702,7 +1706,6 @@ bool Genesis::InstallNatives() {
Handle<JSBuiltinsObject>::cast(factory()->NewGlobalObject(builtins_fun));
builtins->set_builtins(*builtins);
builtins->set_native_context(*native_context());
builtins->set_global_context(*native_context());
builtins->set_global_proxy(native_context()->global_proxy());
@@ -1935,8 +1938,8 @@ bool Genesis::InstallNatives() {
// Create maps for generator functions and their prototypes. Store those
// maps in the native context.
Handle<Map> generator_function_map =
Map::Copy(sloppy_function_map_writable_prototype_);
generator_function_map->set_prototype(*generator_function_prototype);
Map::Copy(sloppy_function_map_writable_prototype_, "GeneratorFunction");
generator_function_map->SetPrototype(generator_function_prototype);
native_context()->set_sloppy_generator_function_map(
*generator_function_map);
@@ -1966,15 +1969,16 @@ bool Genesis::InstallNatives() {
rw_attribs, poison_pair);
Handle<Map> strict_function_map(native_context()->strict_function_map());
Handle<Map> strict_generator_function_map = Map::Copy(strict_function_map);
Handle<Map> strict_generator_function_map =
Map::Copy(strict_function_map, "StrictGeneratorFunction");
// "arguments" and "caller" already poisoned.
strict_generator_function_map->set_prototype(*generator_function_prototype);
strict_generator_function_map->SetPrototype(generator_function_prototype);
native_context()->set_strict_generator_function_map(
*strict_generator_function_map);
Handle<JSFunction> object_function(native_context()->object_function());
Handle<Map> generator_object_prototype_map = Map::Create(isolate(), 0);
generator_object_prototype_map->set_prototype(*generator_object_prototype);
generator_object_prototype_map->SetPrototype(generator_object_prototype);
native_context()->set_generator_object_prototype_map(
*generator_object_prototype_map);
}
@@ -1984,6 +1988,17 @@ bool Genesis::InstallNatives() {
return true;
}
// Install public symbols.
{
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
#define INSTALL_PUBLIC_SYMBOL(name, varname, description) \
Handle<String> varname = factory()->NewStringFromStaticChars(#varname); \
JSObject::AddProperty(builtins, varname, factory()->name(), attributes);
PUBLIC_SYMBOL_LIST(INSTALL_PUBLIC_SYMBOL)
#undef INSTALL_PUBLIC_SYMBOL
}
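For illustration, assuming PUBLIC_SYMBOL_LIST contains an entry of the form (iterator_symbol, symbolIterator, "Symbol.iterator") (hypothetical here; the real list lives with the heap's root declarations), INSTALL_PUBLIC_SYMBOL expands to roughly:

// Hypothetical expansion for the assumed (iterator_symbol, symbolIterator,
// "Symbol.iterator") entry; not additional diff content.
Handle<String> symbolIterator =
    factory()->NewStringFromStaticChars("symbolIterator");
JSObject::AddProperty(builtins, symbolIterator, factory()->iterator_symbol(),
                      attributes);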
// Install natives.
for (int i = Natives::GetDebuggerCount();
i < Natives::GetBuiltinsCount();
@@ -2024,8 +2039,10 @@ bool Genesis::InstallNatives() {
if (FLAG_vector_ics) {
// Apply embeds an IC, so we need a type vector of size 1 in the shared
// function info.
FeedbackVectorSpec spec(0, 1);
spec.SetKind(0, Code::CALL_IC);
Handle<TypeFeedbackVector> feedback_vector =
factory()->NewTypeFeedbackVector(0, 1);
factory()->NewTypeFeedbackVector(spec);
apply->shared()->set_feedback_vector(*feedback_vector);
}
@@ -2062,7 +2079,7 @@ bool Genesis::InstallNatives() {
// Set prototype on map.
initial_map->set_non_instance_prototype(false);
initial_map->set_prototype(*array_prototype);
initial_map->SetPrototype(array_prototype);
// Update map with length accessor from Array and add "index" and "input".
Map::EnsureDescriptorSlack(initial_map, 3);
@@ -2110,22 +2127,22 @@ bool Genesis::InstallNatives() {
Handle<AccessorInfo> arguments_iterator =
Accessors::ArgumentsIteratorInfo(isolate(), attribs);
{
CallbacksDescriptor d(Handle<Name>(native_context()->iterator_symbol()),
arguments_iterator, attribs);
CallbacksDescriptor d(factory()->iterator_symbol(), arguments_iterator,
attribs);
Handle<Map> map(native_context()->sloppy_arguments_map());
Map::EnsureDescriptorSlack(map, 1);
map->AppendDescriptor(&d);
}
{
CallbacksDescriptor d(Handle<Name>(native_context()->iterator_symbol()),
arguments_iterator, attribs);
CallbacksDescriptor d(factory()->iterator_symbol(), arguments_iterator,
attribs);
Handle<Map> map(native_context()->aliased_arguments_map());
Map::EnsureDescriptorSlack(map, 1);
map->AppendDescriptor(&d);
}
{
CallbacksDescriptor d(Handle<Name>(native_context()->iterator_symbol()),
arguments_iterator, attribs);
CallbacksDescriptor d(factory()->iterator_symbol(), arguments_iterator,
attribs);
Handle<Map> map(native_context()->strict_arguments_map());
Map::EnsureDescriptorSlack(map, 1);
map->AppendDescriptor(&d);
@@ -2140,17 +2157,11 @@ bool Genesis::InstallNatives() {
}
#define INSTALL_EXPERIMENTAL_NATIVE(i, flag, file) \
if (FLAG_##flag && \
strcmp(ExperimentalNatives::GetScriptName(i).start(), "native " file) == \
0) { \
if (!CompileExperimentalBuiltin(isolate(), i)) return false; \
}
bool Genesis::InstallExperimentalNatives() {
static const char* harmony_arrays_natives[] = {
"native harmony-array.js", "native harmony-typedarray.js", NULL};
static const char* harmony_array_includes_natives[] = {
"native harmony-array-includes.js", NULL};
static const char* harmony_proxies_natives[] = {"native proxy.js", NULL};
static const char* harmony_strings_natives[] = {"native harmony-string.js",
NULL};
@@ -2159,33 +2170,35 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_modules_natives[] = {NULL};
static const char* harmony_scoping_natives[] = {NULL};
static const char* harmony_object_literals_natives[] = {NULL};
static const char* harmony_regexps_natives[] = {NULL};
static const char* harmony_regexps_natives[] = {
"native harmony-regexp.js", NULL};
static const char* harmony_arrow_functions_natives[] = {NULL};
static const char* harmony_numeric_literals_natives[] = {NULL};
static const char* harmony_tostring_natives[] = {"native harmony-tostring.js",
NULL};
static const char* harmony_templates_natives[] = {
"native harmony-templates.js", NULL};
static const char* harmony_sloppy_natives[] = {NULL};
static const char* harmony_unicode_natives[] = {NULL};
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount(); i++) {
#define INSTALL_EXPERIMENTAL_NATIVES(id, desc) \
if (FLAG_##id) { \
for (size_t j = 0; id##_natives[j] != NULL; j++) { \
if (strcmp(ExperimentalNatives::GetScriptName(i).start(), \
id##_natives[j]) == 0) { \
if (!CompileExperimentalBuiltin(isolate(), i)) return false; \
} \
} \
}
// Iterate over flags that are not enabled by default.
#define INSTALL_EXPERIMENTAL_NATIVES(id, desc) \
if (FLAG_##id) { \
for (size_t j = 0; id##_natives[j] != NULL; j++) { \
Vector<const char> script_name = ExperimentalNatives::GetScriptName(i); \
if (strncmp(script_name.start(), id##_natives[j], \
script_name.length()) == 0) { \
if (!CompileExperimentalBuiltin(isolate(), i)) return false; \
} \
} \
}
HARMONY_INPROGRESS(INSTALL_EXPERIMENTAL_NATIVES);
HARMONY_STAGED(INSTALL_EXPERIMENTAL_NATIVES);
HARMONY_SHIPPING(INSTALL_EXPERIMENTAL_NATIVES);
#undef INSTALL_EXPERIMENTAL_NATIVES
}
#define USE_NATIVES_FOR_FEATURE(id, descr) USE(id##_natives);
HARMONY_SHIPPING(USE_NATIVES_FOR_FEATURE)
#undef USE_NATIVES_FOR_FEATURE
InstallExperimentalNativeFunctions();
return true;
}
@@ -2568,6 +2581,8 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
JSObject::AddProperty(to, key, constant, details.attributes());
break;
}
case ACCESSOR_FIELD:
UNREACHABLE();
case CALLBACKS: {
Handle<Name> key(descs->GetKey(i));
LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
@@ -2578,15 +2593,10 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
DCHECK(!to->HasFastProperties());
// Add to dictionary.
Handle<Object> callbacks(descs->GetCallbacksObject(i), isolate());
PropertyDetails d = PropertyDetails(
details.attributes(), CALLBACKS, i + 1);
PropertyDetails d(details.attributes(), CALLBACKS, i + 1);
JSObject::SetNormalizedProperty(to, key, callbacks, d);
break;
}
// Does not occur since the from object has fast properties.
case NORMAL:
UNREACHABLE();
break;
}
}
} else {
@@ -2611,6 +2621,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
isolate());
}
PropertyDetails details = properties->DetailsAt(i);
DCHECK_EQ(DATA, details.kind());
JSObject::AddProperty(to, key, value, details.attributes());
}
}
@@ -2712,6 +2723,15 @@ Genesis::Genesis(Isolate* isolate,
AddToWeakNativeContextList(*native_context());
isolate->set_context(*native_context());
isolate->counters()->contexts_created_by_snapshot()->Increment();
#if TRACE_MAPS
if (FLAG_trace_maps) {
Handle<JSFunction> object_fun = isolate->object_function();
PrintF("[TraceMap: InitialMap map= %p SFI= %d_Object ]\n",
reinterpret_cast<void*>(object_fun->initial_map()),
object_fun->shared()->unique_id());
Map::TraceAllTransitions(object_fun->initial_map());
}
#endif
Handle<GlobalObject> global_object;
Handle<JSGlobalProxy> global_proxy = CreateNewGlobals(
global_proxy_template, maybe_global_proxy, &global_object);

4
deps/v8/src/bootstrapper.h

@@ -158,8 +158,8 @@ class NativesExternalStringResource FINAL
NativesExternalStringResource(Bootstrapper* bootstrapper,
const char* source,
size_t length);
virtual const char* data() const OVERRIDE { return data_; }
virtual size_t length() const OVERRIDE { return length_; }
const char* data() const OVERRIDE { return data_; }
size_t length() const OVERRIDE { return length_; }
private:
const char* data_;

14
deps/v8/src/builtins.cc

@@ -197,7 +197,6 @@ static bool ArrayPrototypeHasNoElements(Heap* heap, PrototypeIterator* iter) {
static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
JSArray* receiver) {
if (!FLAG_clever_optimizations) return false;
DisallowHeapAllocation no_gc;
PrototypeIterator iter(heap->isolate(), receiver);
return ArrayPrototypeHasNoElements(heap, &iter);
@@ -420,6 +419,10 @@ BUILTIN(ArrayPop) {
int len = Smi::cast(array->length())->value();
if (len == 0) return isolate->heap()->undefined_value();
if (JSArray::HasReadOnlyLength(array)) {
return CallJsBuiltin(isolate, "ArrayPop", args);
}
ElementsAccessor* accessor = array->GetElementsAccessor();
int new_length = len - 1;
Handle<Object> element =
@@ -451,6 +454,10 @@ BUILTIN(ArrayShift) {
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
if (JSArray::HasReadOnlyLength(array)) {
return CallJsBuiltin(isolate, "ArrayShift", args);
}
// Get first element
ElementsAccessor* accessor = array->GetElementsAccessor();
Handle<Object> first =
@@ -756,6 +763,11 @@ BUILTIN(ArraySplice) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
if (new_length != len && JSArray::HasReadOnlyLength(array)) {
AllowHeapAllocation allow_allocation;
return CallJsBuiltin(isolate, "ArraySplice", args);
}
if (new_length == 0) {
Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(
elms_obj, elements_kind, actual_delete_count);

6
deps/v8/src/checks.h

@@ -7,12 +7,6 @@
#include "src/base/logging.h"
#ifdef DEBUG
#ifndef OPTIMIZED_DEBUG
#define ENABLE_SLOW_DCHECKS 1
#endif
#endif
namespace v8 {
class Value;

485
deps/v8/src/code-stubs-hydrogen.cc

@@ -8,6 +8,7 @@
#include "src/code-stubs.h"
#include "src/field-index.h"
#include "src/hydrogen.h"
#include "src/ic/ic.h"
#include "src/lithium.h"
namespace v8 {
@@ -34,11 +35,11 @@ static LChunk* OptimizeGraph(HGraph* graph) {
class CodeStubGraphBuilderBase : public HGraphBuilder {
public:
CodeStubGraphBuilderBase(Isolate* isolate, HydrogenCodeStub* stub)
: HGraphBuilder(&info_),
explicit CodeStubGraphBuilderBase(CompilationInfoWithZone* info)
: HGraphBuilder(info),
arguments_length_(NULL),
info_(stub, isolate),
descriptor_(stub),
info_(info),
descriptor_(info->code_stub()),
context_(NULL) {
int parameter_count = descriptor_.GetEnvironmentParameterCount();
parameters_.Reset(new HParameter*[parameter_count]);
@@ -56,10 +57,10 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
DCHECK(arguments_length_ != NULL);
return arguments_length_;
}
CompilationInfo* info() { return &info_; }
HydrogenCodeStub* stub() { return info_.code_stub(); }
CompilationInfo* info() { return info_; }
HydrogenCodeStub* stub() { return info_->code_stub(); }
HContext* context() { return context_; }
Isolate* isolate() { return info_.isolate(); }
Isolate* isolate() { return info_->isolate(); }
HLoadNamedField* BuildLoadNamedField(HValue* object,
FieldIndex index);
@@ -99,6 +100,21 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HValue* shared_info,
HValue* native_context);
// Tail calls the handler found at array[map_index + 1].
void TailCallHandler(HValue* receiver, HValue* name, HValue* array,
HValue* map_index, HValue* slot, HValue* vector);
// Tail calls handler_code.
void TailCallHandler(HValue* receiver, HValue* name, HValue* slot,
HValue* vector, HValue* handler_code);
void TailCallMiss(HValue* receiver, HValue* name, HValue* slot,
HValue* vector, bool keyed_load);
// Handle MONOMORPHIC and POLYMORPHIC LoadIC and KeyedLoadIC cases.
void HandleArrayCases(HValue* array, HValue* receiver, HValue* name,
HValue* slot, HValue* vector, bool keyed_load);
private:
HValue* BuildArraySingleArgumentConstructor(JSArrayBuilder* builder);
HValue* BuildArrayNArgumentsConstructor(JSArrayBuilder* builder,
@@ -106,7 +122,7 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
SmartArrayPointer<HParameter*> parameters_;
HValue* arguments_length_;
CompilationInfoWithZone info_;
CompilationInfoWithZone* info_;
CodeStubDescriptor descriptor_;
HContext* context_;
};
@@ -120,7 +136,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
const char* name = CodeStub::MajorName(stub()->MajorKey(), false);
PrintF("-----------------------------------------------------------\n");
PrintF("Compiling stub %s using hydrogen\n", name);
isolate()->GetHTracer()->TraceCompilation(&info_);
isolate()->GetHTracer()->TraceCompilation(info());
}
int param_count = descriptor_.GetEnvironmentParameterCount();
@@ -189,8 +205,8 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
template <class Stub>
class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
public:
CodeStubGraphBuilder(Isolate* isolate, Stub* stub)
: CodeStubGraphBuilderBase(isolate, stub) {}
explicit CodeStubGraphBuilder(CompilationInfoWithZone* info)
: CodeStubGraphBuilderBase(info) {}
protected:
virtual HValue* BuildCodeStub() {
@@ -271,7 +287,8 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
if (FLAG_profile_hydrogen_code_stub_compilation) {
timer.Start();
}
CodeStubGraphBuilder<Stub> builder(isolate, stub);
CompilationInfoWithZone info(stub, isolate);
CodeStubGraphBuilder<Stub> builder(&info);
LChunk* chunk = OptimizeGraph(builder.CreateGraph());
Handle<Code> code = chunk->Codegen();
if (FLAG_profile_hydrogen_code_stub_compilation) {
@@ -306,10 +323,8 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
// so that it doesn't build an eager frame.
info()->MarkMustNotHaveEagerFrame();
HInstruction* allocation_site = Add<HLoadKeyed>(GetParameter(0),
GetParameter(1),
static_cast<HValue*>(NULL),
FAST_ELEMENTS);
HInstruction* allocation_site =
Add<HLoadKeyed>(GetParameter(0), GetParameter(1), nullptr, FAST_ELEMENTS);
IfBuilder checker(this);
checker.IfNot<HCompareObjectEqAndBranch, HValue*>(allocation_site,
undefined);
@@ -317,8 +332,8 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
AllocationSite::kTransitionInfoOffset);
HInstruction* boilerplate = Add<HLoadNamedField>(
allocation_site, static_cast<HValue*>(NULL), access);
HInstruction* boilerplate =
Add<HLoadNamedField>(allocation_site, nullptr, access);
HValue* elements = AddLoadElements(boilerplate);
HValue* capacity = AddLoadFixedArrayLength(elements);
IfBuilder zero_capacity(this);
@@ -370,10 +385,8 @@ template <>
HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
HValue* undefined = graph()->GetConstantUndefined();
HInstruction* allocation_site = Add<HLoadKeyed>(GetParameter(0),
GetParameter(1),
static_cast<HValue*>(NULL),
FAST_ELEMENTS);
HInstruction* allocation_site =
Add<HLoadKeyed>(GetParameter(0), GetParameter(1), nullptr, FAST_ELEMENTS);
IfBuilder checker(this);
checker.IfNot<HCompareObjectEqAndBranch, HValue*>(allocation_site,
@@ -382,8 +395,8 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
AllocationSite::kTransitionInfoOffset);
HInstruction* boilerplate = Add<HLoadNamedField>(
allocation_site, static_cast<HValue*>(NULL), access);
HInstruction* boilerplate =
Add<HLoadNamedField>(allocation_site, nullptr, access);
int length = casted_stub()->length();
if (length == 0) {
@@ -396,12 +409,10 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
size += AllocationMemento::kSize;
}
HValue* boilerplate_map = Add<HLoadNamedField>(
boilerplate, static_cast<HValue*>(NULL),
HObjectAccess::ForMap());
HValue* boilerplate_map =
Add<HLoadNamedField>(boilerplate, nullptr, HObjectAccess::ForMap());
HValue* boilerplate_size = Add<HLoadNamedField>(
boilerplate_map, static_cast<HValue*>(NULL),
HObjectAccess::ForMapInstanceSize());
boilerplate_map, nullptr, HObjectAccess::ForMapInstanceSize());
HValue* size_in_words = Add<HConstant>(object_size >> kPointerSizeLog2);
checker.If<HCompareNumericAndBranch>(boilerplate_size,
size_in_words, Token::EQ);
@@ -414,9 +425,8 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
for (int i = 0; i < object_size; i += kPointerSize) {
HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(i);
Add<HStoreNamedField>(
object, access, Add<HLoadNamedField>(
boilerplate, static_cast<HValue*>(NULL), access));
Add<HStoreNamedField>(object, access,
Add<HLoadNamedField>(boilerplate, nullptr, access));
}
DCHECK(FLAG_allocation_site_pretenuring || (size == object_size));
@@ -485,9 +495,8 @@ HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
// Link the object to the allocation site list
HValue* site_list = Add<HConstant>(
ExternalReference::allocation_sites_list_address(isolate()));
HValue* site = Add<HLoadNamedField>(
site_list, static_cast<HValue*>(NULL),
HObjectAccess::ForAllocationSiteList());
HValue* site = Add<HLoadNamedField>(site_list, nullptr,
HObjectAccess::ForAllocationSiteList());
// TODO(mvstanton): This is a store to a weak pointer, which we may want to
// mark as such in order to skip the write barrier, once we have a unified
// system for weakness. For now we decided to keep it like this because having
@@ -513,6 +522,40 @@ Handle<Code> CreateAllocationSiteStub::GenerateCode() {
}
template <>
HValue* CodeStubGraphBuilder<LoadScriptContextFieldStub>::BuildCodeStub() {
int context_index = casted_stub()->context_index();
int slot_index = casted_stub()->slot_index();
HValue* script_context = BuildGetScriptContext(context_index);
return Add<HLoadNamedField>(script_context, nullptr,
HObjectAccess::ForContextSlot(slot_index));
}
Handle<Code> LoadScriptContextFieldStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<StoreScriptContextFieldStub>::BuildCodeStub() {
int context_index = casted_stub()->context_index();
int slot_index = casted_stub()->slot_index();
HValue* script_context = BuildGetScriptContext(context_index);
Add<HStoreNamedField>(script_context,
HObjectAccess::ForContextSlot(slot_index),
GetParameter(2), STORE_TO_INITIALIZED_ENTRY);
return GetParameter(2);
}
Handle<Code> StoreScriptContextFieldStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<LoadFastElementStub>::BuildCodeStub() {
HInstruction* load = BuildUncheckedMonomorphicElementAccess(
@@ -538,15 +581,15 @@ HLoadNamedField* CodeStubGraphBuilderBase::BuildLoadNamedField(
HObjectAccess access = index.is_inobject()
? HObjectAccess::ForObservableJSObjectOffset(offset, representation)
: HObjectAccess::ForBackingStoreOffset(offset, representation);
if (index.is_double()) {
if (index.is_double() &&
(!FLAG_unbox_double_fields || !index.is_inobject())) {
// Load the heap number.
object = Add<HLoadNamedField>(
object, static_cast<HValue*>(NULL),
access.WithRepresentation(Representation::Tagged()));
object, nullptr, access.WithRepresentation(Representation::Tagged()));
// Load the double value from it.
access = HObjectAccess::ForHeapNumberValue();
}
return Add<HLoadNamedField>(object, static_cast<HValue*>(NULL), access);
return Add<HLoadNamedField>(object, nullptr, access);
}
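The new condition distinguishes two storage schemes for double fields: boxed doubles (out-of-object properties, or any double when FLAG_unbox_double_fields is off) live in a separate heap number and need two loads, while unboxed in-object doubles are read directly. A plain-C++ model of the two layouts (illustrative, not actual V8 object layout):

#include <cstdio>

struct HeapNumber { double value; };  // Separate boxed allocation.
struct Boxed { HeapNumber* field; };  // Two loads: the box, then the value.
struct Unboxed { double field; };     // One load: raw double in the object.

int main() {
  HeapNumber box = {1.5};
  Boxed a = {&box};
  Unboxed b = {2.5};
  std::printf("%f %f\n", a.field->value, b.field);
  return 0;
}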
@@ -566,12 +609,10 @@ HValue* CodeStubGraphBuilder<LoadConstantStub>::BuildCodeStub() {
HValue* map = AddLoadMap(GetParameter(0), NULL);
HObjectAccess descriptors_access = HObjectAccess::ForObservableJSObjectOffset(
Map::kDescriptorsOffset, Representation::Tagged());
HValue* descriptors =
Add<HLoadNamedField>(map, static_cast<HValue*>(NULL), descriptors_access);
HValue* descriptors = Add<HLoadNamedField>(map, nullptr, descriptors_access);
HObjectAccess value_access = HObjectAccess::ForObservableJSObjectOffset(
DescriptorArray::GetValueOffset(casted_stub()->constant_index()));
return Add<HLoadNamedField>(descriptors, static_cast<HValue*>(NULL),
value_access);
return Add<HLoadNamedField>(descriptors, nullptr, value_access);
}
@@ -580,20 +621,19 @@ Handle<Code> LoadConstantStub::GenerateCode() { return DoGenerateCode(this); }
HValue* CodeStubGraphBuilderBase::UnmappedCase(HValue* elements, HValue* key) {
HValue* result;
HInstruction* backing_store = Add<HLoadKeyed>(
elements, graph()->GetConstant1(), static_cast<HValue*>(NULL),
FAST_ELEMENTS, ALLOW_RETURN_HOLE);
HInstruction* backing_store =
Add<HLoadKeyed>(elements, graph()->GetConstant1(), nullptr, FAST_ELEMENTS,
ALLOW_RETURN_HOLE);
Add<HCheckMaps>(backing_store, isolate()->factory()->fixed_array_map());
HValue* backing_store_length =
Add<HLoadNamedField>(backing_store, static_cast<HValue*>(NULL),
HObjectAccess::ForFixedArrayLength());
HValue* backing_store_length = Add<HLoadNamedField>(
backing_store, nullptr, HObjectAccess::ForFixedArrayLength());
IfBuilder in_unmapped_range(this);
in_unmapped_range.If<HCompareNumericAndBranch>(key, backing_store_length,
Token::LT);
in_unmapped_range.Then();
{
result = Add<HLoadKeyed>(backing_store, key, static_cast<HValue*>(NULL),
FAST_HOLEY_ELEMENTS, NEVER_RETURN_HOLE);
result = Add<HLoadKeyed>(backing_store, key, nullptr, FAST_HOLEY_ELEMENTS,
NEVER_RETURN_HOLE);
}
in_unmapped_range.ElseDeopt("Outside of range");
in_unmapped_range.End();
@@ -640,19 +680,17 @@ HValue* CodeStubGraphBuilder<KeyedLoadSloppyArgumentsStub>::BuildCodeStub() {
positive_smi.End();
HValue* constant_two = Add<HConstant>(2);
HValue* elements = AddLoadElements(receiver, static_cast<HValue*>(NULL));
HValue* elements_length =
Add<HLoadNamedField>(elements, static_cast<HValue*>(NULL),
HObjectAccess::ForFixedArrayLength());
HValue* elements = AddLoadElements(receiver, nullptr);
HValue* elements_length = Add<HLoadNamedField>(
elements, nullptr, HObjectAccess::ForFixedArrayLength());
HValue* adjusted_length = AddUncasted<HSub>(elements_length, constant_two);
IfBuilder in_range(this);
in_range.If<HCompareNumericAndBranch>(key, adjusted_length, Token::LT);
in_range.Then();
{
HValue* index = AddUncasted<HAdd>(key, constant_two);
HInstruction* mapped_index =
Add<HLoadKeyed>(elements, index, static_cast<HValue*>(NULL),
FAST_HOLEY_ELEMENTS, ALLOW_RETURN_HOLE);
HInstruction* mapped_index = Add<HLoadKeyed>(
elements, index, nullptr, FAST_HOLEY_ELEMENTS, ALLOW_RETURN_HOLE);
IfBuilder is_valid(this);
is_valid.IfNot<HCompareObjectEqAndBranch>(mapped_index,
@@ -662,13 +700,11 @@ HValue* CodeStubGraphBuilder<KeyedLoadSloppyArgumentsStub>::BuildCodeStub() {
// TODO(mvstanton): I'd like to assert from this point that, if the
// mapped_index is not the hole, it is indeed a smi. An unnecessary
// smi check is being emitted.
HValue* the_context =
Add<HLoadKeyed>(elements, graph()->GetConstant0(),
static_cast<HValue*>(NULL), FAST_ELEMENTS);
HValue* the_context = Add<HLoadKeyed>(elements, graph()->GetConstant0(),
nullptr, FAST_ELEMENTS);
DCHECK(Context::kHeaderSize == FixedArray::kHeaderSize);
HValue* result =
Add<HLoadKeyed>(the_context, mapped_index, static_cast<HValue*>(NULL),
FAST_ELEMENTS, ALLOW_RETURN_HOLE);
HValue* result = Add<HLoadKeyed>(the_context, mapped_index, nullptr,
FAST_ELEMENTS, ALLOW_RETURN_HOLE);
environment()->Push(result);
}
is_valid.Else();
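The stub walks V8's sloppy-arguments parameter map: elements slot 0 holds the context, slot 1 the unmapped backing store, and slot key + 2 either a context slot index (the argument is aliased to a context variable) or the hole (fall back to the backing store). A simplified model of that lookup, with plain ints standing in for tagged values:

#include <cstdio>
#include <vector>

const int kTheHole = -1;

// Simplified parameter-map lookup; ints stand in for tagged values.
static int LoadSloppyArgument(const std::vector<int>& elements,
                              const std::vector<int>& context,
                              const std::vector<int>& backing_store,
                              int key) {
  int mapped = elements[key + 2];  // Slots 0 and 1 are context and store.
  if (mapped != kTheHole) return context[mapped];  // Aliased context slot.
  return backing_store[key];                       // Unmapped plain element.
}

int main() {
  std::vector<int> context = {0, 0, 0, 0, 42};  // Context slot 4 holds arg 0.
  std::vector<int> backing_store = {7, 8};
  std::vector<int> elements = {/*context*/ 0, /*store*/ 1, 4, kTheHole};
  std::printf("%d %d\n",
              LoadSloppyArgument(elements, context, backing_store, 0),   // 42
              LoadSloppyArgument(elements, context, backing_store, 1));  // 8
  return 0;
}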
@@ -705,30 +741,31 @@ void CodeStubGraphBuilderBase::BuildStoreNamedField(
: HObjectAccess::ForBackingStoreOffset(offset, representation);
if (representation.IsDouble()) {
HObjectAccess heap_number_access =
access.WithRepresentation(Representation::Tagged());
if (transition_to_field) {
// The store requires a mutable HeapNumber to be allocated.
NoObservableSideEffectsScope no_side_effects(this);
HInstruction* heap_number_size = Add<HConstant>(HeapNumber::kSize);
// TODO(hpayer): Allocation site pretenuring support.
HInstruction* heap_number =
Add<HAllocate>(heap_number_size, HType::HeapObject(), NOT_TENURED,
MUTABLE_HEAP_NUMBER_TYPE);
AddStoreMapConstant(heap_number,
isolate()->factory()->mutable_heap_number_map());
Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
value);
// Store the new mutable heap number into the object.
access = heap_number_access;
value = heap_number;
} else {
// Load the heap number.
object = Add<HLoadNamedField>(object, static_cast<HValue*>(NULL),
heap_number_access);
// Store the double value into it.
access = HObjectAccess::ForHeapNumberValue();
if (!FLAG_unbox_double_fields || !index.is_inobject()) {
HObjectAccess heap_number_access =
access.WithRepresentation(Representation::Tagged());
if (transition_to_field) {
// The store requires a mutable HeapNumber to be allocated.
NoObservableSideEffectsScope no_side_effects(this);
HInstruction* heap_number_size = Add<HConstant>(HeapNumber::kSize);
// TODO(hpayer): Allocation site pretenuring support.
HInstruction* heap_number =
Add<HAllocate>(heap_number_size, HType::HeapObject(), NOT_TENURED,
MUTABLE_HEAP_NUMBER_TYPE);
AddStoreMapConstant(heap_number,
isolate()->factory()->mutable_heap_number_map());
Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
value);
// Store the new mutable heap number into the object.
access = heap_number_access;
value = heap_number;
} else {
// Load the heap number.
object = Add<HLoadNamedField>(object, nullptr, heap_number_access);
// Store the double value into it.
access = HObjectAccess::ForHeapNumberValue();
}
}
} else if (representation.IsHeapObject()) {
BuildCheckHeapObject(value);
@@ -755,9 +792,8 @@ HValue* CodeStubGraphBuilder<StoreTransitionStub>::BuildCodeStub() {
switch (casted_stub()->store_mode()) {
case StoreTransitionStub::ExtendStorageAndStoreMapAndValue: {
HValue* properties =
Add<HLoadNamedField>(object, static_cast<HValue*>(NULL),
HObjectAccess::ForPropertiesPointer());
HValue* properties = Add<HLoadNamedField>(
object, nullptr, HObjectAccess::ForPropertiesPointer());
HValue* length = AddLoadFixedArrayLength(properties);
HValue* delta =
Add<HConstant>(static_cast<int32_t>(JSObject::kFieldsAdded));
@ -1066,7 +1102,7 @@ HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
HIfContinuation continuation;
Handle<Map> sentinel_map(isolate->heap()->meta_map());
Type* type = stub->GetType(zone(), sentinel_map);
BuildCompareNil(GetParameter(0), type, &continuation);
BuildCompareNil(GetParameter(0), type, &continuation, kEmbedMapsViaWeakCells);
IfBuilder if_nil(this, &continuation);
if_nil.Then();
if (continuation.IsFalseReachable()) {
@ -1291,8 +1327,7 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
HValue* cell = Add<HConstant>(placeholder_cell);
HObjectAccess access(HObjectAccess::ForCellPayload(isolate()));
HValue* cell_contents = Add<HLoadNamedField>(
cell, static_cast<HValue*>(NULL), access);
HValue* cell_contents = Add<HLoadNamedField>(cell, nullptr, access);
if (stub->is_constant()) {
IfBuilder builder(this);
@ -1390,7 +1425,7 @@ void CodeStubGraphBuilderBase::BuildCheckAndInstallOptimizedCode(
// Now link a function into a list of optimized functions.
HValue* optimized_functions_list = Add<HLoadNamedField>(
native_context, static_cast<HValue*>(NULL),
native_context, nullptr,
HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST));
Add<HStoreNamedField>(js_function,
HObjectAccess::ForNextFunctionLinkPointer(),
@ -1410,8 +1445,8 @@ void CodeStubGraphBuilderBase::BuildInstallCode(HValue* js_function,
Add<HStoreNamedField>(js_function,
HObjectAccess::ForNextFunctionLinkPointer(),
graph()->GetConstantUndefined());
HValue* code_object = Add<HLoadNamedField>(
shared_info, static_cast<HValue*>(NULL), HObjectAccess::ForCodeOffset());
HValue* code_object = Add<HLoadNamedField>(shared_info, nullptr,
HObjectAccess::ForCodeOffset());
Add<HStoreCodeEntry>(js_function, code_object);
}
@ -1428,8 +1463,8 @@ HInstruction* CodeStubGraphBuilderBase::LoadFromOptimizedCodeMap(
HValue* field_offset_value = Add<HConstant>(field_offset);
field_slot = AddUncasted<HAdd>(iterator, field_offset_value);
}
HInstruction* field_entry = Add<HLoadKeyed>(optimized_map, field_slot,
static_cast<HValue*>(NULL), FAST_ELEMENTS);
HInstruction* field_entry =
Add<HLoadKeyed>(optimized_map, field_slot, nullptr, FAST_ELEMENTS);
return field_entry;
}
@ -1441,8 +1476,7 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
Counters* counters = isolate()->counters();
IfBuilder is_optimized(this);
HInstruction* optimized_map = Add<HLoadNamedField>(
shared_info, static_cast<HValue*>(NULL),
HObjectAccess::ForOptimizedCodeMap());
shared_info, nullptr, HObjectAccess::ForOptimizedCodeMap());
HValue* null_constant = Add<HConstant>(0);
is_optimized.If<HCompareObjectEqAndBranch>(optimized_map, null_constant);
is_optimized.Then();
@ -1475,8 +1509,7 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
LoopBuilder::kPostDecrement,
shared_function_entry_length);
HValue* array_length = Add<HLoadNamedField>(
optimized_map, static_cast<HValue*>(NULL),
HObjectAccess::ForFixedArrayLength());
optimized_map, nullptr, HObjectAccess::ForFixedArrayLength());
HValue* start_pos = AddUncasted<HSub>(array_length,
shared_function_entry_length);
HValue* slot_iterator = loop_builder.BeginBody(start_pos,
@ -1520,8 +1553,8 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
// Create a new closure from the given function info in new space
HValue* size = Add<HConstant>(JSFunction::kSize);
HInstruction* js_function = Add<HAllocate>(size, HType::JSObject(),
NOT_TENURED, JS_FUNCTION_TYPE);
HInstruction* js_function =
Add<HAllocate>(size, HType::JSObject(), NOT_TENURED, JS_FUNCTION_TYPE);
int map_index = Context::FunctionMapIndex(casted_stub()->strict_mode(),
casted_stub()->kind());
@ -1530,8 +1563,7 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
// as the map of the allocated object.
HInstruction* native_context = BuildGetNativeContext();
HInstruction* map_slot_value = Add<HLoadNamedField>(
native_context, static_cast<HValue*>(NULL),
HObjectAccess::ForContextSlot(map_index));
native_context, nullptr, HObjectAccess::ForContextSlot(map_index));
Add<HStoreNamedField>(js_function, HObjectAccess::ForMap(), map_slot_value);
// Initialize the rest of the function.
@ -1543,9 +1575,8 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
empty_fixed_array);
Add<HStoreNamedField>(js_function, HObjectAccess::ForPrototypeOrInitialMap(),
graph()->GetConstantHole());
Add<HStoreNamedField>(js_function,
HObjectAccess::ForSharedFunctionInfoPointer(),
shared_info);
Add<HStoreNamedField>(
js_function, HObjectAccess::ForSharedFunctionInfoPointer(), shared_info);
Add<HStoreNamedField>(js_function, HObjectAccess::ForFunctionContextPointer(),
context());
@ -1599,7 +1630,7 @@ HValue* CodeStubGraphBuilder<FastNewContextStub>::BuildCodeStub() {
// Copy the global object from the previous context.
HValue* global_object = Add<HLoadNamedField>(
context(), static_cast<HValue*>(NULL),
context(), nullptr,
HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
Add<HStoreNamedField>(function_context,
HObjectAccess::ForContextSlot(
@ -1664,8 +1695,8 @@ template <>
class CodeStubGraphBuilder<KeyedLoadGenericStub>
: public CodeStubGraphBuilderBase {
public:
CodeStubGraphBuilder(Isolate* isolate, KeyedLoadGenericStub* stub)
: CodeStubGraphBuilderBase(isolate, stub) {}
explicit CodeStubGraphBuilder(CompilationInfoWithZone* info)
: CodeStubGraphBuilderBase(info) {}
protected:
virtual HValue* BuildCodeStub();
@ -1764,16 +1795,14 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
(1 << Map::kHasIndexedInterceptor);
BuildJSObjectCheck(receiver, bit_field_mask);
HValue* map = Add<HLoadNamedField>(receiver, static_cast<HValue*>(NULL),
HObjectAccess::ForMap());
HValue* map =
Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
HValue* instance_type =
Add<HLoadNamedField>(map, static_cast<HValue*>(NULL),
HObjectAccess::ForMapInstanceType());
Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
HValue* bit_field2 = Add<HLoadNamedField>(map,
static_cast<HValue*>(NULL),
HObjectAccess::ForMapBitField2());
HValue* bit_field2 =
Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField2());
IfBuilder kind_if(this);
BuildFastElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
@ -1863,12 +1892,10 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
BuildNonGlobalObjectCheck(receiver);
HValue* properties = Add<HLoadNamedField>(
receiver, static_cast<HValue*>(NULL),
HObjectAccess::ForPropertiesPointer());
receiver, nullptr, HObjectAccess::ForPropertiesPointer());
HValue* hash =
Add<HLoadNamedField>(key, static_cast<HValue*>(NULL),
HObjectAccess::ForNameHashField());
Add<HLoadNamedField>(key, nullptr, HObjectAccess::ForNameHashField());
hash = AddUncasted<HShr>(hash, Add<HConstant>(Name::kHashShift));
@ -1887,8 +1914,8 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
ExternalReference::keyed_lookup_cache_keys(isolate());
HValue* cache_keys = Add<HConstant>(cache_keys_ref);
HValue* map = Add<HLoadNamedField>(receiver, static_cast<HValue*>(NULL),
HObjectAccess::ForMap());
HValue* map =
Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
HValue* base_index = AddUncasted<HMul>(hash, Add<HConstant>(2));
base_index->ClearFlag(HValue::kCanOverflow);
@ -1910,13 +1937,13 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
Add<HConstant>(probe_base + KeyedLookupCache::kKeyIndex));
key_index->ClearFlag(HValue::kCanOverflow);
HValue* map_to_check =
Add<HLoadKeyed>(cache_keys, map_index, static_cast<HValue*>(NULL),
FAST_ELEMENTS, NEVER_RETURN_HOLE, 0);
Add<HLoadKeyed>(cache_keys, map_index, nullptr, FAST_ELEMENTS,
NEVER_RETURN_HOLE, 0);
lookup_if->If<HCompareObjectEqAndBranch>(map_to_check, map);
lookup_if->And();
HValue* key_to_check =
Add<HLoadKeyed>(cache_keys, key_index, static_cast<HValue*>(NULL),
FAST_ELEMENTS, NEVER_RETURN_HOLE, 0);
Add<HLoadKeyed>(cache_keys, key_index, nullptr, FAST_ELEMENTS,
NEVER_RETURN_HOLE, 0);
lookup_if->If<HCompareObjectEqAndBranch>(key_to_check, key);
lookup_if->Then();
{
@ -1926,9 +1953,9 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
Add<HConstant>(cache_field_offsets_ref);
HValue* index = AddUncasted<HAdd>(hash, Add<HConstant>(probe));
index->ClearFlag(HValue::kCanOverflow);
HValue* property_index = Add<HLoadKeyed>(
cache_field_offsets, index, static_cast<HValue*>(NULL),
EXTERNAL_INT32_ELEMENTS, NEVER_RETURN_HOLE, 0);
HValue* property_index =
Add<HLoadKeyed>(cache_field_offsets, index, nullptr,
EXTERNAL_INT32_ELEMENTS, NEVER_RETURN_HOLE, 0);
Push(property_index);
}
lookup_if->Else();
@ -1967,11 +1994,129 @@ Handle<Code> KeyedLoadGenericStub::GenerateCode() {
}
void CodeStubGraphBuilderBase::TailCallHandler(HValue* receiver, HValue* name,
HValue* array, HValue* map_index,
HValue* slot, HValue* vector) {
// The handler is at array[map_index + 1]. Compute this with a custom offset
// to HLoadKeyed.
int offset =
GetDefaultHeaderSizeForElementsKind(FAST_ELEMENTS) + kPointerSize;
HValue* handler_code = Add<HLoadKeyed>(
array, map_index, nullptr, FAST_ELEMENTS, NEVER_RETURN_HOLE, offset);
TailCallHandler(receiver, name, slot, vector, handler_code);
}
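
The comment above notes that the handler sits at array[map_index + 1], reached by padding the element-header offset rather than incrementing the index. A hedged check of that arithmetic (kPointerSize and the header size are assumptions for a 64-bit build, not the actual constants):

#include <cstdint>

constexpr int kPointerSize = 8;                // assumed 64-bit build
constexpr int kHeaderSize = 2 * kPointerSize;  // hypothetical FixedArray header

constexpr intptr_t ElementAddress(intptr_t base, int header, int index) {
  return base + header + static_cast<intptr_t>(index) * kPointerSize;
}

// Growing the header by one pointer is the same as loading index + 1.
static_assert(ElementAddress(0, kHeaderSize + kPointerSize, 3) ==
                  ElementAddress(0, kHeaderSize, 4),
              "offset trick selects array[map_index + 1]");
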
void CodeStubGraphBuilderBase::TailCallHandler(HValue* receiver, HValue* name,
HValue* slot, HValue* vector,
HValue* handler_code) {
VectorLoadICDescriptor descriptor(isolate());
HValue* op_vals[] = {context(), receiver, name, slot, vector};
Add<HCallWithDescriptor>(handler_code, 0, descriptor,
Vector<HValue*>(op_vals, 5), TAIL_CALL);
// We never return here; it is a tail call.
}
void CodeStubGraphBuilderBase::TailCallMiss(HValue* receiver, HValue* name,
HValue* slot, HValue* vector,
bool keyed_load) {
DCHECK(FLAG_vector_ics);
Add<HTailCallThroughMegamorphicCache>(
receiver, name, slot, vector,
HTailCallThroughMegamorphicCache::ComputeFlags(keyed_load, true));
// We never return here; it is a tail call.
}
void CodeStubGraphBuilderBase::HandleArrayCases(HValue* array, HValue* receiver,
HValue* name, HValue* slot,
HValue* vector,
bool keyed_load) {
IfBuilder if_receiver_heap_object(this);
if_receiver_heap_object.IfNot<HIsSmiAndBranch>(receiver);
if_receiver_heap_object.Then();
{
HConstant* constant_two = Add<HConstant>(2);
HConstant* constant_three = Add<HConstant>(3);
HValue* receiver_map = AddLoadMap(receiver, nullptr);
HValue* start =
keyed_load ? graph()->GetConstant1() : graph()->GetConstant0();
HValue* weak_cell = Add<HLoadKeyed>(array, start, nullptr, FAST_ELEMENTS,
ALLOW_RETURN_HOLE);
// Load the weak cell value. It may be Smi(0) or a map. Compare it against
// the receiver_map regardless; a cleared cell simply fails the comparison.
HValue* array_map = Add<HLoadNamedField>(weak_cell, nullptr,
HObjectAccess::ForWeakCellValue());
IfBuilder if_correct_map(this);
if_correct_map.If<HCompareObjectEqAndBranch>(receiver_map, array_map);
if_correct_map.Then();
{ TailCallHandler(receiver, name, array, start, slot, vector); }
if_correct_map.Else();
{
// If our array has more elements, the IC is polymorphic. Look for the
// receiver map in the rest of the array.
HValue* length = AddLoadFixedArrayLength(array, nullptr);
LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement,
constant_two);
start = keyed_load ? constant_three : constant_two;
HValue* key = builder.BeginBody(start, length, Token::LT);
{
HValue* weak_cell = Add<HLoadKeyed>(array, key, nullptr, FAST_ELEMENTS,
ALLOW_RETURN_HOLE);
HValue* array_map = Add<HLoadNamedField>(
weak_cell, nullptr, HObjectAccess::ForWeakCellValue());
IfBuilder if_correct_poly_map(this);
if_correct_poly_map.If<HCompareObjectEqAndBranch>(receiver_map,
array_map);
if_correct_poly_map.Then();
{ TailCallHandler(receiver, name, array, key, slot, vector); }
}
builder.EndBody();
}
if_correct_map.End();
}
}
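
Reconstructing the feedback array layout this helper assumes from the indices used above (an inference, not a quote from the source): for a LoadIC the array holds (weak map cell, handler) pairs starting at index 0; for a KeyedLoadIC slot 0 holds the recorded name, so the pairs start at index 1, and a cleared weak cell holds Smi(0), which can never match a map. A toy model of the scan:

#include <vector>

struct Map {};
struct Handler {};

// One (weak map cell, handler) pair; map == nullptr models a cleared cell.
struct Entry {
  const Map* map;
  Handler* handler;
};

Handler* FindHandler(const std::vector<Entry>& feedback,
                     const Map* receiver_map) {
  for (const Entry& entry : feedback) {
    // Monomorphic or polymorphic hit: tail-call this handler.
    if (entry.map == receiver_map) return entry.handler;
  }
  return nullptr;  // miss: fall through to the megamorphic or runtime path
}
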
template <>
HValue* CodeStubGraphBuilder<VectorLoadStub>::BuildCodeStub() {
HValue* receiver = GetParameter(VectorLoadICDescriptor::kReceiverIndex);
Add<HDeoptimize>("Always deopt", Deoptimizer::EAGER);
return receiver;
HValue* name = GetParameter(VectorLoadICDescriptor::kNameIndex);
HValue* slot = GetParameter(VectorLoadICDescriptor::kSlotIndex);
HValue* vector = GetParameter(VectorLoadICDescriptor::kVectorIndex);
// If the feedback is an array, then the IC is in the monomorphic or
// polymorphic state.
HValue* feedback =
Add<HLoadKeyed>(vector, slot, nullptr, FAST_ELEMENTS, ALLOW_RETURN_HOLE);
IfBuilder array_checker(this);
array_checker.If<HCompareMap>(feedback,
isolate()->factory()->fixed_array_map());
array_checker.Then();
{ HandleArrayCases(feedback, receiver, name, slot, vector, false); }
array_checker.Else();
{
// Is the IC megamorphic?
IfBuilder mega_checker(this);
HConstant* megamorphic_symbol =
Add<HConstant>(isolate()->factory()->megamorphic_symbol());
mega_checker.If<HCompareObjectEqAndBranch>(feedback, megamorphic_symbol);
mega_checker.Then();
{
// Probe the stub cache.
Add<HTailCallThroughMegamorphicCache>(
receiver, name, slot, vector,
HTailCallThroughMegamorphicCache::ComputeFlags(false, false));
}
mega_checker.End();
}
array_checker.End();
TailCallMiss(receiver, name, slot, vector, false);
return graph()->GetConstant0();
}
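
Stripped of the IfBuilder plumbing, the stub above is a three-way dispatch on the feedback slot; a sketch of the control flow (state names are descriptive, not V8 enumerators):

enum class Feedback { kFixedArray, kMegamorphicSymbol, kOther };

void DispatchLoadIC(Feedback state) {
  switch (state) {
    case Feedback::kFixedArray:         // monomorphic or polymorphic:
      /* HandleArrayCases(...) */       // scan the (map, handler) pairs
      break;
    case Feedback::kMegamorphicSymbol:  // probe the stub cache
      /* HTailCallThroughMegamorphicCache */
      break;
    case Feedback::kOther:              // uninitialized or premonomorphic
      break;
  }
  // Any path that did not tail-call a handler ends in TailCallMiss(...).
}
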
@ -1981,8 +2126,65 @@ Handle<Code> VectorLoadStub::GenerateCode() { return DoGenerateCode(this); }
template <>
HValue* CodeStubGraphBuilder<VectorKeyedLoadStub>::BuildCodeStub() {
HValue* receiver = GetParameter(VectorLoadICDescriptor::kReceiverIndex);
Add<HDeoptimize>("Always deopt", Deoptimizer::EAGER);
return receiver;
HValue* name = GetParameter(VectorLoadICDescriptor::kNameIndex);
HValue* slot = GetParameter(VectorLoadICDescriptor::kSlotIndex);
HValue* vector = GetParameter(VectorLoadICDescriptor::kVectorIndex);
HConstant* zero = graph()->GetConstant0();
// If the feedback is an array, then the IC is in the monomorphic or
// polymorphic state.
HValue* feedback =
Add<HLoadKeyed>(vector, slot, nullptr, FAST_ELEMENTS, ALLOW_RETURN_HOLE);
IfBuilder array_checker(this);
array_checker.If<HCompareMap>(feedback,
isolate()->factory()->fixed_array_map());
array_checker.Then();
{
// If feedback[0] is 0, then the IC has element handlers and name should be
// a smi. If feedback[0] is a string, verify that it matches name.
HValue* recorded_name = Add<HLoadKeyed>(feedback, zero, nullptr,
FAST_ELEMENTS, ALLOW_RETURN_HOLE);
IfBuilder recorded_name_is_zero(this);
recorded_name_is_zero.If<HCompareObjectEqAndBranch>(recorded_name, zero);
recorded_name_is_zero.Then();
{ Add<HCheckSmi>(name); }
recorded_name_is_zero.Else();
{
IfBuilder strings_match(this);
strings_match.IfNot<HCompareObjectEqAndBranch>(name, recorded_name);
strings_match.Then();
TailCallMiss(receiver, name, slot, vector, true);
strings_match.End();
}
recorded_name_is_zero.End();
HandleArrayCases(feedback, receiver, name, slot, vector, true);
}
array_checker.Else();
{
// Check if the IC is in generic state.
IfBuilder generic_checker(this);
HConstant* generic_symbol =
Add<HConstant>(isolate()->factory()->generic_symbol());
generic_checker.If<HCompareObjectEqAndBranch>(feedback, generic_symbol);
generic_checker.Then();
{
// Tail-call to the generic KeyedLoadIC, treating it like a handler.
Handle<Code> stub = KeyedLoadIC::generic_stub(isolate());
HValue* constant_stub = Add<HConstant>(stub);
LoadDescriptor descriptor(isolate());
HValue* op_vals[] = {context(), receiver, name};
Add<HCallWithDescriptor>(constant_stub, 0, descriptor,
Vector<HValue*>(op_vals, 3), TAIL_CALL);
// We never return here; it is a tail call.
}
generic_checker.End();
}
array_checker.End();
TailCallMiss(receiver, name, slot, vector, true);
return zero;
}
@ -1998,14 +2200,15 @@ Handle<Code> MegamorphicLoadStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<MegamorphicLoadStub>::BuildCodeStub() {
// The return address is on the stack.
HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
HValue* name = GetParameter(LoadDescriptor::kNameIndex);
// We shouldn't generate this when FLAG_vector_ics is true because the
// megamorphic case is handled as part of the default stub.
DCHECK(!FLAG_vector_ics);
// Probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
Add<HTailCallThroughMegamorphicCache>(receiver, name, flags);
Add<HTailCallThroughMegamorphicCache>(receiver, name);
// We never continue.
return graph()->GetConstant0();

1 deps/v8/src/code-stubs.cc

@ -75,7 +75,6 @@ bool CodeStub::FindCodeInCache(Code** code_out) {
void CodeStub::RecordCodeGeneration(Handle<Code> code) {
IC::RegisterWeakMapDependency(code);
std::ostringstream os;
os << *this;
PROFILE(isolate(),

347 deps/v8/src/code-stubs.h

@ -69,6 +69,7 @@ namespace internal {
V(InternalArrayNoArgumentConstructor) \
V(InternalArraySingleArgumentConstructor) \
V(KeyedLoadGeneric) \
V(LoadScriptContextField) \
V(LoadDictionaryElement) \
V(LoadFastElement) \
V(MegamorphicLoad) \
@ -76,6 +77,7 @@ namespace internal {
V(NumberToString) \
V(RegExpConstructResult) \
V(StoreFastElement) \
V(StoreScriptContextField) \
V(StringAdd) \
V(ToBoolean) \
V(TransitionElementsKind) \
@ -92,9 +94,7 @@ namespace internal {
// List of code stubs only used on ARM 32 bits platforms.
#if V8_TARGET_ARCH_ARM
#define CODE_STUB_LIST_ARM(V) \
V(DirectCEntry) \
V(WriteInt32ToHeapNumber)
#define CODE_STUB_LIST_ARM(V) V(DirectCEntry)
#else
#define CODE_STUB_LIST_ARM(V)
@ -113,17 +113,15 @@ namespace internal {
// List of code stubs only used on MIPS platforms.
#if V8_TARGET_ARCH_MIPS
#define CODE_STUB_LIST_MIPS(V) \
V(DirectCEntry) \
V(RestoreRegistersState) \
V(StoreRegistersState) \
V(WriteInt32ToHeapNumber)
#define CODE_STUB_LIST_MIPS(V) \
V(DirectCEntry) \
V(RestoreRegistersState) \
V(StoreRegistersState)
#elif V8_TARGET_ARCH_MIPS64
#define CODE_STUB_LIST_MIPS(V) \
V(DirectCEntry) \
V(RestoreRegistersState) \
V(StoreRegistersState) \
V(WriteInt32ToHeapNumber)
#define CODE_STUB_LIST_MIPS(V) \
V(DirectCEntry) \
V(RestoreRegistersState) \
V(StoreRegistersState)
#else
#define CODE_STUB_LIST_MIPS(V)
#endif
@ -291,54 +289,52 @@ class CodeStub BASE_EMBEDDED {
DISALLOW_COPY_AND_ASSIGN(NAME)
#define DEFINE_CODE_STUB(NAME, SUPER) \
protected: \
virtual inline Major MajorKey() const OVERRIDE { \
return NAME; \
}; \
#define DEFINE_CODE_STUB(NAME, SUPER) \
protected: \
inline Major MajorKey() const OVERRIDE { return NAME; }; \
DEFINE_CODE_STUB_BASE(NAME##Stub, SUPER)
#define DEFINE_PLATFORM_CODE_STUB(NAME, SUPER) \
private: \
virtual void Generate(MacroAssembler* masm) OVERRIDE; \
#define DEFINE_PLATFORM_CODE_STUB(NAME, SUPER) \
private: \
void Generate(MacroAssembler* masm) OVERRIDE; \
DEFINE_CODE_STUB(NAME, SUPER)
#define DEFINE_HYDROGEN_CODE_STUB(NAME, SUPER) \
public: \
virtual void InitializeDescriptor(CodeStubDescriptor* descriptor) OVERRIDE; \
virtual Handle<Code> GenerateCode() OVERRIDE; \
#define DEFINE_HYDROGEN_CODE_STUB(NAME, SUPER) \
public: \
void InitializeDescriptor(CodeStubDescriptor* descriptor) OVERRIDE; \
Handle<Code> GenerateCode() OVERRIDE; \
DEFINE_CODE_STUB(NAME, SUPER)
#define DEFINE_HANDLER_CODE_STUB(NAME, SUPER) \
public: \
virtual Handle<Code> GenerateCode() OVERRIDE; \
#define DEFINE_HANDLER_CODE_STUB(NAME, SUPER) \
public: \
Handle<Code> GenerateCode() OVERRIDE; \
DEFINE_CODE_STUB(NAME, SUPER)
#define DEFINE_CALL_INTERFACE_DESCRIPTOR(NAME) \
public: \
virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE { \
return NAME##Descriptor(isolate()); \
#define DEFINE_CALL_INTERFACE_DESCRIPTOR(NAME) \
public: \
CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE { \
return NAME##Descriptor(isolate()); \
}
// There are some code stubs we just can't describe right now with a
// CallInterfaceDescriptor. Isolate behavior for those cases with this macro.
// An attempt to retrieve a descriptor will fail.
#define DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR() \
public: \
virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE { \
UNREACHABLE(); \
return CallInterfaceDescriptor(); \
#define DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR() \
public: \
CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE { \
UNREACHABLE(); \
return CallInterfaceDescriptor(); \
}
class PlatformCodeStub : public CodeStub {
public:
// Retrieve the code for the stub. Generate the code if needed.
virtual Handle<Code> GenerateCode() OVERRIDE;
Handle<Code> GenerateCode() OVERRIDE;
virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::STUB; }
Code::Kind GetCodeKind() const OVERRIDE { return Code::STUB; }
protected:
explicit PlatformCodeStub(Isolate* isolate) : CodeStub(isolate) {}
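
From here on, most of code-stubs.h is one systematic cleanup: wherever a method already carries OVERRIDE (V8's macro for C++11 override), the redundant leading virtual is dropped. A two-class illustration, assuming any C++11 compiler:

struct Base {
  virtual int GetCodeKind() const { return 0; }
};

struct Derived : Base {
  // `override` is only legal on a function that overrides a base-class
  // virtual, so the method is implicitly virtual anyway; repeating the
  // `virtual` keyword adds nothing, which is exactly what this diff removes.
  int GetCodeKind() const override { return 1; }
};
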
@ -438,7 +434,7 @@ class HydrogenCodeStub : public CodeStub {
INITIALIZED
};
virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::STUB; }
Code::Kind GetCodeKind() const OVERRIDE { return Code::STUB; }
template<class SubClass>
static Handle<Code> GetUninitialized(Isolate* isolate) {
@ -447,7 +443,7 @@ class HydrogenCodeStub : public CodeStub {
}
// Retrieve the code for the stub. Generate the code if needed.
virtual Handle<Code> GenerateCode() = 0;
Handle<Code> GenerateCode() OVERRIDE = 0;
bool IsUninitialized() const { return IsMissBits::decode(minor_key_); }
@ -577,10 +573,11 @@ class FastNewClosureStub : public HydrogenCodeStub {
bool is_arrow() const { return IsArrowFunction(kind()); }
bool is_generator() const { return IsGeneratorFunction(kind()); }
bool is_concise_method() const { return IsConciseMethod(kind()); }
bool is_default_constructor() const { return IsDefaultConstructor(kind()); }
private:
class StrictModeBits : public BitField<StrictMode, 0, 1> {};
class FunctionKindBits : public BitField<FunctionKind, 1, 3> {};
class FunctionKindBits : public BitField<FunctionKind, 1, 4> {};
DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewClosure);
DEFINE_HYDROGEN_CODE_STUB(FastNewClosure, HydrogenCodeStub);
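
Note the quiet capacity bump alongside the new is_default_constructor() accessor: FunctionKindBits grows from 3 to 4 bits because FunctionKind gained a default-constructor flag and no longer fits in 3. A simplified sketch of the BitField packing involved (not the real v8::internal::BitField):

#include <cstdint>

template <typename T, int shift, int size>
struct BitField {
  static constexpr uint32_t kMask = ((1u << size) - 1u) << shift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static constexpr T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> shift);
  }
};

// Bit 0 holds the strict-mode flag; bits 1..4 hold the function kind.
using StrictModeBits = BitField<int, 0, 1>;
using FunctionKindBits = BitField<int, 1, 4>;

static_assert(FunctionKindBits::decode(FunctionKindBits::encode(9)) == 9,
              "a 4-bit field represents kinds up to 15");
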
@ -679,7 +676,7 @@ class InstanceofStub: public PlatformCodeStub {
static Register left() { return InstanceofDescriptor::left(); }
static Register right() { return InstanceofDescriptor::right(); }
virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
if (HasArgsInRegisters()) {
return InstanceofDescriptor(isolate());
}
@ -699,7 +696,7 @@ class InstanceofStub: public PlatformCodeStub {
return (flags() & kReturnTrueFalseObject) != 0;
}
virtual void PrintName(std::ostream& os) const OVERRIDE; // NOLINT
void PrintName(std::ostream& os) const OVERRIDE; // NOLINT
class FlagBits : public BitField<Flags, 0, 3> {};
@ -730,7 +727,7 @@ class ArrayConstructorStub: public PlatformCodeStub {
void GenerateDispatchToArrayStub(MacroAssembler* masm,
AllocationSiteOverrideMode mode);
virtual void PrintName(std::ostream& os) const OVERRIDE; // NOLINT
void PrintName(std::ostream& os) const OVERRIDE; // NOLINT
class ArgumentCountBits : public BitField<ArgumentCountKey, 0, 2> {};
@ -760,7 +757,7 @@ class MathPowStub: public PlatformCodeStub {
minor_key_ = ExponentTypeBits::encode(exponent_type);
}
virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
if (exponent_type() == TAGGED) {
return MathPowTaggedDescriptor(isolate());
} else if (exponent_type() == INTEGER) {
@ -793,11 +790,11 @@ class CallICStub: public PlatformCodeStub {
return state.arg_count();
}
virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::CALL_IC; }
Code::Kind GetCodeKind() const OVERRIDE { return Code::CALL_IC; }
virtual InlineCacheState GetICState() const OVERRIDE { return DEFAULT; }
InlineCacheState GetICState() const OVERRIDE { return DEFAULT; }
virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
ExtraICState GetExtraICState() const FINAL {
return static_cast<ExtraICState>(minor_key_);
}
@ -816,7 +813,7 @@ class CallICStub: public PlatformCodeStub {
void GenerateMiss(MacroAssembler* masm);
private:
virtual void PrintState(std::ostream& os) const OVERRIDE; // NOLINT
void PrintState(std::ostream& os) const OVERRIDE; // NOLINT
DEFINE_CALL_INTERFACE_DESCRIPTOR(CallFunctionWithFeedback);
DEFINE_PLATFORM_CODE_STUB(CallIC, PlatformCodeStub);
@ -828,12 +825,10 @@ class CallIC_ArrayStub: public CallICStub {
CallIC_ArrayStub(Isolate* isolate, const CallICState& state_in)
: CallICStub(isolate, state_in) {}
virtual InlineCacheState GetICState() const FINAL OVERRIDE {
return MONOMORPHIC;
}
InlineCacheState GetICState() const FINAL { return MONOMORPHIC; }
private:
virtual void PrintState(std::ostream& os) const OVERRIDE; // NOLINT
void PrintState(std::ostream& os) const OVERRIDE; // NOLINT
DEFINE_PLATFORM_CODE_STUB(CallIC_Array, CallICStub);
};
@ -845,12 +840,12 @@ class FunctionPrototypeStub : public PlatformCodeStub {
explicit FunctionPrototypeStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::HANDLER; }
Code::Kind GetCodeKind() const OVERRIDE { return Code::HANDLER; }
// TODO(mvstanton): only the receiver register is accessed. When this is
// translated to a hydrogen code stub, a new CallInterfaceDescriptor
// should be created that just uses that register for more efficient code.
virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
if (FLAG_vector_ics) {
return VectorLoadICDescriptor(isolate());
}
@ -867,8 +862,8 @@ class LoadIndexedInterceptorStub : public PlatformCodeStub {
explicit LoadIndexedInterceptorStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::HANDLER; }
virtual Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
Code::Kind GetCodeKind() const OVERRIDE { return Code::HANDLER; }
Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
DEFINE_PLATFORM_CODE_STUB(LoadIndexedInterceptor, PlatformCodeStub);
@ -880,8 +875,8 @@ class LoadIndexedStringStub : public PlatformCodeStub {
explicit LoadIndexedStringStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::HANDLER; }
virtual Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
Code::Kind GetCodeKind() const OVERRIDE { return Code::HANDLER; }
Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
DEFINE_PLATFORM_CODE_STUB(LoadIndexedString, PlatformCodeStub);
@ -890,13 +885,13 @@ class LoadIndexedStringStub : public PlatformCodeStub {
class HandlerStub : public HydrogenCodeStub {
public:
virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::HANDLER; }
virtual ExtraICState GetExtraICState() const OVERRIDE { return kind(); }
virtual InlineCacheState GetICState() const OVERRIDE { return MONOMORPHIC; }
Code::Kind GetCodeKind() const OVERRIDE { return Code::HANDLER; }
ExtraICState GetExtraICState() const OVERRIDE { return kind(); }
InlineCacheState GetICState() const OVERRIDE { return MONOMORPHIC; }
virtual void InitializeDescriptor(CodeStubDescriptor* descriptor) OVERRIDE;
void InitializeDescriptor(CodeStubDescriptor* descriptor) OVERRIDE;
virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE;
CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE;
protected:
explicit HandlerStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
@ -920,8 +915,8 @@ class LoadFieldStub: public HandlerStub {
}
protected:
virtual Code::Kind kind() const { return Code::LOAD_IC; }
virtual Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
Code::Kind kind() const OVERRIDE { return Code::LOAD_IC; }
Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
private:
class LoadFieldByIndexBits : public BitField<int, 0, 13> {};
@ -936,8 +931,8 @@ class KeyedLoadSloppyArgumentsStub : public HandlerStub {
: HandlerStub(isolate) {}
protected:
virtual Code::Kind kind() const OVERRIDE { return Code::KEYED_LOAD_IC; }
virtual Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
Code::Kind kind() const OVERRIDE { return Code::KEYED_LOAD_IC; }
Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
private:
DEFINE_HANDLER_CODE_STUB(KeyedLoadSloppyArguments, HandlerStub);
@ -956,8 +951,8 @@ class LoadConstantStub : public HandlerStub {
}
protected:
virtual Code::Kind kind() const OVERRIDE { return Code::LOAD_IC; }
virtual Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
Code::Kind kind() const OVERRIDE { return Code::LOAD_IC; }
Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
private:
class ConstantIndexBits : public BitField<int, 0, kSubMinorKeyBits> {};
@ -971,8 +966,8 @@ class StringLengthStub: public HandlerStub {
explicit StringLengthStub(Isolate* isolate) : HandlerStub(isolate) {}
protected:
virtual Code::Kind kind() const OVERRIDE { return Code::LOAD_IC; }
virtual Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
Code::Kind kind() const OVERRIDE { return Code::LOAD_IC; }
Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
DEFINE_HANDLER_CODE_STUB(StringLength, HandlerStub);
};
@ -1000,8 +995,8 @@ class StoreFieldStub : public HandlerStub {
}
protected:
virtual Code::Kind kind() const OVERRIDE { return Code::STORE_IC; }
virtual Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
Code::Kind kind() const OVERRIDE { return Code::STORE_IC; }
Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
private:
class StoreFieldByIndexBits : public BitField<int, 0, 13> {};
@ -1050,11 +1045,11 @@ class StoreTransitionStub : public HandlerStub {
return StoreModeBits::decode(sub_minor_key());
}
virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE;
CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE;
protected:
virtual Code::Kind kind() const OVERRIDE { return Code::STORE_IC; }
virtual Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
Code::Kind kind() const OVERRIDE { return Code::STORE_IC; }
Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
private:
class StoreFieldByIndexBits : public BitField<int, 0, 13> {};
@ -1092,7 +1087,7 @@ class StoreGlobalStub : public HandlerStub {
}
}
virtual Code::Kind kind() const OVERRIDE { return Code::STORE_IC; }
Code::Kind kind() const OVERRIDE { return Code::STORE_IC; }
bool is_constant() const { return IsConstantBits::decode(sub_minor_key()); }
@ -1174,15 +1169,11 @@ class BinaryOpICStub : public HydrogenCodeStub {
static void GenerateAheadOfTime(Isolate* isolate);
virtual Code::Kind GetCodeKind() const OVERRIDE {
return Code::BINARY_OP_IC;
}
Code::Kind GetCodeKind() const OVERRIDE { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() const FINAL OVERRIDE {
return state().GetICState();
}
InlineCacheState GetICState() const FINAL { return state().GetICState(); }
virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
ExtraICState GetExtraICState() const FINAL {
return static_cast<ExtraICState>(sub_minor_key());
}
@ -1190,7 +1181,7 @@ class BinaryOpICStub : public HydrogenCodeStub {
return BinaryOpICState(isolate(), GetExtraICState());
}
virtual void PrintState(std::ostream& os) const FINAL OVERRIDE; // NOLINT
void PrintState(std::ostream& os) const FINAL; // NOLINT
// Parameters accessed via CodeStubGraphBuilder::GetParameter()
static const int kLeft = 0;
@ -1223,19 +1214,15 @@ class BinaryOpICWithAllocationSiteStub FINAL : public PlatformCodeStub {
return CodeStub::GetCodeCopy(pattern);
}
virtual Code::Kind GetCodeKind() const OVERRIDE {
return Code::BINARY_OP_IC;
}
Code::Kind GetCodeKind() const OVERRIDE { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() const OVERRIDE {
return state().GetICState();
}
InlineCacheState GetICState() const OVERRIDE { return state().GetICState(); }
virtual ExtraICState GetExtraICState() const OVERRIDE {
ExtraICState GetExtraICState() const OVERRIDE {
return static_cast<ExtraICState>(minor_key_);
}
virtual void PrintState(std::ostream& os) const OVERRIDE; // NOLINT
void PrintState(std::ostream& os) const OVERRIDE; // NOLINT
private:
BinaryOpICState state() const {
@ -1260,9 +1247,7 @@ class BinaryOpWithAllocationSiteStub FINAL : public BinaryOpICStub {
BinaryOpWithAllocationSiteStub(Isolate* isolate, const BinaryOpICState& state)
: BinaryOpICStub(isolate, state) {}
virtual Code::Kind GetCodeKind() const FINAL OVERRIDE {
return Code::STUB;
}
Code::Kind GetCodeKind() const FINAL { return Code::STUB; }
// Parameters accessed via CodeStubGraphBuilder::GetParameter()
static const int kAllocationSite = 0;
@ -1311,7 +1296,7 @@ class StringAddStub FINAL : public HydrogenCodeStub {
class StringAddFlagsBits: public BitField<StringAddFlags, 0, 2> {};
class PretenureFlagBits: public BitField<PretenureFlag, 2, 1> {};
virtual void PrintBaseName(std::ostream& os) const OVERRIDE; // NOLINT
void PrintBaseName(std::ostream& os) const OVERRIDE; // NOLINT
DEFINE_CALL_INTERFACE_DESCRIPTOR(StringAdd);
DEFINE_HYDROGEN_CODE_STUB(StringAdd, HydrogenCodeStub);
@ -1330,7 +1315,7 @@ class CompareICStub : public PlatformCodeStub {
void set_known_map(Handle<Map> map) { known_map_ = map; }
virtual InlineCacheState GetICState() const OVERRIDE;
InlineCacheState GetICState() const OVERRIDE;
Token::Value op() const {
return static_cast<Token::Value>(Token::EQ + OpBits::decode(minor_key_));
@ -1345,7 +1330,7 @@ class CompareICStub : public PlatformCodeStub {
CompareICState::State state() const { return StateBits::decode(minor_key_); }
private:
virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::COMPARE_IC; }
Code::Kind GetCodeKind() const OVERRIDE { return Code::COMPARE_IC; }
void GenerateSmis(MacroAssembler* masm);
void GenerateNumbers(MacroAssembler* masm);
@ -1360,9 +1345,9 @@ class CompareICStub : public PlatformCodeStub {
bool strict() const { return op() == Token::EQ_STRICT; }
Condition GetCondition() const;
virtual void AddToSpecialCache(Handle<Code> new_object) OVERRIDE;
virtual bool FindCodeInSpecialCache(Code** code_out) OVERRIDE;
virtual bool UseSpecialCache() OVERRIDE {
void AddToSpecialCache(Handle<Code> new_object) OVERRIDE;
bool FindCodeInSpecialCache(Code** code_out) OVERRIDE;
bool UseSpecialCache() OVERRIDE {
return state() == CompareICState::KNOWN_OBJECT;
}
@ -1398,7 +1383,7 @@ class CompareNilICStub : public HydrogenCodeStub {
return CompareNilICStub(isolate, nil, UNINITIALIZED).GetCode();
}
virtual InlineCacheState GetICState() const OVERRIDE {
InlineCacheState GetICState() const OVERRIDE {
State state = this->state();
if (state.Contains(GENERIC)) {
return MEGAMORPHIC;
@ -1409,13 +1394,9 @@ class CompareNilICStub : public HydrogenCodeStub {
}
}
virtual Code::Kind GetCodeKind() const OVERRIDE {
return Code::COMPARE_NIL_IC;
}
Code::Kind GetCodeKind() const OVERRIDE { return Code::COMPARE_NIL_IC; }
virtual ExtraICState GetExtraICState() const OVERRIDE {
return sub_minor_key();
}
ExtraICState GetExtraICState() const OVERRIDE { return sub_minor_key(); }
void UpdateStatus(Handle<Object> object);
@ -1427,8 +1408,8 @@ class CompareNilICStub : public HydrogenCodeStub {
set_sub_minor_key(TypesBits::update(sub_minor_key(), 0));
}
virtual void PrintState(std::ostream& os) const OVERRIDE; // NOLINT
virtual void PrintBaseName(std::ostream& os) const OVERRIDE; // NOLINT
void PrintState(std::ostream& os) const OVERRIDE; // NOLINT
void PrintBaseName(std::ostream& os) const OVERRIDE; // NOLINT
private:
CompareNilICStub(Isolate* isolate, NilValue nil,
@ -1515,9 +1496,9 @@ class JSEntryStub : public PlatformCodeStub {
}
private:
virtual void FinishCode(Handle<Code> code) OVERRIDE;
void FinishCode(Handle<Code> code) OVERRIDE;
virtual void PrintName(std::ostream& os) const OVERRIDE { // NOLINT
void PrintName(std::ostream& os) const OVERRIDE { // NOLINT
os << (type() == StackFrame::ENTRY ? "JSEntryStub"
: "JSConstructEntryStub");
}
@ -1548,7 +1529,7 @@ class ArgumentsAccessStub: public PlatformCodeStub {
minor_key_ = TypeBits::encode(type);
}
virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
if (type() == READ_ELEMENT) {
return ArgumentsAccessReadDescriptor(isolate());
}
@ -1563,7 +1544,7 @@ class ArgumentsAccessStub: public PlatformCodeStub {
void GenerateNewSloppyFast(MacroAssembler* masm);
void GenerateNewSloppySlow(MacroAssembler* masm);
virtual void PrintName(std::ostream& os) const OVERRIDE; // NOLINT
void PrintName(std::ostream& os) const OVERRIDE; // NOLINT
class TypeBits : public BitField<Type, 0, 2> {};
@ -1617,7 +1598,7 @@ class CallFunctionStub: public PlatformCodeStub {
bool NeedsChecks() const { return flags() != WRAP_AND_CALL; }
virtual void PrintName(std::ostream& os) const OVERRIDE; // NOLINT
void PrintName(std::ostream& os) const OVERRIDE; // NOLINT
// Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
class FlagBits : public BitField<CallFunctionFlags, 0, 2> {};
@ -1636,7 +1617,7 @@ class CallConstructStub: public PlatformCodeStub {
minor_key_ = FlagBits::encode(flags);
}
virtual void FinishCode(Handle<Code> code) OVERRIDE {
void FinishCode(Handle<Code> code) OVERRIDE {
code->set_has_function_cache(RecordCallTarget());
}
@ -1647,7 +1628,7 @@ class CallConstructStub: public PlatformCodeStub {
return (flags() & RECORD_CONSTRUCTOR_TARGET) != 0;
}
virtual void PrintName(std::ostream& os) const OVERRIDE; // NOLINT
void PrintName(std::ostream& os) const OVERRIDE; // NOLINT
class FlagBits : public BitField<CallConstructorFlags, 0, 1> {};
@ -1837,7 +1818,7 @@ class LoadDictionaryElementStub : public HydrogenCodeStub {
explicit LoadDictionaryElementStub(Isolate* isolate)
: HydrogenCodeStub(isolate) {}
virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
if (FLAG_vector_ics) {
return VectorLoadICDescriptor(isolate());
}
@ -1852,10 +1833,8 @@ class KeyedLoadGenericStub : public HydrogenCodeStub {
public:
explicit KeyedLoadGenericStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
virtual Code::Kind GetCodeKind() const OVERRIDE {
return Code::KEYED_LOAD_IC;
}
virtual InlineCacheState GetICState() const OVERRIDE { return GENERIC; }
Code::Kind GetCodeKind() const OVERRIDE { return Code::KEYED_LOAD_IC; }
InlineCacheState GetICState() const OVERRIDE { return GENERIC; }
// Since KeyedLoadGeneric stub doesn't miss (simply calls runtime), it
// doesn't need to use the VectorLoadICDescriptor for the case when
@ -1873,11 +1852,11 @@ class LoadICTrampolineStub : public PlatformCodeStub {
minor_key_ = state.GetExtraICState();
}
virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
virtual InlineCacheState GetICState() const FINAL OVERRIDE { return DEFAULT; }
InlineCacheState GetICState() const FINAL { return DEFAULT; }
virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
ExtraICState GetExtraICState() const FINAL {
return static_cast<ExtraICState>(minor_key_);
}
@ -1896,9 +1875,7 @@ class KeyedLoadICTrampolineStub : public LoadICTrampolineStub {
explicit KeyedLoadICTrampolineStub(Isolate* isolate)
: LoadICTrampolineStub(isolate, LoadICState(0)) {}
virtual Code::Kind GetCodeKind() const OVERRIDE {
return Code::KEYED_LOAD_IC;
}
Code::Kind GetCodeKind() const OVERRIDE { return Code::KEYED_LOAD_IC; }
DEFINE_PLATFORM_CODE_STUB(KeyedLoadICTrampoline, LoadICTrampolineStub);
};
@ -1911,17 +1888,15 @@ class MegamorphicLoadStub : public HydrogenCodeStub {
set_sub_minor_key(state.GetExtraICState());
}
virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
virtual InlineCacheState GetICState() const FINAL OVERRIDE {
return MEGAMORPHIC;
}
InlineCacheState GetICState() const FINAL { return MEGAMORPHIC; }
virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
ExtraICState GetExtraICState() const FINAL {
return static_cast<ExtraICState>(sub_minor_key());
}
virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
if (FLAG_vector_ics) {
return VectorLoadICDescriptor(isolate());
}
@ -1939,11 +1914,11 @@ class VectorLoadStub : public HydrogenCodeStub {
set_sub_minor_key(state.GetExtraICState());
}
virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
virtual InlineCacheState GetICState() const FINAL OVERRIDE { return DEFAULT; }
InlineCacheState GetICState() const FINAL { return DEFAULT; }
virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
ExtraICState GetExtraICState() const FINAL {
return static_cast<ExtraICState>(sub_minor_key());
}
@ -1960,9 +1935,7 @@ class VectorKeyedLoadStub : public VectorLoadStub {
explicit VectorKeyedLoadStub(Isolate* isolate)
: VectorLoadStub(isolate, LoadICState(0)) {}
virtual Code::Kind GetCodeKind() const OVERRIDE {
return Code::KEYED_LOAD_IC;
}
Code::Kind GetCodeKind() const OVERRIDE { return Code::KEYED_LOAD_IC; }
DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorLoadIC);
DEFINE_HYDROGEN_CODE_STUB(VectorKeyedLoad, VectorLoadStub);
@ -1982,7 +1955,7 @@ class DoubleToIStub : public PlatformCodeStub {
SSE3Bits::encode(CpuFeatures::IsSupported(SSE3) ? 1 : 0);
}
virtual bool SometimesSetsUpAFrame() OVERRIDE { return false; }
bool SometimesSetsUpAFrame() OVERRIDE { return false; }
private:
Register source() const {
@ -2016,6 +1989,66 @@ class DoubleToIStub : public PlatformCodeStub {
};
class ScriptContextFieldStub : public HandlerStub {
public:
ScriptContextFieldStub(Isolate* isolate,
const ScriptContextTable::LookupResult* lookup_result)
: HandlerStub(isolate) {
DCHECK(Accepted(lookup_result));
set_sub_minor_key(ContextIndexBits::encode(lookup_result->context_index) |
SlotIndexBits::encode(lookup_result->slot_index));
}
int context_index() const {
return ContextIndexBits::decode(sub_minor_key());
}
int slot_index() const { return SlotIndexBits::decode(sub_minor_key()); }
static bool Accepted(const ScriptContextTable::LookupResult* lookup_result) {
return ContextIndexBits::is_valid(lookup_result->context_index) &&
SlotIndexBits::is_valid(lookup_result->slot_index);
}
private:
static const int kContextIndexBits = 13;
static const int kSlotIndexBits = 13;
class ContextIndexBits : public BitField<int, 0, kContextIndexBits> {};
class SlotIndexBits
: public BitField<int, kContextIndexBits, kSlotIndexBits> {};
Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
DEFINE_CODE_STUB_BASE(ScriptContextFieldStub, HandlerStub);
};
class LoadScriptContextFieldStub : public ScriptContextFieldStub {
public:
LoadScriptContextFieldStub(
Isolate* isolate, const ScriptContextTable::LookupResult* lookup_result)
: ScriptContextFieldStub(isolate, lookup_result) {}
private:
Code::Kind kind() const OVERRIDE { return Code::LOAD_IC; }
DEFINE_HANDLER_CODE_STUB(LoadScriptContextField, ScriptContextFieldStub);
};
class StoreScriptContextFieldStub : public ScriptContextFieldStub {
public:
StoreScriptContextFieldStub(
Isolate* isolate, const ScriptContextTable::LookupResult* lookup_result)
: ScriptContextFieldStub(isolate, lookup_result) {}
private:
Code::Kind kind() const OVERRIDE { return Code::STORE_IC; }
DEFINE_HANDLER_CODE_STUB(StoreScriptContextField, ScriptContextFieldStub);
};
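
ScriptContextFieldStub packs both indices into 13 bits each, so Accepted() admits only values in [0, 8191]; lookups outside that range have to take a slower path. Spelled out as plain arithmetic (a sketch of what the BitField validity check amounts to, not the real code):

constexpr int kContextIndexBits = 13;
constexpr int kSlotIndexBits = 13;

constexpr bool Accepted(int context_index, int slot_index) {
  return context_index >= 0 && context_index < (1 << kContextIndexBits) &&
         slot_index >= 0 && slot_index < (1 << kSlotIndexBits);
}

static_assert(Accepted(8191, 8191), "largest indices that still encode");
static_assert(!Accepted(8192, 0), "one past the end of a 13-bit field");
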
class LoadFastElementStub : public HydrogenCodeStub {
public:
LoadFastElementStub(Isolate* isolate, bool is_js_array,
@ -2035,7 +2068,7 @@ class LoadFastElementStub : public HydrogenCodeStub {
class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
class IsJSArrayBits: public BitField<bool, 8, 1> {};
virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
if (FLAG_vector_ics) {
return VectorLoadICDescriptor(isolate());
}
@ -2171,7 +2204,7 @@ class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase {
}
private:
virtual void PrintName(std::ostream& os) const OVERRIDE { // NOLINT
void PrintName(std::ostream& os) const OVERRIDE { // NOLINT
BasePrintName(os, "ArrayNoArgumentConstructorStub");
}
@ -2191,7 +2224,7 @@ class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase {
}
private:
virtual void PrintName(std::ostream& os) const OVERRIDE { // NOLINT
void PrintName(std::ostream& os) const OVERRIDE { // NOLINT
BasePrintName(os, "ArraySingleArgumentConstructorStub");
}
@ -2211,7 +2244,7 @@ class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase {
}
private:
virtual void PrintName(std::ostream& os) const OVERRIDE { // NOLINT
void PrintName(std::ostream& os) const OVERRIDE { // NOLINT
BasePrintName(os, "ArrayNArgumentsConstructorStub");
}
@ -2355,22 +2388,18 @@ class ToBooleanStub: public HydrogenCodeStub {
Types types() const { return Types(TypesBits::decode(sub_minor_key())); }
ResultMode mode() const { return ResultModeBits::decode(sub_minor_key()); }
virtual Code::Kind GetCodeKind() const OVERRIDE {
return Code::TO_BOOLEAN_IC;
}
virtual void PrintState(std::ostream& os) const OVERRIDE; // NOLINT
Code::Kind GetCodeKind() const OVERRIDE { return Code::TO_BOOLEAN_IC; }
void PrintState(std::ostream& os) const OVERRIDE; // NOLINT
virtual bool SometimesSetsUpAFrame() OVERRIDE { return false; }
bool SometimesSetsUpAFrame() OVERRIDE { return false; }
static Handle<Code> GetUninitialized(Isolate* isolate) {
return ToBooleanStub(isolate, UNINITIALIZED).GetCode();
}
virtual ExtraICState GetExtraICState() const OVERRIDE {
return types().ToIntegral();
}
ExtraICState GetExtraICState() const OVERRIDE { return types().ToIntegral(); }
virtual InlineCacheState GetICState() const OVERRIDE {
InlineCacheState GetICState() const OVERRIDE {
if (types().IsEmpty()) {
return ::v8::internal::UNINITIALIZED;
} else {
@ -2482,7 +2511,7 @@ class ProfileEntryHookStub : public PlatformCodeStub {
explicit ProfileEntryHookStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
// The profile entry hook function is not allowed to cause a GC.
virtual bool SometimesSetsUpAFrame() OVERRIDE { return false; }
bool SometimesSetsUpAFrame() OVERRIDE { return false; }
// Generates a call to the entry hook if it's enabled.
static void MaybeCallEntryHook(MacroAssembler* masm);
@ -2507,7 +2536,7 @@ class StoreBufferOverflowStub : public PlatformCodeStub {
}
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() OVERRIDE { return false; }
bool SometimesSetsUpAFrame() OVERRIDE { return false; }
private:
bool save_doubles() const { return SaveDoublesBits::decode(minor_key_); }

2 deps/v8/src/collection-iterator.js

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
'use strict';
"use strict";
// This file relies on the fact that the following declaration has been made

26 deps/v8/src/collection.js

@ -30,7 +30,7 @@ function SetConstructor(iterable) {
}
}
%SetInitialize(this);
%_SetInitialize(this);
if (IS_UNDEFINED(iter)) return;
@ -56,7 +56,7 @@ function SetAddJS(key) {
if (key === 0) {
key = 0;
}
return %SetAdd(this, key);
return %_SetAdd(this, key);
}
@ -65,7 +65,7 @@ function SetHasJS(key) {
throw MakeTypeError('incompatible_method_receiver',
['Set.prototype.has', this]);
}
return %SetHas(this, key);
return %_SetHas(this, key);
}
@ -74,7 +74,7 @@ function SetDeleteJS(key) {
throw MakeTypeError('incompatible_method_receiver',
['Set.prototype.delete', this]);
}
return %SetDelete(this, key);
return %_SetDelete(this, key);
}
@ -83,7 +83,7 @@ function SetGetSizeJS() {
throw MakeTypeError('incompatible_method_receiver',
['Set.prototype.size', this]);
}
return %SetGetSize(this);
return %_SetGetSize(this);
}
@ -92,7 +92,7 @@ function SetClearJS() {
throw MakeTypeError('incompatible_method_receiver',
['Set.prototype.clear', this]);
}
%SetClear(this);
%_SetClear(this);
}
@ -170,7 +170,7 @@ function MapConstructor(iterable) {
}
}
%MapInitialize(this);
%_MapInitialize(this);
if (IS_UNDEFINED(iter)) return;
@ -193,7 +193,7 @@ function MapGetJS(key) {
throw MakeTypeError('incompatible_method_receiver',
['Map.prototype.get', this]);
}
return %MapGet(this, key);
return %_MapGet(this, key);
}
@ -209,7 +209,7 @@ function MapSetJS(key, value) {
if (key === 0) {
key = 0;
}
return %MapSet(this, key, value);
return %_MapSet(this, key, value);
}
@ -218,7 +218,7 @@ function MapHasJS(key) {
throw MakeTypeError('incompatible_method_receiver',
['Map.prototype.has', this]);
}
return %MapHas(this, key);
return %_MapHas(this, key);
}
@ -227,7 +227,7 @@ function MapDeleteJS(key) {
throw MakeTypeError('incompatible_method_receiver',
['Map.prototype.delete', this]);
}
return %MapDelete(this, key);
return %_MapDelete(this, key);
}
@ -236,7 +236,7 @@ function MapGetSizeJS() {
throw MakeTypeError('incompatible_method_receiver',
['Map.prototype.size', this]);
}
return %MapGetSize(this);
return %_MapGetSize(this);
}
@ -245,7 +245,7 @@ function MapClearJS() {
throw MakeTypeError('incompatible_method_receiver',
['Map.prototype.clear', this]);
}
%MapClear(this);
%_MapClear(this);
}

205 deps/v8/src/compiler.cc

@ -7,6 +7,7 @@
#include "src/compiler.h"
#include "src/ast-numbering.h"
#include "src/ast-this-access-visitor.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
@ -20,6 +21,7 @@
#include "src/isolate-inl.h"
#include "src/lithium.h"
#include "src/liveedit.h"
#include "src/messages.h"
#include "src/parser.h"
#include "src/rewriter.h"
#include "src/runtime-profiler.h"
@ -34,7 +36,7 @@ namespace internal {
ScriptData::ScriptData(const byte* data, int length)
: owns_data_(false), data_(data), length_(length) {
: owns_data_(false), rejected_(false), data_(data), length_(length) {
if (!IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment)) {
byte* copy = NewArray<byte>(length);
DCHECK(IsAligned(reinterpret_cast<intptr_t>(copy), kPointerAlignment));
@ -144,7 +146,7 @@ void CompilationInfo::Initialize(Isolate* isolate,
isolate_ = isolate;
function_ = NULL;
scope_ = NULL;
global_scope_ = NULL;
script_scope_ = NULL;
extension_ = NULL;
cached_data_ = NULL;
compile_options_ = ScriptCompiler::kNoCompileOptions;
@ -291,14 +293,14 @@ bool CompilationInfo::ShouldSelfOptimize() {
void CompilationInfo::PrepareForCompilation(Scope* scope) {
DCHECK(scope_ == NULL);
scope_ = scope;
}
void CompilationInfo::EnsureFeedbackVector() {
if (feedback_vector_.is_null()) {
// Allocate the feedback vector too.
feedback_vector_ = isolate()->factory()->NewTypeFeedbackVector(
function()->slot_count(), function()->ic_slot_count());
function()->feedback_vector_spec());
}
DCHECK(feedback_vector_->Slots() == function()->slot_count() &&
feedback_vector_->ICSlots() == function()->ic_slot_count());
}
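
The vector is now sized from a single spec object instead of two loose counts, with the new DCHECK pinning the old invariant. A hypothetical stand-in for such a spec (names assumed; not V8's actual FeedbackVectorSpec API):

#include <cassert>

struct FeedbackVectorSpec {  // hypothetical stand-in
  int slots;     // general feedback slots
  int ic_slots;  // inline-cache slots
};

struct TypeFeedbackVector {
  explicit TypeFeedbackVector(const FeedbackVectorSpec& spec)
      : slots_(spec.slots), ic_slots_(spec.ic_slots) {}
  int Slots() const { return slots_; }
  int ICSlots() const { return ic_slots_; }

 private:
  int slots_;
  int ic_slots_;
};

int main() {
  FeedbackVectorSpec spec{2, 3};
  TypeFeedbackVector vector(spec);
  // The invariant the new DCHECK asserts: the vector matches the spec.
  assert(vector.Slots() == spec.slots && vector.ICSlots() == spec.ic_slots);
  return 0;
}
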
@ -308,29 +310,29 @@ class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
: HOptimizedGraphBuilder(info) {
}
#define DEF_VISIT(type) \
virtual void Visit##type(type* node) OVERRIDE { \
if (node->position() != RelocInfo::kNoPosition) { \
SetSourcePosition(node->position()); \
} \
HOptimizedGraphBuilder::Visit##type(node); \
#define DEF_VISIT(type) \
void Visit##type(type* node) OVERRIDE { \
if (node->position() != RelocInfo::kNoPosition) { \
SetSourcePosition(node->position()); \
} \
HOptimizedGraphBuilder::Visit##type(node); \
}
EXPRESSION_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
#define DEF_VISIT(type) \
virtual void Visit##type(type* node) OVERRIDE { \
if (node->position() != RelocInfo::kNoPosition) { \
SetSourcePosition(node->position()); \
} \
HOptimizedGraphBuilder::Visit##type(node); \
#define DEF_VISIT(type) \
void Visit##type(type* node) OVERRIDE { \
if (node->position() != RelocInfo::kNoPosition) { \
SetSourcePosition(node->position()); \
} \
HOptimizedGraphBuilder::Visit##type(node); \
}
STATEMENT_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
#define DEF_VISIT(type) \
virtual void Visit##type(type* node) OVERRIDE { \
HOptimizedGraphBuilder::Visit##type(node); \
#define DEF_VISIT(type) \
void Visit##type(type* node) OVERRIDE { \
HOptimizedGraphBuilder::Visit##type(node); \
}
MODULE_NODE_LIST(DEF_VISIT)
DECLARATION_NODE_LIST(DEF_VISIT)
@ -342,9 +344,11 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
DCHECK(info()->IsOptimizing());
DCHECK(!info()->IsCompilingForDebugging());
// We should never arrive here if optimization has been disabled on the
// shared function info.
DCHECK(!info()->shared_info()->optimization_disabled());
// Optimization could have been disabled by the parser.
if (info()->shared_info()->optimization_disabled()) {
return AbortOptimization(
info()->shared_info()->disable_optimization_reason());
}
// Do not use Crankshaft if we need to be able to set breakpoints.
if (isolate()->DebuggerHasBreakPoints()) {
@ -411,6 +415,12 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
// Check the whitelist for TurboFan.
if ((FLAG_turbo_asm && info()->shared_info()->asm_function()) ||
info()->closure()->PassesFilter(FLAG_turbo_filter)) {
if (FLAG_trace_opt) {
OFStream os(stdout);
os << "[compiling method " << Brief(*info()->closure())
<< " using TurboFan]" << std::endl;
}
Timer t(this, &time_taken_to_create_graph_);
compiler::Pipeline pipeline(info());
pipeline.GenerateCode();
if (!info()->code().is_null()) {
@ -418,10 +428,13 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
}
}
if (FLAG_trace_opt) {
OFStream os(stdout);
os << "[compiling method " << Brief(*info()->closure())
<< " using Crankshaft]" << std::endl;
}
if (FLAG_trace_hydrogen) {
Handle<String> name = info()->function()->debug_name();
PrintF("-----------------------------------------------------------\n");
PrintF("Compiling method %s using hydrogen\n", name->ToCString().get());
isolate()->GetHTracer()->TraceCompilation(info());
}
@ -566,18 +579,24 @@ void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
// the estimate conservatively.
if (shared->GetIsolate()->serializer_enabled()) {
estimate += 2;
} else if (FLAG_clever_optimizations) {
} else {
// Inobject slack tracking will reclaim redundant inobject space later,
// so we can afford to adjust the estimate generously.
estimate += 8;
} else {
estimate += 3;
}
shared->set_expected_nof_properties(estimate);
}
static void MaybeDisableOptimization(Handle<SharedFunctionInfo> shared_info,
BailoutReason bailout_reason) {
if (bailout_reason != kNoReason) {
shared_info->DisableOptimization(bailout_reason);
}
}
// Sets the function info on a function.
// The start_position points to the first '(' character after the function name
// in the full script source. When counting characters in the script source the
@ -604,9 +623,12 @@ static void SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
function_info->set_ast_node_count(lit->ast_node_count());
function_info->set_is_function(lit->is_function());
function_info->set_bailout_reason(lit->dont_optimize_reason());
MaybeDisableOptimization(function_info, lit->dont_optimize_reason());
function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
function_info->set_kind(lit->kind());
function_info->set_uses_super_property(lit->uses_super_property());
function_info->set_uses_super_constructor_call(
lit->uses_super_constructor_call());
function_info->set_asm_function(lit->scope()->asm_function());
}
@ -667,7 +689,7 @@ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
FunctionLiteral* lit = info->function();
shared->set_strict_mode(lit->strict_mode());
SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());
shared->set_bailout_reason(lit->dont_optimize_reason());
MaybeDisableOptimization(shared, lit->dont_optimize_reason());
// Compile unoptimized code.
if (!CompileUnoptimizedCode(info)) return MaybeHandle<Code>();
@ -740,18 +762,92 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
static bool Renumber(CompilationInfo* info) {
if (!AstNumbering::Renumber(info->function(), info->zone())) return false;
if (!info->shared_info().is_null()) {
info->shared_info()->set_ast_node_count(info->function()->ast_node_count());
FunctionLiteral* lit = info->function();
info->shared_info()->set_ast_node_count(lit->ast_node_count());
MaybeDisableOptimization(info->shared_info(), lit->dont_optimize_reason());
info->shared_info()->set_dont_cache(lit->flags()->Contains(kDontCache));
}
return true;
}
static void ThrowSuperConstructorCheckError(CompilationInfo* info,
Statement* stmt) {
MaybeHandle<Object> obj = info->isolate()->factory()->NewTypeError(
"super_constructor_call", HandleVector<Object>(nullptr, 0));
Handle<Object> exception;
if (!obj.ToHandle(&exception)) return;
MessageLocation location(info->script(), stmt->position(), stmt->position());
USE(info->isolate()->Throw(*exception, &location));
}
static bool CheckSuperConstructorCall(CompilationInfo* info) {
FunctionLiteral* function = info->function();
if (!function->uses_super_constructor_call()) return true;
if (function->is_default_constructor()) return true;
ZoneList<Statement*>* body = function->body();
CHECK(body->length() > 0);
int super_call_index = 0;
// Allow 'use strict' and similar directives, and empty statements.
while (true) {
CHECK(super_call_index < body->length()); // We know there is a super call.
Statement* stmt = body->at(super_call_index);
if (stmt->IsExpressionStatement() &&
stmt->AsExpressionStatement()->expression()->IsLiteral()) {
super_call_index++;
continue;
}
if (stmt->IsEmptyStatement()) {
super_call_index++;
continue;
}
break;
}
Statement* stmt = body->at(super_call_index);
ExpressionStatement* exprStm = stmt->AsExpressionStatement();
if (exprStm == nullptr) {
ThrowSuperConstructorCheckError(info, stmt);
return false;
}
Call* callExpr = exprStm->expression()->AsCall();
if (callExpr == nullptr) {
ThrowSuperConstructorCheckError(info, stmt);
return false;
}
if (!callExpr->expression()->IsSuperReference()) {
ThrowSuperConstructorCheckError(info, stmt);
return false;
}
ZoneList<Expression*>* arguments = callExpr->arguments();
AstThisAccessVisitor this_access_visitor(info->zone());
this_access_visitor.VisitExpressions(arguments);
if (this_access_visitor.HasStackOverflow()) return false;
if (this_access_visitor.UsesThis()) {
ThrowSuperConstructorCheckError(info, stmt);
return false;
}
return true;
}
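The function above, in short: skip any leading directive-prologue literals ('use strict' and similar) and empty statements, then insist that the first real statement is a plain super(...) call whose arguments never touch 'this'. A standalone sketch of that scan over a simplified statement list (the Kind enum is a stand-in for the real AST checks, not V8 API):
#include <vector>

enum class Kind { kDirective, kEmpty, kSuperCall, kOther };

// Returns true when the first non-directive, non-empty statement is a
// super(...) call; mirrors the skip loop and the checks above.
bool FirstRealStatementIsSuperCall(const std::vector<Kind>& body) {
  size_t i = 0;
  while (i < body.size() &&
         (body[i] == Kind::kDirective || body[i] == Kind::kEmpty)) {
    ++i;  // skip 'use strict' and similar directives, and empty statements
  }
  return i < body.size() && body[i] == Kind::kSuperCall;
}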
bool Compiler::Analyze(CompilationInfo* info) {
DCHECK(info->function() != NULL);
if (!Rewriter::Rewrite(info)) return false;
if (!Scope::Analyze(info)) return false;
if (!Renumber(info)) return false;
DCHECK(info->scope() != NULL);
if (!CheckSuperConstructorCall(info)) return false;
return true;
}
@@ -784,11 +880,6 @@ static bool GetOptimizedCodeNow(CompilationInfo* info) {
InsertCodeIntoOptimizedCodeMap(info);
RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info,
info->shared_info());
if (FLAG_trace_opt) {
PrintF("[completed optimizing ");
info->closure()->ShortPrint();
PrintF("]\n");
}
return true;
}
@@ -857,12 +948,8 @@ MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
VMState<COMPILER> state(isolate);
PostponeInterruptsScope postpone(isolate);
info.SetOptimizing(BailoutId::None(),
Handle<Code>(function->shared()->code()));
info.SetOptimizing(BailoutId::None(), handle(function->shared()->code()));
info.MarkAsContextSpecializing();
info.MarkAsTypingEnabled();
info.MarkAsInliningDisabled();
if (GetOptimizedCodeNow(&info)) {
DCHECK(function->shared()->is_compiled());
@@ -927,16 +1014,23 @@ bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
DCHECK(info->function() != NULL);
DCHECK(info->scope() != NULL);
if (!info->shared_info()->has_deoptimization_support()) {
CompilationInfoWithZone unoptimized(info->shared_info());
Handle<SharedFunctionInfo> shared = info->shared_info();
CompilationInfoWithZone unoptimized(shared);
// Note that we use the same AST that we will use for generating the
// optimized code.
unoptimized.SetFunction(info->function());
unoptimized.PrepareForCompilation(info->scope());
unoptimized.SetContext(info->context());
unoptimized.EnableDeoptimizationSupport();
// If the current code has reloc info for serialization, also include
// reloc info for serialization for the new code, so that deopt support
// can be added without losing IC state.
if (shared->code()->kind() == Code::FUNCTION &&
shared->code()->has_reloc_info_for_serialization()) {
unoptimized.PrepareForSerializing();
}
if (!FullCodeGenerator::MakeCode(&unoptimized)) return false;
Handle<SharedFunctionInfo> shared = info->shared_info();
shared->EnableDeoptimizationSupport(*unoptimized.code());
shared->set_feedback_vector(*unoptimized.feedback_vector());
@@ -1167,19 +1261,20 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
int column_offset, bool is_shared_cross_origin, Handle<Context> context,
v8::Extension* extension, ScriptData** cached_data,
ScriptCompiler::CompileOptions compile_options, NativesFlag natives) {
Isolate* isolate = source->GetIsolate();
if (compile_options == ScriptCompiler::kNoCompileOptions) {
cached_data = NULL;
} else if (compile_options == ScriptCompiler::kProduceParserCache ||
compile_options == ScriptCompiler::kProduceCodeCache) {
DCHECK(cached_data && !*cached_data);
DCHECK(extension == NULL);
DCHECK(!isolate->debug()->is_loaded());
} else {
DCHECK(compile_options == ScriptCompiler::kConsumeParserCache ||
compile_options == ScriptCompiler::kConsumeCodeCache);
DCHECK(cached_data && *cached_data);
DCHECK(extension == NULL);
}
Isolate* isolate = source->GetIsolate();
int source_length = source->length();
isolate->counters()->total_load_size()->Increment(source_length);
isolate->counters()->total_compile_size()->Increment(source_length);
@@ -1243,10 +1338,7 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
result = CompileToplevel(&info);
if (extension == NULL && !result.is_null() && !result->dont_cache()) {
compilation_cache->PutScript(source, context, result);
// TODO(yangguo): Issue 3628
// With block scoping, top-level variables may resolve to a global,
// context, which makes the code context-dependent.
if (FLAG_serialize_toplevel && !FLAG_harmony_scoping &&
if (FLAG_serialize_toplevel &&
compile_options == ScriptCompiler::kProduceCodeCache) {
HistogramTimerScope histogram_timer(
isolate->counters()->compile_serialize());
@@ -1305,10 +1397,10 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
bool allow_lazy = literal->AllowsLazyCompilation() &&
!DebuggerWantsEagerCompilation(&info, allow_lazy_without_ctx);
if (outer_info->is_toplevel() && outer_info->will_serialize()) {
// Make sure that if the toplevel code is to be serialized, the inner
// function is allowed to be compiled lazily.
// This is necessary to serialize toplevel code without inner functions.
DCHECK(allow_lazy);
}
@@ -1317,8 +1409,18 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
if (FLAG_lazy && allow_lazy && !literal->is_parenthesized()) {
Handle<Code> code = isolate->builtins()->CompileLazy();
info.SetCode(code);
// In theory a lazily compiled function has no need for a type feedback
// vector, but some parts of the system expect all SharedFunctionInfo
// instances to have one. The size of the vector depends on how many
// feedback-needing nodes the tree contains, which we might not know when
// parsing lazily if this function was never parsed before. In that case
// the vector will be replaced the next time MakeCode is called.
info.EnsureFeedbackVector();
scope_info = Handle<ScopeInfo>(ScopeInfo::Empty(isolate));
} else if (Renumber(&info) && FullCodeGenerator::MakeCode(&info)) {
// MakeCode will ensure that the feedback vector is present and
// appropriately sized.
DCHECK(!info.code().is_null());
scope_info = ScopeInfo::Create(info.scope(), info.zone());
} else {
@@ -1357,6 +1459,7 @@ MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
Isolate* isolate = info->isolate();
DCHECK(AllowCompilation::IsAllowed(isolate));
VMState<COMPILER> state(isolate);
DCHECK(isolate->use_crankshaft());
DCHECK(!isolate->has_pending_exception());
PostponeInterruptsScope postpone(isolate);

23
deps/v8/src/compiler.h

@@ -39,6 +39,9 @@ class ScriptData {
const byte* data() const { return data_; }
int length() const { return length_; }
bool rejected() const { return rejected_; }
void Reject() { rejected_ = true; }
void AcquireDataOwnership() {
DCHECK(!owns_data_);
@@ -51,7 +54,8 @@ class ScriptData {
}
private:
bool owns_data_;
bool owns_data_ : 1;
bool rejected_ : 1;
const byte* data_;
int length_;
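Turning owns_data_ into a one-bit field lets the new rejected_ flag share the same byte instead of growing ScriptData. A quick standalone illustration of the effect (example structs, not the real class):
#include <iostream>

struct Plain  { bool owns_data;     bool rejected;     };  // two separate bools
struct Packed { bool owns_data : 1; bool rejected : 1; };  // bits share one byte

int main() {
  // Typically prints "2 vs 1" (exact sizes are implementation-defined).
  std::cout << sizeof(Plain) << " vs " << sizeof(Packed) << "\n";
}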
@@ -105,7 +109,7 @@ class CompilationInfo {
}
FunctionLiteral* function() const { return function_; }
Scope* scope() const { return scope_; }
Scope* global_scope() const { return global_scope_; }
Scope* script_scope() const { return script_scope_; }
Handle<Code> code() const { return code_; }
Handle<JSFunction> closure() const { return closure_; }
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
@@ -200,8 +204,6 @@ class CompilationInfo {
void MarkAsInliningEnabled() { SetFlag(kInliningEnabled); }
void MarkAsInliningDisabled() { SetFlag(kInliningEnabled, false); }
bool is_inlining_enabled() const { return GetFlag(kInliningEnabled); }
void MarkAsTypingEnabled() { SetFlag(kTypingEnabled); }
@@ -231,10 +233,11 @@ class CompilationInfo {
function_ = literal;
}
void PrepareForCompilation(Scope* scope);
void SetGlobalScope(Scope* global_scope) {
DCHECK(global_scope_ == NULL);
global_scope_ = global_scope;
void SetScriptScope(Scope* script_scope) {
DCHECK(script_scope_ == NULL);
script_scope_ = script_scope;
}
void EnsureFeedbackVector();
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
@@ -441,8 +444,8 @@ class CompilationInfo {
// The scope of the function literal as a convenience. Set to indicate
// that scopes have been analyzed.
Scope* scope_;
// The global scope provided as a convenience.
Scope* global_scope_;
// The script scope provided as a convenience.
Scope* script_scope_;
// For compiled stubs, the stub object
HydrogenCodeStub* code_stub_;
// The compiled code.
@@ -460,7 +463,7 @@ class CompilationInfo {
ScriptData** cached_data_;
ScriptCompiler::CompileOptions compile_options_;
// The context of the caller for eval code, and the global context for a
// The context of the caller for eval code, and the script context for a
// global script. Will be a null handle otherwise.
Handle<Context> context_;

44
deps/v8/src/compiler/access-builder.cc

@@ -40,21 +40,21 @@ FieldAccess AccessBuilder::ForJSFunctionContext() {
// static
FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
return {kTaggedBase, JSArrayBuffer::kBackingStoreOffset, MaybeHandle<Name>(),
Type::UntaggedPtr(), kMachPtr};
Type::UntaggedPointer(), kMachPtr};
}
// static
FieldAccess AccessBuilder::ForExternalArrayPointer() {
return {kTaggedBase, ExternalArray::kExternalPointerOffset,
MaybeHandle<Name>(), Type::UntaggedPtr(), kMachPtr};
MaybeHandle<Name>(), Type::UntaggedPointer(), kMachPtr};
}
// static
FieldAccess AccessBuilder::ForMapInstanceType() {
return {kTaggedBase, Map::kInstanceTypeOffset, Handle<Name>(),
Type::UntaggedInt8(), kMachUint8};
Type::UntaggedUnsigned8(), kMachUint8};
}
@@ -72,10 +72,18 @@ FieldAccess AccessBuilder::ForValue() {
}
// static
FieldAccess AccessBuilder::ForContextSlot(size_t index) {
int offset = Context::kHeaderSize + static_cast<int>(index) * kPointerSize;
DCHECK_EQ(offset,
Context::SlotOffset(static_cast<int>(index)) + kHeapObjectTag);
return {kTaggedBase, offset, Handle<Name>(), Type::Any(), kMachAnyTagged};
}
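The DCHECK above ties the raw field offset back to Context::SlotOffset plus the heap-object tag; the arithmetic itself is just header-plus-index. A sketch with made-up constants (these are not V8's real values):
constexpr int kExampleHeaderSize = 16;   // assumption, for illustration only
constexpr int kExamplePointerSize = 8;   // assumption, for illustration only

constexpr int ExampleContextSlotOffset(int index) {
  return kExampleHeaderSize + index * kExamplePointerSize;
}

static_assert(ExampleContextSlotOffset(0) == 16, "first slot follows the header");
static_assert(ExampleContextSlotOffset(3) == 40, "slots are pointer-sized");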
// static
ElementAccess AccessBuilder::ForFixedArrayElement() {
return {kNoBoundsCheck, kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
kMachAnyTagged};
return {kTaggedBase, FixedArray::kHeaderSize, Type::Any(), kMachAnyTagged};
}
@@ -86,33 +94,25 @@ ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
int header_size = is_external ? 0 : FixedTypedArrayBase::kDataOffset;
switch (type) {
case kExternalInt8Array:
return {kTypedArrayBoundsCheck, taggedness, header_size, Type::Signed32(),
kMachInt8};
return {taggedness, header_size, Type::Signed32(), kMachInt8};
case kExternalUint8Array:
case kExternalUint8ClampedArray:
return {kTypedArrayBoundsCheck, taggedness, header_size,
Type::Unsigned32(), kMachUint8};
return {taggedness, header_size, Type::Unsigned32(), kMachUint8};
case kExternalInt16Array:
return {kTypedArrayBoundsCheck, taggedness, header_size, Type::Signed32(),
kMachInt16};
return {taggedness, header_size, Type::Signed32(), kMachInt16};
case kExternalUint16Array:
return {kTypedArrayBoundsCheck, taggedness, header_size,
Type::Unsigned32(), kMachUint16};
return {taggedness, header_size, Type::Unsigned32(), kMachUint16};
case kExternalInt32Array:
return {kTypedArrayBoundsCheck, taggedness, header_size, Type::Signed32(),
kMachInt32};
return {taggedness, header_size, Type::Signed32(), kMachInt32};
case kExternalUint32Array:
return {kTypedArrayBoundsCheck, taggedness, header_size,
Type::Unsigned32(), kMachUint32};
return {taggedness, header_size, Type::Unsigned32(), kMachUint32};
case kExternalFloat32Array:
return {kTypedArrayBoundsCheck, taggedness, header_size, Type::Number(),
kMachFloat32};
return {taggedness, header_size, Type::Number(), kMachFloat32};
case kExternalFloat64Array:
return {kTypedArrayBoundsCheck, taggedness, header_size, Type::Number(),
kMachFloat64};
return {taggedness, header_size, Type::Number(), kMachFloat64};
}
UNREACHABLE();
return {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::None(), kMachNone};
return {kUntaggedBase, 0, Type::None(), kMachNone};
}
} // namespace compiler

3
deps/v8/src/compiler/access-builder.h

@@ -43,6 +43,9 @@ class AccessBuilder FINAL : public AllStatic {
// Provides access to JSValue::value() field.
static FieldAccess ForValue();
// Provides access Context slots.
static FieldAccess ForContextSlot(size_t index);
// Provides access to FixedArray elements.
static ElementAccess ForFixedArrayElement();

275
deps/v8/src/compiler/arm/code-generator-arm.cc

@@ -77,6 +77,7 @@ class ArmOperandConverter FINAL : public InstructionOperandConverter {
case Constant::kInt64:
case Constant::kExternalReference:
case Constant::kHeapObject:
case Constant::kRpoNumber:
break;
}
UNREACHABLE();
@@ -141,9 +142,8 @@ class ArmOperandConverter FINAL : public InstructionOperandConverter {
return MemOperand(r0);
}
MemOperand InputOffset() {
int index = 0;
return InputOffset(&index);
MemOperand InputOffset(int first_index = 0) {
return InputOffset(&first_index);
}
MemOperand ToMemOperand(InstructionOperand* op) const {
@@ -158,6 +158,112 @@ class ArmOperandConverter FINAL : public InstructionOperandConverter {
};
namespace {
class OutOfLineLoadFloat32 FINAL : public OutOfLineCode {
public:
OutOfLineLoadFloat32(CodeGenerator* gen, SwVfpRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() FINAL {
__ vmov(result_, std::numeric_limits<float>::quiet_NaN());
}
private:
SwVfpRegister const result_;
};
class OutOfLineLoadFloat64 FINAL : public OutOfLineCode {
public:
OutOfLineLoadFloat64(CodeGenerator* gen, DwVfpRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() FINAL {
__ vmov(result_, std::numeric_limits<double>::quiet_NaN(), kScratchReg);
}
private:
DwVfpRegister const result_;
};
class OutOfLineLoadInteger FINAL : public OutOfLineCode {
public:
OutOfLineLoadInteger(CodeGenerator* gen, Register result)
: OutOfLineCode(gen), result_(result) {}
void Generate() FINAL { __ mov(result_, Operand::Zero()); }
private:
Register const result_;
};
} // namespace
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width) \
do { \
auto result = i.OutputFloat##width##Register(); \
auto offset = i.InputRegister(0); \
if (instr->InputAt(1)->IsRegister()) { \
__ cmp(offset, i.InputRegister(1)); \
} else { \
__ cmp(offset, i.InputImmediate(1)); \
} \
auto ool = new (zone()) OutOfLineLoadFloat##width(this, result); \
__ b(hs, ool->entry()); \
__ vldr(result, i.InputOffset(2)); \
__ bind(ool->exit()); \
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
} while (0)
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
auto result = i.OutputRegister(); \
auto offset = i.InputRegister(0); \
if (instr->InputAt(1)->IsRegister()) { \
__ cmp(offset, i.InputRegister(1)); \
} else { \
__ cmp(offset, i.InputImmediate(1)); \
} \
auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
__ b(hs, ool->entry()); \
__ asm_instr(result, i.InputOffset(2)); \
__ bind(ool->exit()); \
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
} while (0)
#define ASSEMBLE_CHECKED_STORE_FLOAT(width) \
do { \
auto offset = i.InputRegister(0); \
if (instr->InputAt(1)->IsRegister()) { \
__ cmp(offset, i.InputRegister(1)); \
} else { \
__ cmp(offset, i.InputImmediate(1)); \
} \
auto value = i.InputFloat##width##Register(2); \
__ vstr(value, i.InputOffset(3), lo); \
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
} while (0)
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
do { \
auto offset = i.InputRegister(0); \
if (instr->InputAt(1)->IsRegister()) { \
__ cmp(offset, i.InputRegister(1)); \
} else { \
__ cmp(offset, i.InputImmediate(1)); \
} \
auto value = i.InputRegister(2); \
__ asm_instr(value, i.InputOffset(3), lo); \
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
} while (0)
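All four macro families above share one shape: compare the offset against the length, take the unsigned "hs" (>=) branch to an out-of-line default for loads or predicate the store away with "lo", and otherwise access memory. The semantics they implement, written as plain C++ (helper names are descriptive, not V8 API):
#include <cstdint>
#include <limits>

// Out-of-bounds loads produce a default value (quiet NaN for floats, zero
// for integers); out-of-bounds stores are silently dropped.
double CheckedLoadFloat64(const double* buf, uint32_t offset, uint32_t length) {
  if (offset >= length) return std::numeric_limits<double>::quiet_NaN();
  return buf[offset];
}

void CheckedStoreWord32(uint32_t* buf, uint32_t offset, uint32_t length,
                        uint32_t value) {
  if (offset < length) buf[offset] = value;
}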
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ArmOperandConverter i(this, instr);
@@ -193,7 +299,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kArchJmp:
__ b(GetLabel(i.InputRpo(0)));
AssembleArchJump(i.InputRpo(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArchNop:
@@ -299,6 +405,42 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmSxtb:
__ sxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmSxth:
__ sxth(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmSxtab:
__ sxtab(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
i.InputInt32(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmSxtah:
__ sxtah(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
i.InputInt32(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmUxtb:
__ uxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmUxth:
__ uxth(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmUxtab:
__ uxtab(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
i.InputInt32(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmUxtah:
__ uxtah(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
i.InputInt32(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmCmp:
__ cmp(i.InputRegister(0), i.InputOperand2(1));
DCHECK_EQ(SetCC, i.OutputSBit());
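The eight new extend opcodes above map onto simple C++ expressions when the rotation operand is zero (two's-complement narrowing assumed; names are descriptive, not V8 API):
#include <cstdint>

int32_t  Sxtb(uint32_t x) { return static_cast<int8_t>(x & 0xff); }     // sign-extend byte
int32_t  Sxth(uint32_t x) { return static_cast<int16_t>(x & 0xffff); }  // sign-extend half
uint32_t Uxtb(uint32_t x) { return x & 0xff; }                          // zero-extend byte
uint32_t Uxth(uint32_t x) { return x & 0xffff; }                        // zero-extend half
int32_t  Sxtab(int32_t acc, uint32_t x)  { return acc + Sxtb(x); }      // add + sign-extend
uint32_t Uxtah(uint32_t acc, uint32_t x) { return acc + Uxth(x); }      // add + zero-extend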
@@ -498,35 +640,62 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsb);
break;
case kCheckedLoadUint8:
ASSEMBLE_CHECKED_LOAD_INTEGER(ldrb);
break;
case kCheckedLoadInt16:
ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsh);
break;
case kCheckedLoadUint16:
ASSEMBLE_CHECKED_LOAD_INTEGER(ldrh);
break;
case kCheckedLoadWord32:
ASSEMBLE_CHECKED_LOAD_INTEGER(ldr);
break;
case kCheckedLoadFloat32:
ASSEMBLE_CHECKED_LOAD_FLOAT(32);
break;
case kCheckedLoadFloat64:
ASSEMBLE_CHECKED_LOAD_FLOAT(64);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(strb);
break;
case kCheckedStoreWord16:
ASSEMBLE_CHECKED_STORE_INTEGER(strh);
break;
case kCheckedStoreWord32:
ASSEMBLE_CHECKED_STORE_INTEGER(str);
break;
case kCheckedStoreFloat32:
ASSEMBLE_CHECKED_STORE_FLOAT(32);
break;
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FLOAT(64);
break;
}
}
// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
FlagsCondition condition) {
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
ArmOperandConverter i(this, instr);
Label done;
// Emit a branch. The true and false targets are always the last two inputs
// to the instruction.
BasicBlock::RpoNumber tblock =
i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
BasicBlock::RpoNumber fblock =
i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
bool fallthru = IsNextInAssemblyOrder(fblock);
Label* tlabel = GetLabel(tblock);
Label* flabel = fallthru ? &done : GetLabel(fblock);
switch (condition) {
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
switch (branch->condition) {
case kUnorderedEqual:
__ b(vs, flabel);
// Fall through.
// The "eq" condition will not catch the unordered case.
// The jump/fall-through to the false label is used if the comparison
// was unordered.
case kEqual:
__ b(eq, tlabel);
break;
case kUnorderedNotEqual:
__ b(vs, tlabel);
// Fall through.
// Unordered or not-equal can be tested with the "ne" condition.
// See ARMv7 manual A8.3 - Conditional execution.
case kNotEqual:
__ b(ne, tlabel);
break;
@@ -543,26 +712,28 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
__ b(gt, tlabel);
break;
case kUnorderedLessThan:
__ b(vs, flabel);
// Fall through.
// The "lo" condition will not catch the unordered case.
// The jump/fall-through to the false label is used if the comparison
// was unordered.
case kUnsignedLessThan:
__ b(lo, tlabel);
break;
case kUnorderedGreaterThanOrEqual:
__ b(vs, tlabel);
// Fall through.
// Unordered or greater-than-or-equal can be tested with the "hs" condition.
// See ARMv7 manual A8.3 - Conditional execution.
case kUnsignedGreaterThanOrEqual:
__ b(hs, tlabel);
break;
case kUnorderedLessThanOrEqual:
__ b(vs, flabel);
// Fall through.
// The "ls" condition will not catch the unordered case.
// The jump/fall-through to the false label is used if the comparison
// was unordered.
case kUnsignedLessThanOrEqual:
__ b(ls, tlabel);
break;
case kUnorderedGreaterThan:
__ b(vs, tlabel);
// Fall through.
// Unordered or greater-than can be tested with the "hi" condition.
// See ARMv7 manual A8.3 - Conditional execution.
case kUnsignedGreaterThan:
__ b(hi, tlabel);
break;
@@ -573,8 +744,12 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
__ b(vc, tlabel);
break;
}
if (!fallthru) __ b(flabel); // no fallthru to flabel.
__ bind(&done);
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}
@@ -702,24 +877,6 @@ void CodeGenerator::AssemblePrologue() {
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
// TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
// +2 for return address and saved frame pointer.
int receiver_slot = info->scope()->num_parameters() + 2;
__ ldr(r2, MemOperand(fp, receiver_slot * kPointerSize));
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(ne, &ok);
__ ldr(r2, GlobalObjectOperand());
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
__ str(r2, MemOperand(fp, receiver_slot * kPointerSize));
__ bind(&ok);
}
} else {
__ StubPrologue();
frame()->SetRegisterSaveAreaSize(
@@ -809,24 +966,26 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
case Constant::kHeapObject:
__ Move(dst, src.ToHeapObject());
break;
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on arm.
break;
}
if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
SwVfpRegister dst = destination->IsDoubleRegister()
? g.ToFloat32Register(destination)
: kScratchDoubleReg.low();
// TODO(turbofan): Can we do better here?
__ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
__ vmov(dst, ip);
if (destination->IsDoubleStackSlot()) {
__ vstr(dst, g.ToMemOperand(destination));
MemOperand dst = g.ToMemOperand(destination);
__ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
__ str(ip, dst);
} else {
SwVfpRegister dst = g.ToFloat32Register(destination);
__ vmov(dst, src.ToFloat32());
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
DwVfpRegister dst = destination->IsDoubleRegister()
? g.ToFloat64Register(destination)
: kScratchDoubleReg;
__ vmov(dst, src.ToFloat64());
__ vmov(dst, src.ToFloat64(), kScratchReg);
if (destination->IsDoubleStackSlot()) {
__ vstr(dst, g.ToMemOperand(destination));
}

8
deps/v8/src/compiler/arm/instruction-codes-arm.h

@@ -35,6 +35,14 @@ namespace compiler {
V(ArmMvn) \
V(ArmBfc) \
V(ArmUbfx) \
V(ArmSxtb) \
V(ArmSxth) \
V(ArmSxtab) \
V(ArmSxtah) \
V(ArmUxtb) \
V(ArmUxth) \
V(ArmUxtab) \
V(ArmUxtah) \
V(ArmVcmpF64) \
V(ArmVaddF64) \
V(ArmVsubF64) \

533
deps/v8/src/compiler/arm/instruction-selector-arm.cc

@@ -16,13 +16,6 @@ class ArmOperandGenerator : public OperandGenerator {
explicit ArmOperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
if (CanBeImmediate(node, opcode)) {
return UseImmediate(node);
}
return UseRegister(node);
}
bool CanBeImmediate(int32_t value) const {
return Assembler::ImmediateFitsAddrMode1Instruction(value);
}
@@ -74,62 +67,26 @@ class ArmOperandGenerator : public OperandGenerator {
case kArmStrh:
return value >= -255 && value <= 255;
case kArchCallCodeObject:
case kArchCallJSFunction:
case kArchJmp:
case kArchNop:
case kArchRet:
case kArchStackPointer:
case kArchTruncateDoubleToI:
case kArmMul:
case kArmMla:
case kArmMls:
case kArmSmmul:
case kArmSmmla:
case kArmUmull:
case kArmSdiv:
case kArmUdiv:
case kArmBfc:
case kArmUbfx:
case kArmVcmpF64:
case kArmVaddF64:
case kArmVsubF64:
case kArmVmulF64:
case kArmVmlaF64:
case kArmVmlsF64:
case kArmVdivF64:
case kArmVmodF64:
case kArmVnegF64:
case kArmVsqrtF64:
case kArmVfloorF64:
case kArmVceilF64:
case kArmVroundTruncateF64:
case kArmVroundTiesAwayF64:
case kArmVcvtF32F64:
case kArmVcvtF64F32:
case kArmVcvtF64S32:
case kArmVcvtF64U32:
case kArmVcvtS32F64:
case kArmVcvtU32F64:
case kArmPush:
return false;
default:
break;
}
UNREACHABLE();
return false;
}
};
static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
namespace {
void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
ArmOperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
ArmOperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)),
@@ -137,86 +94,69 @@ static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
}
static bool TryMatchROR(InstructionSelector* selector,
InstructionCode* opcode_return, Node* node,
InstructionOperand** value_return,
InstructionOperand** shift_return) {
template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax,
AddressingMode kImmMode, AddressingMode kRegMode>
bool TryMatchShift(InstructionSelector* selector,
InstructionCode* opcode_return, Node* node,
InstructionOperand** value_return,
InstructionOperand** shift_return) {
ArmOperandGenerator g(selector);
if (node->opcode() != IrOpcode::kWord32Ror) return false;
Int32BinopMatcher m(node);
*value_return = g.UseRegister(m.left().node());
if (m.right().IsInRange(1, 31)) {
*opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ROR_I);
*shift_return = g.UseImmediate(m.right().node());
} else {
*opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ROR_R);
*shift_return = g.UseRegister(m.right().node());
if (node->opcode() == kOpcode) {
Int32BinopMatcher m(node);
*value_return = g.UseRegister(m.left().node());
if (m.right().IsInRange(kImmMin, kImmMax)) {
*opcode_return |= AddressingModeField::encode(kImmMode);
*shift_return = g.UseImmediate(m.right().node());
} else {
*opcode_return |= AddressingModeField::encode(kRegMode);
*shift_return = g.UseRegister(m.right().node());
}
return true;
}
return true;
return false;
}
static inline bool TryMatchASR(InstructionSelector* selector,
InstructionCode* opcode_return, Node* node,
InstructionOperand** value_return,
InstructionOperand** shift_return) {
ArmOperandGenerator g(selector);
if (node->opcode() != IrOpcode::kWord32Sar) return false;
Int32BinopMatcher m(node);
*value_return = g.UseRegister(m.left().node());
if (m.right().IsInRange(1, 32)) {
*opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
*shift_return = g.UseImmediate(m.right().node());
} else {
*opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ASR_R);
*shift_return = g.UseRegister(m.right().node());
}
return true;
bool TryMatchROR(InstructionSelector* selector, InstructionCode* opcode_return,
Node* node, InstructionOperand** value_return,
InstructionOperand** shift_return) {
return TryMatchShift<IrOpcode::kWord32Ror, 1, 31, kMode_Operand2_R_ROR_I,
kMode_Operand2_R_ROR_R>(selector, opcode_return, node,
value_return, shift_return);
}
static inline bool TryMatchLSL(InstructionSelector* selector,
InstructionCode* opcode_return, Node* node,
InstructionOperand** value_return,
InstructionOperand** shift_return) {
ArmOperandGenerator g(selector);
if (node->opcode() != IrOpcode::kWord32Shl) return false;
Int32BinopMatcher m(node);
*value_return = g.UseRegister(m.left().node());
if (m.right().IsInRange(0, 31)) {
*opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
*shift_return = g.UseImmediate(m.right().node());
} else {
*opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSL_R);
*shift_return = g.UseRegister(m.right().node());
}
return true;
bool TryMatchASR(InstructionSelector* selector, InstructionCode* opcode_return,
Node* node, InstructionOperand** value_return,
InstructionOperand** shift_return) {
return TryMatchShift<IrOpcode::kWord32Sar, 1, 32, kMode_Operand2_R_ASR_I,
kMode_Operand2_R_ASR_R>(selector, opcode_return, node,
value_return, shift_return);
}
static inline bool TryMatchLSR(InstructionSelector* selector,
InstructionCode* opcode_return, Node* node,
InstructionOperand** value_return,
InstructionOperand** shift_return) {
ArmOperandGenerator g(selector);
if (node->opcode() != IrOpcode::kWord32Shr) return false;
Int32BinopMatcher m(node);
*value_return = g.UseRegister(m.left().node());
if (m.right().IsInRange(1, 32)) {
*opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
*shift_return = g.UseImmediate(m.right().node());
} else {
*opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSR_R);
*shift_return = g.UseRegister(m.right().node());
}
return true;
bool TryMatchLSL(InstructionSelector* selector, InstructionCode* opcode_return,
Node* node, InstructionOperand** value_return,
InstructionOperand** shift_return) {
return TryMatchShift<IrOpcode::kWord32Shl, 0, 31, kMode_Operand2_R_LSL_I,
kMode_Operand2_R_LSL_R>(selector, opcode_return, node,
value_return, shift_return);
}
bool TryMatchLSR(InstructionSelector* selector, InstructionCode* opcode_return,
Node* node, InstructionOperand** value_return,
InstructionOperand** shift_return) {
return TryMatchShift<IrOpcode::kWord32Shr, 1, 32, kMode_Operand2_R_LSR_I,
kMode_Operand2_R_LSR_R>(selector, opcode_return, node,
value_return, shift_return);
}
static inline bool TryMatchShift(InstructionSelector* selector,
InstructionCode* opcode_return, Node* node,
InstructionOperand** value_return,
InstructionOperand** shift_return) {
bool TryMatchShift(InstructionSelector* selector,
InstructionCode* opcode_return, Node* node,
InstructionOperand** value_return,
InstructionOperand** shift_return) {
return (
TryMatchASR(selector, opcode_return, node, value_return, shift_return) ||
TryMatchLSL(selector, opcode_return, node, value_return, shift_return) ||
@@ -225,11 +165,10 @@ static inline bool TryMatchShift(InstructionSelector* selector,
}
static inline bool TryMatchImmediateOrShift(InstructionSelector* selector,
InstructionCode* opcode_return,
Node* node,
size_t* input_count_return,
InstructionOperand** inputs) {
bool TryMatchImmediateOrShift(InstructionSelector* selector,
InstructionCode* opcode_return, Node* node,
size_t* input_count_return,
InstructionOperand** inputs) {
ArmOperandGenerator g(selector);
if (g.CanBeImmediate(node, *opcode_return)) {
*opcode_return |= AddressingModeField::encode(kMode_Operand2_I);
@@ -245,9 +184,9 @@ static inline bool TryMatchImmediateOrShift(InstructionSelector* selector,
}
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, InstructionCode reverse_opcode,
FlagsContinuation* cont) {
void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, InstructionCode reverse_opcode,
FlagsContinuation* cont) {
ArmOperandGenerator g(selector);
Int32BinopMatcher m(node);
InstructionOperand* inputs[5];
@@ -255,8 +194,20 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionOperand* outputs[2];
size_t output_count = 0;
if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
&input_count, &inputs[1])) {
if (m.left().node() == m.right().node()) {
// If both inputs refer to the same operand, enforce allocating a register
// for both of them to ensure that we don't end up generating code like
// this:
//
// mov r0, r1, asr #16
// adds r0, r0, r1, asr #16
// bvs label
InstructionOperand* const input = g.UseRegister(m.left().node());
opcode |= AddressingModeField::encode(kMode_Operand2_R);
inputs[input_count++] = input;
inputs[input_count++] = input;
} else if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
&input_count, &inputs[1])) {
inputs[0] = g.UseRegister(m.left().node());
input_count++;
} else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
@@ -293,13 +244,16 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
}
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, InstructionCode reverse_opcode) {
void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, InstructionCode reverse_opcode) {
FlagsContinuation cont;
VisitBinop(selector, node, opcode, reverse_opcode, &cont);
}
} // namespace
void InstructionSelector::VisitLoad(Node* node) {
MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
@@ -396,8 +350,86 @@ void InstructionSelector::VisitStore(Node* node) {
}
static inline void EmitBic(InstructionSelector* selector, Node* node,
Node* left, Node* right) {
void InstructionSelector::VisitCheckedLoad(Node* node) {
MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
MachineType typ = TypeOf(OpParameter<MachineType>(node));
ArmOperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
ArchOpcode opcode;
switch (rep) {
case kRepWord8:
opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
case kRepWord16:
opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
case kRepWord32:
opcode = kCheckedLoadWord32;
break;
case kRepFloat32:
opcode = kCheckedLoadFloat32;
break;
case kRepFloat64:
opcode = kCheckedLoadFloat64;
break;
default:
UNREACHABLE();
return;
}
InstructionOperand* offset_operand = g.UseRegister(offset);
InstructionOperand* length_operand = g.CanBeImmediate(length, kArmCmp)
? g.UseImmediate(length)
: g.UseRegister(length);
Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
g.DefineAsRegister(node), offset_operand, length_operand,
g.UseRegister(buffer), offset_operand);
}
void InstructionSelector::VisitCheckedStore(Node* node) {
MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
ArmOperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
ArchOpcode opcode;
switch (rep) {
case kRepWord8:
opcode = kCheckedStoreWord8;
break;
case kRepWord16:
opcode = kCheckedStoreWord16;
break;
case kRepWord32:
opcode = kCheckedStoreWord32;
break;
case kRepFloat32:
opcode = kCheckedStoreFloat32;
break;
case kRepFloat64:
opcode = kCheckedStoreFloat64;
break;
default:
UNREACHABLE();
return;
}
InstructionOperand* offset_operand = g.UseRegister(offset);
InstructionOperand* length_operand = g.CanBeImmediate(length, kArmCmp)
? g.UseImmediate(length)
: g.UseRegister(length);
Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), nullptr,
offset_operand, length_operand, g.UseRegister(value),
g.UseRegister(buffer), offset_operand);
}
namespace {
void EmitBic(InstructionSelector* selector, Node* node, Node* left,
Node* right) {
ArmOperandGenerator g(selector);
InstructionCode opcode = kArmBic;
InstructionOperand* value_operand;
@@ -413,6 +445,18 @@ static inline void EmitBic(InstructionSelector* selector, Node* node,
}
void EmitUbfx(InstructionSelector* selector, Node* node, Node* left,
uint32_t lsb, uint32_t width) {
DCHECK_LE(1, width);
DCHECK_LE(width, 32 - lsb);
ArmOperandGenerator g(selector);
selector->Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(left),
g.TempImmediate(lsb), g.TempImmediate(width));
}
} // namespace
void InstructionSelector::VisitWord32And(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -430,27 +474,27 @@ void InstructionSelector::VisitWord32And(Node* node) {
return;
}
}
if (IsSupported(ARMv7) && m.right().HasValue()) {
// Try to interpret this AND as UBFX.
if (m.right().HasValue()) {
uint32_t const value = m.right().Value();
uint32_t width = base::bits::CountPopulation32(value);
uint32_t msb = base::bits::CountLeadingZeros32(value);
if (width != 0 && msb + width == 32) {
// Try to interpret this AND as UBFX.
if (IsSupported(ARMv7) && width != 0 && msb + width == 32) {
DCHECK_EQ(0, base::bits::CountTrailingZeros32(value));
if (m.left().IsWord32Shr()) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().IsInRange(0, 31)) {
Emit(kArmUbfx, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
g.UseImmediate(mleft.right().node()), g.TempImmediate(width));
return;
// UBFX cannot extract bits past the register size; however, since
// shifting the original value has already introduced zeros, we can
// still use UBFX with a smaller mask and the remaining bits will be
// zeros.
uint32_t const lsb = mleft.right().Value();
return EmitUbfx(this, node, mleft.left().node(), lsb,
std::min(width, 32 - lsb));
}
}
Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.TempImmediate(0), g.TempImmediate(width));
return;
return EmitUbfx(this, node, m.left().node(), 0, width);
}
// Try to interpret this AND as BIC.
if (g.CanBeImmediate(~value)) {
Emit(kArmBic | AddressingModeField::encode(kMode_Operand2_I),
@@ -458,16 +502,23 @@ void InstructionSelector::VisitWord32And(Node* node) {
g.TempImmediate(~value));
return;
}
// Try to interpret this AND as BFC.
width = 32 - width;
msb = base::bits::CountLeadingZeros32(~value);
uint32_t lsb = base::bits::CountTrailingZeros32(~value);
if (msb + width + lsb == 32) {
Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.TempImmediate(lsb), g.TempImmediate(width));
// Try to interpret this AND as UXTH.
if (value == 0xffff) {
Emit(kArmUxth, g.DefineAsRegister(m.node()),
g.UseRegister(m.left().node()), g.TempImmediate(0));
return;
}
// Try to interpret this AND as BFC.
if (IsSupported(ARMv7)) {
width = 32 - width;
msb = base::bits::CountLeadingZeros32(~value);
uint32_t lsb = base::bits::CountTrailingZeros32(~value);
if (msb + width + lsb == 32) {
Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.TempImmediate(lsb), g.TempImmediate(width));
return;
}
}
}
VisitBinop(this, node, kArmAnd, kArmAnd);
}
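VisitWord32And now keys entirely off the shape of the constant mask: a contiguous run of low set bits becomes UBFX (ARMv7), exactly 0xffff becomes UXTH, an encodable inverted mask becomes BIC, and a contiguous run of cleared bits becomes BFC (ARMv7). A sketch of the contiguous-low-mask test, with the bit helpers reimplemented here purely for illustration:
#include <cstdint>

uint32_t CountPopulation32(uint32_t v)   { uint32_t n = 0; for (; v; v >>= 1) n += v & 1; return n; }
uint32_t CountLeadingZeros32(uint32_t v) { uint32_t n = 32; for (; v; v >>= 1) --n; return n; }

// True for masks like 0xff, 0x3ff, 0xffff: when the population count plus
// the leading zeros cover the whole word, the set bits run contiguously
// from bit 0, so 'x & mask' is UBFX(x, 0, width).
bool IsContiguousLowMask(uint32_t mask, uint32_t* width) {
  *width = CountPopulation32(mask);
  return *width != 0 && CountLeadingZeros32(mask) + *width == 32;
}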
@@ -559,10 +610,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
uint32_t msb = base::bits::CountLeadingZeros32(value);
if (msb + width + lsb == 32) {
DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(value));
Emit(kArmUbfx, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
g.TempImmediate(width));
return;
return EmitUbfx(this, node, mleft.left().node(), lsb, width);
}
}
}
@@ -571,6 +619,20 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
void InstructionSelector::VisitWord32Sar(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().Is(16) && m.right().Is(16)) {
Emit(kArmSxth, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(0));
return;
} else if (mleft.right().Is(24) && m.right().Is(24)) {
Emit(kArmSxtb, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(0));
return;
}
}
VisitShift(this, node, TryMatchASR);
}
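The shift pair matched above is the classic sign-extension idiom: shifting left and then arithmetically right by 16 (or 24) keeps only the sign-extended low halfword (or byte), which is exactly what sxth (or sxtb) computes. A quick equivalence check (the left shift is done on an unsigned value, and arithmetic right shift of negatives is assumed, to sidestep signed-shift pitfalls):
#include <cassert>
#include <cstdint>

int32_t ViaShifts(uint32_t x) { return static_cast<int32_t>(x << 16) >> 16; }
int32_t ViaSxth(uint32_t x)   { return static_cast<int16_t>(x & 0xffff); }

int main() {
  for (uint32_t v : {0u, 1u, 0x8000u, 0xffffu, 0x12345678u}) {
    assert(ViaShifts(v) == ViaSxth(v));
  }
}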
@@ -583,31 +645,113 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
void InstructionSelector::VisitInt32Add(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
return;
}
if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
Int32BinopMatcher mright(m.right().node());
Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
return;
}
if (m.left().IsInt32MulHigh() && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
Emit(kArmSmmla, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
return;
if (CanCover(node, m.left().node())) {
switch (m.left().opcode()) {
case IrOpcode::kInt32Mul: {
Int32BinopMatcher mleft(m.left().node());
Emit(kArmMla, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()),
g.UseRegister(m.right().node()));
return;
}
case IrOpcode::kInt32MulHigh: {
Int32BinopMatcher mleft(m.left().node());
Emit(kArmSmmla, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()),
g.UseRegister(m.right().node()));
return;
}
case IrOpcode::kWord32And: {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().Is(0xff)) {
Emit(kArmUxtab, g.DefineAsRegister(node),
g.UseRegister(m.right().node()),
g.UseRegister(mleft.left().node()), g.TempImmediate(0));
return;
} else if (mleft.right().Is(0xffff)) {
Emit(kArmUxtah, g.DefineAsRegister(node),
g.UseRegister(m.right().node()),
g.UseRegister(mleft.left().node()), g.TempImmediate(0));
return;
}
}
case IrOpcode::kWord32Sar: {
Int32BinopMatcher mleft(m.left().node());
if (CanCover(mleft.node(), mleft.left().node()) &&
mleft.left().IsWord32Shl()) {
Int32BinopMatcher mleftleft(mleft.left().node());
if (mleft.right().Is(24) && mleftleft.right().Is(24)) {
Emit(kArmSxtab, g.DefineAsRegister(node),
g.UseRegister(m.right().node()),
g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
return;
} else if (mleft.right().Is(16) && mleftleft.right().Is(16)) {
Emit(kArmSxtah, g.DefineAsRegister(node),
g.UseRegister(m.right().node()),
g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
return;
}
}
}
default:
break;
}
}
if (m.right().IsInt32MulHigh() && CanCover(node, m.right().node())) {
Int32BinopMatcher mright(m.right().node());
Emit(kArmSmmla, g.DefineAsRegister(node),
g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
return;
if (CanCover(node, m.right().node())) {
switch (m.right().opcode()) {
case IrOpcode::kInt32Mul: {
Int32BinopMatcher mright(m.right().node());
Emit(kArmMla, g.DefineAsRegister(node),
g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()),
g.UseRegister(m.left().node()));
return;
}
case IrOpcode::kInt32MulHigh: {
Int32BinopMatcher mright(m.right().node());
Emit(kArmSmmla, g.DefineAsRegister(node),
g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()),
g.UseRegister(m.left().node()));
return;
}
case IrOpcode::kWord32And: {
Int32BinopMatcher mright(m.right().node());
if (mright.right().Is(0xff)) {
Emit(kArmUxtab, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()), g.TempImmediate(0));
return;
} else if (mright.right().Is(0xffff)) {
Emit(kArmUxtah, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()), g.TempImmediate(0));
return;
}
}
case IrOpcode::kWord32Sar: {
Int32BinopMatcher mright(m.right().node());
if (CanCover(mright.node(), mright.left().node()) &&
mright.left().IsWord32Shl()) {
Int32BinopMatcher mrightleft(mright.left().node());
if (mright.right().Is(24) && mrightleft.right().Is(24)) {
Emit(kArmSxtab, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
return;
} else if (mright.right().Is(16) && mrightleft.right().Is(16)) {
Emit(kArmSxtah, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
return;
}
}
}
default:
break;
}
}
VisitBinop(this, node, kArmAdd, kArmAdd);
}
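VisitInt32Add now recognizes the same combinable shapes on either side of the add: a multiply folds into mla/smmla, an AND with 0xff or 0xffff folds into uxtab/uxtah, and a shift-based sign extension folds into sxtab/sxtah. The expressions being fused, spelled out (descriptive names, not V8 API):
#include <cstdint>

int32_t  Mla(int32_t acc, int32_t a, int32_t b) { return acc + a * b; }  // kArmMla
uint32_t Uxtab(uint32_t acc, uint32_t x) { return acc + (x & 0xff); }    // kArmUxtab
uint32_t Uxtah(uint32_t acc, uint32_t x) { return acc + (x & 0xffff); }  // kArmUxtah
int32_t  Sxtah(int32_t acc, uint32_t x) {                                // kArmSxtah
  return acc + static_cast<int16_t>(x & 0xffff);  // acc + ((x << 16) >> 16)
}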
@@ -786,16 +930,16 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
void InstructionSelector::VisitFloat64Add(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
Float64BinopMatcher m(node);
if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
Float64BinopMatcher mleft(m.left().node());
Emit(kArmVmlaF64, g.DefineSameAsFirst(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
}
if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
Int32BinopMatcher mright(m.right().node());
Float64BinopMatcher mright(m.right().node());
Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
@@ -807,9 +951,14 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
void InstructionSelector::VisitFloat64Sub(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
Float64BinopMatcher m(node);
if (m.left().IsMinusZero()) {
Emit(kArmVnegF64, g.DefineAsRegister(node),
g.UseRegister(m.right().node()));
return;
}
if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
Int32BinopMatcher mright(m.right().node());
Float64BinopMatcher mright(m.right().node());
Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
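The new minus-zero fold at the top of VisitFloat64Sub is sound because IEEE-754 gives (-0.0) - x == -x for every x, signed zeros included; the analogous fold for a +0.0 left operand would be wrong, since 0.0 - 0.0 is +0.0 while negation gives -0.0. A two-line check:
#include <cassert>
#include <cmath>

int main() {
  assert(std::signbit(-0.0 - 0.0));   // (-0.0) - x behaves like -x here
  assert(!std::signbit(0.0 - 0.0));   // but 0.0 - 0.0 is +0.0, not -0.0
}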
@@ -820,13 +969,7 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
void InstructionSelector::VisitFloat64Mul(Node* node) {
ArmOperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.right().Is(-1.0)) {
Emit(kArmVnegF64, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
} else {
VisitRRRFloat64(this, kArmVmulF64, node);
}
VisitRRRFloat64(this, kArmVmulF64, node);
}
@@ -874,7 +1017,7 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
void InstructionSelector::VisitCall(Node* node) {
ArmOperandGenerator g(this);
CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
FrameStateDescriptor* frame_state_descriptor = NULL;
if (descriptor->NeedsFrameState()) {
@@ -1102,10 +1245,6 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
BasicBlock* fbranch) {
FlagsContinuation cont(kNotEqual, tbranch, fbranch);
if (IsNextInAssemblyOrder(tbranch)) { // We can fallthru to the true block.
cont.Negate();
cont.SwapBlocks();
}
VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
}
@@ -1187,9 +1326,7 @@ MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kInt32ModIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe |
MachineOperatorBuilder::kUint32ModIsSafe;
MachineOperatorBuilder::kUint32DivIsSafe;
if (CpuFeatures::IsSupported(ARMv8)) {
flags |= MachineOperatorBuilder::kFloat64Floor |

4
deps/v8/src/compiler/arm/linkage-arm.cc

@@ -51,9 +51,9 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
CallDescriptor* Linkage::GetStubCallDescriptor(
const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
CallDescriptor::Flags flags, Zone* zone) {
CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
flags);
flags, properties);
}

374
deps/v8/src/compiler/arm64/code-generator-arm64.cc

@@ -24,6 +24,18 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
DoubleRegister InputFloat32Register(int index) {
return InputDoubleRegister(index).S();
}
DoubleRegister InputFloat64Register(int index) {
return InputDoubleRegister(index);
}
DoubleRegister OutputFloat32Register() { return OutputDoubleRegister().S(); }
DoubleRegister OutputFloat64Register() { return OutputDoubleRegister(); }
Register InputRegister32(int index) {
return ToRegister(instr_->InputAt(index)).W();
}
@@ -106,9 +118,8 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
return MemOperand(no_reg);
}
MemOperand MemoryOperand() {
int index = 0;
return MemoryOperand(&index);
MemOperand MemoryOperand(int first_index = 0) {
return MemoryOperand(&first_index);
}
Operand ToOperand(InstructionOperand* op) {
@@ -142,6 +153,9 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
return Operand(constant.ToExternalReference());
case Constant::kHeapObject:
return Operand(constant.ToHeapObject());
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): RPO immediates on arm64.
break;
}
UNREACHABLE();
return Operand(-1);
@@ -160,6 +174,106 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
};
namespace {
class OutOfLineLoadNaN32 FINAL : public OutOfLineCode {
public:
OutOfLineLoadNaN32(CodeGenerator* gen, DoubleRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() FINAL {
__ Fmov(result_, std::numeric_limits<float>::quiet_NaN());
}
private:
DoubleRegister const result_;
};
class OutOfLineLoadNaN64 FINAL : public OutOfLineCode {
public:
OutOfLineLoadNaN64(CodeGenerator* gen, DoubleRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() FINAL {
__ Fmov(result_, std::numeric_limits<double>::quiet_NaN());
}
private:
DoubleRegister const result_;
};
class OutOfLineLoadZero FINAL : public OutOfLineCode {
public:
OutOfLineLoadZero(CodeGenerator* gen, Register result)
: OutOfLineCode(gen), result_(result) {}
void Generate() FINAL { __ Mov(result_, 0); }
private:
Register const result_;
};
} // namespace
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width) \
do { \
auto result = i.OutputFloat##width##Register(); \
auto buffer = i.InputRegister(0); \
auto offset = i.InputRegister32(1); \
auto length = i.InputOperand32(2); \
__ Cmp(offset, length); \
auto ool = new (zone()) OutOfLineLoadNaN##width(this, result); \
__ B(hs, ool->entry()); \
__ Ldr(result, MemOperand(buffer, offset, UXTW)); \
__ Bind(ool->exit()); \
} while (0)
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
auto result = i.OutputRegister32(); \
auto buffer = i.InputRegister(0); \
auto offset = i.InputRegister32(1); \
auto length = i.InputOperand32(2); \
__ Cmp(offset, length); \
auto ool = new (zone()) OutOfLineLoadZero(this, result); \
__ B(hs, ool->entry()); \
__ asm_instr(result, MemOperand(buffer, offset, UXTW)); \
__ Bind(ool->exit()); \
} while (0)
#define ASSEMBLE_CHECKED_STORE_FLOAT(width) \
do { \
auto buffer = i.InputRegister(0); \
auto offset = i.InputRegister32(1); \
auto length = i.InputOperand32(2); \
auto value = i.InputFloat##width##Register(3); \
__ Cmp(offset, length); \
Label done; \
__ B(hs, &done); \
__ Str(value, MemOperand(buffer, offset, UXTW)); \
__ Bind(&done); \
} while (0)
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
do { \
auto buffer = i.InputRegister(0); \
auto offset = i.InputRegister32(1); \
auto length = i.InputOperand32(2); \
auto value = i.InputRegister32(3); \
__ Cmp(offset, length); \
Label done; \
__ B(hs, &done); \
__ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
__ Bind(&done); \
} while (0)
#define ASSEMBLE_SHIFT(asm_instr, width) \
do { \
if (instr->InputAt(1)->IsRegister()) { \
@@ -172,15 +286,6 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
} while (0)
#define ASSEMBLE_TEST_AND_BRANCH(asm_instr, width) \
do { \
bool fallthrough = IsNextInAssemblyOrder(i.InputRpo(3)); \
__ asm_instr(i.InputRegister##width(0), i.InputInt6(1), \
GetLabel(i.InputRpo(2))); \
if (!fallthrough) __ B(GetLabel(i.InputRpo(3))); \
} while (0)
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Arm64OperandConverter i(this, instr);
@@ -216,7 +321,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kArchJmp:
__ B(GetLabel(i.InputRpo(0)));
AssembleArchJump(i.InputRpo(0));
break;
case kArchNop:
// don't emit code for nops.
@@ -418,6 +523,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Mov32:
__ Mov(i.OutputRegister32(), i.InputRegister32(0));
break;
case kArm64Sxtb32:
__ Sxtb(i.OutputRegister32(), i.InputRegister32(0));
break;
case kArm64Sxth32:
__ Sxth(i.OutputRegister32(), i.InputRegister32(0));
break;
case kArm64Sxtw:
__ Sxtw(i.OutputRegister(), i.InputRegister32(0));
break;
@@ -429,17 +540,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt8(1),
i.InputInt8(2));
break;
case kArm64Tbz:
ASSEMBLE_TEST_AND_BRANCH(Tbz, 64);
break;
case kArm64Tbz32:
ASSEMBLE_TEST_AND_BRANCH(Tbz, 32);
case kArm64TestAndBranch32:
case kArm64TestAndBranch:
// Pseudo instructions turned into tbz/tbnz in AssembleArchBranch.
break;
case kArm64Tbnz:
ASSEMBLE_TEST_AND_BRANCH(Tbnz, 64);
break;
case kArm64Tbnz32:
ASSEMBLE_TEST_AND_BRANCH(Tbnz, 32);
case kArm64CompareAndBranch32:
// Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
break;
case kArm64Claim: {
int words = MiscField::decode(instr->opcode());
@@ -597,83 +703,154 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
}
}
// Assemble branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
FlagsCondition condition) {
Arm64OperandConverter i(this, instr);
Label done;
// Emit a branch. The true and false targets are always the last two inputs
// to the instruction.
BasicBlock::RpoNumber tblock =
i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
BasicBlock::RpoNumber fblock =
i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
bool fallthru = IsNextInAssemblyOrder(fblock);
Label* tlabel = GetLabel(tblock);
Label* flabel = fallthru ? &done : GetLabel(fblock);
switch (condition) {
case kUnorderedEqual:
__ B(vs, flabel);
// Fall through.
case kEqual:
__ B(eq, tlabel);
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
break;
case kUnorderedNotEqual:
__ B(vs, tlabel);
// Fall through.
case kNotEqual:
__ B(ne, tlabel);
case kCheckedLoadUint8:
ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrb);
break;
case kSignedLessThan:
__ B(lt, tlabel);
case kCheckedLoadInt16:
ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsh);
break;
case kSignedGreaterThanOrEqual:
__ B(ge, tlabel);
case kCheckedLoadUint16:
ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrh);
break;
case kSignedLessThanOrEqual:
__ B(le, tlabel);
case kCheckedLoadWord32:
ASSEMBLE_CHECKED_LOAD_INTEGER(Ldr);
break;
case kSignedGreaterThan:
__ B(gt, tlabel);
case kCheckedLoadFloat32:
ASSEMBLE_CHECKED_LOAD_FLOAT(32);
break;
case kUnorderedLessThan:
__ B(vs, flabel);
// Fall through.
case kUnsignedLessThan:
__ B(lo, tlabel);
case kCheckedLoadFloat64:
ASSEMBLE_CHECKED_LOAD_FLOAT(64);
break;
case kUnorderedGreaterThanOrEqual:
__ B(vs, tlabel);
// Fall through.
case kUnsignedGreaterThanOrEqual:
__ B(hs, tlabel);
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(Strb);
break;
case kUnorderedLessThanOrEqual:
__ B(vs, flabel);
// Fall through.
case kUnsignedLessThanOrEqual:
__ B(ls, tlabel);
case kCheckedStoreWord16:
ASSEMBLE_CHECKED_STORE_INTEGER(Strh);
break;
case kUnorderedGreaterThan:
__ B(vs, tlabel);
// Fall through.
case kUnsignedGreaterThan:
__ B(hi, tlabel);
case kCheckedStoreWord32:
ASSEMBLE_CHECKED_STORE_INTEGER(Str);
break;
case kOverflow:
__ B(vs, tlabel);
case kCheckedStoreFloat32:
ASSEMBLE_CHECKED_STORE_FLOAT(32);
break;
case kNotOverflow:
__ B(vc, tlabel);
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FLOAT(64);
break;
}
if (!fallthru) __ B(flabel); // no fallthru to flabel.
__ Bind(&done);
}
// Assemble branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
Arm64OperandConverter i(this, instr);
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
FlagsCondition condition = branch->condition;
ArchOpcode opcode = instr->arch_opcode();
if (opcode == kArm64CompareAndBranch32) {
switch (condition) {
case kEqual:
__ Cbz(i.InputRegister32(0), tlabel);
break;
case kNotEqual:
__ Cbnz(i.InputRegister32(0), tlabel);
break;
default:
UNREACHABLE();
}
} else if (opcode == kArm64TestAndBranch32) {
switch (condition) {
case kEqual:
__ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel);
break;
case kNotEqual:
__ Tbnz(i.InputRegister32(0), i.InputInt5(1), tlabel);
break;
default:
UNREACHABLE();
}
} else if (opcode == kArm64TestAndBranch) {
switch (condition) {
case kEqual:
__ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel);
break;
case kNotEqual:
__ Tbnz(i.InputRegister64(0), i.InputInt6(1), tlabel);
break;
default:
UNREACHABLE();
}
} else {
switch (condition) {
case kUnorderedEqual:
// The "eq" condition will not catch the unordered case.
// The jump/fall-through to the false label will be used if the comparison
// was unordered.
case kEqual:
__ B(eq, tlabel);
break;
case kUnorderedNotEqual:
// Unordered or not equal can be tested with the "ne" condition.
// See ARMv8 manual C1.2.3 - Condition Code.
case kNotEqual:
__ B(ne, tlabel);
break;
case kSignedLessThan:
__ B(lt, tlabel);
break;
case kSignedGreaterThanOrEqual:
__ B(ge, tlabel);
break;
case kSignedLessThanOrEqual:
__ B(le, tlabel);
break;
case kSignedGreaterThan:
__ B(gt, tlabel);
break;
case kUnorderedLessThan:
// The "lo" condition will not catch the unordered case.
// The jump/fall-through to the false label will be used if the comparison
// was unordered.
case kUnsignedLessThan:
__ B(lo, tlabel);
break;
case kUnorderedGreaterThanOrEqual:
// Unordered, greater than or equal can be tested with the "hs" condition.
// See ARMv8 manual C1.2.3 - Condition Code.
case kUnsignedGreaterThanOrEqual:
__ B(hs, tlabel);
break;
case kUnorderedLessThanOrEqual:
// The "ls" condition will not catch the unordered case.
// The jump/fall-through to the false label will be used if the comparison
// was unordered.
case kUnsignedLessThanOrEqual:
__ B(ls, tlabel);
break;
case kUnorderedGreaterThan:
// Unordered or greater than can be tested with the "hi" condition.
// See ARMv8 manual C1.2.3 - Condition Code.
case kUnsignedGreaterThan:
__ B(hi, tlabel);
break;
case kOverflow:
__ B(vs, tlabel);
break;
case kNotOverflow:
__ B(vc, tlabel);
break;
}
}
if (!branch->fallthru) __ B(flabel); // no fallthru to flabel.
}
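A minimal standalone sketch (hypothetical names, not TurboFan code) of the pseudo-instruction contract above: the instruction selector only records which bit to test and the condition, and AssembleArchBranch chooses tbz/tbnz once the true/false labels are known.

#include <cstdint>
#include <cstdio>

enum Condition { kCondEqual, kCondNotEqual };

// Stand-ins for the assembler's Tbz/Tbnz: "branch taken" (return true)
// when the tested bit is zero / non-zero.
static bool Tbz(uint32_t reg, int bit) { return ((reg >> bit) & 1) == 0; }
static bool Tbnz(uint32_t reg, int bit) { return ((reg >> bit) & 1) != 0; }

// Mirrors the kArm64TestAndBranch32 case: the condition carried by the
// pseudo instruction picks tbz vs. tbnz at branch-assembly time.
static bool TestAndBranch32(Condition cond, uint32_t reg, int bit) {
  return cond == kCondEqual ? Tbz(reg, bit) : Tbnz(reg, bit);
}

int main() {
  printf("%d\n", TestAndBranch32(kCondEqual, 0x8u, 3));     // bit 3 set -> 0
  printf("%d\n", TestAndBranch32(kCondNotEqual, 0x8u, 3));  // bit 3 set -> 1
  return 0;
}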
void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ B(GetLabel(target));
}
@@ -757,7 +934,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
cc = vc;
break;
}
__ bind(&check);
__ Bind(&check);
__ Cset(reg, cc);
__ Bind(&done);
}
@@ -792,23 +969,6 @@ void CodeGenerator::AssemblePrologue() {
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
// TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
// +2 for return address and saved frame pointer.
int receiver_slot = info->scope()->num_parameters() + 2;
__ Ldr(x10, MemOperand(fp, receiver_slot * kXRegSize));
__ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
__ Ldr(x10, GlobalObjectMemOperand());
__ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
__ Str(x10, MemOperand(fp, receiver_slot * kXRegSize));
__ Bind(&ok);
}
} else {
__ SetStackPointer(jssp);
__ StubPrologue();
@@ -966,8 +1126,8 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
} else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
UseScratchRegisterScope scope(masm());
CPURegister temp_0 = scope.AcquireX();
CPURegister temp_1 = scope.AcquireX();
DoubleRegister temp_0 = scope.AcquireD();
DoubleRegister temp_1 = scope.AcquireD();
MemOperand src = g.ToMemOperand(source, masm());
MemOperand dst = g.ToMemOperand(destination, masm());
__ Ldr(temp_0, src);

9
deps/v8/src/compiler/arm64/instruction-codes-arm64.h

@@ -65,13 +65,14 @@ namespace compiler {
V(Arm64Ror) \
V(Arm64Ror32) \
V(Arm64Mov32) \
V(Arm64Sxtb32) \
V(Arm64Sxth32) \
V(Arm64Sxtw) \
V(Arm64Ubfx) \
V(Arm64Ubfx32) \
V(Arm64Tbz) \
V(Arm64Tbz32) \
V(Arm64Tbnz) \
V(Arm64Tbnz32) \
V(Arm64TestAndBranch32) \
V(Arm64TestAndBranch) \
V(Arm64CompareAndBranch32) \
V(Arm64Claim) \
V(Arm64Poke) \
V(Arm64PokePairZero) \

169
deps/v8/src/compiler/arm64/instruction-selector-arm64.cc

@@ -362,6 +362,72 @@ void InstructionSelector::VisitStore(Node* node) {
}
void InstructionSelector::VisitCheckedLoad(Node* node) {
MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
MachineType typ = TypeOf(OpParameter<MachineType>(node));
Arm64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
ArchOpcode opcode;
switch (rep) {
case kRepWord8:
opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
case kRepWord16:
opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
case kRepWord32:
opcode = kCheckedLoadWord32;
break;
case kRepFloat32:
opcode = kCheckedLoadFloat32;
break;
case kRepFloat64:
opcode = kCheckedLoadFloat64;
break;
default:
UNREACHABLE();
return;
}
Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
g.UseRegister(offset), g.UseOperand(length, kArithmeticImm));
}
void InstructionSelector::VisitCheckedStore(Node* node) {
MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
Arm64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
ArchOpcode opcode;
switch (rep) {
case kRepWord8:
opcode = kCheckedStoreWord8;
break;
case kRepWord16:
opcode = kCheckedStoreWord16;
break;
case kRepWord32:
opcode = kCheckedStoreWord32;
break;
case kRepFloat32:
opcode = kCheckedStoreFloat32;
break;
case kRepFloat64:
opcode = kCheckedStoreFloat64;
break;
default:
UNREACHABLE();
return;
}
Emit(opcode, nullptr, g.UseRegister(buffer), g.UseRegister(offset),
g.UseOperand(length, kArithmeticImm), g.UseRegister(value));
}
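For orientation, a checked access guards a typed-array element operation against the buffer length. A small C++ analogue of the assumed semantics (out-of-bounds loads yield zero, out-of-bounds stores are dropped), which is what the bounds-check branches emitted by the ASSEMBLE_CHECKED_* macros implement around Ldr/Str:

#include <cstddef>
#include <cstdint>

// Analogue of a kCheckedLoadWord32: bounds check, then the load.
static int32_t CheckedLoadWord32(const int32_t* buffer, size_t offset,
                                 size_t length) {
  return offset < length ? buffer[offset] : 0;  // OOB read produces 0
}

// Analogue of a kCheckedStoreWord32: bounds check, then the store.
static void CheckedStoreWord32(int32_t* buffer, size_t offset, size_t length,
                               int32_t value) {
  if (offset < length) buffer[offset] = value;  // OOB write is ignored
}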
template <typename Matcher>
static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
ArchOpcode opcode, bool left_can_cover,
@@ -542,6 +608,17 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
void InstructionSelector::VisitWord64Shl(Node* node) {
Arm64OperandGenerator g(this);
Int64BinopMatcher m(node);
if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
m.right().IsInRange(32, 63)) {
// There's no need to sign/zero-extend to 64-bit if we shift out the upper
// 32 bits anyway.
Emit(kArm64Lsl, g.DefineAsRegister(node),
g.UseRegister(m.left().node()->InputAt(0)),
g.UseImmediate(m.right().node()));
return;
}
VisitRRO(this, kArm64Lsl, node, kShift64Imm);
}
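The comment's claim is easy to verify: for shift amounts in [32, 63], every bit produced by the sign/zero extension is shifted out, so only the low 32 bits of the input matter. A standalone check:

#include <cassert>
#include <cstdint>

int main() {
  int32_t x = -123456;  // any value with the sign bit set
  for (int k = 32; k <= 63; ++k) {
    uint64_t extended = static_cast<uint64_t>(static_cast<int64_t>(x)) << k;
    uint64_t low_only = static_cast<uint64_t>(static_cast<uint32_t>(x)) << k;
    assert(extended == low_only);  // the extension bits were shifted out
  }
  return 0;
}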
@@ -597,6 +674,21 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
void InstructionSelector::VisitWord32Sar(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
// Select Sxth/Sxtb for (x << K) >> K where K is 16 or 24.
if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().Is(16) && m.right().Is(16)) {
Emit(kArm64Sxth32, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()));
return;
} else if (mleft.right().Is(24) && m.right().Is(24)) {
Emit(kArm64Sxtb32, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()));
return;
}
}
VisitRRO(this, kArm64Asr32, node, kShift32Imm);
}
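The matched pattern (x << 16) >> 16, with an arithmetic right shift, is exactly a sign extension of the low half word, which is what sxth computes (and (x << 24) >> 24 likewise maps to sxtb). A quick check, assuming the usual two's-complement conversions and arithmetic right shift (guaranteed since C++20):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t x = 0xABCD9234u;
  // (x << 16) >> 16 as a signed (arithmetic) shift...
  int32_t via_shifts = static_cast<int32_t>(x << 16) >> 16;
  // ...equals sign-extending the low 16 bits, i.e. Sxth.
  int32_t via_sxth = static_cast<int16_t>(x & 0xFFFFu);
  assert(via_shifts == via_sxth);
  return 0;
}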
@@ -871,7 +963,41 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
Node* value = node->InputAt(0);
switch (value->opcode()) {
case IrOpcode::kWord32And:
case IrOpcode::kWord32Or:
case IrOpcode::kWord32Xor:
case IrOpcode::kWord32Shl:
case IrOpcode::kWord32Shr:
case IrOpcode::kWord32Sar:
case IrOpcode::kWord32Ror:
case IrOpcode::kWord32Equal:
case IrOpcode::kInt32Add:
case IrOpcode::kInt32AddWithOverflow:
case IrOpcode::kInt32Sub:
case IrOpcode::kInt32SubWithOverflow:
case IrOpcode::kInt32Mul:
case IrOpcode::kInt32MulHigh:
case IrOpcode::kInt32Div:
case IrOpcode::kInt32Mod:
case IrOpcode::kInt32LessThan:
case IrOpcode::kInt32LessThanOrEqual:
case IrOpcode::kUint32Div:
case IrOpcode::kUint32LessThan:
case IrOpcode::kUint32LessThanOrEqual:
case IrOpcode::kUint32Mod:
case IrOpcode::kUint32MulHigh: {
// 32-bit operations will write their result in a W register (implicitly
// clearing the top 32 bits of the corresponding X register) so the
// zero-extension is a no-op.
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
return;
}
default:
break;
}
Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(value));
}
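A sketch (hypothetical opcode names) of the predicate behind this new fast path: on ARM64, a 32-bit operation writes a W register, and writing a W register zeroes the upper half of the corresponding X register, so the explicit zero-extension can degenerate into a register alias (kArchNop).

// Whether a producing operation already leaves the upper 32 bits zeroed,
// making ChangeUint32ToUint64 free.
enum class Op { Word32And, Word32Shl, Int32Add, Uint32Div, Float64Add, Load };

static bool ResultIsAlreadyZeroExtended(Op op) {
  switch (op) {
    case Op::Word32And:
    case Op::Word32Shl:
    case Op::Int32Add:
    case Op::Uint32Div:
      return true;   // 32-bit ALU result lives in a W register
    default:
      return false;  // not on the safe list: emit kArm64Mov32 instead
  }
}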
@@ -884,6 +1010,18 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Arm64OperandGenerator g(this);
Node* value = node->InputAt(0);
if (CanCover(node, value)) {
Int64BinopMatcher m(value);
if ((m.IsWord64Sar() && m.right().HasValue() &&
(m.right().Value() == 32)) ||
(m.IsWord64Shr() && m.right().IsInRange(32, 63))) {
Emit(kArm64Lsr, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseImmediate(m.right().node()));
return;
}
}
Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
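Why a plain lsr suffices here: truncation keeps only the low 32 bits of the result, and for an arithmetic shift by exactly 32 those bits carry no sign-fill, so the arithmetic/logical distinction vanishes. A standalone check (assuming the usual arithmetic shift for signed values, guaranteed since C++20):

#include <cassert>
#include <cstdint>

int main() {
  int64_t x = INT64_C(-0x123456789ABCDEF0);
  uint32_t via_sar = static_cast<uint32_t>(x >> 32);  // Word64Sar, truncated
  uint32_t via_lsr =
      static_cast<uint32_t>(static_cast<uint64_t>(x) >> 32);  // Lsr
  assert(via_sar == via_lsr);
  return 0;
}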
@@ -943,7 +1081,7 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
void InstructionSelector::VisitCall(Node* node) {
Arm64OperandGenerator g(this);
CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
FrameStateDescriptor* frame_state_descriptor = NULL;
if (descriptor->NeedsFrameState()) {
@@ -1097,12 +1235,6 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
FlagsContinuation cont(kNotEqual, tbranch, fbranch);
// If we can fall through to the true block, invert the branch.
if (IsNextInAssemblyOrder(tbranch)) {
cont.Negate();
cont.SwapBlocks();
}
// Try to combine with comparisons against 0 by simply inverting the branch.
while (CanCover(user, value)) {
if (value->opcode() == IrOpcode::kWord32Equal) {
@@ -1211,9 +1343,8 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont.condition() == kEqual) ||
(cont.condition() == kNotEqual));
ArchOpcode opcode =
(cont.condition() == kEqual) ? kArm64Tbz32 : kArm64Tbnz32;
Emit(opcode, NULL, g.UseRegister(m.left().node()),
Emit(cont.Encode(kArm64TestAndBranch32), NULL,
g.UseRegister(m.left().node()),
g.TempImmediate(
base::bits::CountTrailingZeros32(m.right().Value())),
g.Label(cont.true_block()),
@@ -1230,9 +1361,8 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont.condition() == kEqual) ||
(cont.condition() == kNotEqual));
ArchOpcode opcode =
(cont.condition() == kEqual) ? kArm64Tbz : kArm64Tbnz;
Emit(opcode, NULL, g.UseRegister(m.left().node()),
Emit(cont.Encode(kArm64TestAndBranch), NULL,
g.UseRegister(m.left().node()),
g.TempImmediate(
base::bits::CountTrailingZeros64(m.right().Value())),
g.Label(cont.true_block()),
@@ -1247,8 +1377,10 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
}
// Branch could not be combined with a compare, emit compare against 0.
VisitWord32Test(this, value, &cont);
// Branch could not be combined with a compare; compare against 0 and branch.
Emit(cont.Encode(kArm64CompareAndBranch32), NULL, g.UseRegister(value),
g.Label(cont.true_block()),
g.Label(cont.false_block()))->MarkAsControl();
}
@@ -1388,7 +1520,10 @@
return MachineOperatorBuilder::kFloat64Floor |
MachineOperatorBuilder::kFloat64Ceil |
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway;
MachineOperatorBuilder::kFloat64RoundTiesAway |
MachineOperatorBuilder::kWord32ShiftIsSafe |
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe;
}
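The newly advertised *IsSafe flags mirror ARM64 hardware behavior: variable shifts use only the low bits of the shift register, and sdiv/udiv never trap; division by zero yields 0 and INT32_MIN / -1 wraps. A C++ analogue of the division semantics the flag lets the compiler rely on (illustrative, not V8 code):

#include <climits>
#include <cstdint>

// What kInt32DivIsSafe promises: the division instruction cannot fault,
// so no explicit guards are needed around it.
static int32_t Arm64StyleDiv(int32_t lhs, int32_t rhs) {
  if (rhs == 0) return 0;                         // sdiv by zero yields 0
  if (lhs == INT32_MIN && rhs == -1) return lhs;  // wraps instead of trapping
  return lhs / rhs;
}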
} // namespace compiler
} // namespace internal

4
deps/v8/src/compiler/arm64/linkage-arm64.cc

@@ -51,9 +51,9 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
CallDescriptor* Linkage::GetStubCallDescriptor(
const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
CallDescriptor::Flags flags, Zone* zone) {
CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
flags);
flags, properties);
}

138
deps/v8/src/compiler/ast-graph-builder.cc

@@ -20,14 +20,14 @@ namespace internal {
namespace compiler {
AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
JSGraph* jsgraph)
JSGraph* jsgraph, LoopAssignmentAnalysis* loop)
: StructuredGraphBuilder(local_zone, jsgraph->graph(), jsgraph->common()),
info_(info),
jsgraph_(jsgraph),
globals_(0, local_zone),
breakable_(NULL),
execution_context_(NULL),
loop_assignment_analysis_(NULL) {
loop_assignment_analysis_(loop) {
InitializeAstVisitor(local_zone);
}
@@ -62,23 +62,26 @@ bool AstGraphBuilder::CreateGraph() {
int parameter_count = info()->num_parameters();
graph()->SetStart(graph()->NewNode(common()->Start(parameter_count)));
if (FLAG_loop_assignment_analysis) {
// TODO(turbofan): use a temporary zone for the loop assignment analysis.
AstLoopAssignmentAnalyzer analyzer(zone(), info());
loop_assignment_analysis_ = analyzer.Analyze();
}
// Initialize the top-level environment.
Environment env(this, scope, graph()->start());
set_environment(&env);
// Initialize the incoming context.
Node* outer_context = GetFunctionContext();
set_current_context(outer_context);
// Build receiver check for sloppy mode if necessary.
// TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
Node* original_receiver = env.Lookup(scope->receiver());
Node* patched_receiver = BuildPatchReceiverToGlobalProxy(original_receiver);
env.Bind(scope->receiver(), patched_receiver);
// Build node to initialize local function context.
Node* closure = GetFunctionClosure();
Node* outer = GetFunctionContext();
Node* inner = BuildLocalFunctionContext(outer, closure);
Node* inner_context = BuildLocalFunctionContext(outer_context, closure);
// Push top-level function scope for the function body.
ContextScope top_context(this, scope, inner);
ContextScope top_context(this, scope, inner_context);
// Build the arguments object if it is used.
BuildArgumentsObject(scope->arguments());
@@ -139,26 +142,6 @@ static LhsKind DetermineLhsKind(Expression* expr) {
}
// Helper to find an existing shared function info in the baseline code for the
// given function literal. Used to canonicalize SharedFunctionInfo objects.
static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
Code* unoptimized_code, FunctionLiteral* expr) {
int start_position = expr->start_position();
for (RelocIterator it(unoptimized_code); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
if (rinfo->rmode() != RelocInfo::EMBEDDED_OBJECT) continue;
Object* obj = rinfo->target_object();
if (obj->IsSharedFunctionInfo()) {
SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
if (shared->start_position() == start_position) {
return Handle<SharedFunctionInfo>(shared);
}
}
}
return Handle<SharedFunctionInfo>();
}
StructuredGraphBuilder::Environment* AstGraphBuilder::CopyEnvironment(
StructuredGraphBuilder::Environment* env) {
return new (zone()) Environment(*reinterpret_cast<Environment*>(env));
@@ -386,8 +369,8 @@ void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
Handle<Oddball> value = variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
: isolate()->factory()->undefined_value();
globals()->Add(variable->name(), zone());
globals()->Add(value, zone());
globals()->push_back(variable->name());
globals()->push_back(value);
break;
}
case Variable::PARAMETER:
@@ -418,8 +401,8 @@ void AstGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
Compiler::BuildFunctionInfo(decl->fun(), info()->script(), info());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals()->Add(variable->name(), zone());
globals()->Add(function, zone());
globals()->push_back(variable->name());
globals()->push_back(function);
break;
}
case Variable::PARAMETER:
@@ -826,8 +809,8 @@ void AstGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
// Build a new shared function info if we cannot find one in the baseline
// code. We also have a stack overflow if the recursive compilation did.
Handle<SharedFunctionInfo> shared_info =
SearchSharedFunctionInfo(info()->shared_info()->code(), expr);
expr->InitializeSharedInfo(handle(info()->shared_info()->code()));
Handle<SharedFunctionInfo> shared_info = expr->shared_info();
if (shared_info.is_null()) {
shared_info = Compiler::BuildFunctionInfo(expr, info()->script(), info());
CHECK(!shared_info.is_null()); // TODO(mstarzinger): Set stack overflow?
@@ -1630,12 +1613,13 @@ void AstGraphBuilder::VisitCaseClause(CaseClause* expr) { UNREACHABLE(); }
void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
DCHECK(globals()->is_empty());
DCHECK(globals()->empty());
AstVisitor::VisitDeclarations(declarations);
if (globals()->is_empty()) return;
Handle<FixedArray> data =
isolate()->factory()->NewFixedArray(globals()->length(), TENURED);
for (int i = 0; i < globals()->length(); ++i) data->set(i, *globals()->at(i));
if (globals()->empty()) return;
int array_index = 0;
Handle<FixedArray> data = isolate()->factory()->NewFixedArray(
static_cast<int>(globals()->size()), TENURED);
for (Handle<Object> obj : *globals()) data->set(array_index++, *obj);
int encoded_flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
DeclareGlobalsNativeFlag::encode(info()->is_native()) |
DeclareGlobalsStrictMode::encode(strict_mode());
@@ -1643,7 +1627,7 @@ void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
Node* pairs = jsgraph()->Constant(data);
const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals, 3);
NewNode(op, current_context(), pairs, flags);
globals()->Rewind(0);
globals()->clear();
}
@@ -1773,10 +1757,36 @@ Node* AstGraphBuilder::ProcessArguments(const Operator* op, int arity) {
}
Node* AstGraphBuilder::BuildPatchReceiverToGlobalProxy(Node* receiver) {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object). Otherwise there is nothing left to do here.
if (info()->strict_mode() != SLOPPY || info()->is_native()) return receiver;
// There is no need to perform patching if the receiver is never used. Note
// that scope predicates are purely syntactic; a call to eval might still
// inspect the receiver value.
if (!info()->scope()->uses_this() && !info()->scope()->inner_uses_this() &&
!info()->scope()->calls_sloppy_eval()) {
return receiver;
}
IfBuilder receiver_check(this);
Node* undefined = jsgraph()->UndefinedConstant();
Node* check = NewNode(javascript()->StrictEqual(), receiver, undefined);
receiver_check.If(check);
receiver_check.Then();
environment()->Push(BuildLoadGlobalProxy());
receiver_check.Else();
environment()->Push(receiver);
receiver_check.End();
return environment()->Pop();
}
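A runnable analogue (illustrative only, hypothetical names) of the graph this builder emits: in a sloppy-mode call without an explicit receiver, undefined is replaced by the global proxy, while any other receiver passes through unchanged.

#include <iostream>
#include <optional>
#include <string>

// The receiver arrives as undefined (modeled here as nullopt) when the
// function is called without an explicit receiver object.
static std::string PatchReceiver(const std::optional<std::string>& receiver,
                                 const std::string& global_proxy) {
  return receiver.value_or(global_proxy);  // the IfBuilder's then/else merge
}

int main() {
  std::cout << PatchReceiver(std::nullopt, "globalProxy") << "\n";  // patched
  std::cout << PatchReceiver("myObject", "globalProxy") << "\n";    // unchanged
  return 0;
}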
Node* AstGraphBuilder::BuildLocalFunctionContext(Node* context, Node* closure) {
int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots <= 0) return context;
set_current_context(context);
// Allocate a new local context.
const Operator* op = javascript()->CreateFunctionContext();
@@ -1984,7 +1994,12 @@ Node* AstGraphBuilder::BuildVariableAssignment(
value = BuildHoleCheckSilent(current, value, current);
}
} else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
// Non-initializing assignments to legacy const are ignored.
// Non-initializing assignments to legacy const are
// - an exception in strict mode.
// - ignored in sloppy mode.
if (strict_mode() == STRICT) {
return BuildThrowConstAssignError(bailout_id);
}
return value;
} else if (mode == LET && op != Token::INIT_LET) {
// Perform an initialization check for let declared variables.
@@ -1998,8 +2013,8 @@ Node* AstGraphBuilder::BuildVariableAssignment(
value = BuildHoleCheckThrow(current, variable, value, bailout_id);
}
} else if (mode == CONST && op != Token::INIT_CONST) {
// All assignments to const variables are early errors.
UNREACHABLE();
// Non-initializing assignments to const are an exception in all modes.
return BuildThrowConstAssignError(bailout_id);
}
environment()->Bind(variable, value);
return value;
@@ -2013,7 +2028,12 @@ Node* AstGraphBuilder::BuildVariableAssignment(
Node* current = NewNode(op, current_context());
value = BuildHoleCheckSilent(current, value, current);
} else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
// Non-initializing assignments to legacy const are ignored.
// Non-initializing assignments to legacy const are
// - an exception in strict mode.
// - ignored in sloppy mode.
if (strict_mode() == STRICT) {
return BuildThrowConstAssignError(bailout_id);
}
return value;
} else if (mode == LET && op != Token::INIT_LET) {
// Perform an initialization check for let declared variables.
@@ -2022,8 +2042,8 @@ Node* AstGraphBuilder::BuildVariableAssignment(
Node* current = NewNode(op, current_context());
value = BuildHoleCheckThrow(current, variable, value, bailout_id);
} else if (mode == CONST && op != Token::INIT_CONST) {
// All assignments to const variables are early errors.
UNREACHABLE();
// Non-initializing assignments to const are an exception in all modes.
return BuildThrowConstAssignError(bailout_id);
}
const Operator* op = javascript()->StoreContext(depth, variable->index());
return NewNode(op, current_context(), value);
@@ -2069,6 +2089,14 @@ Node* AstGraphBuilder::BuildLoadGlobalObject() {
}
Node* AstGraphBuilder::BuildLoadGlobalProxy() {
Node* global = BuildLoadGlobalObject();
Node* proxy =
BuildLoadObjectField(global, JSGlobalObject::kGlobalProxyOffset);
return proxy;
}
Node* AstGraphBuilder::BuildToBoolean(Node* input) {
// TODO(titzer): this should be in a JSOperatorReducer.
switch (input->opcode()) {
@@ -2109,6 +2137,16 @@ Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable,
}
Node* AstGraphBuilder::BuildThrowConstAssignError(BailoutId bailout_id) {
// TODO(mstarzinger): Should be unified with the VisitThrow implementation.
const Operator* op =
javascript()->CallRuntime(Runtime::kThrowConstAssignError, 0);
Node* call = NewNode(op);
PrepareFrameState(call, bailout_id);
return call;
}
Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op) {
const Operator* js_op;
switch (op) {

40
deps/v8/src/compiler/ast-graph-builder.h

@@ -26,7 +26,8 @@ class LoopBuilder;
// of function inlining.
class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
public:
AstGraphBuilder(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph);
AstGraphBuilder(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
LoopAssignmentAnalysis* loop_assignment = NULL);
// Creates a graph by visiting the entire AST.
bool CreateGraph();
@@ -56,11 +57,7 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
// Support for control flow builders. The concrete type of the environment
// depends on the graph builder, but environments themselves are not virtual.
typedef StructuredGraphBuilder::Environment BaseEnvironment;
virtual BaseEnvironment* CopyEnvironment(BaseEnvironment* env) OVERRIDE;
// TODO(mstarzinger): The pipeline only needs to be a friend to access the
// function context. Remove as soon as the context is a parameter.
friend class Pipeline;
BaseEnvironment* CopyEnvironment(BaseEnvironment* env) OVERRIDE;
// Getters for values in the activation record.
Node* GetFunctionClosure();
@@ -72,6 +69,9 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
// other dependencies tracked by the environment might be mutated though.
//
// Builder to create a receiver check for sloppy mode.
Node* BuildPatchReceiverToGlobalProxy(Node* receiver);
// Builder to create a local function context.
Node* BuildLocalFunctionContext(Node* context, Node* closure);
@@ -92,6 +92,7 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
// Builders for accessing the function context.
Node* BuildLoadBuiltinsObject();
Node* BuildLoadGlobalObject();
Node* BuildLoadGlobalProxy();
Node* BuildLoadClosure();
Node* BuildLoadObjectField(Node* object, int offset);
@@ -100,6 +101,7 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
// Builders for error reporting at runtime.
Node* BuildThrowReferenceError(Variable* var, BailoutId bailout_id);
Node* BuildThrowConstAssignError(BailoutId bailout_id);
// Builders for dynamic hole-checks at runtime.
Node* BuildHoleCheckSilent(Node* value, Node* for_hole, Node* not_hole);
@@ -112,13 +114,13 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
// Builder for stack-check guards.
Node* BuildStackCheck();
#define DECLARE_VISIT(type) virtual void Visit##type(type* node) OVERRIDE;
#define DECLARE_VISIT(type) void Visit##type(type* node) OVERRIDE;
// Visiting functions for AST nodes make this an AstVisitor.
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
// Visiting function for declarations list is overridden.
virtual void VisitDeclarations(ZoneList<Declaration*>* declarations) OVERRIDE;
void VisitDeclarations(ZoneList<Declaration*>* declarations) OVERRIDE;
private:
CompilationInfo* info_;
@@ -126,7 +128,7 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
JSGraph* jsgraph_;
// List of global declarations for functions and variables.
ZoneList<Handle<Object> > globals_;
ZoneVector<Handle<Object>> globals_;
// Stack of breakable statements entered by the visitor.
BreakableScope* breakable_;
@@ -145,7 +147,7 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
inline StrictMode strict_mode() const;
JSGraph* jsgraph() { return jsgraph_; }
JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
ZoneList<Handle<Object> >* globals() { return &globals_; }
ZoneVector<Handle<Object>>* globals() { return &globals_; }
// Current scope during visitation.
inline Scope* current_scope() const;
@@ -345,9 +347,9 @@ class AstGraphBuilder::AstEffectContext FINAL : public AstContext {
public:
explicit AstEffectContext(AstGraphBuilder* owner)
: AstContext(owner, Expression::kEffect) {}
virtual ~AstEffectContext();
virtual void ProduceValue(Node* value) OVERRIDE;
virtual Node* ConsumeValue() OVERRIDE;
~AstEffectContext() FINAL;
void ProduceValue(Node* value) FINAL;
Node* ConsumeValue() FINAL;
};
@@ -356,9 +358,9 @@ class AstGraphBuilder::AstValueContext FINAL : public AstContext {
public:
explicit AstValueContext(AstGraphBuilder* owner)
: AstContext(owner, Expression::kValue) {}
virtual ~AstValueContext();
virtual void ProduceValue(Node* value) OVERRIDE;
virtual Node* ConsumeValue() OVERRIDE;
~AstValueContext() FINAL;
void ProduceValue(Node* value) FINAL;
Node* ConsumeValue() FINAL;
};
@@ -367,9 +369,9 @@ class AstGraphBuilder::AstTestContext FINAL : public AstContext {
public:
explicit AstTestContext(AstGraphBuilder* owner)
: AstContext(owner, Expression::kTest) {}
virtual ~AstTestContext();
virtual void ProduceValue(Node* value) OVERRIDE;
virtual Node* ConsumeValue() OVERRIDE;
~AstTestContext() FINAL;
void ProduceValue(Node* value) FINAL;
Node* ConsumeValue() FINAL;
};

2
deps/v8/src/compiler/ast-loop-assignment-analyzer.h

@@ -46,7 +46,7 @@ class AstLoopAssignmentAnalyzer : public AstVisitor {
LoopAssignmentAnalysis* Analyze();
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
#define DECLARE_VISIT(type) void Visit##type(type* node) OVERRIDE;
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT

5
deps/v8/src/compiler/basic-block-instrumentor.cc

@@ -6,10 +6,11 @@
#include <sstream>
#include "src/compiler.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/operator-properties-inl.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/schedule.h"
namespace v8 {
@@ -69,7 +70,7 @@ BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
CommonOperatorBuilder common(graph->zone());
Node* zero = graph->NewNode(common.Int32Constant(0));
Node* one = graph->NewNode(common.Int32Constant(1));
MachineOperatorBuilder machine;
MachineOperatorBuilder machine(graph->zone());
BasicBlockVector* blocks = schedule->rpo_order();
size_t block_number = 0;
for (BasicBlockVector::iterator it = blocks->begin(); block_number < n_blocks;

Some files were not shown because too many files changed in this diff
