
deps: update v8 to 3.28.73

Reviewed-By: Fedor Indutny <fedor@indutny.com>
PR-URL: https://github.com/joyent/node/pull/8476
v0.11.15-release
Refael Ackermann (committed by Fedor Indutny)
commit 9116b240c9
Changed files (lines changed):

  1. deps/v8/.DEPS.git (66)
  2. deps/v8/.gitignore (15)
  3. deps/v8/AUTHORS (3)
  4. deps/v8/BUILD.gn (688)
  5. deps/v8/ChangeLog (723)
  6. deps/v8/DEPS (69)
  7. deps/v8/Makefile (70)
  8. deps/v8/Makefile.android (18)
  9. deps/v8/Makefile.nacl (2)
  10. deps/v8/OWNERS (1)
  11. deps/v8/PRESUBMIT.py (69)
  12. deps/v8/benchmarks/v8.json (16)
  13. deps/v8/build/all.gyp (2)
  14. deps/v8/build/android.gypi (37)
  15. deps/v8/build/detect_v8_host_arch.py (69)
  16. deps/v8/build/features.gypi (20)
  17. deps/v8/build/get_landmines.py (26)
  18. deps/v8/build/gyp_v8 (9)
  19. deps/v8/build/landmine_utils.py (114)
  20. deps/v8/build/landmines.py (139)
  21. deps/v8/build/standalone.gypi (54)
  22. deps/v8/build/toolchain.gypi (364)
  23. deps/v8/codereview.settings (1)
  24. deps/v8/include/libplatform/libplatform.h (38)
  25. deps/v8/include/v8-debug.h (71)
  26. deps/v8/include/v8-platform.h (9)
  27. deps/v8/include/v8-profiler.h (5)
  28. deps/v8/include/v8-util.h (13)
  29. deps/v8/include/v8.h (429)
  30. deps/v8/samples/lineprocessor.cc (80)
  31. deps/v8/samples/process.cc (17)
  32. deps/v8/samples/samples.gyp (3)
  33. deps/v8/samples/shell.cc (32)
  34. deps/v8/src/DEPS (13)
  35. deps/v8/src/accessors.cc (368)
  36. deps/v8/src/accessors.h (13)
  37. deps/v8/src/allocation-site-scopes.cc (6)
  38. deps/v8/src/allocation-site-scopes.h (14)
  39. deps/v8/src/allocation-tracker.cc (25)
  40. deps/v8/src/allocation.cc (14)
  41. deps/v8/src/allocation.h (2)
  42. deps/v8/src/api.cc (1145)
  43. deps/v8/src/api.h (35)
  44. deps/v8/src/apinatives.js (30)
  45. deps/v8/src/arguments.cc (6)
  46. deps/v8/src/arguments.h (24)
  47. deps/v8/src/arm/assembler-arm-inl.h (280)
  48. deps/v8/src/arm/assembler-arm.cc (1187)
  49. deps/v8/src/arm/assembler-arm.h (306)
  50. deps/v8/src/arm/builtins-arm.cc (138)
  51. deps/v8/src/arm/code-stubs-arm.cc (860)
  52. deps/v8/src/arm/code-stubs-arm.h (69)
  53. deps/v8/src/arm/codegen-arm.cc (376)
  54. deps/v8/src/arm/codegen-arm.h (4)
  55. deps/v8/src/arm/constants-arm.cc (8)
  56. deps/v8/src/arm/constants-arm.h (71)
  57. deps/v8/src/arm/cpu-arm.cc (72)
  58. deps/v8/src/arm/debug-arm.cc (146)
  59. deps/v8/src/arm/deoptimizer-arm.cc (42)
  60. deps/v8/src/arm/disasm-arm.cc (141)
  61. deps/v8/src/arm/frames-arm.cc (21)
  62. deps/v8/src/arm/frames-arm.h (2)
  63. deps/v8/src/arm/full-codegen-arm.cc (701)
  64. deps/v8/src/arm/ic-arm.cc (471)
  65. deps/v8/src/arm/lithium-arm.cc (438)
  66. deps/v8/src/arm/lithium-arm.h (117)
  67. deps/v8/src/arm/lithium-codegen-arm.cc (867)
  68. deps/v8/src/arm/lithium-codegen-arm.h (53)
  69. deps/v8/src/arm/lithium-gap-resolver-arm.cc (44)
  70. deps/v8/src/arm/lithium-gap-resolver-arm.h (4)
  71. deps/v8/src/arm/macro-assembler-arm.cc (647)
  72. deps/v8/src/arm/macro-assembler-arm.h (160)
  73. deps/v8/src/arm/regexp-macro-assembler-arm.cc (72)
  74. deps/v8/src/arm/regexp-macro-assembler-arm.h (6)
  75. deps/v8/src/arm/simulator-arm.cc (229)
  76. deps/v8/src/arm/simulator-arm.h (17)
  77. deps/v8/src/arm/stub-cache-arm.cc (926)
  78. deps/v8/src/arm64/assembler-arm64-inl.h (456)
  79. deps/v8/src/arm64/assembler-arm64.cc (1146)
  80. deps/v8/src/arm64/assembler-arm64.h (384)
  81. deps/v8/src/arm64/builtins-arm64.cc (148)
  82. deps/v8/src/arm64/code-stubs-arm64.cc (788)
  83. deps/v8/src/arm64/code-stubs-arm64.h (77)
  84. deps/v8/src/arm64/codegen-arm64.cc (144)
  85. deps/v8/src/arm64/codegen-arm64.h (4)
  86. deps/v8/src/arm64/constants-arm64.h (17)
  87. deps/v8/src/arm64/cpu-arm64.cc (37)
  88. deps/v8/src/arm64/cpu-arm64.h (71)
  89. deps/v8/src/arm64/debug-arm64.cc (162)
  90. deps/v8/src/arm64/decoder-arm64-inl.h (30)
  91. deps/v8/src/arm64/decoder-arm64.cc (14)
  92. deps/v8/src/arm64/decoder-arm64.h (4)
  93. deps/v8/src/arm64/delayed-masm-arm64-inl.h (55)
  94. deps/v8/src/arm64/delayed-masm-arm64.cc (198)
  95. deps/v8/src/arm64/delayed-masm-arm64.h (164)
  96. deps/v8/src/arm64/deoptimizer-arm64.cc (90)
  97. deps/v8/src/arm64/disasm-arm64.cc (70)
  98. deps/v8/src/arm64/disasm-arm64.h (10)
  99. deps/v8/src/arm64/frames-arm64.cc (10)
  100. deps/v8/src/arm64/frames-arm64.h (5)

deps/v8/.DEPS.git (66)

@@ -13,8 +13,14 @@ vars = {
deps = {
'v8/build/gyp':
Var('git_url') + '/external/gyp.git@a3e2a5caf24a1e0a45401e09ad131210bf16b852',
'v8/buildtools':
Var('git_url') + '/chromium/buildtools.git@fb782d4369d5ae04f17a2fceef7de5a63e50f07b',
'v8/testing/gmock':
Var('git_url') + '/external/googlemock.git@896ba0e03f520fb9b6ed582bde2bd00847e3c3f2',
'v8/testing/gtest':
Var('git_url') + '/external/googletest.git@4650552ff637bb44ecf7784060091cbed3252211',
'v8/third_party/icu':
Var('git_url') + '/chromium/deps/icu46.git@7a1ec88f69e25b3efcf76196d07f7815255db025',
Var('git_url') + '/chromium/deps/icu52.git@26d8859357ac0bfb86b939bf21c087b8eae22494',
}
deps_os = {
@@ -28,14 +34,68 @@ deps_os = {
}
include_rules = [
'+include',
'+unicode',
'+third_party/fdlibm'
]
skip_child_includes = [
'build',
'third_party'
]
hooks = [
{
'action':
[
'download_from_google_storage',
'--no_resume',
'--platform=win32',
'--no_auth',
'--bucket',
'chromium-clang-format',
'-s',
'v8/buildtools/win/clang-format.exe.sha1'
],
'pattern':
'.',
'name':
'clang_format_win'
},
{
'action':
[
'download_from_google_storage',
'--no_resume',
'--platform=darwin',
'--no_auth',
'--bucket',
'chromium-clang-format',
'-s',
'v8/buildtools/mac/clang-format.sha1'
],
'pattern':
'.',
'name':
'clang_format_mac'
},
{
'action':
[
'download_from_google_storage',
'--no_resume',
'--platform=linux*',
'--no_auth',
'--bucket',
'chromium-clang-format',
'-s',
'v8/buildtools/linux64/clang-format.sha1'
],
'pattern':
'.',
'name':
'clang_format_linux'
},
{
'action':
[

deps/v8/.gitignore (15)

@@ -21,11 +21,18 @@
#*#
*~
.cpplint-cache
.cproject
.d8_history
.gclient_entries
.project
.pydevproject
.settings
.*.sw?
bsuite
d8
d8_g
gccauses
gcsuspects
shell
shell_g
/_*
@@ -33,6 +40,7 @@ shell_g
/build/gyp
/build/ipch/
/build/Release
/buildtools
/hydrogen.cfg
/obj
/out
@@ -45,13 +53,18 @@ shell_g
/test/benchmarks/sunspider
/test/mozilla/CHECKED_OUT_VERSION
/test/mozilla/data
/test/mozilla/data.old
/test/mozilla/downloaded_*
/test/promises-aplus/promises-tests
/test/promises-aplus/promises-tests.tar.gz
/test/promises-aplus/sinon
/test/test262/data
/test/test262/data.old
/test/test262/tc39-test262-*
/third_party
/testing/gmock
/testing/gtest
/third_party/icu
/third_party/llvm
/tools/jsfunfuzz
/tools/jsfunfuzz.zip
/tools/oom_dump/oom_dump

deps/v8/AUTHORS (3)

@@ -13,6 +13,7 @@ Bloomberg Finance L.P.
NVIDIA Corporation
BlackBerry Limited
Opera Software ASA
Intel Corporation
Akinori MUSHA <knu@FreeBSD.org>
Alexander Botero-Lowry <alexbl@FreeBSD.org>
@@ -24,6 +25,7 @@ Baptiste Afsa <baptiste.afsa@arm.com>
Bert Belder <bertbelder@gmail.com>
Burcu Dogan <burcujdogan@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
Chunyang Dai <chunyang.dai@intel.com>
Daniel Andersson <kodandersson@gmail.com>
Daniel James <dnljms@gmail.com>
Derek J Conrod <dconrod@codeaurora.org>
@@ -64,6 +66,7 @@ Subrato K De <subratokde@codeaurora.org>
Tobias Burnus <burnus@net-b.de>
Vincent Belliard <vincent.belliard@arm.com>
Vlad Burlik <vladbph@gmail.com>
Weiliang Lin<weiliang.lin@intel.com>
Xi Qian <xi.qian@intel.com>
Yuqiang Xian <yuqiang.xian@intel.com>
Zaheer Ahmad <zahmad@codeaurora.org>

deps/v8/BUILD.gn (688)

@@ -14,17 +14,11 @@ v8_enable_verify_heap = false
v8_interpreted_regexp = false
v8_object_print = false
v8_postmortem_support = false
v8_use_default_platform = true
v8_use_snapshot = true
if (is_debug) {
v8_enable_extra_checks = true
} else {
v8_enable_extra_checks = false
}
# TODO(jochen): Add support for want_seperate_host_toolset.
# TODO(jochen): Add toolchain.gypi support.
v8_use_external_startup_data = false
v8_enable_extra_checks = is_debug
v8_target_arch = cpu_arch
v8_random_seed = "314159265"
###############################################################################
@@ -33,14 +27,32 @@ if (is_debug) {
config("internal_config") {
visibility = ":*" # Only targets in this file can depend on this.
include_dirs = [ "src" ]
include_dirs = [ "." ]
if (component_mode == "shared_library") {
defines = [
"V8_SHARED",
"BUILDING_V8_SHARED",
]
}
}
config("internal_config_base") {
visibility = ":*" # Only targets in this file can depend on this.
include_dirs = [ "." ]
}
# This config should only be applied to code using V8 and not any V8 code
# itself.
config("external_config") {
if (is_component_build) {
defines = [
"V8_SHARED",
"USING_V8_SHARED",
]
}
include_dirs = [ "include" ]
}
config("features") {
@@ -83,11 +95,6 @@ config("features") {
"V8_I18N_SUPPORT",
]
}
if (v8_use_default_platform == true) {
defines += [
"V8_USE_DEFAULT_PLATFORM",
]
}
if (v8_compress_startup_data == "bz2") {
defines += [
"COMPRESS_STARTUP_DATA_BZ2",
@@ -103,25 +110,62 @@ config("features") {
"ENABLE_HANDLE_ZAPPING",
]
}
if (v8_use_external_startup_data == true) {
defines += [
"V8_USE_EXTERNAL_STARTUP_DATA",
]
}
}
###############################################################################
# Actions
#
# TODO(jochen): Do actions need visibility settings as well?
action("generate_trig_table") {
config("toolchain") {
visibility = ":*" # Only targets in this file can depend on this.
script = "tools/generate-trig-table.py"
defines = []
cflags = []
outputs = [
"$target_gen_dir/trig-table.cc"
# TODO(jochen): Add support for arm, mips, mipsel.
if (v8_target_arch == "arm64") {
defines += [
"V8_TARGET_ARCH_ARM64",
]
}
if (v8_target_arch == "x86") {
defines += [
"V8_TARGET_ARCH_IA32",
]
}
if (v8_target_arch == "x64") {
defines += [
"V8_TARGET_ARCH_X64",
]
}
if (is_win) {
defines += [
"WIN32",
]
# TODO(jochen): Support v8_enable_prof.
}
args = rebase_path(outputs, root_build_dir)
# TODO(jochen): Add support for compiling with simulators.
if (is_debug) {
# TODO(jochen): Add support for different debug optimization levels.
defines += [
"ENABLE_DISASSEMBLER",
"V8_ENABLE_CHECKS",
"OBJECT_PRINT",
"VERIFY_HEAP",
"DEBUG",
"OPTIMIZED_DEBUG",
]
}
}
###############################################################################
# Actions
#
action("js2c") {
visibility = ":*" # Only targets in this file can depend on this.
@@ -134,9 +178,11 @@ action("js2c") {
sources = [
"src/runtime.js",
"src/v8natives.js",
"src/symbol.js",
"src/array.js",
"src/string.js",
"src/uri.js",
"third_party/fdlibm/fdlibm.js",
"src/math.js",
"src/messages.js",
"src/apinatives.js",
@@ -148,8 +194,14 @@ action("js2c") {
"src/regexp.js",
"src/arraybuffer.js",
"src/typedarray.js",
"src/collection.js",
"src/collection-iterator.js",
"src/weak_collection.js",
"src/promise.js",
"src/object-observe.js",
"src/macros.py",
"src/array-iterator.js",
"src/string-iterator.js",
]
outputs = [
@@ -160,10 +212,19 @@ action("js2c") {
sources += [ "src/i18n.js" ]
}
args =
rebase_path(outputs, root_build_dir) +
[ "EXPERIMENTAL", v8_compress_startup_data ] +
rebase_path(sources, root_build_dir)
args = [
rebase_path("$target_gen_dir/libraries.cc", root_build_dir),
"CORE",
v8_compress_startup_data
] + rebase_path(sources, root_build_dir)
if (v8_use_external_startup_data) {
outputs += [ "$target_gen_dir/libraries.bin" ]
args += [
"--startup_blob",
rebase_path("$target_gen_dir/libraries.bin", root_build_dir)
]
}
}
action("js2c_experimental") {
@@ -177,26 +238,53 @@ action("js2c_experimental") {
sources = [
"src/macros.py",
"src/symbol.js",
"src/proxy.js",
"src/collection.js",
"src/weak_collection.js",
"src/promise.js",
"src/generator.js",
"src/array-iterator.js",
"src/harmony-string.js",
"src/harmony-array.js",
"src/harmony-math.js",
]
outputs = [
"$target_gen_dir/experimental-libraries.cc"
]
args =
rebase_path(outputs, root_build_dir) +
[ "CORE", v8_compress_startup_data ] +
rebase_path(sources, root_build_dir)
args = [
rebase_path("$target_gen_dir/experimental-libraries.cc", root_build_dir),
"EXPERIMENTAL",
v8_compress_startup_data
] + rebase_path(sources, root_build_dir)
if (v8_use_external_startup_data) {
outputs += [ "$target_gen_dir/libraries_experimental.bin" ]
args += [
"--startup_blob",
rebase_path("$target_gen_dir/libraries_experimental.bin", root_build_dir)
]
}
}
if (v8_use_external_startup_data) {
action("natives_blob") {
visibility = ":*" # Only targets in this file can depend on this.
deps = [
":js2c",
":js2c_experimental"
]
sources = [
"$target_gen_dir/libraries.bin",
"$target_gen_dir/libraries_experimental.bin"
]
outputs = [
"$root_gen_dir/natives_blob.bin"
]
script = "tools/concatenate-files.py"
args = rebase_path(sources + outputs, root_build_dir)
}
}
action("postmortem-metadata") {
@@ -218,6 +306,40 @@ action("postmortem-metadata") {
rebase_path(sources, root_build_dir)
}
action("run_mksnapshot") {
visibility = ":*" # Only targets in this file can depend on this.
deps = [ ":mksnapshot($host_toolchain)" ]
script = "tools/run.py"
outputs = [
"$target_gen_dir/snapshot.cc"
]
args = [
"./" + rebase_path(get_label_info(":mksnapshot($host_toolchain)",
"root_out_dir") + "/mksnapshot",
root_build_dir),
"--log-snapshot-positions",
"--logfile", rebase_path("$target_gen_dir/snapshot.log", root_build_dir),
rebase_path("$target_gen_dir/snapshot.cc", root_build_dir)
]
if (v8_random_seed != "0") {
args += [ "--random-seed", v8_random_seed ]
}
if (v8_use_external_startup_data) {
outputs += [ "$root_gen_dir/snapshot_blob.bin" ]
args += [
"--startup_blob",
rebase_path("$root_gen_dir/snapshot_blob.bin", root_build_dir)
]
}
}
###############################################################################
# Source Sets (aka static libraries)
#
@@ -228,18 +350,64 @@ source_set("v8_nosnapshot") {
deps = [
":js2c",
":js2c_experimental",
":generate_trig_table",
":v8_base",
]
sources = [
"$target_gen_dir/libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/trig-table.cc",
"src/snapshot-empty.cc",
"src/snapshot-common.cc",
]
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [ ":internal_config", ":features", ":toolchain" ]
}
source_set("v8_snapshot") {
visibility = ":*" # Only targets in this file can depend on this.
deps = [
":js2c",
":js2c_experimental",
":run_mksnapshot",
":v8_base",
]
sources = [
"$target_gen_dir/libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/snapshot.cc",
"src/snapshot-common.cc",
]
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [ ":internal_config", ":features", ":toolchain" ]
}
if (v8_use_external_startup_data) {
source_set("v8_external_snapshot") {
visibility = ":*" # Only targets in this file can depend on this.
deps = [
":js2c",
":js2c_experimental",
":run_mksnapshot",
":v8_base",
":natives_blob",
]
sources = [
"src/natives-external.cc",
"src/snapshot-external.cc",
]
configs += [ ":internal_config", ":features" ]
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [ ":internal_config", ":features", ":toolchain" ]
}
}
source_set("v8_base") {
@@ -262,10 +430,10 @@ source_set("v8_base") {
"src/assembler.h",
"src/assert-scope.h",
"src/assert-scope.cc",
"src/ast-value-factory.cc",
"src/ast-value-factory.h",
"src/ast.cc",
"src/ast.h",
"src/atomicops.h",
"src/atomicops_internals_x86_gcc.cc",
"src/bignum-dtoa.cc",
"src/bignum-dtoa.h",
"src/bignum.cc",
@@ -291,6 +459,95 @@ source_set("v8_base") {
"src/codegen.h",
"src/compilation-cache.cc",
"src/compilation-cache.h",
"src/compiler/ast-graph-builder.cc",
"src/compiler/ast-graph-builder.h",
"src/compiler/code-generator-impl.h",
"src/compiler/code-generator.cc",
"src/compiler/code-generator.h",
"src/compiler/common-node-cache.h",
"src/compiler/common-operator.h",
"src/compiler/control-builders.cc",
"src/compiler/control-builders.h",
"src/compiler/frame.h",
"src/compiler/gap-resolver.cc",
"src/compiler/gap-resolver.h",
"src/compiler/generic-algorithm-inl.h",
"src/compiler/generic-algorithm.h",
"src/compiler/generic-graph.h",
"src/compiler/generic-node-inl.h",
"src/compiler/generic-node.h",
"src/compiler/graph-builder.cc",
"src/compiler/graph-builder.h",
"src/compiler/graph-inl.h",
"src/compiler/graph-reducer.cc",
"src/compiler/graph-reducer.h",
"src/compiler/graph-replay.cc",
"src/compiler/graph-replay.h",
"src/compiler/graph-visualizer.cc",
"src/compiler/graph-visualizer.h",
"src/compiler/graph.cc",
"src/compiler/graph.h",
"src/compiler/instruction-codes.h",
"src/compiler/instruction-selector-impl.h",
"src/compiler/instruction-selector.cc",
"src/compiler/instruction-selector.h",
"src/compiler/instruction.cc",
"src/compiler/instruction.h",
"src/compiler/js-context-specialization.cc",
"src/compiler/js-context-specialization.h",
"src/compiler/js-generic-lowering.cc",
"src/compiler/js-generic-lowering.h",
"src/compiler/js-graph.cc",
"src/compiler/js-graph.h",
"src/compiler/js-operator.h",
"src/compiler/js-typed-lowering.cc",
"src/compiler/js-typed-lowering.h",
"src/compiler/linkage-impl.h",
"src/compiler/linkage.cc",
"src/compiler/linkage.h",
"src/compiler/lowering-builder.cc",
"src/compiler/lowering-builder.h",
"src/compiler/machine-node-factory.h",
"src/compiler/machine-operator-reducer.cc",
"src/compiler/machine-operator-reducer.h",
"src/compiler/machine-operator.h",
"src/compiler/node-aux-data-inl.h",
"src/compiler/node-aux-data.h",
"src/compiler/node-cache.cc",
"src/compiler/node-cache.h",
"src/compiler/node-matchers.h",
"src/compiler/node-properties-inl.h",
"src/compiler/node-properties.h",
"src/compiler/node.cc",
"src/compiler/node.h",
"src/compiler/opcodes.h",
"src/compiler/operator-properties-inl.h",
"src/compiler/operator-properties.h",
"src/compiler/operator.h",
"src/compiler/phi-reducer.h",
"src/compiler/pipeline.cc",
"src/compiler/pipeline.h",
"src/compiler/raw-machine-assembler.cc",
"src/compiler/raw-machine-assembler.h",
"src/compiler/register-allocator.cc",
"src/compiler/register-allocator.h",
"src/compiler/representation-change.h",
"src/compiler/schedule.cc",
"src/compiler/schedule.h",
"src/compiler/scheduler.cc",
"src/compiler/scheduler.h",
"src/compiler/simplified-lowering.cc",
"src/compiler/simplified-lowering.h",
"src/compiler/simplified-node-factory.h",
"src/compiler/simplified-operator.h",
"src/compiler/source-position.cc",
"src/compiler/source-position.h",
"src/compiler/structured-machine-assembler.cc",
"src/compiler/structured-machine-assembler.h",
"src/compiler/typer.cc",
"src/compiler/typer.h",
"src/compiler/verifier.cc",
"src/compiler/verifier.h",
"src/compiler.cc",
"src/compiler.h",
"src/contexts.cc",
@@ -303,8 +560,6 @@ source_set("v8_base") {
"src/cpu-profiler-inl.h",
"src/cpu-profiler.cc",
"src/cpu-profiler.h",
"src/cpu.cc",
"src/cpu.h",
"src/data-flow.cc",
"src/data-flow.h",
"src/date.cc",
@@ -312,8 +567,6 @@ source_set("v8_base") {
"src/dateparser-inl.h",
"src/dateparser.cc",
"src/dateparser.h",
"src/debug-agent.cc",
"src/debug-agent.h",
"src/debug.cc",
"src/debug.h",
"src/deoptimizer.cc",
@@ -348,6 +601,9 @@ source_set("v8_base") {
"src/fast-dtoa.cc",
"src/fast-dtoa.h",
"src/feedback-slots.h",
"src/field-index.cc",
"src/field-index.h",
"src/field-index-inl.h",
"src/fixed-dtoa.cc",
"src/fixed-dtoa.h",
"src/flag-definitions.h",
@@ -369,14 +625,32 @@ source_set("v8_base") {
"src/handles.cc",
"src/handles.h",
"src/hashmap.h",
"src/heap-inl.h",
"src/heap-profiler.cc",
"src/heap-profiler.h",
"src/heap-snapshot-generator-inl.h",
"src/heap-snapshot-generator.cc",
"src/heap-snapshot-generator.h",
"src/heap.cc",
"src/heap.h",
"src/heap/gc-tracer.cc",
"src/heap/gc-tracer.h",
"src/heap/heap-inl.h",
"src/heap/heap.cc",
"src/heap/heap.h",
"src/heap/incremental-marking.cc",
"src/heap/incremental-marking.h",
"src/heap/mark-compact-inl.h",
"src/heap/mark-compact.cc",
"src/heap/mark-compact.h",
"src/heap/objects-visiting-inl.h",
"src/heap/objects-visiting.cc",
"src/heap/objects-visiting.h",
"src/heap/spaces-inl.h",
"src/heap/spaces.cc",
"src/heap/spaces.h",
"src/heap/store-buffer-inl.h",
"src/heap/store-buffer.cc",
"src/heap/store-buffer.h",
"src/heap/sweeper-thread.h",
"src/heap/sweeper-thread.cc",
"src/hydrogen-alias-analysis.h",
"src/hydrogen-bce.cc",
"src/hydrogen-bce.h",
@@ -425,6 +699,8 @@ source_set("v8_base") {
"src/hydrogen-sce.h",
"src/hydrogen-store-elimination.cc",
"src/hydrogen-store-elimination.h",
"src/hydrogen-types.cc",
"src/hydrogen-types.h",
"src/hydrogen-uint32-analysis.cc",
"src/hydrogen-uint32-analysis.h",
"src/i18n.cc",
@@ -434,8 +710,6 @@ source_set("v8_base") {
"src/ic-inl.h",
"src/ic.cc",
"src/ic.h",
"src/incremental-marking.cc",
"src/incremental-marking.h",
"src/interface.cc",
"src/interface.h",
"src/interpreter-irregexp.cc",
@@ -447,14 +721,6 @@ source_set("v8_base") {
"src/jsregexp-inl.h",
"src/jsregexp.cc",
"src/jsregexp.h",
"src/lazy-instance.h",
# TODO(jochen): move libplatform/ files to their own target.
"src/libplatform/default-platform.cc",
"src/libplatform/default-platform.h",
"src/libplatform/task-queue.cc",
"src/libplatform/task-queue.h",
"src/libplatform/worker-thread.cc",
"src/libplatform/worker-thread.h",
"src/list-inl.h",
"src/list.h",
"src/lithium-allocator-inl.h",
@@ -471,9 +737,10 @@ source_set("v8_base") {
"src/log-utils.h",
"src/log.cc",
"src/log.h",
"src/lookup-inl.h",
"src/lookup.cc",
"src/lookup.h",
"src/macro-assembler.h",
"src/mark-compact.cc",
"src/mark-compact.h",
"src/messages.cc",
"src/messages.h",
"src/msan.h",
@@ -481,28 +748,16 @@ source_set("v8_base") {
"src/objects-debug.cc",
"src/objects-inl.h",
"src/objects-printer.cc",
"src/objects-visiting.cc",
"src/objects-visiting.h",
"src/objects.cc",
"src/objects.h",
"src/once.cc",
"src/once.h",
"src/optimizing-compiler-thread.h",
"src/optimizing-compiler-thread.cc",
"src/optimizing-compiler-thread.h",
"src/ostreams.cc",
"src/ostreams.h",
"src/parser.cc",
"src/parser.h",
"src/platform/elapsed-timer.h",
"src/platform/time.cc",
"src/platform/time.h",
"src/platform.h",
"src/platform/condition-variable.cc",
"src/platform/condition-variable.h",
"src/platform/mutex.cc",
"src/platform/mutex.h",
"src/platform/semaphore.cc",
"src/platform/semaphore.h",
"src/platform/socket.cc",
"src/platform/socket.h",
"src/perf-jit.cc",
"src/perf-jit.h",
"src/preparse-data-format.h",
"src/preparse-data.cc",
"src/preparse-data.h",
@@ -516,6 +771,7 @@ source_set("v8_base") {
"src/property-details.h",
"src/property.cc",
"src/property.h",
"src/prototype.h",
"src/regexp-macro-assembler-irregexp-inl.h",
"src/regexp-macro-assembler-irregexp.cc",
"src/regexp-macro-assembler-irregexp.h",
@@ -547,14 +803,9 @@ source_set("v8_base") {
"src/serialize.h",
"src/small-pointer-list.h",
"src/smart-pointers.h",
"src/snapshot-common.cc",
"src/snapshot-source-sink.cc",
"src/snapshot-source-sink.h",
"src/snapshot.h",
"src/spaces-inl.h",
"src/spaces.cc",
"src/spaces.h",
"src/store-buffer-inl.h",
"src/store-buffer.cc",
"src/store-buffer.h",
"src/string-search.cc",
"src/string-search.h",
"src/string-stream.cc",
@@ -563,8 +814,6 @@ source_set("v8_base") {
"src/strtod.h",
"src/stub-cache.cc",
"src/stub-cache.h",
"src/sweeper-thread.h",
"src/sweeper-thread.cc",
"src/token.cc",
"src/token.h",
"src/transitions-inl.h",
@@ -587,12 +836,8 @@ source_set("v8_base") {
"src/utils-inl.h",
"src/utils.cc",
"src/utils.h",
"src/utils/random-number-generator.cc",
"src/utils/random-number-generator.h",
"src/v8.cc",
"src/v8.h",
"src/v8checks.h",
"src/v8globals.h",
"src/v8memory.h",
"src/v8threads.cc",
"src/v8threads.h",
@@ -605,9 +850,11 @@ source_set("v8_base") {
"src/zone-inl.h",
"src/zone.cc",
"src/zone.h",
"third_party/fdlibm/fdlibm.cc",
"third_party/fdlibm/fdlibm.h",
]
if (cpu_arch == "x86") {
if (v8_target_arch == "x86") {
sources += [
"src/ia32/assembler-ia32-inl.h",
"src/ia32/assembler-ia32.cc",
@@ -636,8 +883,12 @@ source_set("v8_base") {
"src/ia32/regexp-macro-assembler-ia32.cc",
"src/ia32/regexp-macro-assembler-ia32.h",
"src/ia32/stub-cache-ia32.cc",
"src/compiler/ia32/code-generator-ia32.cc",
"src/compiler/ia32/instruction-codes-ia32.h",
"src/compiler/ia32/instruction-selector-ia32.cc",
"src/compiler/ia32/linkage-ia32.cc",
]
} else if (cpu_arch == "x64") {
} else if (v8_target_arch == "x64") {
sources += [
"src/x64/assembler-x64-inl.h",
"src/x64/assembler-x64.cc",
@@ -666,8 +917,12 @@ source_set("v8_base") {
"src/x64/regexp-macro-assembler-x64.cc",
"src/x64/regexp-macro-assembler-x64.h",
"src/x64/stub-cache-x64.cc",
"src/compiler/x64/code-generator-x64.cc",
"src/compiler/x64/instruction-codes-x64.h",
"src/compiler/x64/instruction-selector-x64.cc",
"src/compiler/x64/linkage-x64.cc",
]
} else if (cpu_arch == "arm") {
} else if (v8_target_arch == "arm") {
sources += [
"src/arm/assembler-arm-inl.h",
"src/arm/assembler-arm.cc",
@@ -699,8 +954,12 @@ source_set("v8_base") {
"src/arm/regexp-macro-assembler-arm.h",
"src/arm/simulator-arm.cc",
"src/arm/stub-cache-arm.cc",
"src/compiler/arm/code-generator-arm.cc",
"src/compiler/arm/instruction-codes-arm.h",
"src/compiler/arm/instruction-selector-arm.cc",
"src/compiler/arm/linkage-arm.cc",
]
} else if (cpu_arch == "arm64") {
} else if (v8_target_arch == "arm64") {
sources += [
"src/arm64/assembler-arm64.cc",
"src/arm64/assembler-arm64.h",
@@ -712,7 +971,6 @@ source_set("v8_base") {
"src/arm64/code-stubs-arm64.h",
"src/arm64/constants-arm64.h",
"src/arm64/cpu-arm64.cc",
"src/arm64/cpu-arm64.h",
"src/arm64/debug-arm64.cc",
"src/arm64/decoder-arm64.cc",
"src/arm64/decoder-arm64.h",
@@ -744,8 +1002,12 @@ source_set("v8_base") {
"src/arm64/stub-cache-arm64.cc",
"src/arm64/utils-arm64.cc",
"src/arm64/utils-arm64.h",
"src/compiler/arm64/code-generator-arm64.cc",
"src/compiler/arm64/instruction-codes-arm64.h",
"src/compiler/arm64/instruction-selector-arm64.cc",
"src/compiler/arm64/linkage-arm64.cc",
]
} else if (cpu_arch == "mipsel") {
} else if (v8_target_arch == "mipsel") {
sources += [
"src/mips/assembler-mips.cc",
"src/mips/assembler-mips.h",
@@ -780,41 +1042,122 @@ source_set("v8_base") {
]
}
configs += [ ":internal_config", ":features" ]
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [ ":internal_config", ":features", ":toolchain" ]
defines = []
deps = [ ":v8_libbase" ]
if (is_linux) {
if (v8_compress_startup_data == "bz2") {
libs += [ "bz2" ]
}
}
if (v8_enable_i18n_support) {
deps += [ "//third_party/icu" ]
if (is_win) {
deps += [ "//third_party/icu:icudata" ]
}
# TODO(jochen): Add support for icu_use_data_file_flag
defines += [ "ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE" ]
} else {
sources -= [
"src/i18n.cc",
"src/i18n.h",
]
}
if (v8_postmortem_support) {
sources += [ "$target_gen_dir/debug-support.cc" ]
deps += [ ":postmortem-metadata" ]
}
}
source_set("v8_libbase") {
visibility = ":*" # Only targets in this file can depend on this.
sources = [
"src/base/atomicops.h",
"src/base/atomicops_internals_arm64_gcc.h",
"src/base/atomicops_internals_arm_gcc.h",
"src/base/atomicops_internals_atomicword_compat.h",
"src/base/atomicops_internals_mac.h",
"src/base/atomicops_internals_mips_gcc.h",
"src/base/atomicops_internals_tsan.h",
"src/base/atomicops_internals_x86_gcc.cc",
"src/base/atomicops_internals_x86_gcc.h",
"src/base/atomicops_internals_x86_msvc.h",
"src/base/build_config.h",
"src/base/cpu.cc",
"src/base/cpu.h",
"src/base/lazy-instance.h",
"src/base/logging.cc",
"src/base/logging.h",
"src/base/macros.h",
"src/base/once.cc",
"src/base/once.h",
"src/base/platform/elapsed-timer.h",
"src/base/platform/time.cc",
"src/base/platform/time.h",
"src/base/platform/condition-variable.cc",
"src/base/platform/condition-variable.h",
"src/base/platform/mutex.cc",
"src/base/platform/mutex.h",
"src/base/platform/platform.h",
"src/base/platform/semaphore.cc",
"src/base/platform/semaphore.h",
"src/base/safe_conversions.h",
"src/base/safe_conversions_impl.h",
"src/base/safe_math.h",
"src/base/safe_math_impl.h",
"src/base/utils/random-number-generator.cc",
"src/base/utils/random-number-generator.h",
]
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [ ":internal_config_base", ":features", ":toolchain" ]
defines = []
deps = []
if (is_posix) {
sources += [
"src/platform-posix.cc"
"src/base/platform/platform-posix.cc"
]
}
if (is_linux) {
sources += [
"src/platform-linux.cc"
"src/base/platform/platform-linux.cc"
]
# TODO(brettw)
# 'conditions': [
# ['v8_compress_startup_data=="bz2"', {
# 'libraries': [
# '-lbz2',
# ]
# }],
# ],
libs = [ "rt" ]
} else if (is_android) {
# TODO(brettw) OS=="android" condition from tools/gyp/v8.gyp
defines += [ "CAN_USE_VFP_INSTRUCTIONS" ]
if (build_os == "mac") {
if (current_toolchain == host_toolchain) {
sources += [ "src/base/platform/platform-macos.cc" ]
} else {
sources += [ "src/base/platform/platform-linux.cc" ]
}
} else {
sources += [ "src/base/platform/platform-linux.cc" ]
if (current_toolchain == host_toolchain) {
defines += [ "V8_LIBRT_NOT_AVAILABLE" ]
}
}
} else if (is_mac) {
sources += [ "src/platform-macos,cc" ]
sources += [ "src/base/platform/platform-macos.cc" ]
} else if (is_win) {
# TODO(jochen): Add support for cygwin.
sources += [
"src/platform-win32.cc",
"src/win32-math.cc",
"src/win32-math.h",
"src/base/platform/platform-win32.cc",
"src/base/win32-headers.h",
"src/base/win32-math.cc",
"src/base/win32-math.h",
]
defines += [ "_CRT_RAND_S" ] # for rand_s()
@@ -822,52 +1165,117 @@ source_set("v8_base") {
libs = [ "winmm.lib", "ws2_32.lib" ]
}
# TODO(jochen): Add support for qnx, freebsd, openbsd, netbsd, and solaris.
}
if (v8_enable_i18n_support) {
deps += [ "//third_party/icu" ]
if (is_win) {
deps += [ "//third_party/icu:icudata" ]
}
} else {
sources -= [
"src/i18n.cc",
"src/i18n.h",
source_set("v8_libplatform") {
sources = [
"include/libplatform/libplatform.h",
"src/libplatform/default-platform.cc",
"src/libplatform/default-platform.h",
"src/libplatform/task-queue.cc",
"src/libplatform/task-queue.h",
"src/libplatform/worker-thread.cc",
"src/libplatform/worker-thread.h",
]
}
# TODO(brettw) other conditions from v8.gyp
# TODO(brettw) icu_use_data_file_flag
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [ ":internal_config_base", ":features", ":toolchain" ]
deps = [
":v8_libbase",
]
}
###############################################################################
# Executables
#
# TODO(jochen): Remove this as soon as toolchain.gypi is integrated.
if (build_cpu_arch != cpu_arch) {
if (current_toolchain == host_toolchain) {
executable("mksnapshot") {
visibility = ":*" # Only targets in this file can depend on this.
executable("mksnapshot") {
sources = [
"src/mksnapshot.cc",
]
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [ ":internal_config", ":features", ":toolchain" ]
deps = [
":v8_base",
":v8_libplatform",
":v8_nosnapshot",
]
if (v8_compress_startup_data == "bz2") {
libs = [ "bz2" ]
}
}
}
} else {
###############################################################################
# Public targets
#
if (component_mode == "shared_library") {
executable("mksnapshot") {
component("v8") {
sources = [
"src/mksnapshot.cc",
"src/v8dll-main.cc",
]
configs += [ ":internal_config", ":features" ]
if (v8_use_external_startup_data) {
deps = [
":v8_base",
":v8_external_snapshot",
]
} else if (v8_use_snapshot) {
deps = [
":v8_base",
":v8_snapshot",
]
} else {
deps = [
":v8_base",
":v8_nosnapshot",
]
}
if (v8_compress_startup_data == "bz2") {
libs = [ "bz2" ]
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [ ":internal_config", ":features", ":toolchain" ]
direct_dependent_configs = [ ":external_config" ]
if (is_android && current_toolchain != host_toolchain) {
libs += [ "log" ]
}
}
} else {
group("v8") {
if (v8_use_external_startup_data) {
deps = [
":v8_base",
":v8_external_snapshot",
]
} else if (v8_use_snapshot) {
deps = [
":v8_base",
":v8_snapshot",
]
} else {
deps = [
":v8_base",
":v8_nosnapshot",
]
}
direct_dependent_configs = [ ":external_config" ]
}
}
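
For orientation, the run_mksnapshot action added above boils down to roughly this command line (a sketch: output paths are illustrative, and the default v8_random_seed of "314159265" is shown):

    # Sketch of what the run_mksnapshot action executes (illustrative paths).
    ./mksnapshot \
        --log-snapshot-positions \
        --logfile gen/snapshot.log \
        --random-seed 314159265 \
        gen/snapshot.cc
    # With v8_use_external_startup_data = true the action also passes:
    #   --startup_blob gen/snapshot_blob.bin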

deps/v8/ChangeLog (723)

@@ -1,3 +1,726 @@
2014-08-13: Version 3.28.73
Performance and stability improvements on all platforms.
2014-08-12: Version 3.28.71
ToNumber(Symbol) should throw TypeError (issue 3499).
Performance and stability improvements on all platforms.
2014-08-11: Version 3.28.69
Performance and stability improvements on all platforms.
2014-08-09: Version 3.28.65
Performance and stability improvements on all platforms.
2014-08-08: Version 3.28.64
ES6: Implement WeakMap and WeakSet constructor logic (issue 3399).
Enable ES6 unscopables (issue 3401).
Turn on harmony_unscopables for es_staging (issue 3401).
Remove proxies from --harmony switch for M38, because problems.
Reland "Add initial support for compiler unit tests using GTest/GMock."
(issue 3489).
Enable ES6 iteration by default (issue 2214).
Performance and stability improvements on all platforms.
2014-08-07: Version 3.28.62
Only escape U+0022 in argument values of `String.prototype` HTML methods
(issue 2217).
Update webkit test for expected own properties.
This implements unscopables (issue 3401).
Add `CheckObjectCoercible` for the `String.prototype` HTML methods
(issue 2218).
Add initial support for compiler unit tests using GTest/GMock (issue
3489).
Trigger exception debug events on Promise reject (Chromium issue
393913).
Refactor unit tests for the base library to use GTest (issue 3489).
Performance and stability improvements on all platforms.
2014-08-06: Version 3.28.60
Enable ES6 Map and Set by default (issue 1622).
Performance and stability improvements on all platforms.
2014-08-06: Version 3.28.59
Removed GetConstructor from the API. Instead either get the
"constructor" property stored in the prototype, or keep a side-table.
Enable ES6 Symbols by default (issue 2158).
Performance and stability improvements on all platforms.
2014-08-05: Version 3.28.57
Add dependencies on gtest and gmock.
Performance and stability improvements on all platforms.
2014-08-04: Version 3.28.54
Performance and stability improvements on all platforms.
2014-08-01: Version 3.28.53
Performance and stability improvements on all platforms.
2014-07-31: Version 3.28.52
Performance and stability improvements on all platforms.
2014-07-31: Version 3.28.51
Drop deprecated memory related notification API (Chromium issue 397026).
Performance and stability improvements on all platforms.
2014-07-31: Version 3.28.50
Use emergency memory in the case of out of memory during evacuation
(Chromium issue 395314).
Performance and stability improvements on all platforms.
2014-07-30: Version 3.28.48
Fix Object.freeze with field type tracking. Keep the descriptor properly
intact while update the field type (issue 3458).
Performance and stability improvements on all platforms.
2014-07-29: Version 3.28.45
Performance and stability improvements on all platforms.
2014-07-28: Version 3.28.43
Performance and stability improvements on all platforms.
2014-07-25: Version 3.28.38
Fix issue with setters and their holders in accessors.cc (Chromium issue
3462).
Introduce more debug events for promises (issue 3093).
Move gc notifications from V8 to Isolate and make idle hint mandatory
(Chromium issue 397026).
The accessors should get the value from the holder and not from this
(issue 3461).
Performance and stability improvements on all platforms.
2014-07-24: Version 3.28.35
Rebaseline/update the intl tests with ICU 52 (issue 3454).
Expose the content of Sets and WeakSets through SetMirror (issue 3093).
Performance and stability improvements on all platforms.
2014-07-23: Version 3.28.32
Update ICU to 5.2 (matching chromium) (issue 3452).
Performance and stability improvements on all platforms.
2014-07-22: Version 3.28.31
Remove harmony-typeof.
Implement String.prototype.codePointAt and String.fromCodePoint (issue
2840).
Performance and stability improvements on all platforms.
2014-07-21: Version 3.28.30
Performance and stability improvements on all platforms.
2014-07-21: Version 3.28.29
Performance and stability improvements on all platforms.
2014-07-18: Version 3.28.28
Performance and stability improvements on all platforms.
2014-07-17: Version 3.28.26
Ship ES6 Math functions (issue 2938).
Make ToPrimitive throw on symbol wrappers (issue 3442).
Performance and stability improvements on all platforms.
2014-07-16: Version 3.28.25
Performance and stability improvements on all platforms.
2014-07-16: Version 3.28.24
Removed some copy-n-paste from StackFrame::Foo API entries (issue 3436).
Performance and stability improvements on all platforms.
2014-07-15: Version 3.28.23
Fix error message about read-only symbol properties (issue 3441).
Include symbol properties in Object.{create,defineProperties} (issue
3440).
Performance and stability improvements on all platforms.
2014-07-14: Version 3.28.22
Performance and stability improvements on all platforms.
2014-07-11: Version 3.28.21
Make `let` usable as an identifier in ES6 sloppy mode (issue 2198).
Support ES6 Map and Set in heap profiler (issue 3368).
Performance and stability improvements on all platforms.
2014-07-10: Version 3.28.20
Remove deprecate counter/histogram methods.
Fixed printing of external references (Chromium issue 392068).
Fix several issues with ES6 redeclaration checks (issue 3426).
Performance and stability improvements on all platforms.
2014-07-09: Version 3.28.19
Performance and stability improvements on all platforms.
2014-07-09: Version 3.28.18
Reland "Postpone termination exceptions in debug scope." (issue 3408).
Performance and stability improvements on all platforms.
2014-07-08: Version 3.28.17
MIPS: Fix computed properties on object literals with a double as
propertyname (Chromium issue 390732).
Performance and stability improvements on all platforms.
2014-07-08: Version 3.28.16
Fix computed properties on object literals with a double as propertyname
(Chromium issue 390732).
Avoid brittle use of .bind in Promise.all (issue 3420).
Performance and stability improvements on all platforms.
2014-07-07: Version 3.28.15
Remove a bunch of Isolate::UncheckedCurrent calls.
Performance and stability improvements on all platforms.
2014-07-07: Version 3.28.14
Use the HeapObjectIterator to scan-on-scavenge map pages (Chromium issue
390732).
Introduce debug events for Microtask queue (Chromium issue 272416).
Split out libplatform into a separate libary.
Add clang-format to presubmit checks.
Stack traces exposed to Javascript should omit extensions (issue 311).
Remove deprecated v8::Context::HasOutOfMemoryException.
Postpone termination exceptions in debug scope (issue 3408).
Performance and stability improvements on all platforms.
2014-07-04: Version 3.28.13
Rollback to r22134.
2014-07-04: Version 3.28.12
Use the HeapObjectIterator to scan-on-scavenge map pages (Chromium issue
390732).
Introduce debug events for Microtask queue (Chromium issue 272416).
Performance and stability improvements on all platforms.
2014-07-03: Version 3.28.11
Split out libplatform into a separate libary.
Performance and stability improvements on all platforms.
2014-07-03: Version 3.28.10
Add clang-format to presubmit checks.
Stack traces exposed to Javascript should omit extensions (issue 311).
Remove deprecated v8::Context::HasOutOfMemoryException.
Postpone termination exceptions in debug scope (issue 3408).
Performance and stability improvements on all platforms.
2014-07-02: Version 3.28.9
Make freeze & friends ignore private properties (issue 3419).
Introduce a builddeps make target (issue 3418).
Performance and stability improvements on all platforms.
2014-07-01: Version 3.28.8
Remove static initializer from isolate.
ES6: Add missing Set.prototype.keys function (issue 3411).
Introduce debug events for promises (issue 3093).
Performance and stability improvements on all platforms.
2014-06-30: Version 3.28.7
Performance and stability improvements on all platforms.
2014-06-30: Version 3.28.6
Unbreak "os" stuff in shared d8 builds (issue 3407).
Performance and stability improvements on all platforms.
2014-06-26: Version 3.28.4
Compile optimized code with active debugger but no break points
(Chromium issue 386492).
Optimize Map/Set.prototype.forEach.
Collect garbage with kReduceMemoryFootprintMask in IdleNotification
(Chromium issue 350720).
Performance and stability improvements on all platforms.
2014-06-26: Version 3.28.3
Grow heap slower if GC freed many global handles (Chromium issue
263503).
Performance and stability improvements on all platforms.
2014-06-25: Version 3.28.2
Remove bogus assertions in HCompareObjectEqAndBranch (Chromium issue
387636).
Do not eagerly update allow_osr_at_loop_nesting_level (Chromium issue
387599).
Set host_arch to ia32 on machines with a 32bit userland but a 64bit
kernel (Chromium issue 368384).
Map/Set: Implement constructor parameter handling (issue 3398).
Performance and stability improvements on all platforms.
2014-06-24: Version 3.28.1
Support LiveEdit on Arm64 (Chromium issue 368580).
Run JS micro tasks in the appropriate context (Chromium issue 385349).
Add a use counter API.
Set host_arch to ia32 on machines with a 32bit userland but a 64bit
kernel.
Performance and stability improvements on all platforms.
2014-06-23: Version 3.28.0
MIPS: Support LiveEdit (Chromium issue 368580).
Array.concat: properly go to dictionary mode when required (Chromium
issue 387031).
Support LiveEdit on ARM (Chromium issue 368580).
Performance and stability improvements on all platforms.
2014-06-18: Version 3.27.34
Reduce number of writes to DependentCode array when inserting dependent
IC (Chromium issue 305878).
Performance and stability improvements on all platforms.
2014-06-17: Version 3.27.33
Do GC if CodeRange fails to allocate a block (Chromium issue 305878).
Throw syntax error when a getter/setter has the wrong number of params
(issue 3371).
Performance and stability improvements on all platforms.
2014-06-17: Version 3.27.32
Performance and stability improvements on all platforms.
2014-06-16: Version 3.27.31
Version fix.
2014-06-16: Version 3.27.30
Fix representation of Phis for mutable-heapnumber-in-object-literal
properties (issue 3392).
Performance and stability improvements on all platforms.
2014-06-16: Version 3.27.29
Emulate MLS on pre-ARMv6T2. Cleaned up thumbee vs. thumb2 confusion.
X87: Fixed flooring division by a power of 2, once again.. (issue 3259).
Fixed undefined behavior in RNG (Chromium issue 377790).
Performance and stability improvements on all platforms.
2014-06-13: Version 3.27.28
Add v8::Promise::Then (Chromium issue 371288).
Performance and stability improvements on all platforms.
2014-06-12: Version 3.27.27
Fix detection of VFP3D16 on Galaxy Tab 10.1 (issue 3387).
Performance and stability improvements on all platforms.
2014-06-12: Version 3.27.26
MIPS: Fixed flooring division by a power of 2, once again.. (issue
3259).
Fixed flooring division by a power of 2, once again.. (issue 3259).
Fix unsigned comparisons (issue 3380).
Performance and stability improvements on all platforms.
2014-06-11: Version 3.27.25
Performance and stability improvements on all platforms.
2014-06-11: Version 3.27.24
Fix invalid attributes when generalizing because of incompatible map
change (Chromium issue 382143).
Fix missing smi check in inlined indexOf/lastIndexOf (Chromium issue
382513).
Performance and stability improvements on all platforms.
2014-06-06: Version 3.27.23
Performance and stability improvements on all platforms.
2014-06-06: Version 3.27.22
Performance and stability improvements on all platforms.
2014-06-06: Version 3.27.21
Turn on harmony_collections for es_staging (issue 1622).
Do not make heap iterable eagerly (Chromium issue 379740).
Performance and stability improvements on all platforms.
2014-06-05: Version 3.27.20
Fix invalid loop condition for Array.lastIndexOf() (Chromium issue
380512).
Add API support for passing a C++ function as a microtask callback.
Performance and stability improvements on all platforms.
2014-06-04: Version 3.27.19
Split Put into Put and Remove.
ES6: Add support for values/keys/entries for Map and Set (issue 1793).
Performance and stability improvements on all platforms.
2014-06-03: Version 3.27.18
Remove PROHIBITS_OVERWRITING as it is subsumed by non-configurable
properties.
Performance and stability improvements on all platforms.
2014-06-02: Version 3.27.17
BuildNumberToString: Check for undefined keys in the cache (Chromium
issue 368114).
HRor and HSar can deoptimize (issue 3359).
Simplify, speed-up correct-context ObjectObserve calls.
Performance and stability improvements on all platforms.
2014-05-29: Version 3.27.16
Allow microtasks to throw exceptions and handle them gracefully
(Chromium issue 371566).
Performance and stability improvements on all platforms.
2014-05-28: Version 3.27.15
Performance and stability improvements on all platforms.
2014-05-27: Version 3.27.14
Reland "Customized support for feedback on calls to Array." and follow-
up fixes (Chromium issues 377198, 377290).
Performance and stability improvements on all platforms.
2014-05-26: Version 3.27.13
Performance and stability improvements on all platforms.
2014-05-26: Version 3.27.12
Check for cached transition to ExternalArray elements kind (issue 3337).
Support ES6 weak collections in heap profiler (Chromium issue 376196).
Performance and stability improvements on all platforms.
2014-05-23: Version 3.27.11
Add support for ES6 Symbol in heap profiler (Chromium issue 376194).
Performance and stability improvements on all platforms.
2014-05-22: Version 3.27.10
Implement Mirror object for Symbols (issue 3290).
Allow debugger to step into Map and Set forEach callbacks (issue 3341).
Fix ArrayShift hydrogen support (Chromium issue 374838).
Use SameValueZero for Map and Set (issue 1622).
Array Iterator next should check for own property.
Performance and stability improvements on all platforms.
2014-05-21: Version 3.27.9
Disable ArrayShift hydrogen support (Chromium issue 374838).
ES6 Map/Set iterators/forEach improvements (issue 1793).
Performance and stability improvements on all platforms.
2014-05-20: Version 3.27.8
Move microtask queueing logic from JavaScript to C++.
Partial revert of "Next bunch of fixes for check elimination" (Chromium
issue 372173).
Performance and stability improvements on all platforms.
2014-05-19: Version 3.27.7
Performance and stability improvements on all platforms.
2014-05-19: Version 3.27.6
Performance and stability improvements on all platforms.
2014-05-16: Version 3.27.5
Performance and stability improvements on all platforms.
2014-05-15: Version 3.27.4
Drop thenable coercion cache (Chromium issue 372788).
Skip write barriers when updating the weak hash table (Chromium issue
359401).
Performance and stability improvements on all platforms.
2014-05-14: Version 3.27.3
Performance and stability improvements on all platforms.
2014-05-13: Version 3.27.2
Harden %SetIsObserved with RUNTIME_ASSERTs (Chromium issue 371782).
Drop unused static microtask API.
Introduce an api to query the microtask autorun state of an isolate.
Performance and stability improvements on all platforms.
2014-05-12: Version 3.27.1
Object.observe: avoid accessing acceptList properties more than once
(issue 3315).
Array Iterator prototype should not have a constructor (issue 3293).
Fix typos in unit test for Array.prototype.fill().
Shorten autogenerated error message for functions only (issue 3019,
Chromium issue 331971).
Reland "Removed default Isolate." (Chromium issue 359977).
Performance and stability improvements on all platforms.
2014-05-09: Version 3.27.0
Unbreak samples and tools.
Performance and stability improvements on all platforms.
2014-05-08: Version 3.26.33
Removed default Isolate (Chromium issue 359977).

deps/v8/DEPS (69)

@@ -2,26 +2,89 @@
# directory and assume that the root of the checkout is in ./v8/, so
# all paths in here must match this assumption.
vars = {
"chromium_trunk": "https://src.chromium.org/svn/trunk",
"buildtools_revision": "fb782d4369d5ae04f17a2fceef7de5a63e50f07b",
}
deps = {
# Remember to keep the revision in sync with the Makefile.
"v8/build/gyp":
"http://gyp.googlecode.com/svn/trunk@1831",
"v8/third_party/icu":
"https://src.chromium.org/svn/trunk/deps/third_party/icu46@258359",
Var("chromium_trunk") + "/deps/third_party/icu52@277999",
"v8/buildtools":
"https://chromium.googlesource.com/chromium/buildtools.git@" +
Var("buildtools_revision"),
"v8/testing/gtest":
"http://googletest.googlecode.com/svn/trunk@692",
"v8/testing/gmock":
"http://googlemock.googlecode.com/svn/trunk@485",
}
deps_os = {
"win": {
"v8/third_party/cygwin":
"http://src.chromium.org/svn/trunk/deps/third_party/cygwin@66844",
Var("chromium_trunk") + "/deps/third_party/cygwin@66844",
"v8/third_party/python_26":
"http://src.chromium.org/svn/trunk/tools/third_party/python_26@89111",
Var("chromium_trunk") + "/tools/third_party/python_26@89111",
}
}
include_rules = [
# Everybody can use some things.
"+include",
"+unicode",
"+third_party/fdlibm",
]
# checkdeps.py shouldn't check for includes in these directories:
skip_child_includes = [
"build",
"third_party",
]
hooks = [
# Pull clang-format binaries using checked-in hashes.
{
"name": "clang_format_win",
"pattern": ".",
"action": [ "download_from_google_storage",
"--no_resume",
"--platform=win32",
"--no_auth",
"--bucket", "chromium-clang-format",
"-s", "v8/buildtools/win/clang-format.exe.sha1",
],
},
{
"name": "clang_format_mac",
"pattern": ".",
"action": [ "download_from_google_storage",
"--no_resume",
"--platform=darwin",
"--no_auth",
"--bucket", "chromium-clang-format",
"-s", "v8/buildtools/mac/clang-format.sha1",
],
},
{
"name": "clang_format_linux",
"pattern": ".",
"action": [ "download_from_google_storage",
"--no_resume",
"--platform=linux*",
"--no_auth",
"--bucket", "chromium-clang-format",
"-s", "v8/buildtools/linux64/clang-format.sha1",
],
},
{
# A change to a .gyp, .gypi, or to GYP itself should run the generator.
"pattern": ".",

deps/v8/Makefile (70)

@@ -70,6 +70,10 @@ ifeq ($(backtrace), off)
else
GYPFLAGS += -Dv8_enable_backtrace=1
endif
# verifypredictable=on
ifeq ($(verifypredictable), on)
GYPFLAGS += -Dv8_enable_verify_predictable=1
endif
# snapshot=off
ifeq ($(snapshot), off)
GYPFLAGS += -Dv8_use_snapshot='false'
@@ -156,11 +160,6 @@ ifeq ($(armv7), true)
endif
endif
endif
# vfp2=off. Deprecated, use armfpu=
# vfp3=off. Deprecated, use armfpu=
ifeq ($(vfp3), off)
GYPFLAGS += -Darm_fpu=vfp
endif
# hardfp=on/off. Deprecated, use armfloatabi
ifeq ($(hardfp),on)
GYPFLAGS += -Darm_float_abi=hard
@@ -169,16 +168,10 @@ ifeq ($(hardfp),off)
GYPFLAGS += -Darm_float_abi=softfp
endif
endif
# armneon=on/off
ifeq ($(armneon), on)
GYPFLAGS += -Darm_neon=1
endif
# fpu: armfpu=xxx
# xxx: vfp, vfpv3-d16, vfpv3, neon.
ifeq ($(armfpu),)
ifneq ($(vfp3), off)
GYPFLAGS += -Darm_fpu=default
endif
else
GYPFLAGS += -Darm_fpu=$(armfpu)
endif
@@ -198,19 +191,19 @@ ifeq ($(armthumb), on)
GYPFLAGS += -Darm_thumb=1
endif
endif
# armtest=on
# arm_test_noprobe=on
# With this flag set, by default v8 will only use features implied
# by the compiler (no probe). This is done by modifying the default
# values of enable_armv7, enable_vfp2, enable_vfp3 and enable_32dregs.
# values of enable_armv7, enable_vfp3, enable_32dregs and enable_neon.
# Modifying these flags when launching v8 will enable the probing for
# the specified values.
# When using the simulator, this flag is implied.
ifeq ($(armtest), on)
GYPFLAGS += -Darm_test=on
ifeq ($(arm_test_noprobe), on)
GYPFLAGS += -Darm_test_noprobe=on
endif
# ----------------- available targets: --------------------
# - "dependencies": pulls in external dependencies (currently: GYP)
# - "builddeps": pulls in external dependencies for building
# - "dependencies": pulls in all external dependencies
# - "grokdump": rebuilds heap constants lists used by grokdump
# - any arch listed in ARCHES (see below)
# - any mode listed in MODES
@@ -228,11 +221,11 @@ endif
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
ARCHES = ia32 x64 arm arm64 mips mipsel
ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64el x87
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel
ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel android_x87
NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
@@ -258,7 +251,7 @@ NACL_CHECKS = $(addsuffix .check,$(NACL_BUILDS))
# File where previously used GYPFLAGS are stored.
ENVFILE = $(OUTDIR)/environment
.PHONY: all check clean dependencies $(ENVFILE).new native \
.PHONY: all check clean builddeps dependencies $(ENVFILE).new native \
qc quickcheck $(QUICKCHECKS) \
$(addsuffix .quickcheck,$(MODES)) $(addsuffix .quickcheck,$(ARCHES)) \
$(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
@@ -406,18 +399,22 @@ clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)) native.cl
# GYP file generation targets.
OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(BUILDS))
$(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE)
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
PYTHONPATH="$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \
$(eval CXX_TARGET_ARCH:=$(shell $(CXX) -v 2>&1 | grep ^Target: | \
cut -f 2 -d " " | cut -f 1 -d "-" ))
$(eval CXX_TARGET_ARCH:=$(subst aarch64,arm64,$(CXX_TARGET_ARCH)))
$(eval V8_TARGET_ARCH:=$(subst .,,$(suffix $(basename $@))))
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH):$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \
GYP_GENERATORS=make \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. \
-Dv8_target_arch=$(subst .,,$(suffix $(basename $@))) \
-Dv8_target_arch=$(V8_TARGET_ARCH) \
$(if $(findstring $(CXX_TARGET_ARCH),$(V8_TARGET_ARCH)), \
-Dtarget_arch=$(V8_TARGET_ARCH),) \
$(if $(findstring optdebug,$@),-Dv8_optimized_debug=2,) \
-S$(suffix $(basename $@))$(suffix $@) $(GYPFLAGS)
$(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
PYTHONPATH="$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH):$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \
GYP_GENERATORS=make \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS)
@@ -471,11 +468,26 @@ GPATH GRTAGS GSYMS GTAGS: gtags.files $(shell cat gtags.files 2> /dev/null)
gtags.clean:
rm -f gtags.files GPATH GRTAGS GSYMS GTAGS
# Dependencies.
# Dependencies. "builddeps" are dependencies required solely for building,
# "dependencies" includes also dependencies required for development.
# Remember to keep these in sync with the DEPS file.
dependencies:
builddeps:
svn checkout --force http://gyp.googlecode.com/svn/trunk build/gyp \
--revision 1831
if svn info third_party/icu 2>&1 | grep -q icu46 ; then \
svn switch --force \
https://src.chromium.org/chrome/trunk/deps/third_party/icu52 \
third_party/icu --revision 277999 ; \
else \
svn checkout --force \
https://src.chromium.org/chrome/trunk/deps/third_party/icu46 \
third_party/icu --revision 258359
https://src.chromium.org/chrome/trunk/deps/third_party/icu52 \
third_party/icu --revision 277999 ; \
fi
svn checkout --force http://googletest.googlecode.com/svn/trunk \
testing/gtest --revision 692
svn checkout --force http://googlemock.googlecode.com/svn/trunk \
testing/gmock --revision 485
dependencies: builddeps
# The spec is a copy of the hooks in v8's DEPS file.
gclient sync -r fb782d4369d5ae04f17a2fceef7de5a63e50f07b --spec="solutions = [{u'managed': False, u'name': u'buildtools', u'url': u'https://chromium.googlesource.com/chromium/buildtools.git', u'custom_deps': {}, u'custom_hooks': [{u'name': u'clang_format_win',u'pattern': u'.',u'action': [u'download_from_google_storage',u'--no_resume',u'--platform=win32',u'--no_auth',u'--bucket',u'chromium-clang-format',u'-s',u'buildtools/win/clang-format.exe.sha1']},{u'name': u'clang_format_mac',u'pattern': u'.',u'action': [u'download_from_google_storage',u'--no_resume',u'--platform=darwin',u'--no_auth',u'--bucket',u'chromium-clang-format',u'-s',u'buildtools/mac/clang-format.sha1']},{u'name': u'clang_format_linux',u'pattern': u'.',u'action': [u'download_from_google_storage',u'--no_resume',u'--platform=linux*',u'--no_auth',u'--bucket',u'chromium-clang-format',u'-s',u'buildtools/linux64/clang-format.sha1']}],u'deps_file': u'.DEPS.git', u'safesync_url': u''}]"
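
Taken together, the Makefile changes above add the x87 architecture, a builddeps target, and several new GYP knobs; typical invocations would look roughly like this (a sketch using only flags and targets introduced or documented in this diff):

    make builddeps                    # fetch GYP, ICU 52, gtest and gmock (build-only deps)
    make dependencies                 # builddeps plus buildtools/clang-format via gclient
    make x64.release verifypredictable=on
    make arm.release armfpu=vfpv3-d16 arm_test_noprobe=on
    make x87.release                  # x87 is newly listed in ARCHES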

18
deps/v8/Makefile.android

@ -26,7 +26,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Those definitions should be consistent with the main Makefile
ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel
ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel android_x87
MODES = release debug
# Generates all combinations of ANDROID ARCHES and MODES,
@ -51,13 +51,13 @@ ifeq ($(ARCH), android_arm)
DEFINES += arm_neon=0 arm_version=7
TOOLCHAIN_ARCH = arm-linux-androideabi
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.6
TOOLCHAIN_VER = 4.8
else
ifeq ($(ARCH), android_arm64)
DEFINES = target_arch=arm64 v8_target_arch=arm64 android_target_arch=arm64 android_target_platform=20
DEFINES = target_arch=arm64 v8_target_arch=arm64 android_target_arch=arm64 android_target_platform=L
TOOLCHAIN_ARCH = aarch64-linux-android
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.8
TOOLCHAIN_VER = 4.9
else
ifeq ($(ARCH), android_mipsel)
DEFINES = target_arch=mipsel v8_target_arch=mipsel android_target_platform=14
@ -72,11 +72,18 @@ else
TOOLCHAIN_ARCH = x86
TOOLCHAIN_PREFIX = i686-linux-android
TOOLCHAIN_VER = 4.6
else
ifeq ($(ARCH), android_x87)
DEFINES = target_arch=x87 v8_target_arch=x87 android_target_arch=x86 android_target_platform=14
TOOLCHAIN_ARCH = x86
TOOLCHAIN_PREFIX = i686-linux-android
TOOLCHAIN_VER = 4.6
else
$(error Target architecture "${ARCH}" is not supported)
endif
endif
endif
endif
endif
TOOLCHAIN_PATH = \
@ -91,6 +98,7 @@ endif
# For mksnapshot host generation.
DEFINES += host_os=${HOST_OS}
DEFINES += OS=android
.SECONDEXPANSION:
$(ANDROID_BUILDS): $(OUTDIR)/Makefile.$$@
@ -112,7 +120,7 @@ $(ANDROID_MAKEFILES):
GYP_DEFINES="${DEFINES}" \
CC="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-gcc" \
CXX="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-g++" \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH)" \
build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
-S$(suffix $(basename $@))$(suffix $@) ${GYPFLAGS}

2
deps/v8/Makefile.nacl

@ -97,7 +97,7 @@ $(NACL_MAKEFILES):
GYP_DEFINES="${GYPENV}" \
CC=${NACL_CC} \
CXX=${NACL_CXX} \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH)" \
build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
-Ibuild/standalone.gypi --depth=. \
-S$(suffix $(basename $@))$(suffix $@) $(GYPFLAGS) \

1
deps/v8/OWNERS

@ -18,4 +18,5 @@ titzer@chromium.org
ulan@chromium.org
vegorov@chromium.org
verwaest@chromium.org
vogelheim@chromium.org
yangguo@chromium.org

69
deps/v8/PRESUBMIT.py

@ -31,6 +31,9 @@ See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import sys
def _V8PresubmitChecks(input_api, output_api):
"""Runs the V8 presubmit checks."""
import sys
@ -38,6 +41,8 @@ def _V8PresubmitChecks(input_api, output_api):
input_api.PresubmitLocalPath(), 'tools'))
from presubmit import CppLintProcessor
from presubmit import SourceProcessor
from presubmit import CheckGeneratedRuntimeTests
from presubmit import CheckExternalReferenceRegistration
results = []
if not CppLintProcessor().Run(input_api.PresubmitLocalPath()):
@ -46,6 +51,65 @@ def _V8PresubmitChecks(input_api, output_api):
results.append(output_api.PresubmitError(
"Copyright header, trailing whitespaces and two empty lines " \
"between declarations check failed"))
if not CheckGeneratedRuntimeTests(input_api.PresubmitLocalPath()):
results.append(output_api.PresubmitError(
"Generated runtime tests check failed"))
if not CheckExternalReferenceRegistration(input_api.PresubmitLocalPath()):
results.append(output_api.PresubmitError(
"External references registration check failed"))
return results
def _CheckUnwantedDependencies(input_api, output_api):
"""Runs checkdeps on #include statements added in this
change. Breaking - rules is an error, breaking ! rules is a
warning.
"""
# We need to wait until we have an input_api object and use this
# roundabout construct to import checkdeps because this file is
# eval-ed and thus doesn't have __file__.
original_sys_path = sys.path
try:
sys.path = sys.path + [input_api.os_path.join(
input_api.PresubmitLocalPath(), 'buildtools', 'checkdeps')]
import checkdeps
from cpp_checker import CppChecker
from rules import Rule
finally:
# Restore sys.path to what it was before.
sys.path = original_sys_path
added_includes = []
for f in input_api.AffectedFiles():
if not CppChecker.IsCppFile(f.LocalPath()):
continue
changed_lines = [line for line_num, line in f.ChangedContents()]
added_includes.append([f.LocalPath(), changed_lines])
deps_checker = checkdeps.DepsChecker(input_api.PresubmitLocalPath())
error_descriptions = []
warning_descriptions = []
for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(
added_includes):
description_with_path = '%s\n %s' % (path, rule_description)
if rule_type == Rule.DISALLOW:
error_descriptions.append(description_with_path)
else:
warning_descriptions.append(description_with_path)
results = []
if error_descriptions:
results.append(output_api.PresubmitError(
'You added one or more #includes that violate checkdeps rules.',
error_descriptions))
if warning_descriptions:
results.append(output_api.PresubmitPromptOrNotify(
'You added one or more #includes of files that are temporarily\n'
'allowed but being removed. Can you avoid introducing the\n'
'#include? See relevant DEPS file(s) for details and contacts.',
warning_descriptions))
return results
@ -54,7 +118,10 @@ def _CommonChecks(input_api, output_api):
results = []
results.extend(input_api.canned_checks.CheckOwners(
input_api, output_api, source_file_filter=None))
results.extend(input_api.canned_checks.CheckPatchFormatted(
input_api, output_api))
results.extend(_V8PresubmitChecks(input_api, output_api))
results.extend(_CheckUnwantedDependencies(input_api, output_api))
return results
@ -110,7 +177,9 @@ def GetPreferredTryMasters(project, change):
'v8_linux64_rel': set(['defaulttests']),
'v8_linux_arm_dbg': set(['defaulttests']),
'v8_linux_arm64_rel': set(['defaulttests']),
'v8_linux_layout_dbg': set(['defaulttests']),
'v8_mac_rel': set(['defaulttests']),
'v8_win_rel': set(['defaulttests']),
'v8_win64_rel': set(['defaulttests']),
},
}

16
deps/v8/benchmarks/v8.json

@ -0,0 +1,16 @@
{
"path": ["."],
"main": "run.js",
"run_count": 2,
"results_regexp": "^%s: (.+)$",
"benchmarks": [
{"name": "Richards"},
{"name": "DeltaBlue"},
{"name": "Crypto"},
{"name": "RayTrace"},
{"name": "EarleyBoyer"},
{"name": "RegExp"},
{"name": "Splay"},
{"name": "NavierStokes"}
]
}

2
deps/v8/build/all.gyp

@ -10,7 +10,9 @@
'dependencies': [
'../samples/samples.gyp:*',
'../src/d8.gyp:d8',
'../test/base-unittests/base-unittests.gyp:*',
'../test/cctest/cctest.gyp:*',
'../test/compiler-unittests/compiler-unittests.gyp:*',
],
'conditions': [
['component!="shared_library"', {

37
deps/v8/build/android.gypi

@ -35,9 +35,6 @@
'variables': {
'android_ndk_root%': '<!(/bin/echo -n $ANDROID_NDK_ROOT)',
'android_toolchain%': '<!(/bin/echo -n $ANDROID_TOOLCHAIN)',
# This is set when building the Android WebView inside the Android build
# system, using the 'android' gyp backend.
'android_webview_build%': 0,
},
'conditions': [
['android_ndk_root==""', {
@ -64,9 +61,6 @@
# link the NDK one?
'use_system_stlport%': '<(android_webview_build)',
'android_stlport_library': 'stlport_static',
# Copy it out one scope.
'android_webview_build%': '<(android_webview_build)',
'OS': 'android',
}, # variables
'target_defaults': {
'defines': [
@ -81,7 +75,12 @@
}, # Release
}, # configurations
'cflags': [ '-Wno-abi', '-Wall', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-fno-rtti', '-fno-exceptions', ],
'-Wnon-virtual-dtor', '-fno-rtti', '-fno-exceptions',
# Note: Using -std=c++0x will define __STRICT_ANSI__, which in
# turn will leave out some template stuff for 'long long'. What
# we want is -std=c++11, but this is not supported by GCC 4.6 or
# Xcode 4.2.
'-std=gnu++0x' ],
'target_conditions': [
['_toolset=="target"', {
'cflags!': [
@ -179,7 +178,7 @@
'-L<(android_stlport_libs)/mips',
],
}],
['target_arch=="ia32"', {
['target_arch=="ia32" or target_arch=="x87"', {
'ldflags': [
'-L<(android_stlport_libs)/x86',
],
@ -196,7 +195,7 @@
}],
],
}],
['target_arch=="ia32"', {
['target_arch=="ia32" or target_arch=="x87"', {
# The x86 toolchain currently has problems with stack-protector.
'cflags!': [
'-fstack-protector',
@ -215,6 +214,15 @@
'-fno-stack-protector',
],
}],
['target_arch=="arm64" or target_arch=="x64"', {
# TODO(ulan): Enable PIE for other architectures (crbug.com/373219).
'cflags': [
'-fPIE',
],
'ldflags': [
'-pie',
],
}],
],
'target_conditions': [
['_type=="executable"', {
@ -257,15 +265,8 @@
}], # _toolset=="target"
# Settings for building host targets using the system toolchain.
['_toolset=="host"', {
'conditions': [
['target_arch=="x64"', {
'cflags': [ '-m64', '-pthread' ],
'ldflags': [ '-m64', '-pthread' ],
}, {
'cflags': [ '-m32', '-pthread' ],
'ldflags': [ '-m32', '-pthread' ],
}],
],
'cflags': [ '-pthread' ],
'ldflags': [ '-pthread' ],
'ldflags!': [
'-Wl,-z,noexecstack',
'-Wl,--gc-sections',

69
deps/v8/build/detect_v8_host_arch.py

@ -0,0 +1,69 @@
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Outputs host CPU architecture in format recognized by gyp."""
import platform
import re
import sys
def main():
print DoMain([])
return 0
def DoMain(_):
"""Hook to be called from gyp without starting a separate python
interpreter."""
host_arch = platform.machine()
# Convert machine type to format recognized by gyp.
if re.match(r'i.86', host_arch) or host_arch == 'i86pc':
host_arch = 'ia32'
elif host_arch in ['x86_64', 'amd64']:
host_arch = 'x64'
elif host_arch.startswith('arm'):
host_arch = 'arm'
elif host_arch == 'aarch64':
host_arch = 'arm64'
elif host_arch == 'mips64':
host_arch = 'mips64el'
elif host_arch.startswith('mips'):
host_arch = 'mipsel'
# platform.machine is based on running kernel. It's possible to use 64-bit
# kernel with 32-bit userland, e.g. to give linker slightly more memory.
# Distinguish between different userland bitness by querying
# the python binary.
if host_arch == 'x64' and platform.architecture()[0] == '32bit':
host_arch = 'ia32'
return host_arch
if __name__ == '__main__':
sys.exit(main())

20
deps/v8/build/features.gypi

@ -41,6 +41,8 @@
'v8_use_snapshot%': 'true',
'v8_enable_verify_predictable%': 0,
# With post mortem support enabled, metadata is embedded into libv8 that
# describes various parameters of the VM for use by debuggers. See
# tools/gen-postmortem-metadata.py for details.
@ -57,8 +59,9 @@
# Enable compiler warnings when using V8_DEPRECATED apis.
'v8_deprecation_warnings%': 0,
# Use the v8 provided v8::Platform implementation.
'v8_use_default_platform%': 1,
# Use external files for startup data blobs:
# the JS builtins sources and the start snapshot.
'v8_use_external_startup_data%': 0,
},
'target_defaults': {
'conditions': [
@ -74,6 +77,9 @@
['v8_enable_verify_heap==1', {
'defines': ['VERIFY_HEAP',],
}],
['v8_enable_verify_predictable==1', {
'defines': ['VERIFY_PREDICTABLE',],
}],
['v8_interpreted_regexp==1', {
'defines': ['V8_INTERPRETED_REGEXP',],
}],
@ -83,13 +89,11 @@
['v8_enable_i18n_support==1', {
'defines': ['V8_I18N_SUPPORT',],
}],
['v8_use_default_platform==1', {
'defines': ['V8_USE_DEFAULT_PLATFORM',],
}],
['v8_compress_startup_data=="bz2"', {
'defines': [
'COMPRESS_STARTUP_DATA_BZ2',
],
'defines': ['COMPRESS_STARTUP_DATA_BZ2',],
}],
['v8_use_external_startup_data==1', {
'defines': ['V8_USE_EXTERNAL_STARTUP_DATA',],
}],
], # conditions
'configurations': {

26
deps/v8/build/get_landmines.py

@ -0,0 +1,26 @@
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This file emits the list of reasons why a particular build needs to be clobbered
(or a list of 'landmines').
"""
import sys
def main():
"""
ALL LANDMINES ARE EMITTED FROM HERE.
"""
print 'Need to clobber after ICU52 roll.'
print 'Landmines test.'
print 'Activating MSVS 2013.'
print 'Revert activation of MSVS 2013.'
return 0
if __name__ == '__main__':
sys.exit(main())

9
deps/v8/build/gyp_v8

@ -34,6 +34,7 @@ import glob
import os
import platform
import shlex
import subprocess
import sys
script_dir = os.path.dirname(os.path.realpath(__file__))
@ -107,6 +108,14 @@ def additional_include_files(args=[]):
def run_gyp(args):
rc = gyp.main(args)
# Check for landmines (reasons to clobber the build). This must be run here,
# rather than a separate runhooks step so that any environment modifications
# from above are picked up.
print 'Running build/landmines.py...'
subprocess.check_call(
[sys.executable, os.path.join(script_dir, 'landmines.py')])
if rc != 0:
print 'Error running GYP'
sys.exit(rc)

114
deps/v8/build/landmine_utils.py

@ -0,0 +1,114 @@
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import functools
import logging
import os
import shlex
import sys
def memoize(default=None):
"""This decorator caches the return value of a parameterless pure function"""
def memoizer(func):
val = []
@functools.wraps(func)
def inner():
if not val:
ret = func()
val.append(ret if ret is not None else default)
if logging.getLogger().isEnabledFor(logging.INFO):
print '%s -> %r' % (func.__name__, val[0])
return val[0]
return inner
return memoizer
@memoize()
def IsWindows():
return sys.platform in ['win32', 'cygwin']
@memoize()
def IsLinux():
return sys.platform.startswith(('linux', 'freebsd'))
@memoize()
def IsMac():
return sys.platform == 'darwin'
@memoize()
def gyp_defines():
"""Parses and returns GYP_DEFINES env var as a dictionary."""
return dict(arg.split('=', 1)
for arg in shlex.split(os.environ.get('GYP_DEFINES', '')))
@memoize()
def gyp_msvs_version():
return os.environ.get('GYP_MSVS_VERSION', '')
@memoize()
def distributor():
"""
Returns a string which is the distributed build engine in use (if any).
Possible values: 'goma', 'ib', ''
"""
if 'goma' in gyp_defines():
return 'goma'
elif IsWindows():
if 'CHROME_HEADLESS' in os.environ:
return 'ib' # use (win and !goma and headless) as approximation of ib
@memoize()
def platform():
"""
Returns a string representing the platform this build is targeted for.
Possible values: 'win', 'mac', 'linux', 'ios', 'android'
"""
if 'OS' in gyp_defines():
if 'android' in gyp_defines()['OS']:
return 'android'
else:
return gyp_defines()['OS']
elif IsWindows():
return 'win'
elif IsLinux():
return 'linux'
else:
return 'mac'
@memoize()
def builder():
"""
Returns a string representing the build engine (not compiler) to use.
Possible values: 'make', 'ninja', 'xcode', 'msvs', 'scons'
"""
if 'GYP_GENERATORS' in os.environ:
# for simplicity, only support the first explicit generator
generator = os.environ['GYP_GENERATORS'].split(',')[0]
if generator.endswith('-android'):
return generator.split('-')[0]
elif generator.endswith('-ninja'):
return 'ninja'
else:
return generator
else:
if platform() == 'android':
# Good enough for now? Do any android bots use make?
return 'make'
elif platform() == 'ios':
return 'xcode'
elif IsWindows():
return 'msvs'
elif IsLinux():
return 'make'
elif IsMac():
return 'xcode'
else:
assert False, 'Don\'t know what builder we\'re using!'

139
deps/v8/build/landmines.py

@ -0,0 +1,139 @@
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script runs every build as a hook. If it detects that the build should
be clobbered, it will touch the file <build_dir>/.landmine_triggered. The
various build scripts will then check for the presence of this file and clobber
accordingly. The script will also emit the reasons for the clobber to stdout.
A landmine is tripped when a builder checks out a different revision, and the
diff between the new landmines and the old ones is non-null. At this point, the
build is clobbered.
"""
import difflib
import logging
import optparse
import os
import sys
import subprocess
import time
import landmine_utils
SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def get_target_build_dir(build_tool, target):
"""
Returns output directory absolute path dependent on build and targets.
Examples:
r'c:\b\build\slave\win\build\src\out\Release'
'/mnt/data/b/build/slave/linux/build/src/out/Debug'
'/b/build/slave/ios_rel_device/build/src/xcodebuild/Release-iphoneos'
Keep this function in sync with tools/build/scripts/slave/compile.py
"""
ret = None
if build_tool == 'xcode':
ret = os.path.join(SRC_DIR, 'xcodebuild', target)
elif build_tool in ['make', 'ninja', 'ninja-ios']: # TODO: Remove ninja-ios.
ret = os.path.join(SRC_DIR, 'out', target)
elif build_tool in ['msvs', 'vs', 'ib']:
ret = os.path.join(SRC_DIR, 'build', target)
else:
raise NotImplementedError('Unexpected GYP_GENERATORS (%s)' % build_tool)
return os.path.abspath(ret)
def set_up_landmines(target, new_landmines):
"""Does the work of setting, planting, and triggering landmines."""
out_dir = get_target_build_dir(landmine_utils.builder(), target)
landmines_path = os.path.join(out_dir, '.landmines')
if not os.path.exists(out_dir):
return
if not os.path.exists(landmines_path):
print "Landmines tracker didn't exists."
# FIXME(machenbach): Clobber deletes the .landmines tracker. Difficult
# to know if we are right after a clobber or if it is first-time landmines
# deployment. Also, a landmine-triggered clobber right after a clobber is
# not possible. Different clobber methods for msvs, xcode and make all
# have different blacklists of files that are not deleted.
if os.path.exists(landmines_path):
triggered = os.path.join(out_dir, '.landmines_triggered')
with open(landmines_path, 'r') as f:
old_landmines = f.readlines()
if old_landmines != new_landmines:
old_date = time.ctime(os.stat(landmines_path).st_ctime)
diff = difflib.unified_diff(old_landmines, new_landmines,
fromfile='old_landmines', tofile='new_landmines',
fromfiledate=old_date, tofiledate=time.ctime(), n=0)
with open(triggered, 'w') as f:
f.writelines(diff)
print "Setting landmine: %s" % triggered
elif os.path.exists(triggered):
# Remove false triggered landmines.
os.remove(triggered)
print "Removing landmine: %s" % triggered
with open(landmines_path, 'w') as f:
f.writelines(new_landmines)
def process_options():
"""Returns a list of landmine emitting scripts."""
parser = optparse.OptionParser()
parser.add_option(
'-s', '--landmine-scripts', action='append',
default=[os.path.join(SRC_DIR, 'build', 'get_landmines.py')],
help='Path to the script which emits landmines to stdout. The target '
'is passed to this script via option -t. Note that an extra '
'script can be specified via an env var EXTRA_LANDMINES_SCRIPT.')
parser.add_option('-v', '--verbose', action='store_true',
default=('LANDMINES_VERBOSE' in os.environ),
help=('Emit some extra debugging information (default off). This option '
'is also enabled by the presence of a LANDMINES_VERBOSE environment '
'variable.'))
options, args = parser.parse_args()
if args:
parser.error('Unknown arguments %s' % args)
logging.basicConfig(
level=logging.DEBUG if options.verbose else logging.ERROR)
extra_script = os.environ.get('EXTRA_LANDMINES_SCRIPT')
if extra_script:
return options.landmine_scripts + [extra_script]
else:
return options.landmine_scripts
def main():
landmine_scripts = process_options()
if landmine_utils.builder() in ('dump_dependency_json', 'eclipse'):
return 0
landmines = []
for s in landmine_scripts:
proc = subprocess.Popen([sys.executable, s], stdout=subprocess.PIPE)
output, _ = proc.communicate()
landmines.extend([('%s\n' % l.strip()) for l in output.splitlines()])
for target in ('Debug', 'Release'):
set_up_landmines(target, landmines)
return 0
if __name__ == '__main__':
sys.exit(main())

54
deps/v8/build/standalone.gypi

@ -33,8 +33,8 @@
'includes': ['toolchain.gypi'],
'variables': {
'component%': 'static_library',
'clang%': 0,
'asan%': 0,
'tsan%': 0,
'visibility%': 'hidden',
'v8_enable_backtrace%': 0,
'v8_enable_i18n_support%': 1,
@ -51,13 +51,7 @@
# Anything else gets passed through, which probably won't work
# very well; such hosts should pass an explicit target_arch
# to gyp.
'host_arch%':
'<!(uname -m | sed -e "s/i.86/ia32/;\
s/x86_64/x64/;\
s/amd64/x64/;\
s/arm.*/arm/;\
s/aarch64/arm64/;\
s/mips.*/mipsel/")',
'host_arch%': '<!pymod_do_main(detect_v8_host_arch)',
}, {
# OS!="linux" and OS!="freebsd" and OS!="openbsd" and
# OS!="netbsd" and OS!="mac"
@ -104,6 +98,7 @@
['(v8_target_arch=="arm" and host_arch!="arm") or \
(v8_target_arch=="arm64" and host_arch!="arm64") or \
(v8_target_arch=="mipsel" and host_arch!="mipsel") or \
(v8_target_arch=="mips64el" and host_arch!="mips64el") or \
(v8_target_arch=="x64" and host_arch!="x64") or \
(OS=="android" or OS=="qnx")', {
'want_separate_host_toolset': 1,
@ -115,16 +110,20 @@
}, {
'os_posix%': 1,
}],
['(v8_target_arch=="ia32" or v8_target_arch=="x64") and \
['(v8_target_arch=="ia32" or v8_target_arch=="x64" or v8_target_arch=="x87") and \
(OS=="linux" or OS=="mac")', {
'v8_enable_gdbjit%': 1,
}, {
'v8_enable_gdbjit%': 0,
}],
['OS=="mac"', {
'clang%': 1,
}, {
'clang%': 0,
}],
],
# Default ARM variable settings.
'arm_version%': 'default',
'arm_neon%': 0,
'arm_fpu%': 'vfpv3',
'arm_float_abi%': 'default',
'arm_thumb': 'default',
@ -192,17 +191,36 @@
],
},
}],
['tsan==1', {
'target_defaults': {
'cflags+': [
'-fno-omit-frame-pointer',
'-gline-tables-only',
'-fsanitize=thread',
'-fPIC',
'-Wno-c++11-extensions',
],
'cflags!': [
'-fomit-frame-pointer',
],
'ldflags': [
'-fsanitize=thread',
'-pie',
],
'defines': [
'THREAD_SANITIZER',
],
},
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd"', {
'target_defaults': {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-pthread', '-fno-exceptions', '-pedantic' ],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti' ],
'-Wno-long-long', '-pthread', '-fno-exceptions',
'-pedantic' ],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ],
'ldflags': [ '-pthread', ],
'conditions': [
[ 'OS=="linux"', {
'cflags': [ '-ansi' ],
}],
[ 'visibility=="hidden" and v8_enable_backtrace==0', {
'cflags': [ '-fvisibility=hidden' ],
}],
@ -218,7 +236,7 @@
'target_defaults': {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-fno-exceptions' ],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti' ],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ],
'conditions': [
[ 'visibility=="hidden"', {
'cflags': [ '-fvisibility=hidden' ],
@ -316,7 +334,7 @@
'target_defaults': {
'xcode_settings': {
'ALWAYS_SEARCH_USER_PATHS': 'NO',
'GCC_C_LANGUAGE_STANDARD': 'ansi', # -ansi
'GCC_C_LANGUAGE_STANDARD': 'c99', # -std=c99
'GCC_CW_ASM_SYNTAX': 'NO', # No -fasm-blocks
'GCC_DYNAMIC_NO_PIC': 'NO', # No -mdynamic-no-pic
# (Equivalent to -fPIC)
@ -352,7 +370,7 @@
['clang==1', {
'xcode_settings': {
'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
'CLANG_CXX_LANGUAGE_STANDARD': 'gnu++11', # -std=gnu++11
'CLANG_CXX_LANGUAGE_STANDARD': 'gnu++0x', # -std=gnu++0x
},
}],
],

364
deps/v8/build/toolchain.gypi

@ -31,7 +31,7 @@
'variables': {
'msvs_use_common_release': 0,
'gcc_version%': 'unknown',
'CXX%': '${CXX:-$(which g++)}', # Used to assemble a shell command.
'clang%': 0,
'v8_target_arch%': '<(target_arch)',
# Native Client builds currently use the V8 ARM JIT and
# arm/simulator-arm.cc to defer the significant effort required
@ -47,7 +47,7 @@
# these registers in the snapshot and use CPU feature probing when running
# on the target.
'v8_can_use_vfp32dregs%': 'false',
'arm_test%': 'off',
'arm_test_noprobe%': 'off',
# Similar to vfp but on MIPS.
'v8_can_use_fpu_instructions%': 'true',
@ -56,7 +56,7 @@
'v8_use_mips_abi_hardfloat%': 'true',
# Default arch variant for MIPS.
'mips_arch_variant%': 'mips32r2',
'mips_arch_variant%': 'r2',
'v8_enable_backtrace%': 0,
@ -82,38 +82,88 @@
# Allow to suppress the array bounds warning (default is no suppression).
'wno_array_bounds%': '',
'variables': {
# This is set when building the Android WebView inside the Android build
# system, using the 'android' gyp backend.
'android_webview_build%': 0,
},
# Copy it out one scope.
'android_webview_build%': '<(android_webview_build)',
},
'conditions': [
['host_arch=="ia32" or host_arch=="x64" or clang==1', {
'variables': {
'host_cxx_is_biarch%': 1,
},
}, {
'variables': {
'host_cxx_is_biarch%': 0,
},
}],
['target_arch=="ia32" or target_arch=="x64" or target_arch=="x87" or \
clang==1', {
'variables': {
'target_cxx_is_biarch%': 1,
},
}, {
'variables': {
'target_cxx_is_biarch%': 0,
},
}],
],
'target_defaults': {
'conditions': [
['v8_target_arch=="arm"', {
'defines': [
'V8_TARGET_ARCH_ARM',
],
'conditions': [
[ 'arm_version==7 or arm_version=="default"', {
'defines': [
'CAN_USE_ARMV7_INSTRUCTIONS',
],
}],
[ 'arm_fpu=="vfpv3-d16" or arm_fpu=="default"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
],
}],
[ 'arm_fpu=="vfpv3"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
'CAN_USE_VFP32DREGS',
],
}],
[ 'arm_fpu=="neon"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
'CAN_USE_VFP32DREGS',
'CAN_USE_NEON',
],
}],
[ 'arm_test_noprobe=="on"', {
'defines': [
'ARM_TEST_NO_FEATURE_PROBE',
],
}],
],
'target_conditions': [
['_toolset=="host"', {
'variables': {
'armcompiler': '<!($(echo ${CXX_host:-$(which g++)}) -v 2>&1 | grep -q "^Target: arm" && echo "yes" || echo "no")',
},
'conditions': [
['armcompiler=="yes"', {
['v8_target_arch==host_arch and android_webview_build==0', {
# Host built with an Arm CXX compiler.
'conditions': [
[ 'arm_version==7', {
'cflags': ['-march=armv7-a',],
}],
[ 'arm_version==7 or arm_version=="default"', {
'conditions': [
[ 'arm_neon==1', {
'cflags': ['-mfpu=neon',],
},
{
'conditions': [
[ 'arm_fpu!="default"', {
'cflags': ['-mfpu=<(arm_fpu)',],
}],
],
}],
],
}],
[ 'arm_float_abi!="default"', {
'cflags': ['-mfloat-abi=<(arm_float_abi)',],
}],
@ -123,44 +173,11 @@
[ 'arm_thumb==0', {
'cflags': ['-marm',],
}],
[ 'arm_test=="on"', {
'defines': [
'ARM_TEST',
],
}],
],
}, {
# armcompiler=="no"
'conditions': [
[ 'arm_version==7 or arm_version=="default"', {
'defines': [
'CAN_USE_ARMV7_INSTRUCTIONS=1',
],
# 'v8_target_arch!=host_arch'
# Host not built with an Arm CXX compiler (simulator build).
'conditions': [
[ 'arm_fpu=="default"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
],
}],
[ 'arm_fpu=="vfpv3-d16"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
],
}],
[ 'arm_fpu=="vfpv3"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
'CAN_USE_VFP32DREGS',
],
}],
[ 'arm_fpu=="neon" or arm_neon==1', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
'CAN_USE_VFP32DREGS',
],
}],
],
}],
[ 'arm_float_abi=="hard"', {
'defines': [
'USE_EABI_HARDFLOAT=1',
@ -172,36 +189,24 @@
],
}],
],
'defines': [
'ARM_TEST',
],
}],
],
}], # _toolset=="host"
['_toolset=="target"', {
'variables': {
'armcompiler': '<!($(echo ${CXX_target:-<(CXX)}) -v 2>&1 | grep -q "^Target: arm" && echo "yes" || echo "no")',
},
'conditions': [
['armcompiler=="yes"', {
['v8_target_arch==target_arch and android_webview_build==0', {
# Target built with an Arm CXX compiler.
'conditions': [
[ 'arm_version==7', {
'cflags': ['-march=armv7-a',],
}],
[ 'arm_version==7 or arm_version=="default"', {
'conditions': [
[ 'arm_neon==1', {
'cflags': ['-mfpu=neon',],
},
{
'conditions': [
[ 'arm_fpu!="default"', {
'cflags': ['-mfpu=<(arm_fpu)',],
}],
],
}],
],
}],
[ 'arm_float_abi!="default"', {
'cflags': ['-mfloat-abi=<(arm_float_abi)',],
}],
@ -211,44 +216,11 @@
[ 'arm_thumb==0', {
'cflags': ['-marm',],
}],
[ 'arm_test=="on"', {
'defines': [
'ARM_TEST',
],
}],
],
}, {
# armcompiler=="no"
# 'v8_target_arch!=target_arch'
# Target not built with an Arm CXX compiler (simulator build).
'conditions': [
[ 'arm_version==7 or arm_version=="default"', {
'defines': [
'CAN_USE_ARMV7_INSTRUCTIONS=1',
],
'conditions': [
[ 'arm_fpu=="default"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
],
}],
[ 'arm_fpu=="vfpv3-d16"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
],
}],
[ 'arm_fpu=="vfpv3"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
'CAN_USE_VFP32DREGS',
],
}],
[ 'arm_fpu=="neon" or arm_neon==1', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
'CAN_USE_VFP32DREGS',
],
}],
],
}],
[ 'arm_float_abi=="hard"', {
'defines': [
'USE_EABI_HARDFLOAT=1',
@ -260,9 +232,6 @@
],
}],
],
'defines': [
'ARM_TEST',
],
}],
],
}], # _toolset=="target"
@ -278,15 +247,19 @@
'V8_TARGET_ARCH_IA32',
],
}], # v8_target_arch=="ia32"
['v8_target_arch=="x87"', {
'defines': [
'V8_TARGET_ARCH_X87',
],
'cflags': ['-march=i586'],
}], # v8_target_arch=="x87"
['v8_target_arch=="mips"', {
'defines': [
'V8_TARGET_ARCH_MIPS',
],
'variables': {
'mipscompiler': '<!($(echo <(CXX)) -v 2>&1 | grep -q "^Target: mips" && echo "yes" || echo "no")',
},
'conditions': [
['mipscompiler=="yes"', {
['v8_target_arch==target_arch and android_webview_build==0', {
# Target built with a Mips CXX compiler.
'target_conditions': [
['_toolset=="target"', {
'cflags': ['-EB'],
@ -299,10 +272,10 @@
'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'],
}],
['mips_arch_variant=="mips32r2"', {
['mips_arch_variant=="r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
}],
['mips_arch_variant=="mips32r1"', {
['mips_arch_variant=="r1"', {
'cflags': ['-mips32', '-Wa,-mips32'],
}],
],
@ -324,7 +297,7 @@
'__mips_soft_float=1'
],
}],
['mips_arch_variant=="mips32r2"', {
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS32R2',],
}],
],
@ -333,11 +306,9 @@
'defines': [
'V8_TARGET_ARCH_MIPS',
],
'variables': {
'mipscompiler': '<!($(echo <(CXX)) -v 2>&1 | grep -q "^Target: mips" && echo "yes" || echo "no")',
},
'conditions': [
['mipscompiler=="yes"', {
['v8_target_arch==target_arch and android_webview_build==0', {
# Target built with a Mips CXX compiler.
'target_conditions': [
['_toolset=="target"', {
'cflags': ['-EL'],
@ -350,10 +321,10 @@
'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'],
}],
['mips_arch_variant=="mips32r2"', {
['mips_arch_variant=="r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
}],
['mips_arch_variant=="mips32r1"', {
['mips_arch_variant=="r1"', {
'cflags': ['-mips32', '-Wa,-mips32'],
}],
['mips_arch_variant=="loongson"', {
@ -378,7 +349,7 @@
'__mips_soft_float=1'
],
}],
['mips_arch_variant=="mips32r2"', {
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS32R2',],
}],
['mips_arch_variant=="loongson"', {
@ -386,6 +357,68 @@
}],
],
}], # v8_target_arch=="mipsel"
['v8_target_arch=="mips64el"', {
'defines': [
'V8_TARGET_ARCH_MIPS64',
],
'conditions': [
['v8_target_arch==target_arch and android_webview_build==0', {
# Target built with a Mips CXX compiler.
'target_conditions': [
['_toolset=="target"', {
'cflags': ['-EL'],
'ldflags': ['-EL'],
'conditions': [
[ 'v8_use_mips_abi_hardfloat=="true"', {
'cflags': ['-mhard-float'],
'ldflags': ['-mhard-float'],
}, {
'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'],
}],
['mips_arch_variant=="r6"', {
'cflags': ['-mips64r6', '-mabi=64', '-Wa,-mips64r6'],
'ldflags': [
'-mips64r6', '-mabi=64',
'-Wl,--dynamic-linker=$(LDSO_PATH)',
'-Wl,--rpath=$(LD_R_PATH)',
],
}],
['mips_arch_variant=="r2"', {
'cflags': ['-mips64r2', '-mabi=64', '-Wa,-mips64r2'],
'ldflags': [
'-mips64r2', '-mabi=64',
'-Wl,--dynamic-linker=$(LDSO_PATH)',
'-Wl,--rpath=$(LD_R_PATH)',
],
}],
],
}],
],
}],
[ 'v8_can_use_fpu_instructions=="true"', {
'defines': [
'CAN_USE_FPU_INSTRUCTIONS',
],
}],
[ 'v8_use_mips_abi_hardfloat=="true"', {
'defines': [
'__mips_hard_float=1',
'CAN_USE_FPU_INSTRUCTIONS',
],
}, {
'defines': [
'__mips_soft_float=1'
],
}],
['mips_arch_variant=="r6"', {
'defines': ['_MIPS_ARCH_MIPS64R6',],
}],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS64R2',],
}],
],
}], # v8_target_arch=="mips64el"
['v8_target_arch=="x64"', {
'defines': [
'V8_TARGET_ARCH_X64',
@ -400,16 +433,42 @@
},
'msvs_configuration_platform': 'x64',
}], # v8_target_arch=="x64"
['v8_target_arch=="x32"', {
'defines': [
# x32 port shares the source code with x64 port.
'V8_TARGET_ARCH_X64',
'V8_TARGET_ARCH_32_BIT',
],
'cflags': [
'-mx32',
# Inhibit warning if long long type is used.
'-Wno-long-long',
],
'ldflags': [
'-mx32',
],
}], # v8_target_arch=="x32"
['OS=="win"', {
'defines': [
'WIN32',
],
# 4351: VS 2005 and later are warning us that they've fixed a bug
# present in VS 2003 and earlier.
'msvs_disabled_warnings': [4351],
'msvs_configuration_attributes': {
'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1',
},
}],
['OS=="win" and v8_target_arch=="ia32"', {
'msvs_settings': {
'VCCLCompilerTool': {
# Ensure no surprising artifacts from 80bit double math with x86.
'AdditionalOptions': ['/arch:SSE2'],
},
},
}],
['OS=="win" and v8_enable_prof==1', {
'msvs_settings': {
'VCLinkerTool': {
@ -417,44 +476,28 @@
},
},
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="qnx"', {
'conditions': [
[ 'v8_no_strict_aliasing==1', {
'cflags': [ '-fno-strict-aliasing' ],
}],
], # conditions
}],
['OS=="solaris"', {
'defines': [ '__C99FEATURES__=1' ], # isinf() etc.
}],
['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
(v8_target_arch=="arm" or v8_target_arch=="ia32" or \
v8_target_arch=="mips" or v8_target_arch=="mipsel")', {
# Check whether the host compiler and target compiler support the
# '-m32' option and set it if so.
v8_target_arch=="x87" or v8_target_arch=="mips" or \
v8_target_arch=="mipsel")', {
'target_conditions': [
['_toolset=="host"', {
'variables': {
'm32flag': '<!(($(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1 < /dev/null) && echo "-m32" || true)',
},
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
'conditions': [
['host_cxx_is_biarch==1', {
'cflags': [ '-m32' ],
'ldflags': [ '-m32' ]
}],
],
'xcode_settings': {
'ARCHS': [ 'i386' ],
},
}],
['_toolset=="target"', {
'variables': {
'm32flag': '<!(($(echo ${CXX_target:-<(CXX)}) -m32 -E - > /dev/null 2>&1 < /dev/null) && echo "-m32" || true)',
'clang%': 0,
},
'conditions': [
['((OS!="android" and OS!="qnx") or clang==1) and \
nacl_target_arch!="nacl_x64"', {
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
['target_cxx_is_biarch==1 and nacl_target_arch!="nacl_x64"', {
'cflags': [ '-m32' ],
'ldflags': [ '-m32' ],
}],
],
'xcode_settings': {
@ -465,28 +508,35 @@
}],
['(OS=="linux" or OS=="android") and \
(v8_target_arch=="x64" or v8_target_arch=="arm64")', {
# Check whether the host compiler and target compiler support the
# '-m64' option and set it if so.
'target_conditions': [
['_toolset=="host"', {
'variables': {
'm64flag': '<!(($(echo ${CXX_host:-$(which g++)}) -m64 -E - > /dev/null 2>&1 < /dev/null) && echo "-m64" || true)',
},
'cflags': [ '<(m64flag)' ],
'ldflags': [ '<(m64flag)' ],
'conditions': [
['host_cxx_is_biarch==1', {
'cflags': [ '-m64' ],
'ldflags': [ '-m64' ]
}],
],
}],
['_toolset=="target"', {
'variables': {
'm64flag': '<!(($(echo ${CXX_target:-<(CXX)}) -m64 -E - > /dev/null 2>&1 < /dev/null) && echo "-m64" || true)',
},
'conditions': [
['((OS!="android" and OS!="qnx") or clang==1)', {
'cflags': [ '<(m64flag)' ],
'ldflags': [ '<(m64flag)' ],
['target_cxx_is_biarch==1', {
'cflags': [ '-m64' ],
'ldflags': [ '-m64' ],
}],
]
}],
],
}]
],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="qnx"', {
'conditions': [
[ 'v8_no_strict_aliasing==1', {
'cflags': [ '-fno-strict-aliasing' ],
}],
], # conditions
}],
['OS=="solaris"', {
'defines': [ '__C99FEATURES__=1' ], # isinf() etc.
}],
['OS=="freebsd" or OS=="openbsd"', {
'cflags': [ '-I/usr/local/include' ],

1
deps/v8/codereview.settings

@ -5,3 +5,4 @@ STATUS: http://v8-status.appspot.com/status
TRY_ON_UPLOAD: False
TRYSERVER_SVN_URL: svn://svn.chromium.org/chrome-try-v8
TRYSERVER_ROOT: v8
PROJECT: v8

38
deps/v8/include/libplatform/libplatform.h

@ -0,0 +1,38 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_LIBPLATFORM_LIBPLATFORM_H_
#define V8_LIBPLATFORM_LIBPLATFORM_H_
#include "include/v8-platform.h"
namespace v8 {
namespace platform {
/**
* Returns a new instance of the default v8::Platform implementation.
*
* The caller will take ownership of the returned pointer. |thread_pool_size|
* is the number of worker threads to allocate for background jobs. If a value
* of zero is passed, a suitable default based on the current number of
* processors online will be chosen.
*/
v8::Platform* CreateDefaultPlatform(int thread_pool_size = 0);
/**
* Pumps the message loop for the given isolate.
*
* The caller has to make sure that this is called from the right thread.
* Returns true if a task was executed, and false otherwise. This call does
* not block if no task is pending. The |platform| has to be created using
* |CreateDefaultPlatform|.
*/
bool PumpMessageLoop(v8::Platform* platform, v8::Isolate* isolate);
} // namespace platform
} // namespace v8
#endif // V8_LIBPLATFORM_LIBPLATFORM_H_
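The libplatform component above is new as a public header in this drop of V8. A minimal sketch of the intended embedder wiring, assuming the v8::V8::InitializePlatform()/ShutdownPlatform() entry points declared in v8.h and default isolate construction; treat it as illustrative, not as the one true setup:

#include "include/libplatform/libplatform.h"
#include "include/v8.h"

int main() {
  // A thread pool size of 0 lets V8 pick a default from the number of
  // processors currently online.
  v8::Platform* platform = v8::platform::CreateDefaultPlatform(0);
  v8::V8::InitializePlatform(platform);
  v8::V8::Initialize();

  v8::Isolate* isolate = v8::Isolate::New();
  // ... compile and run scripts against |isolate| ...

  // Drain pending foreground tasks; PumpMessageLoop() never blocks and
  // returns false once the queue for |isolate| is empty.
  while (v8::platform::PumpMessageLoop(platform, isolate)) {
  }

  isolate->Dispose();
  v8::V8::Dispose();
  v8::V8::ShutdownPlatform();
  delete platform;  // the caller owns the platform
  return 0;
}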

71
deps/v8/include/v8-debug.h

@ -19,8 +19,10 @@ enum DebugEvent {
NewFunction = 3,
BeforeCompile = 4,
AfterCompile = 5,
ScriptCollected = 6,
BreakForCommand = 7
CompileError = 6,
PromiseEvent = 7,
AsyncTaskEvent = 8,
BreakForCommand = 9
};
@ -137,7 +139,7 @@ class V8_EXPORT Debug {
* A EventCallback2 does not take possession of the event data,
* and must not rely on the data persisting after the handler returns.
*/
typedef void (*EventCallback2)(const EventDetails& event_details);
typedef void (*EventCallback)(const EventDetails& event_details);
/**
* Debug message callback function.
@ -147,23 +149,14 @@ class V8_EXPORT Debug {
* A MessageHandler2 does not take possession of the message data,
* and must not rely on the data persisting after the handler returns.
*/
typedef void (*MessageHandler2)(const Message& message);
/**
* Debug host dispatch callback function.
*/
typedef void (*HostDispatchHandler)();
typedef void (*MessageHandler)(const Message& message);
/**
* Callback function for the host to ensure debug messages are processed.
*/
typedef void (*DebugMessageDispatchHandler)();
static bool SetDebugEventListener2(EventCallback2 that,
Handle<Value> data = Handle<Value>());
// Set a JavaScript debug event listener.
static bool SetDebugEventListener(v8::Handle<v8::Object> that,
static bool SetDebugEventListener(EventCallback that,
Handle<Value> data = Handle<Value>());
// Schedule a debugger break to happen when JavaScript code is run
@ -181,36 +174,13 @@ class V8_EXPORT Debug {
// stops.
static void DebugBreakForCommand(Isolate* isolate, ClientData* data);
// TODO(svenpanne) Remove this when Chrome is updated.
static void DebugBreakForCommand(ClientData* data, Isolate* isolate) {
DebugBreakForCommand(isolate, data);
}
// Message based interface. The message protocol is JSON.
static void SetMessageHandler2(MessageHandler2 handler);
static void SetMessageHandler(MessageHandler handler);
static void SendCommand(Isolate* isolate,
const uint16_t* command, int length,
ClientData* client_data = NULL);
// Dispatch interface.
static void SetHostDispatchHandler(HostDispatchHandler handler,
int period = 100);
/**
* Register a callback function to be called when a debug message has been
* received and is ready to be processed. For the debug messages to be
* processed V8 needs to be entered, and in certain embedding scenarios this
* callback can be used to make sure V8 is entered for the debug message to
* be processed. Note that debug messages will only be processed if there is
* a V8 break. This can happen automatically by using the option
* --debugger-auto-break.
* \param provide_locker requires that V8 acquires v8::Locker for you before
* calling handler
*/
static void SetDebugMessageDispatchHandler(
DebugMessageDispatchHandler handler, bool provide_locker = false);
/**
* Run a JavaScript function in the debugger.
* \param fun the function to call
@ -237,22 +207,6 @@ class V8_EXPORT Debug {
*/
static Local<Value> GetMirror(v8::Handle<v8::Value> obj);
/**
* Enable the V8 builtin debug agent. The debugger agent will listen on the
* supplied TCP/IP port for remote debugger connection.
* \param name the name of the embedding application
* \param port the TCP/IP port to listen on
* \param wait_for_connection whether V8 should pause on a first statement
* allowing remote debugger to connect before anything interesting happened
*/
static bool EnableAgent(const char* name, int port,
bool wait_for_connection = false);
/**
* Disable the V8 builtin debug agent. The TCP/IP connection will be closed.
*/
static void DisableAgent();
/**
* Makes V8 process all pending debug messages.
*
@ -271,10 +225,6 @@ class V8_EXPORT Debug {
* until V8 gets control again; however, embedding application may improve
* this by manually calling this method.
*
* It makes sense to call this method whenever a new debug message arrived and
* V8 is not already running. Method v8::Debug::SetDebugMessageDispatchHandler
* should help with the former condition.
*
* Technically this method in many senses is equivalent to executing empty
* script:
* 1. It does nothing except for processing all pending debug messages.
@ -305,11 +255,6 @@ class V8_EXPORT Debug {
* unexpectedly used. LiveEdit is enabled by default.
*/
static void SetLiveEditEnabled(Isolate* isolate, bool enable);
// TODO(svenpanne) Remove this when Chrome is updated.
static void SetLiveEditEnabled(bool enable, Isolate* isolate) {
SetLiveEditEnabled(isolate, enable);
}
};
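This header renames the "2"-suffixed callback types and registration functions (EventCallback2/SetDebugEventListener2, MessageHandler2/SetMessageHandler2) to the plain names and drops the JavaScript-object listener overload. A hedged sketch of registering a listener under the renamed API; the handler body is purely illustrative:

#include "include/v8-debug.h"

// Event data is only valid for the duration of the callback and must not
// be retained after the handler returns.
static void OnDebugEvent(const v8::Debug::EventDetails& details) {
  if (details.GetEvent() == v8::AfterCompile) {
    // e.g. note that another script finished compiling
  }
}

void InstallDebugListener() {
  v8::Debug::SetDebugEventListener(OnDebugEvent);
}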

9
deps/v8/include/v8-platform.h

@ -5,10 +5,10 @@
#ifndef V8_V8_PLATFORM_H_
#define V8_V8_PLATFORM_H_
#include "v8.h"
namespace v8 {
class Isolate;
/**
* A Task represents a unit of work.
*/
@ -37,6 +37,8 @@ class Platform {
kLongRunningTask
};
virtual ~Platform() {}
/**
* Schedules a task to be invoked on a background thread. |expected_runtime|
* indicates that the task will run a long time. The Platform implementation
@ -53,9 +55,6 @@ class Platform {
* scheduling. The definition of "foreground" is opaque to V8.
*/
virtual void CallOnForegroundThread(Isolate* isolate, Task* task) = 0;
protected:
virtual ~Platform() {}
};
} // namespace v8
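Making ~Platform() public (it previously sat in the protected section) means an embedder can now destroy a platform through a v8::Platform*. A toy implementation sketch, deliberately synchronous; a real embedder would hand tasks to worker threads and an event loop, and the platform takes ownership of each |task|:

#include "include/v8-platform.h"

class InlinePlatform : public v8::Platform {
 public:
  virtual ~InlinePlatform() {}

  virtual void CallOnBackgroundThread(v8::Task* task,
                                      ExpectedRuntime expected_runtime) {
    task->Run();  // illustration only: run "background" work inline
    delete task;
  }

  virtual void CallOnForegroundThread(v8::Isolate* isolate, v8::Task* task) {
    task->Run();
    delete task;
  }
};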

5
deps/v8/include/v8-profiler.h

@ -231,7 +231,8 @@ class V8_EXPORT HeapGraphNode {
kSynthetic = 9, // Synthetic object, usually used for grouping
// snapshot items together.
kConsString = 10, // Concatenated string. A pair of pointers to strings.
kSlicedString = 11 // Sliced string. A fragment of another string.
kSlicedString = 11, // Sliced string. A fragment of another string.
kSymbol = 12 // A Symbol (ES6).
};
/** Returns node type (see HeapGraphNode::Type). */
@ -292,7 +293,7 @@ class V8_EXPORT OutputStream { // NOLINT
*/
virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) {
return kAbort;
};
}
};

13
deps/v8/include/v8-util.h

@ -154,7 +154,7 @@ class PersistentValueMap {
*/
bool SetReturnValue(const K& key,
ReturnValue<Value> returnValue) {
return SetReturnValueFromVal(returnValue, Traits::Get(&impl_, key));
return SetReturnValueFromVal(&returnValue, Traits::Get(&impl_, key));
}
/**
@ -227,7 +227,7 @@ class PersistentValueMap {
}
template<typename T>
bool SetReturnValue(ReturnValue<T> returnValue) {
return SetReturnValueFromVal(returnValue, value_);
return SetReturnValueFromVal(&returnValue, value_);
}
void Reset() {
value_ = kPersistentContainerNotFound;
@ -300,6 +300,7 @@ class PersistentValueMap {
K key = Traits::KeyFromWeakCallbackData(data);
Traits::Dispose(data.GetIsolate(),
persistentValueMap->Remove(key).Pass(), key);
Traits::DisposeCallbackData(data.GetParameter());
}
}
@ -308,10 +309,10 @@ class PersistentValueMap {
}
static bool SetReturnValueFromVal(
ReturnValue<Value>& returnValue, PersistentContainerValue value) {
ReturnValue<Value>* returnValue, PersistentContainerValue value) {
bool hasValue = value != kPersistentContainerNotFound;
if (hasValue) {
returnValue.SetInternal(
returnValue->SetInternal(
*reinterpret_cast<internal::Object**>(FromVal(value)));
}
return hasValue;
@ -337,7 +338,7 @@ class PersistentValueMap {
static UniquePersistent<V> Release(PersistentContainerValue v) {
UniquePersistent<V> p;
p.val_ = FromVal(v);
if (Traits::kCallbackType != kNotWeak && !p.IsEmpty()) {
if (Traits::kCallbackType != kNotWeak && p.IsWeak()) {
Traits::DisposeCallbackData(
p.template ClearWeak<typename Traits::WeakCallbackDataType>());
}
@ -422,7 +423,7 @@ class PersistentValueVector {
*/
void Append(UniquePersistent<V> persistent) {
Traits::Append(&impl_, ClearAndLeak(&persistent));
};
}
/**
* Are there any values in the vector?
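The v8-util.h fixes are internal to PersistentValueMap's plumbing: SetReturnValueFromVal() now takes the ReturnValue by pointer, Release() guards the weak-callback cleanup with IsWeak() instead of !IsEmpty(), and stray semicolons after function bodies are gone. User code is unaffected; for context, a hedged sketch of typical use through the StdPersistentValueMap convenience class declared in this header (the cache contents are hypothetical):

#include <string>
#include "include/v8-util.h"

void CacheObject(v8::Isolate* isolate, v8::Local<v8::Object> obj) {
  v8::HandleScope scope(isolate);
  v8::StdPersistentValueMap<std::string, v8::Object> cache(isolate);
  cache.Set("config", obj);  // stored internally as a UniquePersistent
  if (cache.Contains("config")) {
    v8::Local<v8::Object> again = cache.Get("config");  // fresh Local
    (void)again;
  }
  // The persistents are disposed when |cache| goes out of scope.
}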

429
deps/v8/include/v8.h

@ -895,6 +895,13 @@ struct Maybe {
};
// Convenience wrapper.
template <class T>
inline Maybe<T> maybe(T t) {
return Maybe<T>(t);
}
// --- Special objects ---
@ -916,20 +923,24 @@ class ScriptOrigin {
Handle<Value> resource_name,
Handle<Integer> resource_line_offset = Handle<Integer>(),
Handle<Integer> resource_column_offset = Handle<Integer>(),
Handle<Boolean> resource_is_shared_cross_origin = Handle<Boolean>())
Handle<Boolean> resource_is_shared_cross_origin = Handle<Boolean>(),
Handle<Integer> script_id = Handle<Integer>())
: resource_name_(resource_name),
resource_line_offset_(resource_line_offset),
resource_column_offset_(resource_column_offset),
resource_is_shared_cross_origin_(resource_is_shared_cross_origin) { }
resource_is_shared_cross_origin_(resource_is_shared_cross_origin),
script_id_(script_id) { }
V8_INLINE Handle<Value> ResourceName() const;
V8_INLINE Handle<Integer> ResourceLineOffset() const;
V8_INLINE Handle<Integer> ResourceColumnOffset() const;
V8_INLINE Handle<Boolean> ResourceIsSharedCrossOrigin() const;
V8_INLINE Handle<Integer> ScriptID() const;
private:
Handle<Value> resource_name_;
Handle<Integer> resource_line_offset_;
Handle<Integer> resource_column_offset_;
Handle<Boolean> resource_is_shared_cross_origin_;
Handle<Integer> script_id_;
};
@ -946,6 +957,15 @@ class V8_EXPORT UnboundScript {
int GetId();
Handle<Value> GetScriptName();
/**
* Data read from magic sourceURL comments.
*/
Handle<Value> GetSourceURL();
/**
* Data read from magic sourceMappingURL comments.
*/
Handle<Value> GetSourceMappingURL();
/**
* Returns zero based line number of the code_pos location in the script.
* -1 will be returned if no information available.
@ -984,24 +1004,9 @@ class V8_EXPORT Script {
*/
Local<UnboundScript> GetUnboundScript();
// To be deprecated; use GetUnboundScript()->GetId();
int GetId() {
return GetUnboundScript()->GetId();
}
// Use GetUnboundScript()->GetId();
V8_DEPRECATED("Use GetUnboundScript()->GetId()",
Handle<Value> GetScriptName()) {
return GetUnboundScript()->GetScriptName();
}
/**
* Returns zero based line number of the code_pos location in the script.
* -1 will be returned if no information available.
*/
V8_DEPRECATED("Use GetUnboundScript()->GetLineNumber()",
int GetLineNumber(int code_pos)) {
return GetUnboundScript()->GetLineNumber(code_pos);
int GetId()) {
return GetUnboundScript()->GetId();
}
};
@ -1046,8 +1051,7 @@ class V8_EXPORT ScriptCompiler {
};
/**
* Source code which can be then compiled to a UnboundScript or
* BoundScript.
* Source code which can be then compiled to a UnboundScript or Script.
*/
class Source {
public:
@ -1077,19 +1081,31 @@ class V8_EXPORT ScriptCompiler {
Handle<Integer> resource_column_offset;
Handle<Boolean> resource_is_shared_cross_origin;
// Cached data from previous compilation (if any), or generated during
// compilation (if the generate_cached_data flag is passed to
// ScriptCompiler).
// Holds cached data from a previous compilation (if a kConsume*Cache flag
// is set), or receives newly generated cache data (if a kProduce*Cache
// flag is set) when a compile method is called.
CachedData* cached_data;
};
enum CompileOptions {
kNoCompileOptions,
kProduceDataToCache = 1 << 0
kNoCompileOptions = 0,
kProduceParserCache,
kConsumeParserCache,
kProduceCodeCache,
kConsumeCodeCache,
// Support the previous API for a transition period.
kProduceDataToCache
};
/**
* Compiles the specified script (context-independent).
* Cached data as part of the source object can be optionally produced to be
* consumed later to speed up compilation of identical source scripts.
*
* Note that when producing cached data, the source's cached_data field must
* be NULL. When consuming cached data, the cached data must have been
* produced by the same version of V8.
*
* \param source Script source code.
* \return Compiled script object (context independent; for running it must be
@ -1124,6 +1140,12 @@ class V8_EXPORT Message {
Local<String> Get() const;
Local<String> GetSourceLine() const;
/**
* Returns the origin for the script from where the function causing the
* error originates.
*/
ScriptOrigin GetScriptOrigin() const;
/**
* Returns the resource name for the script from where the function causing
* the error originates.
@ -1201,6 +1223,7 @@ class V8_EXPORT StackTrace {
kIsConstructor = 1 << 5,
kScriptNameOrSourceURL = 1 << 6,
kScriptId = 1 << 7,
kExposeFramesAcrossSecurityOrigins = 1 << 8,
kOverview = kLineNumber | kColumnOffset | kScriptName | kFunctionName,
kDetailed = kOverview | kIsEval | kIsConstructor | kScriptNameOrSourceURL
};
@ -2071,11 +2094,7 @@ typedef void (*AccessorSetterCallback)(
* accessors have an explicit access control parameter which specifies
* the kind of cross-context access that should be allowed.
*
* Additionally, for security, accessors can prohibit overwriting by
* accessors defined in JavaScript. For objects that have such
* accessors either locally or in their prototype chain it is not
* possible to overwrite the accessor by using __defineGetter__ or
* __defineSetter__ from JavaScript code.
* TODO(dcarney): Remove PROHIBITS_OVERWRITING as it is now unused.
*/
enum AccessControl {
DEFAULT = 0,
@ -2090,13 +2109,11 @@ enum AccessControl {
*/
class V8_EXPORT Object : public Value {
public:
bool Set(Handle<Value> key,
Handle<Value> value,
PropertyAttribute attribs = None);
bool Set(Handle<Value> key, Handle<Value> value);
bool Set(uint32_t index, Handle<Value> value);
// Sets a local property on this object bypassing interceptors and
// Sets an own property on this object bypassing interceptors and
// overriding accessors or read-only properties.
//
// Note that if the object has an interceptor the property will be set
@ -2119,6 +2136,11 @@ class V8_EXPORT Object : public Value {
*/
PropertyAttribute GetPropertyAttributes(Handle<Value> key);
/**
* Returns Object.getOwnPropertyDescriptor as per ES5 section 15.2.3.3.
*/
Local<Value> GetOwnPropertyDescriptor(Local<String> key);
bool Has(Handle<Value> key);
bool Delete(Handle<Value> key);
@ -2203,12 +2225,6 @@ class V8_EXPORT Object : public Value {
*/
Local<String> ObjectProtoToString();
/**
* Returns the function invoked as a constructor for this object.
* May be the null value.
*/
Local<Value> GetConstructor();
/**
* Returns the name of the function invoked as a constructor for this object.
*/
@ -2429,6 +2445,10 @@ class ReturnValue {
// Convenience getter for Isolate
V8_INLINE Isolate* GetIsolate();
// Pointer setter: Uncompilable to prevent inadvertent misuse.
template <typename S>
V8_INLINE void Set(S* whatever);
private:
template<class F> friend class ReturnValue;
template<class F> friend class FunctionCallbackInfo;
@ -2629,6 +2649,7 @@ class V8_EXPORT Promise : public Object {
*/
Local<Promise> Chain(Handle<Function> handler);
Local<Promise> Catch(Handle<Function> handler);
Local<Promise> Then(Handle<Function> handler);
V8_INLINE static Promise* Cast(Value* obj);
@ -3865,8 +3886,8 @@ class V8_EXPORT ResourceConstraints {
uint64_t virtual_memory_limit,
uint32_t number_of_processors);
int max_new_space_size() const { return max_new_space_size_; }
void set_max_new_space_size(int value) { max_new_space_size_ = value; }
int max_semi_space_size() const { return max_semi_space_size_; }
void set_max_semi_space_size(int value) { max_semi_space_size_ = value; }
int max_old_space_size() const { return max_old_space_size_; }
void set_max_old_space_size(int value) { max_old_space_size_ = value; }
int max_executable_size() const { return max_executable_size_; }
@ -3879,18 +3900,18 @@ class V8_EXPORT ResourceConstraints {
void set_max_available_threads(int value) {
max_available_threads_ = value;
}
int code_range_size() const { return code_range_size_; }
void set_code_range_size(int value) {
size_t code_range_size() const { return code_range_size_; }
void set_code_range_size(size_t value) {
code_range_size_ = value;
}
private:
int max_new_space_size_;
int max_semi_space_size_;
int max_old_space_size_;
int max_executable_size_;
uint32_t* stack_limit_;
int max_available_threads_;
int code_range_size_;
size_t code_range_size_;
};
@ -3965,6 +3986,9 @@ typedef void (*MemoryAllocationCallback)(ObjectSpace space,
// --- Leave Script Callback ---
typedef void (*CallCompletedCallback)();
// --- Microtask Callback ---
typedef void (*MicrotaskCallback)(void* data);
// --- Failed Access Check Callback ---
typedef void (*FailedAccessCheckCallback)(Local<Object> target,
AccessType type,
@ -4133,6 +4157,20 @@ class V8_EXPORT Isolate {
kMinorGarbageCollection
};
/**
* Features reported via the SetUseCounterCallback callback. Do not change
* assigned numbers of existing items; add new features to the end of this
* list.
*/
enum UseCounterFeature {
kUseAsm = 0,
kUseCounterFeatureCount // This enum value must be last.
};
typedef void (*UseCounterCallback)(Isolate* isolate,
UseCounterFeature feature);
/**
* Creates a new isolate. Does not change the currently entered
* isolate.
@ -4211,7 +4249,8 @@ class V8_EXPORT Isolate {
* kept alive by JavaScript objects.
* \returns the adjusted value.
*/
int64_t AdjustAmountOfExternalAllocatedMemory(int64_t change_in_bytes);
V8_INLINE int64_t
AdjustAmountOfExternalAllocatedMemory(int64_t change_in_bytes);
/**
* Returns heap profiler for this isolate. Will return NULL until the isolate
@ -4375,6 +4414,7 @@ class V8_EXPORT Isolate {
/**
* Experimental: Runs the Microtask Work Queue until empty
* Any exceptions thrown by microtask callbacks are swallowed.
*/
void RunMicrotasks();
@ -4383,12 +4423,71 @@ class V8_EXPORT Isolate {
*/
void EnqueueMicrotask(Handle<Function> microtask);
/**
* Experimental: Enqueues the callback to the Microtask Work Queue
*/
void EnqueueMicrotask(MicrotaskCallback microtask, void* data = NULL);
/**
* Experimental: Controls whether the Microtask Work Queue is automatically
* run when the script call depth decrements to zero.
*/
void SetAutorunMicrotasks(bool autorun);
/**
* Experimental: Returns whether the Microtask Work Queue is automatically
* run when the script call depth decrements to zero.
*/
bool WillAutorunMicrotasks() const;
/**
* Sets a callback for counting the number of times a feature of V8 is used.
*/
void SetUseCounterCallback(UseCounterCallback callback);
/**
* Enables the host application to provide a mechanism for recording
* statistics counters.
*/
void SetCounterFunction(CounterLookupCallback);
/**
* Enables the host application to provide a mechanism for recording
* histograms. The CreateHistogram function returns a
* histogram which will later be passed to the AddHistogramSample
* function.
*/
void SetCreateHistogramFunction(CreateHistogramCallback);
void SetAddHistogramSampleFunction(AddHistogramSampleCallback);
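A hedged sketch of a counter lookup function; the map is embedder-owned and must outlive the isolate (std::map is chosen because its element references stay stable across insertions, so the pointer handed to V8 remains valid):

#include <map>
#include <string>

static std::map<std::string, int> counters;  // must outlive the isolate

static int* LookupCounter(const char* name) {
  return &counters[name];  // V8 increments through this pointer
}

// After creating the isolate:
//   isolate->SetCounterFunction(LookupCounter);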
/**
* Optional notification that the embedder is idle.
* V8 uses the notification to reduce memory footprint.
* This call can be used repeatedly if the embedder remains idle.
* Returns true if the embedder should stop calling IdleNotification
* until real work has been done. This indicates that V8 has done
* as much cleanup as it will be able to do.
*
* The idle_time_in_ms argument specifies the time V8 has to reduce
* the memory footprint. There is no guarantee that the actual work will be
* done within the time limit.
*/
bool IdleNotification(int idle_time_in_ms);
/**
* Optional notification that the system is running low on memory.
* V8 uses these notifications to attempt to free memory.
*/
void LowMemoryNotification();
/**
* Optional notification that a context has been disposed. V8 uses
* these notifications to guide the GC heuristic. Returns the number
* of context disposals - including this one - since the last time
* V8 had a chance to clean up.
*/
int ContextDisposedNotification();
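A sketch of how an embedder's event loop might wire up these three notifications; the 10 ms budget and the hook names are assumptions:

void OnFrameIdle(v8::Isolate* isolate) {       // hypothetical per-frame hook
  while (!isolate->IdleNotification(10)) {
    // false: V8 still has cleanup queued, keep offering idle time.
  }
}

void OnMemoryPressure(v8::Isolate* isolate) {  // hypothetical OS signal hook
  isolate->LowMemoryNotification();
}

void OnContextTornDown(v8::Isolate* isolate) {
  int disposals = isolate->ContextDisposedNotification();
  (void)disposals;  // feeds the GC heuristic; the count is informational
}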
private:
template<class K, class V, class Traits> friend class PersistentValueMap;
@ -4402,6 +4501,7 @@ class V8_EXPORT Isolate {
void SetObjectGroupId(internal::Object** object, UniqueId id);
void SetReferenceFromGroup(UniqueId id, internal::Object** object);
void SetReference(internal::Object** parent, internal::Object** child);
void CollectAllGarbage(const char* gc_reason);
};
class V8_EXPORT StartupData {
@ -4512,7 +4612,7 @@ struct JitCodeEvent {
// Size of the instructions.
size_t code_len;
// Script info for CODE_ADDED event.
Handle<Script> script;
Handle<UnboundScript> script;
// User-defined data for *_LINE_INFO_* event. It's used to hold the source
// code line information which is returned from the
// CODE_START_LINE_INFO_RECORDING event, and it's passed to subsequent
@ -4640,6 +4740,24 @@ class V8_EXPORT V8 {
static void GetCompressedStartupData(StartupData* compressed_data);
static void SetDecompressedStartupData(StartupData* decompressed_data);
/**
* Hand startup data to V8, in case the embedder has chosen to build
* V8 with external startup data.
*
* Note:
* - By default the startup data is linked into the V8 library, in which
* case this function is not meaningful.
* - If this needs to be called, it needs to be called before V8
* tries to make use of its built-ins.
* - To avoid unnecessary copies of data, V8 will point directly into the
* given data blob, so please keep it around until V8 exits.
* - Compression of the startup blob might be useful, but needs to be
* handled entirely on the embedder's side.
* - The call will abort if the data is invalid.
*/
static void SetNativesDataBlob(StartupData* startup_blob);
static void SetSnapshotDataBlob(StartupData* startup_blob);
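A hedged sketch of wiring up external startup data; ReadWholeFile is a hypothetical helper, and the field names (data, raw_size) are assumed from the StartupData struct declared later in this header:

// The buffers returned by ReadWholeFile must stay alive until V8 shuts
// down, since V8 points directly into them.
extern const char* ReadWholeFile(const char* path, int* size_out);

void LoadStartupBlobs() {
  static v8::StartupData natives;    // static: must outlive V8
  static v8::StartupData snapshot;
  natives.data = ReadWholeFile("natives_blob.bin", &natives.raw_size);
  snapshot.data = ReadWholeFile("snapshot_blob.bin", &snapshot.raw_size);
  v8::V8::SetNativesDataBlob(&natives);    // before V8 touches built-ins
  v8::V8::SetSnapshotDataBlob(&snapshot);
}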
/**
* Adds a message listener.
*
@ -4681,21 +4799,6 @@ class V8_EXPORT V8 {
/** Get the version string. */
static const char* GetVersion();
/**
* Enables the host application to provide a mechanism for recording
* statistics counters.
*/
static void SetCounterFunction(CounterLookupCallback);
/**
* Enables the host application to provide a mechanism for recording
* histograms. The CreateHistogram function returns a
* histogram which will later be passed to the AddHistogramSample
* function.
*/
static void SetCreateHistogramFunction(CreateHistogramCallback);
static void SetAddHistogramSampleFunction(AddHistogramSampleCallback);
/** Callback function for reporting failed access checks.*/
static void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback);
@ -4750,28 +4853,6 @@ class V8_EXPORT V8 {
*/
static void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
/**
* Experimental: Runs the Microtask Work Queue until empty
*
* Deprecated: Use methods on Isolate instead.
*/
static void RunMicrotasks(Isolate* isolate);
/**
* Experimental: Enqueues the callback to the Microtask Work Queue
*
* Deprecated: Use methods on Isolate instead.
*/
static void EnqueueMicrotask(Isolate* isolate, Handle<Function> microtask);
/**
* Experimental: Controls whether the Microtask Work Queue is automatically
* run when the script call depth decrements to zero.
*
* Deprecated: Use methods on Isolate instead.
*/
static void SetAutorunMicrotasks(Isolate *source, bool autorun);
/**
* Initializes from snapshot if possible. Otherwise, attempts to
* initialize from scratch. This function is called implicitly if
@ -4906,34 +4987,6 @@ class V8_EXPORT V8 {
static void VisitHandlesForPartialDependence(
Isolate* isolate, PersistentHandleVisitor* visitor);
/**
* Optional notification that the embedder is idle.
* V8 uses the notification to reduce memory footprint.
* This call can be used repeatedly if the embedder remains idle.
* Returns true if the embedder should stop calling IdleNotification
* until real work has been done. This indicates that V8 has done
* as much cleanup as it will be able to do.
*
* The hint argument specifies the amount of work to be done in the function
* on a scale from 1 to 1000. There is no guarantee that the actual work will
* match the hint.
*/
static bool IdleNotification(int hint = 1000);
/**
* Optional notification that the system is running low on memory.
* V8 uses these notifications to attempt to free memory.
*/
static void LowMemoryNotification();
/**
* Optional notification that a context has been disposed. V8 uses
* these notifications to guide the GC heuristic. Returns the number
* of context disposals - including this one - since the last time
* V8 had a chance to clean up.
*/
static int ContextDisposedNotification();
/**
* Initialize the ICU library bundled with V8. The embedder should only
* invoke this method when using the bundled ICU. Returns true on success.
@ -5061,7 +5114,8 @@ class V8_EXPORT TryCatch {
/**
* Clears any exceptions that may have been caught by this try/catch block.
* After this method has been called, HasCaught() will return false.
* After this method has been called, HasCaught() will return false. Cancels
* the scheduled exception if it was caught and ReThrow() has not been called.
*
* It is not necessary to clear a try/catch block before using it again; if
* another exception is thrown the previously caught exception will just be
@ -5087,7 +5141,25 @@ class V8_EXPORT TryCatch {
*/
void SetCaptureMessage(bool value);
/**
* There are cases when the raw address of a C++ TryCatch object cannot be
* used for comparisons with addresses into the JS stack. The cases are:
* 1) ARM, ARM64 and MIPS simulators, which keep a separate JS stack.
* 2) AddressSanitizer allocates the local C++ object on the heap when
* UseAfterReturn mode is enabled.
* This method returns an address that can be used for comparisons with
* addresses into the JS stack. When neither the simulator nor ASan's
* UseAfterReturn is enabled, the address returned is the address of the
* C++ TryCatch handler itself.
*/
static void* JSStackComparableAddress(v8::TryCatch* handler) {
if (handler == NULL) return NULL;
return handler->js_stack_comparable_address_;
}
private:
void ResetInternal();
// Make it hard to create heap-allocated TryCatch blocks.
TryCatch(const TryCatch&);
void operator=(const TryCatch&);
@ -5095,10 +5167,11 @@ class V8_EXPORT TryCatch {
void operator delete(void*, size_t);
v8::internal::Isolate* isolate_;
void* next_;
v8::TryCatch* next_;
void* exception_;
void* message_obj_;
void* message_script_;
void* js_stack_comparable_address_;
int message_start_pos_;
int message_end_pos_;
bool is_verbose_ : 1;
@ -5208,14 +5281,6 @@ class V8_EXPORT Context {
*/
void Exit();
/**
* Returns true if the context has experienced an out of memory situation.
* Since V8 always treats OOM as a fatal error, this can no longer return true.
* Therefore this is now deprecated.
* */
V8_DEPRECATED("This can no longer happen. OOM is a fatal error.",
bool HasOutOfMemoryException()) { return false; }
/** Returns an isolate associated with a current context. */
v8::Isolate* GetIsolate();
@ -5435,6 +5500,7 @@ namespace internal {
const int kApiPointerSize = sizeof(void*); // NOLINT
const int kApiIntSize = sizeof(int); // NOLINT
const int kApiInt64Size = sizeof(int64_t); // NOLINT
// Tag information for HeapObject.
const int kHeapObjectTag = 1;
@ -5460,7 +5526,7 @@ V8_INLINE internal::Object* IntToSmi(int value) {
template <> struct SmiTagging<4> {
static const int kSmiShiftSize = 0;
static const int kSmiValueSize = 31;
V8_INLINE static int SmiToInt(internal::Object* value) {
V8_INLINE static int SmiToInt(const internal::Object* value) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
// Throw away top 32 bits and shift down (requires >> to be sign extending).
return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> shift_bits;
@ -5488,7 +5554,7 @@ template <> struct SmiTagging<4> {
template <> struct SmiTagging<8> {
static const int kSmiShiftSize = 31;
static const int kSmiValueSize = 32;
V8_INLINE static int SmiToInt(internal::Object* value) {
V8_INLINE static int SmiToInt(const internal::Object* value) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
// Shift down and throw away top 32 bits.
return static_cast<int>(reinterpret_cast<intptr_t>(value) >> shift_bits);
@ -5518,7 +5584,8 @@ class Internals {
// These values match non-compiler-dependent values defined within
// the implementation of v8.
static const int kHeapObjectMapOffset = 0;
static const int kMapInstanceTypeOffset = 1 * kApiPointerSize + kApiIntSize;
static const int kMapInstanceTypeAndBitFieldOffset =
1 * kApiPointerSize + kApiIntSize;
static const int kStringResourceOffset = 3 * kApiPointerSize;
static const int kOddballKindOffset = 3 * kApiPointerSize;
@ -5526,19 +5593,29 @@ class Internals {
static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
static const int kContextHeaderSize = 2 * kApiPointerSize;
static const int kContextEmbedderDataIndex = 74;
static const int kContextEmbedderDataIndex = 95;
static const int kFullStringRepresentationMask = 0x07;
static const int kStringEncodingMask = 0x4;
static const int kExternalTwoByteRepresentationTag = 0x02;
static const int kExternalAsciiRepresentationTag = 0x06;
static const int kIsolateEmbedderDataOffset = 0 * kApiPointerSize;
static const int kIsolateRootsOffset = 5 * kApiPointerSize;
static const int kAmountOfExternalAllocatedMemoryOffset =
4 * kApiPointerSize;
static const int kAmountOfExternalAllocatedMemoryAtLastGlobalGCOffset =
kAmountOfExternalAllocatedMemoryOffset + kApiInt64Size;
static const int kIsolateRootsOffset =
kAmountOfExternalAllocatedMemoryAtLastGlobalGCOffset + kApiInt64Size +
kApiPointerSize;
static const int kUndefinedValueRootIndex = 5;
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
static const int kEmptyStringRootIndex = 162;
static const int kEmptyStringRootIndex = 164;
// The external allocation limit should be below 256 MB on all architectures
// so that resource-constrained embedders do not run low on memory.
static const int kExternalAllocationLimit = 192 * 1024 * 1024;
static const int kNodeClassIdOffset = 1 * kApiPointerSize;
static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
@ -5549,10 +5626,10 @@ class Internals {
static const int kNodeIsIndependentShift = 4;
static const int kNodeIsPartiallyDependentShift = 5;
static const int kJSObjectType = 0xbb;
static const int kJSObjectType = 0xbc;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
static const int kForeignType = 0x88;
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
@ -5566,12 +5643,12 @@ class Internals {
#endif
}
V8_INLINE static bool HasHeapObjectTag(internal::Object* value) {
V8_INLINE static bool HasHeapObjectTag(const internal::Object* value) {
return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
kHeapObjectTag);
}
V8_INLINE static int SmiValue(internal::Object* value) {
V8_INLINE static int SmiValue(const internal::Object* value) {
return PlatformSmiTagging::SmiToInt(value);
}
@ -5583,13 +5660,15 @@ class Internals {
return PlatformSmiTagging::IsValidSmi(value);
}
V8_INLINE static int GetInstanceType(internal::Object* obj) {
V8_INLINE static int GetInstanceType(const internal::Object* obj) {
typedef internal::Object O;
O* map = ReadField<O*>(obj, kHeapObjectMapOffset);
return ReadField<uint8_t>(map, kMapInstanceTypeOffset);
// Map::InstanceType is defined so that it will always be loaded into
// the least-significant 8 bits of one 16-bit word, regardless of endianness.
return ReadField<uint16_t>(map, kMapInstanceTypeAndBitFieldOffset) & 0xff;
}
V8_INLINE static int GetOddballKind(internal::Object* obj) {
V8_INLINE static int GetOddballKind(const internal::Object* obj) {
typedef internal::Object O;
return SmiValue(ReadField<O*>(obj, kOddballKindOffset));
}
@ -5622,18 +5701,19 @@ class Internals {
*addr = static_cast<uint8_t>((*addr & ~kNodeStateMask) | value);
}
V8_INLINE static void SetEmbedderData(v8::Isolate *isolate,
V8_INLINE static void SetEmbedderData(v8::Isolate* isolate,
uint32_t slot,
void *data) {
void* data) {
uint8_t *addr = reinterpret_cast<uint8_t *>(isolate) +
kIsolateEmbedderDataOffset + slot * kApiPointerSize;
*reinterpret_cast<void**>(addr) = data;
}
V8_INLINE static void* GetEmbedderData(v8::Isolate* isolate, uint32_t slot) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
V8_INLINE static void* GetEmbedderData(const v8::Isolate* isolate,
uint32_t slot) {
const uint8_t* addr = reinterpret_cast<const uint8_t*>(isolate) +
kIsolateEmbedderDataOffset + slot * kApiPointerSize;
return *reinterpret_cast<void**>(addr);
return *reinterpret_cast<void* const*>(addr);
}
V8_INLINE static internal::Object** GetRoot(v8::Isolate* isolate,
@ -5642,16 +5722,18 @@ class Internals {
return reinterpret_cast<internal::Object**>(addr + index * kApiPointerSize);
}
template <typename T> V8_INLINE static T ReadField(Object* ptr, int offset) {
uint8_t* addr = reinterpret_cast<uint8_t*>(ptr) + offset - kHeapObjectTag;
return *reinterpret_cast<T*>(addr);
template <typename T>
V8_INLINE static T ReadField(const internal::Object* ptr, int offset) {
const uint8_t* addr =
reinterpret_cast<const uint8_t*>(ptr) + offset - kHeapObjectTag;
return *reinterpret_cast<const T*>(addr);
}
template <typename T>
V8_INLINE static T ReadEmbedderData(Context* context, int index) {
V8_INLINE static T ReadEmbedderData(const v8::Context* context, int index) {
typedef internal::Object O;
typedef internal::Internals I;
O* ctx = *reinterpret_cast<O**>(context);
O* ctx = *reinterpret_cast<O* const*>(context);
int embedder_data_offset = I::kContextHeaderSize +
(internal::kApiPointerSize * I::kContextEmbedderDataIndex);
O* embedder_data = I::ReadField<O*>(ctx, embedder_data_offset);
@ -5659,14 +5741,6 @@ class Internals {
I::kFixedArrayHeaderSize + (internal::kApiPointerSize * index);
return I::ReadField<T>(embedder_data, value_offset);
}
V8_INLINE static bool CanCastToHeapObject(void* o) { return false; }
V8_INLINE static bool CanCastToHeapObject(Context* o) { return true; }
V8_INLINE static bool CanCastToHeapObject(String* o) { return true; }
V8_INLINE static bool CanCastToHeapObject(Object* o) { return true; }
V8_INLINE static bool CanCastToHeapObject(Message* o) { return true; }
V8_INLINE static bool CanCastToHeapObject(StackTrace* o) { return true; }
V8_INLINE static bool CanCastToHeapObject(StackFrame* o) { return true; }
};
} // namespace internal
@ -5973,6 +6047,13 @@ Isolate* ReturnValue<T>::GetIsolate() {
return *reinterpret_cast<Isolate**>(&value_[-2]);
}
template<typename T>
template<typename S>
void ReturnValue<T>::Set(S* whatever) {
// Uncompilable to prevent inadvertent misuse.
TYPE_CHECK(S*, Primitive);
}
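The pointer overload above is deliberately impossible to instantiate: TYPE_CHECK(S*, Primitive) can never be satisfied, so any attempt to pass a raw pointer to ReturnValue::Set() is rejected at compile time instead of being silently reinterpreted as a handle.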
template<typename T>
internal::Object* ReturnValue<T>::GetDefaultValue() {
// Default value is always the pointer below value_ on the stack.
@ -6062,11 +6143,17 @@ Handle<Integer> ScriptOrigin::ResourceColumnOffset() const {
return resource_column_offset_;
}
Handle<Boolean> ScriptOrigin::ResourceIsSharedCrossOrigin() const {
return resource_is_shared_cross_origin_;
}
Handle<Integer> ScriptOrigin::ScriptID() const {
return script_id_;
}
ScriptCompiler::Source::Source(Local<String> string, const ScriptOrigin& origin,
CachedData* data)
: source_string(string),
@ -6158,7 +6245,7 @@ Local<String> String::Empty(Isolate* isolate) {
String::ExternalStringResource* String::GetExternalStringResource() const {
typedef internal::Object O;
typedef internal::Internals I;
O* obj = *reinterpret_cast<O**>(const_cast<String*>(this));
O* obj = *reinterpret_cast<O* const*>(this);
String::ExternalStringResource* result;
if (I::IsExternalTwoByteString(I::GetInstanceType(obj))) {
void* value = I::ReadField<void*>(obj, I::kStringResourceOffset);
@ -6177,7 +6264,7 @@ String::ExternalStringResourceBase* String::GetExternalStringResourceBase(
String::Encoding* encoding_out) const {
typedef internal::Object O;
typedef internal::Internals I;
O* obj = *reinterpret_cast<O**>(const_cast<String*>(this));
O* obj = *reinterpret_cast<O* const*>(this);
int type = I::GetInstanceType(obj) & I::kFullStringRepresentationMask;
*encoding_out = static_cast<Encoding>(type & I::kStringEncodingMask);
ExternalStringResourceBase* resource = NULL;
@ -6204,7 +6291,7 @@ bool Value::IsUndefined() const {
bool Value::QuickIsUndefined() const {
typedef internal::Object O;
typedef internal::Internals I;
O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
O* obj = *reinterpret_cast<O* const*>(this);
if (!I::HasHeapObjectTag(obj)) return false;
if (I::GetInstanceType(obj) != I::kOddballType) return false;
return (I::GetOddballKind(obj) == I::kUndefinedOddballKind);
@ -6222,7 +6309,7 @@ bool Value::IsNull() const {
bool Value::QuickIsNull() const {
typedef internal::Object O;
typedef internal::Internals I;
O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
O* obj = *reinterpret_cast<O* const*>(this);
if (!I::HasHeapObjectTag(obj)) return false;
if (I::GetInstanceType(obj) != I::kOddballType) return false;
return (I::GetOddballKind(obj) == I::kNullOddballKind);
@ -6240,7 +6327,7 @@ bool Value::IsString() const {
bool Value::QuickIsString() const {
typedef internal::Object O;
typedef internal::Internals I;
O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
O* obj = *reinterpret_cast<O* const*>(this);
if (!I::HasHeapObjectTag(obj)) return false;
return (I::GetInstanceType(obj) < I::kFirstNonstringType);
}
@ -6559,6 +6646,28 @@ uint32_t Isolate::GetNumberOfDataSlots() {
}
int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
int64_t change_in_bytes) {
typedef internal::Internals I;
int64_t* amount_of_external_allocated_memory =
reinterpret_cast<int64_t*>(reinterpret_cast<uint8_t*>(this) +
I::kAmountOfExternalAllocatedMemoryOffset);
int64_t* amount_of_external_allocated_memory_at_last_global_gc =
reinterpret_cast<int64_t*>(
reinterpret_cast<uint8_t*>(this) +
I::kAmountOfExternalAllocatedMemoryAtLastGlobalGCOffset);
int64_t amount = *amount_of_external_allocated_memory + change_in_bytes;
if (change_in_bytes > 0 &&
amount - *amount_of_external_allocated_memory_at_last_global_gc >
I::kExternalAllocationLimit) {
CollectAllGarbage("external memory allocation limit reached.");
} else {
*amount_of_external_allocated_memory = amount;
}
return *amount_of_external_allocated_memory;
}
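A typical usage sketch for the accounting above, with an illustrative 1 MiB external backing store:

void WrapExternalBuffer(v8::Isolate* isolate) {  // hypothetical example
  const int64_t kExternalSize = 1024 * 1024;     // 1 MiB off-heap buffer
  char* backing_store = new char[kExternalSize];
  // Let the GC account for the off-heap allocation:
  isolate->AdjustAmountOfExternalAllocatedMemory(kExternalSize);

  // ... later, when the wrapping JS object dies ...
  delete[] backing_store;
  isolate->AdjustAmountOfExternalAllocatedMemory(-kExternalSize);
}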
template<typename T>
void Isolate::SetObjectGroupId(const Persistent<T>& object,
UniqueId id) {

80
deps/v8/samples/lineprocessor.cc

@ -25,14 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <v8.h>
#include <include/v8.h>
#include <v8-debug.h>
#include <include/libplatform/libplatform.h>
#include <include/v8-debug.h>
#include <fcntl.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/**
* This sample program should demonstrate certain aspects of debugging
@ -69,25 +70,6 @@ while (true) {
var res = line + " | " + line;
print(res);
}
*
* When run with the "-p" argument, the program starts the V8 Debugger Agent
* and allows a remote debugger to attach and debug JavaScript code.
*
* Interesting aspects:
* 1. Wait for the remote debugger to attach
* Normally the program compiles the custom script and immediately runs it.
* If the programmer needs to debug the script from the very beginning, they
* should run this sample with the "--wait-for-connection" command-line
* parameter. This way V8 suspends on the first statement and waits for the
* debugger to attach.
*
* 2. Unresponsive V8
* The V8 Debugger Agent holds a connection with the remote debugger, but it
* responds only while V8 is running some script. In particular, when this
* program is waiting for input, all requests from the debugger are deferred
* until V8 is called again. See how the "--callback" command-line parameter
* in this sample fixes this issue.
*/
enum MainCycleType {
@ -109,41 +91,16 @@ bool RunCppCycle(v8::Handle<v8::Script> script,
v8::Persistent<v8::Context> debug_message_context;
void DispatchDebugMessages() {
// We are in some random thread. We should already have v8::Locker acquired
// (we requested this when registering this callback). We were called
// because new debug messages arrived; they may have already been processed,
// but we shouldn't worry about this.
//
// All we have to do is set the context and call ProcessDebugMessages.
//
// We should decide which V8 context to use here. This is important for the
// "evaluate" command, because it must be executed in some context.
// In our sample we have only one context, so there is nothing really to
// think about.
v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::HandleScope handle_scope(isolate);
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate, debug_message_context);
v8::Context::Scope scope(context);
v8::Debug::ProcessDebugMessages();
}
int RunMain(int argc, char* argv[]) {
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::Isolate* isolate = v8::Isolate::New();
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::String> script_source;
v8::Handle<v8::Value> script_name;
int script_param_counter = 0;
int port_number = -1;
bool wait_for_connection = false;
bool support_callback = false;
MainCycleType cycle_type = CycleInCpp;
for (int i = 1; i < argc; i++) {
@ -156,13 +113,6 @@ int RunMain(int argc, char* argv[]) {
cycle_type = CycleInCpp;
} else if (strcmp(str, "--main-cycle-in-js") == 0) {
cycle_type = CycleInJs;
} else if (strcmp(str, "--callback") == 0) {
support_callback = true;
} else if (strcmp(str, "--wait-for-connection") == 0) {
wait_for_connection = true;
} else if (strcmp(str, "-p") == 0 && i + 1 < argc) {
port_number = atoi(argv[i + 1]); // NOLINT
i++;
} else if (strncmp(str, "--", 2) == 0) {
printf("Warning: unknown flag %s.\nTry --help for options\n", str);
} else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
@ -212,16 +162,6 @@ int RunMain(int argc, char* argv[]) {
debug_message_context.Reset(isolate, context);
v8::Locker locker(isolate);
if (support_callback) {
v8::Debug::SetDebugMessageDispatchHandler(DispatchDebugMessages, true);
}
if (port_number != -1) {
v8::Debug::EnableAgent("lineprocessor", port_number, wait_for_connection);
}
bool report_exceptions = true;
v8::Handle<v8::Script> script;
@ -265,7 +205,6 @@ bool RunCppCycle(v8::Handle<v8::Script> script,
v8::Local<v8::Context> context,
bool report_exceptions) {
v8::Isolate* isolate = context->GetIsolate();
v8::Locker lock(isolate);
v8::Handle<v8::String> fun_name =
v8::String::NewFromUtf8(isolate, "ProcessLine");
@ -316,8 +255,12 @@ bool RunCppCycle(v8::Handle<v8::Script> script,
int main(int argc, char* argv[]) {
v8::V8::InitializeICU();
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
int result = RunMain(argc, argv);
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
delete platform;
return result;
}
@ -362,7 +305,7 @@ void ReportException(v8::Isolate* isolate, v8::TryCatch* try_catch) {
printf("%s\n", exception_string);
} else {
// Print (filename):(line number): (message).
v8::String::Utf8Value filename(message->GetScriptResourceName());
v8::String::Utf8Value filename(message->GetScriptOrigin().ResourceName());
const char* filename_string = ToCString(filename);
int linenum = message->GetLineNumber();
printf("%s:%i: %s\n", filename_string, linenum, exception_string);
@ -423,7 +366,6 @@ v8::Handle<v8::String> ReadLine() {
char* res;
{
v8::Unlocker unlocker(v8::Isolate::GetCurrent());
res = fgets(buffer, kBufferSize, stdin);
}
v8::Isolate* isolate = v8::Isolate::GetCurrent();

17
deps/v8/samples/process.cc

@ -25,10 +25,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <v8.h>
#include <include/v8.h>
#include <include/libplatform/libplatform.h>
#include <string>
#include <map>
#include <string>
#ifdef COMPRESS_STARTUP_DATA_BZ2
#error Using compressed startup data is not supported for this sample
@ -574,7 +576,7 @@ StringHttpRequest::StringHttpRequest(const string& path,
void ParseOptions(int argc,
char* argv[],
map<string, string>& options,
map<string, string>* options,
string* file) {
for (int i = 1; i < argc; i++) {
string arg = argv[i];
@ -584,7 +586,7 @@ void ParseOptions(int argc,
} else {
string key = arg.substr(0, index);
string value = arg.substr(index+1);
options[key] = value;
(*options)[key] = value;
}
}
}
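Passing options by pointer rather than by non-const reference follows the Google C++ style used throughout V8: mutation is now visible at the call site, as the updated call further down shows.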
@ -644,14 +646,17 @@ void PrintMap(map<string, string>* m) {
int main(int argc, char* argv[]) {
v8::V8::InitializeICU();
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
map<string, string> options;
string file;
ParseOptions(argc, argv, options, &file);
ParseOptions(argc, argv, &options, &file);
if (file.empty()) {
fprintf(stderr, "No script was specified.\n");
return 1;
}
Isolate* isolate = Isolate::GetCurrent();
Isolate* isolate = Isolate::New();
Isolate::Scope isolate_scope(isolate);
HandleScope scope(isolate);
Handle<String> source = ReadFile(isolate, file);
if (source.IsEmpty()) {

3
deps/v8/samples/samples.gyp

@ -35,9 +35,10 @@
'type': 'executable',
'dependencies': [
'../tools/gyp/v8.gyp:v8',
'../tools/gyp/v8.gyp:v8_libplatform',
],
'include_dirs': [
'../include',
'..',
],
'conditions': [
['v8_enable_i18n_support==1', {

32
deps/v8/samples/shell.cc

@ -25,12 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <v8.h>
#include <include/v8.h>
#include <include/libplatform/libplatform.h>
#include <assert.h>
#include <fcntl.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef COMPRESS_STARTUP_DATA_BZ2
#error Using compressed startup data is not supported for this sample
@ -65,25 +68,42 @@ void ReportException(v8::Isolate* isolate, v8::TryCatch* handler);
static bool run_shell;
class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
virtual void* Allocate(size_t length) {
void* data = AllocateUninitialized(length);
return data == NULL ? data : memset(data, 0, length);
}
virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
virtual void Free(void* data, size_t) { free(data); }
};
int main(int argc, char* argv[]) {
v8::V8::InitializeICU();
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::Isolate* isolate = v8::Isolate::GetCurrent();
ShellArrayBufferAllocator array_buffer_allocator;
v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
v8::Isolate* isolate = v8::Isolate::New();
run_shell = (argc == 1);
int result;
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::Context> context = CreateShellContext(isolate);
if (context.IsEmpty()) {
fprintf(stderr, "Error creating context\n");
return 1;
}
context->Enter();
v8::Context::Scope context_scope(context);
result = RunMain(isolate, argc, argv);
if (run_shell) RunShell(context);
context->Exit();
}
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
delete platform;
return result;
}
@ -345,7 +365,7 @@ void ReportException(v8::Isolate* isolate, v8::TryCatch* try_catch) {
fprintf(stderr, "%s\n", exception_string);
} else {
// Print (filename):(line number): (message).
v8::String::Utf8Value filename(message->GetScriptResourceName());
v8::String::Utf8Value filename(message->GetScriptOrigin().ResourceName());
const char* filename_string = ToCString(filename);
int linenum = message->GetLineNumber();
fprintf(stderr, "%s:%i: %s\n", filename_string, linenum, exception_string);

13
deps/v8/src/DEPS

@ -0,0 +1,13 @@
include_rules = [
"+src",
"-src/compiler",
"+src/compiler/pipeline.h",
"-src/libplatform",
"-include/libplatform",
]
specific_include_rules = {
"(mksnapshot|d8)\.cc": [
"+include/libplatform/libplatform.h",
],
}
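In Chromium-style DEPS files like this one, a leading '+' allows an include path and a leading '-' forbids it, with more specific rules overriding broader ones; the keys of specific_include_rules are filename regexes. So code under src/ may include src/compiler/pipeline.h but nothing else from src/compiler, and only mksnapshot.cc and d8.cc may reach into include/libplatform.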

368
deps/v8/src/accessors.cc

@ -2,34 +2,25 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h"
#include "accessors.h"
#include "compiler.h"
#include "contexts.h"
#include "deoptimizer.h"
#include "execution.h"
#include "factory.h"
#include "frames-inl.h"
#include "isolate.h"
#include "list-inl.h"
#include "property-details.h"
#include "api.h"
#include "src/v8.h"
#include "src/accessors.h"
#include "src/api.h"
#include "src/compiler.h"
#include "src/contexts.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/factory.h"
#include "src/frames-inl.h"
#include "src/isolate.h"
#include "src/list-inl.h"
#include "src/property-details.h"
#include "src/prototype.h"
namespace v8 {
namespace internal {
// We have a slight impedance mismatch between the external API and the way we
// use callbacks internally: Externally, callbacks can only be used with
// v8::Object, but internally we even have callbacks on entities which are
// higher in the hierarchy, so we can only return i::Object here, not
// i::JSObject.
Handle<Object> GetThisFrom(const v8::PropertyCallbackInfo<v8::Value>& info) {
return Utils::OpenHandle(*v8::Local<v8::Value>(info.This()));
}
Handle<AccessorInfo> Accessors::MakeAccessor(
Isolate* isolate,
Handle<String> name,
@ -41,7 +32,6 @@ Handle<AccessorInfo> Accessors::MakeAccessor(
info->set_property_attributes(attributes);
info->set_all_can_read(false);
info->set_all_can_write(false);
info->set_prohibits_overwriting(false);
info->set_name(*name);
Handle<Object> get = v8::FromCData(isolate, getter);
Handle<Object> set = v8::FromCData(isolate, setter);
@ -51,20 +41,37 @@ Handle<AccessorInfo> Accessors::MakeAccessor(
}
Handle<ExecutableAccessorInfo> Accessors::CloneAccessor(
Isolate* isolate,
Handle<ExecutableAccessorInfo> accessor) {
Factory* factory = isolate->factory();
Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo();
info->set_name(accessor->name());
info->set_flag(accessor->flag());
info->set_expected_receiver_type(accessor->expected_receiver_type());
info->set_getter(accessor->getter());
info->set_setter(accessor->setter());
info->set_data(accessor->data());
return info;
}
template <class C>
static C* FindInstanceOf(Isolate* isolate, Object* obj) {
for (Object* cur = obj; !cur->IsNull(); cur = cur->GetPrototype(isolate)) {
if (Is<C>(cur)) return C::cast(cur);
for (PrototypeIterator iter(isolate, obj,
PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(); iter.Advance()) {
if (Is<C>(iter.GetCurrent())) return C::cast(iter.GetCurrent());
}
return NULL;
}
static V8_INLINE bool CheckForName(Handle<String> name,
static V8_INLINE bool CheckForName(Handle<Name> name,
Handle<String> property_name,
int offset,
int* object_offset) {
if (String::Equals(name, property_name)) {
if (Name::Equals(name, property_name)) {
*object_offset = offset;
return true;
}
@ -76,7 +83,7 @@ static V8_INLINE bool CheckForName(Handle<String> name,
// If true, *object_offset contains offset of object field.
template <class T>
bool Accessors::IsJSObjectFieldAccessor(typename T::TypeHandle type,
Handle<String> name,
Handle<Name> name,
int* object_offset) {
Isolate* isolate = name->GetIsolate();
@ -119,16 +126,35 @@ bool Accessors::IsJSObjectFieldAccessor(typename T::TypeHandle type,
template
bool Accessors::IsJSObjectFieldAccessor<Type>(Type* type,
Handle<String> name,
Handle<Name> name,
int* object_offset);
template
bool Accessors::IsJSObjectFieldAccessor<HeapType>(Handle<HeapType> type,
Handle<String> name,
Handle<Name> name,
int* object_offset);
bool SetPropertyOnInstanceIfInherited(
Isolate* isolate, const v8::PropertyCallbackInfo<void>& info,
v8::Local<v8::String> name, Handle<Object> value) {
Handle<Object> holder = Utils::OpenHandle(*info.Holder());
Handle<Object> receiver = Utils::OpenHandle(*info.This());
if (*holder == *receiver) return false;
if (receiver->IsJSObject()) {
Handle<JSObject> object = Handle<JSObject>::cast(receiver);
// This behaves sloppily since we lost the actual strict mode.
// TODO(verwaest): Fix by making ExecutableAccessorInfo behave like data
// properties.
if (!object->map()->is_extensible()) return true;
JSObject::SetOwnPropertyIgnoreAttributes(object, Utils::OpenHandle(*name),
value, NONE).Check();
}
return true;
}
//
// Accessors::ArrayLength
//
@ -139,10 +165,9 @@ Handle<Object> Accessors::FlattenNumber(Isolate* isolate,
Handle<Object> value) {
if (value->IsNumber() || !value->IsJSValue()) return value;
Handle<JSValue> wrapper = Handle<JSValue>::cast(value);
ASSERT(wrapper->GetIsolate()->context()->native_context()->number_function()->
DCHECK(wrapper->GetIsolate()->native_context()->number_function()->
has_initial_map());
if (wrapper->map() ==
isolate->context()->native_context()->number_function()->initial_map()) {
if (wrapper->map() == isolate->number_function()->initial_map()) {
return handle(wrapper->value(), isolate);
}
@ -156,15 +181,8 @@ void Accessors::ArrayLengthGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *GetThisFrom(info);
// Traverse the prototype chain until we reach an array.
JSArray* holder = FindInstanceOf<JSArray>(isolate, object);
Object* result;
if (holder != NULL) {
result = holder->length();
} else {
result = Smi::FromInt(0);
}
JSArray* holder = JSArray::cast(*Utils::OpenHandle(*info.Holder()));
Object* result = holder->length();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
}
@ -175,17 +193,9 @@ void Accessors::ArrayLengthSetter(
const v8::PropertyCallbackInfo<void>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<JSObject> object = Handle<JSObject>::cast(
Utils::OpenHandle(*info.This()));
Handle<JSObject> object = Utils::OpenHandle(*info.This());
Handle<Object> value = Utils::OpenHandle(*val);
// This means one of the object's prototypes is a JSArray and the
// object does not have a 'length' property. Calling SetProperty
// causes an infinite loop.
if (!object->IsJSArray()) {
MaybeHandle<Object> maybe_result =
JSObject::SetLocalPropertyIgnoreAttributes(
object, isolate->factory()->length_string(), value, NONE);
maybe_result.Check();
if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) {
return;
}
@ -239,16 +249,19 @@ void Accessors::StringLengthGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* value = *GetThisFrom(info);
Object* result;
if (value->IsJSValue()) value = JSValue::cast(value)->value();
if (value->IsString()) {
result = Smi::FromInt(String::cast(value)->length());
} else {
// If object is not a string we return 0 to be compatible with WebKit.
// Note: Firefox returns the length of ToString(object).
result = Smi::FromInt(0);
// We have a slight impedance mismatch between the external API and the way we
// use callbacks internally: Externally, callbacks can only be used with
// v8::Object, but internally we have callbacks on entities which are higher
// in the hierarchy, in this case for String values.
Object* value = *Utils::OpenHandle(*v8::Local<v8::Value>(info.This()));
if (!value->IsString()) {
// Not a string value. That means that we either got a String wrapper or
// a Value with a String wrapper in its prototype chain.
value = JSValue::cast(*Utils::OpenHandle(*info.Holder()))->value();
}
Object* result = Smi::FromInt(String::cast(value)->length());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
}
@ -541,10 +554,10 @@ void Accessors::ScriptLineEndsGetter(
Handle<Script> script(
Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
Script::InitLineEnds(script);
ASSERT(script->line_ends()->IsFixedArray());
DCHECK(script->line_ends()->IsFixedArray());
Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
// We do not want anyone to modify this array from JS.
ASSERT(*line_ends == isolate->heap()->empty_fixed_array() ||
DCHECK(*line_ends == isolate->heap()->empty_fixed_array() ||
line_ends->map() == isolate->heap()->fixed_cow_array_map());
Handle<JSArray> js_array =
isolate->factory()->NewJSArrayWithElements(line_ends);
@ -572,6 +585,77 @@ Handle<AccessorInfo> Accessors::ScriptLineEndsInfo(
}
//
// Accessors::ScriptSourceUrl
//
void Accessors::ScriptSourceUrlGetter(
v8::Local<v8::String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.This());
Object* url = Script::cast(JSValue::cast(object)->value())->source_url();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(url, isolate)));
}
void Accessors::ScriptSourceUrlSetter(
v8::Local<v8::String> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE();
}
Handle<AccessorInfo> Accessors::ScriptSourceUrlInfo(
Isolate* isolate, PropertyAttributes attributes) {
return MakeAccessor(isolate,
isolate->factory()->source_url_string(),
&ScriptSourceUrlGetter,
&ScriptSourceUrlSetter,
attributes);
}
//
// Accessors::ScriptSourceMappingUrl
//
void Accessors::ScriptSourceMappingUrlGetter(
v8::Local<v8::String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.This());
Object* url =
Script::cast(JSValue::cast(object)->value())->source_mapping_url();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(url, isolate)));
}
void Accessors::ScriptSourceMappingUrlSetter(
v8::Local<v8::String> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE();
}
Handle<AccessorInfo> Accessors::ScriptSourceMappingUrlInfo(
Isolate* isolate, PropertyAttributes attributes) {
return MakeAccessor(isolate,
isolate->factory()->source_mapping_url_string(),
&ScriptSourceMappingUrlGetter,
&ScriptSourceMappingUrlSetter,
attributes);
}
//
// Accessors::ScriptGetContextData
//
@ -753,21 +837,7 @@ Handle<AccessorInfo> Accessors::ScriptEvalFromFunctionNameInfo(
//
static Handle<Object> GetFunctionPrototype(Isolate* isolate,
Handle<Object> receiver) {
Handle<JSFunction> function;
{
DisallowHeapAllocation no_allocation;
JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, *receiver);
if (function_raw == NULL) return isolate->factory()->undefined_value();
while (!function_raw->should_have_prototype()) {
function_raw = FindInstanceOf<JSFunction>(isolate,
function_raw->GetPrototype());
// There has to be one because we hit the getter.
ASSERT(function_raw != NULL);
}
function = Handle<JSFunction>(function_raw, isolate);
}
Handle<JSFunction> function) {
if (!function->has_prototype()) {
Handle<Object> proto = isolate->factory()->NewFunctionPrototype(function);
JSFunction::SetPrototype(function, proto);
@ -777,26 +847,10 @@ static Handle<Object> GetFunctionPrototype(Isolate* isolate,
static Handle<Object> SetFunctionPrototype(Isolate* isolate,
Handle<JSObject> receiver,
Handle<JSFunction> function,
Handle<Object> value) {
Handle<JSFunction> function;
{
DisallowHeapAllocation no_allocation;
JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, *receiver);
if (function_raw == NULL) return isolate->factory()->undefined_value();
function = Handle<JSFunction>(function_raw, isolate);
}
if (!function->should_have_prototype()) {
// Since we hit this accessor, object will have no prototype property.
MaybeHandle<Object> maybe_result =
JSObject::SetLocalPropertyIgnoreAttributes(
receiver, isolate->factory()->prototype_string(), value, NONE);
return maybe_result.ToHandleChecked();
}
Handle<Object> old_value;
bool is_observed = *function == *receiver && function->map()->is_observed();
bool is_observed = function->map()->is_observed();
if (is_observed) {
if (function->has_prototype())
old_value = handle(function->prototype(), isolate);
@ -805,7 +859,7 @@ static Handle<Object> SetFunctionPrototype(Isolate* isolate,
}
JSFunction::SetPrototype(function, value);
ASSERT(function->prototype() == *value);
DCHECK(function->prototype() == *value);
if (is_observed && !old_value->SameValue(*value)) {
JSObject::EnqueueChangeRecord(
@ -823,7 +877,7 @@ Handle<Object> Accessors::FunctionGetPrototype(Handle<JSFunction> function) {
Handle<Object> Accessors::FunctionSetPrototype(Handle<JSFunction> function,
Handle<Object> prototype) {
ASSERT(function->should_have_prototype());
DCHECK(function->should_have_prototype());
Isolate* isolate = function->GetIsolate();
return SetFunctionPrototype(isolate, function, prototype);
}
@ -834,8 +888,9 @@ void Accessors::FunctionPrototypeGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> object = GetThisFrom(info);
Handle<Object> result = GetFunctionPrototype(isolate, object);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
Handle<Object> result = GetFunctionPrototype(isolate, function);
info.GetReturnValue().Set(Utils::ToLocal(result));
}
@ -846,10 +901,12 @@ void Accessors::FunctionPrototypeSetter(
const v8::PropertyCallbackInfo<void>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<JSObject> object =
Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
Handle<Object> value = Utils::OpenHandle(*val);
if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) {
return;
}
Handle<JSFunction> object =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
SetFunctionPrototype(isolate, object, value);
}
@ -874,18 +931,10 @@ void Accessors::FunctionLengthGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> object = GetThisFrom(info);
MaybeHandle<JSFunction> maybe_function;
{
DisallowHeapAllocation no_allocation;
JSFunction* function = FindInstanceOf<JSFunction>(isolate, *object);
if (function != NULL) maybe_function = Handle<JSFunction>(function);
}
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
int length = 0;
Handle<JSFunction> function;
if (maybe_function.ToHandle(&function)) {
if (function->shared()->is_compiled()) {
length = function->shared()->length();
} else {
@ -898,7 +947,6 @@ void Accessors::FunctionLengthGetter(
isolate->OptionalRescheduleException(false);
}
}
}
Handle<Object> result(Smi::FromInt(length), isolate);
info.GetReturnValue().Set(Utils::ToLocal(result));
}
@ -908,7 +956,8 @@ void Accessors::FunctionLengthSetter(
v8::Local<v8::String> name,
v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
// Do nothing.
// Function length is non-writable and non-configurable.
UNREACHABLE();
}
@ -932,22 +981,9 @@ void Accessors::FunctionNameGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> object = GetThisFrom(info);
MaybeHandle<JSFunction> maybe_function;
{
DisallowHeapAllocation no_allocation;
JSFunction* function = FindInstanceOf<JSFunction>(isolate, *object);
if (function != NULL) maybe_function = Handle<JSFunction>(function);
}
Handle<JSFunction> function;
Handle<Object> result;
if (maybe_function.ToHandle(&function)) {
result = Handle<Object>(function->shared()->name(), isolate);
} else {
result = isolate->factory()->undefined_value();
}
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
Handle<Object> result(function->shared()->name(), isolate);
info.GetReturnValue().Set(Utils::ToLocal(result));
}
@ -956,7 +992,8 @@ void Accessors::FunctionNameSetter(
v8::Local<v8::String> name,
v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
// Do nothing.
// Function name is non-writable and non-configurable.
UNREACHABLE();
}
@ -1058,7 +1095,7 @@ Handle<Object> GetFunctionArguments(Isolate* isolate,
Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
// Copy the parameters to the arguments object.
ASSERT(array->length() == length);
DCHECK(array->length() == length);
for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
arguments->set_elements(*array);
@ -1081,22 +1118,9 @@ void Accessors::FunctionArgumentsGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> object = GetThisFrom(info);
MaybeHandle<JSFunction> maybe_function;
{
DisallowHeapAllocation no_allocation;
JSFunction* function = FindInstanceOf<JSFunction>(isolate, *object);
if (function != NULL) maybe_function = Handle<JSFunction>(function);
}
Handle<JSFunction> function;
Handle<Object> result;
if (maybe_function.ToHandle(&function)) {
result = GetFunctionArguments(isolate, function);
} else {
result = isolate->factory()->undefined_value();
}
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
Handle<Object> result = GetFunctionArguments(isolate, function);
info.GetReturnValue().Set(Utils::ToLocal(result));
}
@ -1105,7 +1129,8 @@ void Accessors::FunctionArgumentsSetter(
v8::Local<v8::String> name,
v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
// Do nothing.
// Function 'arguments' is non-writable and non-configurable.
UNREACHABLE();
}
@ -1124,23 +1149,34 @@ Handle<AccessorInfo> Accessors::FunctionArgumentsInfo(
//
static inline bool AllowAccessToFunction(Context* current_context,
JSFunction* function) {
return current_context->HasSameSecurityTokenAs(function->context());
}
class FrameFunctionIterator {
public:
FrameFunctionIterator(Isolate* isolate, const DisallowHeapAllocation& promise)
: frame_iterator_(isolate),
: isolate_(isolate),
frame_iterator_(isolate),
functions_(2),
index_(0) {
GetFunctions();
}
JSFunction* next() {
while (true) {
if (functions_.length() == 0) return NULL;
JSFunction* next_function = functions_[index_];
index_--;
if (index_ < 0) {
GetFunctions();
}
// Skip functions from other origins.
if (!AllowAccessToFunction(isolate_->context(), next_function)) continue;
return next_function;
}
}
// Iterate through functions until the first occurrence of 'function'.
// Returns true if 'function' is found, and false if the iterator ends
@ -1160,10 +1196,11 @@ class FrameFunctionIterator {
if (frame_iterator_.done()) return;
JavaScriptFrame* frame = frame_iterator_.frame();
frame->GetFunctions(&functions_);
ASSERT(functions_.length() > 0);
DCHECK(functions_.length() > 0);
frame_iterator_.Advance();
index_ = functions_.length() - 1;
}
Isolate* isolate_;
JavaScriptFrameIterator frame_iterator_;
List<JSFunction*> functions_;
int index_;
@ -1211,6 +1248,10 @@ MaybeHandle<JSFunction> FindCaller(Isolate* isolate,
if (caller->shared()->strict_mode() == STRICT) {
return MaybeHandle<JSFunction>();
}
// Don't return caller from another security context.
if (!AllowAccessToFunction(isolate->context(), caller)) {
return MaybeHandle<JSFunction>();
}
return Handle<JSFunction>(caller);
}
@ -1220,16 +1261,9 @@ void Accessors::FunctionCallerGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> object = GetThisFrom(info);
MaybeHandle<JSFunction> maybe_function;
{
DisallowHeapAllocation no_allocation;
JSFunction* function = FindInstanceOf<JSFunction>(isolate, *object);
if (function != NULL) maybe_function = Handle<JSFunction>(function);
}
Handle<JSFunction> function;
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
Handle<Object> result;
if (maybe_function.ToHandle(&function)) {
MaybeHandle<JSFunction> maybe_caller;
maybe_caller = FindCaller(isolate, function);
Handle<JSFunction> caller;
@ -1238,9 +1272,6 @@ void Accessors::FunctionCallerGetter(
} else {
result = isolate->factory()->null_value();
}
} else {
result = isolate->factory()->undefined_value();
}
info.GetReturnValue().Set(Utils::ToLocal(result));
}
@ -1249,7 +1280,8 @@ void Accessors::FunctionCallerSetter(
v8::Local<v8::String> name,
v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
// Do nothing.
// Function 'caller' is non-writable and non-configurable.
UNREACHABLE();
}
@ -1272,7 +1304,7 @@ static void ModuleGetExport(
const v8::PropertyCallbackInfo<v8::Value>& info) {
JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
Context* context = Context::cast(instance->context());
ASSERT(context->IsModuleContext());
DCHECK(context->IsModuleContext());
int slot = info.Data()->Int32Value();
Object* value = context->get(slot);
Isolate* isolate = instance->GetIsolate();
@ -1293,7 +1325,7 @@ static void ModuleSetExport(
const v8::PropertyCallbackInfo<v8::Value>& info) {
JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
Context* context = Context::cast(instance->context());
ASSERT(context->IsModuleContext());
DCHECK(context->IsModuleContext());
int slot = info.Data()->Int32Value();
Object* old_value = context->get(slot);
if (old_value->IsTheHole()) {

13
deps/v8/src/accessors.h

@ -5,8 +5,8 @@
#ifndef V8_ACCESSORS_H_
#define V8_ACCESSORS_H_
#include "allocation.h"
#include "v8globals.h"
#include "src/allocation.h"
#include "src/globals.h"
namespace v8 {
namespace internal {
@ -32,6 +32,8 @@ namespace internal {
V(ScriptName) \
V(ScriptSource) \
V(ScriptType) \
V(ScriptSourceUrl) \
V(ScriptSourceMappingUrl) \
V(StringLength)
// Accessors contains all predefined proxy accessors.
@ -76,7 +78,7 @@ class Accessors : public AllStatic {
// If true, *object_offset contains offset of object field.
template <class T>
static bool IsJSObjectFieldAccessor(typename T::TypeHandle type,
Handle<String> name,
Handle<Name> name,
int* object_offset);
static Handle<AccessorInfo> MakeAccessor(
@ -86,6 +88,11 @@ class Accessors : public AllStatic {
AccessorSetterCallback setter,
PropertyAttributes attributes);
static Handle<ExecutableAccessorInfo> CloneAccessor(
Isolate* isolate,
Handle<ExecutableAccessorInfo> accessor);
private:
// Helper functions.
static Handle<Object> FlattenNumber(Isolate* isolate, Handle<Object> value);

6
deps/v8/src/allocation-site-scopes.cc

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "allocation-site-scopes.h"
#include "src/allocation-site-scopes.h"
namespace v8 {
namespace internal {
@ -20,7 +20,7 @@ Handle<AllocationSite> AllocationSiteCreationContext::EnterNewScope() {
static_cast<void*>(*scope_site));
}
} else {
ASSERT(!current().is_null());
DCHECK(!current().is_null());
scope_site = isolate()->factory()->NewAllocationSite();
if (FLAG_trace_creation_allocation_sites) {
PrintF("Creating nested site (top, current, new) (%p, %p, %p)\n",
@ -31,7 +31,7 @@ Handle<AllocationSite> AllocationSiteCreationContext::EnterNewScope() {
current()->set_nested_site(*scope_site);
update_current_site(*scope_site);
}
ASSERT(!scope_site.is_null());
DCHECK(!scope_site.is_null());
return scope_site;
}

14
deps/v8/src/allocation-site-scopes.h

@ -5,10 +5,10 @@
#ifndef V8_ALLOCATION_SITE_SCOPES_H_
#define V8_ALLOCATION_SITE_SCOPES_H_
#include "ast.h"
#include "handles.h"
#include "objects.h"
#include "zone.h"
#include "src/ast.h"
#include "src/handles.h"
#include "src/objects.h"
#include "src/zone.h"
namespace v8 {
namespace internal {
@ -20,7 +20,7 @@ class AllocationSiteContext {
public:
explicit AllocationSiteContext(Isolate* isolate) {
isolate_ = isolate;
};
}
Handle<AllocationSite> top() { return top_; }
Handle<AllocationSite> current() { return current_; }
@ -75,7 +75,7 @@ class AllocationSiteUsageContext : public AllocationSiteContext {
// Advance current site
Object* nested_site = current()->nested_site();
// Something is wrong if we advance to the end of the list here.
ASSERT(nested_site->IsAllocationSite());
DCHECK(nested_site->IsAllocationSite());
update_current_site(AllocationSite::cast(nested_site));
}
return Handle<AllocationSite>(*current(), isolate());
@ -85,7 +85,7 @@ class AllocationSiteUsageContext : public AllocationSiteContext {
Handle<JSObject> object) {
// This assert ensures that we are pointing at the right sub-object in a
// recursive walk of a nested literal.
ASSERT(object.is_null() || *object == scope_site->transition_info());
DCHECK(object.is_null() || *object == scope_site->transition_info());
}
bool ShouldCreateMemento(Handle<JSObject> object);

25
deps/v8/src/allocation-tracker.cc

@ -2,12 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h"
#include "src/v8.h"
#include "allocation-tracker.h"
#include "heap-snapshot-generator.h"
#include "frames-inl.h"
#include "src/allocation-tracker.h"
#include "src/frames-inl.h"
#include "src/heap-snapshot-generator.h"
namespace v8 {
namespace internal {
@ -55,15 +54,15 @@ void AllocationTraceNode::AddAllocation(unsigned size) {
void AllocationTraceNode::Print(int indent, AllocationTracker* tracker) {
OS::Print("%10u %10u %*c", total_size_, allocation_count_, indent, ' ');
base::OS::Print("%10u %10u %*c", total_size_, allocation_count_, indent, ' ');
if (tracker != NULL) {
AllocationTracker::FunctionInfo* info =
tracker->function_info_list()[function_info_index_];
OS::Print("%s #%u", info->name, id_);
base::OS::Print("%s #%u", info->name, id_);
} else {
OS::Print("%u #%u", function_info_index_, id_);
base::OS::Print("%u #%u", function_info_index_, id_);
}
OS::Print("\n");
base::OS::Print("\n");
indent += 2;
for (int i = 0; i < children_.length(); i++) {
children_[i]->Print(indent, tracker);
@ -94,8 +93,8 @@ AllocationTraceNode* AllocationTraceTree::AddPathFromEnd(
void AllocationTraceTree::Print(AllocationTracker* tracker) {
OS::Print("[AllocationTraceTree:]\n");
OS::Print("Total size | Allocation count | Function id | id\n");
base::OS::Print("[AllocationTraceTree:]\n");
base::OS::Print("Total size | Allocation count | Function id | id\n");
root()->Print(0, tracker);
}
@ -229,8 +228,8 @@ void AllocationTracker::AllocationEvent(Address addr, int size) {
// Mark the new block as FreeSpace to make sure the heap is iterable
// while we are capturing stack trace.
FreeListNode::FromAddress(addr)->set_size(heap, size);
ASSERT_EQ(HeapObject::FromAddress(addr)->Size(), size);
ASSERT(FreeListNode::IsFreeListNode(HeapObject::FromAddress(addr)));
DCHECK_EQ(HeapObject::FromAddress(addr)->Size(), size);
DCHECK(FreeListNode::IsFreeListNode(HeapObject::FromAddress(addr)));
Isolate* isolate = heap->isolate();
int length = 0;

14
deps/v8/src/allocation.cc

@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "allocation.h"
#include "src/allocation.h"
#include <stdlib.h> // For free, malloc.
#include "checks.h"
#include "platform.h"
#include "utils.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/utils.h"
#if V8_LIBC_BIONIC
#include <malloc.h> // NOLINT
@ -66,7 +66,7 @@ void AllStatic::operator delete(void* p) {
char* StrDup(const char* str) {
int length = StrLength(str);
char* result = NewArray<char>(length + 1);
OS::MemCopy(result, str, length);
MemCopy(result, str, length);
result[length] = '\0';
return result;
}
@@ -76,14 +76,14 @@ char* StrNDup(const char* str, int n) {
int length = StrLength(str);
if (n < length) length = n;
char* result = NewArray<char>(length + 1);
OS::MemCopy(result, str, length);
MemCopy(result, str, length);
result[length] = '\0';
return result;
}
void* AlignedAlloc(size_t size, size_t alignment) {
ASSERT(IsPowerOf2(alignment) && alignment >= V8_ALIGNOF(void*)); // NOLINT
DCHECK(IsPowerOf2(alignment) && alignment >= V8_ALIGNOF(void*)); // NOLINT
void* ptr;
#if V8_OS_WIN
ptr = _aligned_malloc(size, alignment);
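AlignedAlloc dispatches per platform, and the hunk shows only the Windows _aligned_malloc arm. A self-contained sketch of the shape, with the POSIX branch assumed (posix_memalign, whose power-of-two, pointer-aligned contract matches the DCHECK above) rather than quoted from V8:

#include <cstddef>
#include <cstdlib>

void* AlignedAllocSketch(size_t size, size_t alignment) {
  void* ptr = nullptr;
#if defined(_WIN32)
  ptr = _aligned_malloc(size, alignment);
#else
  // Assumed branch for illustration; not the V8 implementation.
  if (posix_memalign(&ptr, alignment, size) != 0) ptr = nullptr;
#endif
  return ptr;
}

int main() {
  void* p = AlignedAllocSketch(256, 64);
#if defined(_WIN32)
  _aligned_free(p);  // Windows pairs _aligned_malloc with _aligned_free
#else
  std::free(p);
#endif
  return 0;
}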

2
deps/v8/src/allocation.h

@@ -5,7 +5,7 @@
#ifndef V8_ALLOCATION_H_
#define V8_ALLOCATION_H_
#include "globals.h"
#include "src/globals.h"
namespace v8 {
namespace internal {

1145
deps/v8/src/api.cc

File diff suppressed because it is too large

35
deps/v8/src/api.h

@@ -5,13 +5,13 @@
#ifndef V8_API_H_
#define V8_API_H_
#include "v8.h"
#include "src/v8.h"
#include "../include/v8-testing.h"
#include "contexts.h"
#include "factory.h"
#include "isolate.h"
#include "list-inl.h"
#include "include/v8-testing.h"
#include "src/contexts.h"
#include "src/factory.h"
#include "src/isolate.h"
#include "src/list-inl.h"
namespace v8 {
@@ -81,13 +81,13 @@ NeanderArray::NeanderArray(v8::internal::Handle<v8::internal::Object> obj)
v8::internal::Object* NeanderObject::get(int offset) {
ASSERT(value()->HasFastObjectElements());
DCHECK(value()->HasFastObjectElements());
return v8::internal::FixedArray::cast(value()->elements())->get(offset);
}
void NeanderObject::set(int offset, v8::internal::Object* value) {
ASSERT(value_->HasFastObjectElements());
DCHECK(value_->HasFastObjectElements());
v8::internal::FixedArray::cast(value_->elements())->set(offset, value);
}
@@ -264,7 +264,7 @@ OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
template<class From, class To>
static inline Local<To> Convert(v8::internal::Handle<From> obj) {
ASSERT(obj.is_null() || !obj->IsTheHole());
DCHECK(obj.is_null() || !obj->IsTheHole());
return Local<To>(reinterpret_cast<To*>(obj.location()));
}
@@ -325,7 +325,7 @@ inline v8::Local<T> ToApiHandle(
#define MAKE_TO_LOCAL_TYPED_ARRAY(Type, typeName, TYPE, ctype, size) \
Local<v8::Type##Array> Utils::ToLocal##Type##Array( \
v8::internal::Handle<v8::internal::JSTypedArray> obj) { \
ASSERT(obj->type() == kExternal##Type##Array); \
DCHECK(obj->type() == kExternal##Type##Array); \
return Convert<v8::internal::JSTypedArray, v8::Type##Array>(obj); \
}
@@ -370,8 +370,7 @@ MAKE_TO_LOCAL(ToLocal, DeclaredAccessorDescriptor, DeclaredAccessorDescriptor)
const v8::From* that, bool allow_empty_handle) { \
EXTRA_CHECK(allow_empty_handle || that != NULL); \
EXTRA_CHECK(that == NULL || \
(*reinterpret_cast<v8::internal::Object**>( \
const_cast<v8::From*>(that)))->Is##To()); \
(*reinterpret_cast<v8::internal::Object* const*>(that))->Is##To()); \
return v8::internal::Handle<v8::internal::To>( \
reinterpret_cast<v8::internal::To**>(const_cast<v8::From*>(that))); \
}
@@ -535,7 +534,7 @@ class HandleScopeImplementer {
Isolate* isolate() const { return isolate_; }
void ReturnBlock(Object** block) {
ASSERT(block != NULL);
DCHECK(block != NULL);
if (spare_ != NULL) DeleteArray(spare_);
spare_ = block;
}
@@ -551,9 +550,9 @@ class HandleScopeImplementer {
}
void Free() {
ASSERT(blocks_.length() == 0);
ASSERT(entered_contexts_.length() == 0);
ASSERT(saved_contexts_.length() == 0);
DCHECK(blocks_.length() == 0);
DCHECK(entered_contexts_.length() == 0);
DCHECK(saved_contexts_.length() == 0);
blocks_.Free();
entered_contexts_.Free();
saved_contexts_.Free();
@@ -561,7 +560,7 @@ class HandleScopeImplementer {
DeleteArray(spare_);
spare_ = NULL;
}
ASSERT(call_depth_ == 0);
DCHECK(call_depth_ == 0);
}
void BeginDeferredScope();
@@ -664,7 +663,7 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
}
spare_ = block_start;
}
ASSERT((blocks_.is_empty() && prev_limit == NULL) ||
DCHECK((blocks_.is_empty() && prev_limit == NULL) ||
(!blocks_.is_empty() && prev_limit != NULL));
}

30
deps/v8/src/apinatives.js

@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
"use strict";
// This file contains infrastructure used by the API. See
// v8natives.js for an explanation of how these files are processed and
// loaded.
@@ -28,10 +30,16 @@ function Instantiate(data, name) {
var Constructor = %GetTemplateField(data, kApiConstructorOffset);
// Note: Do not directly use a function template as a condition; our
// internal ToBoolean doesn't handle that!
var result = typeof Constructor === 'undefined' ?
{} : new (Instantiate(Constructor))();
var result;
if (typeof Constructor === 'undefined') {
result = {};
ConfigureTemplateInstance(result, data);
} else {
// ConfigureTemplateInstance is implicitly called before calling the API
// constructor in HandleApiCall.
result = new (Instantiate(Constructor))();
result = %ToFastProperties(result);
}
return result;
default:
throw 'Unknown API tag <' + tag + '>';
@@ -49,9 +57,8 @@ function InstantiateFunction(data, name) {
if (!isFunctionCached) {
try {
var flags = %GetTemplateField(data, kApiFlagOffset);
var has_proto = !(flags & (1 << kRemovePrototypeBit));
var prototype;
if (has_proto) {
if (!(flags & (1 << kRemovePrototypeBit))) {
var template = %GetTemplateField(data, kApiPrototypeTemplateOffset);
prototype = typeof template === 'undefined'
? {} : Instantiate(template);
@@ -61,16 +68,13 @@ function InstantiateFunction(data, name) {
// internal ToBoolean doesn't handle that!
if (typeof parent !== 'undefined') {
var parent_fun = Instantiate(parent);
%SetPrototype(prototype, parent_fun.prototype);
%InternalSetPrototype(prototype, parent_fun.prototype);
}
}
var fun = %CreateApiFunction(data, prototype);
if (name) %FunctionSetName(fun, name);
var doNotCache = flags & (1 << kDoNotCacheBit);
if (!doNotCache) cache[serialNumber] = fun;
if (has_proto && flags & (1 << kReadOnlyPrototypeBit)) {
%FunctionSetReadOnlyPrototype(fun);
}
ConfigureTemplateInstance(fun, data);
if (doNotCache) return fun;
} catch (e) {
@@ -95,15 +99,15 @@ function ConfigureTemplateInstance(obj, data) {
var prop_data = properties[i + 2];
var attributes = properties[i + 3];
var value = Instantiate(prop_data, name);
%SetProperty(obj, name, value, attributes);
} else if (length == 5) {
%AddPropertyForTemplate(obj, name, value, attributes);
} else if (length == 4 || length == 5) {
// TODO(verwaest): The 5th value used to be access_control. Remove once
// the bindings are updated.
var name = properties[i + 1];
var getter = properties[i + 2];
var setter = properties[i + 3];
var attribute = properties[i + 4];
var access_control = properties[i + 5];
%SetAccessorProperty(
obj, name, getter, setter, attribute, access_control);
%DefineApiAccessorProperty(obj, name, getter, setter, attribute);
} else {
throw "Bad properties array";
}

6
deps/v8/src/arguments.cc

@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h"
#include "arguments.h"
#include "src/v8.h"
#include "vm-state-inl.h"
#include "src/arguments.h"
#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {

24
deps/v8/src/arguments.h

@@ -5,7 +5,8 @@
#ifndef V8_ARGUMENTS_H_
#define V8_ARGUMENTS_H_
#include "allocation.h"
#include "src/allocation.h"
#include "src/isolate.h"
namespace v8 {
namespace internal {
@@ -21,6 +22,9 @@ namespace internal {
// Object* Runtime_function(Arguments args) {
// ... use args[i] here ...
// }
//
// Note that length_ (whose value is in the integer range) is defined
// as intptr_t to provide endian-neutrality on 64-bit archs.
class Arguments BASE_EMBEDDED {
public:
@@ -28,7 +32,7 @@ class Arguments BASE_EMBEDDED {
: length_(length), arguments_(arguments) { }
Object*& operator[] (int index) {
ASSERT(0 <= index && index < length_);
DCHECK(0 <= index && index < length_);
return *(reinterpret_cast<Object**>(reinterpret_cast<intptr_t>(arguments_) -
index * kPointerSize));
}
@@ -50,12 +54,12 @@ class Arguments BASE_EMBEDDED {
}
// Get the total number of arguments including the receiver.
int length() const { return length_; }
int length() const { return static_cast<int>(length_); }
Object** arguments() { return arguments_; }
private:
int length_;
intptr_t length_;
Object** arguments_;
};
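The new comment motivates the intptr_t: generated code reads and writes the argument count as a full machine word, so a four-byte int field would alias the wrong half of that word on big-endian 64-bit targets. A stand-alone illustration of the point (hypothetical code, not the V8 class):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  // Generated code stores the count as a full word.
  intptr_t slot = 3;
  // Viewing only the first four bytes is endian-dependent: on
  // little-endian they hold the value, on big-endian the high half.
  int32_t first_four_bytes;
  std::memcpy(&first_four_bytes, &slot, sizeof(first_four_bytes));
  std::printf("word=%ld first4=%d\n", static_cast<long>(slot),
              first_four_bytes);
  // Declaring the field as intptr_t and narrowing in the accessor,
  // as length() now does, sidesteps the layout question entirely.
  return 0;
}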
@@ -172,8 +176,8 @@ class PropertyCallbackArguments
values[T::kReturnValueDefaultValueIndex] =
isolate->heap()->the_hole_value();
values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
ASSERT(values[T::kHolderIndex]->IsHeapObject());
ASSERT(values[T::kIsolateIndex]->IsSmi());
DCHECK(values[T::kHolderIndex]->IsHeapObject());
DCHECK(values[T::kIsolateIndex]->IsSmi());
}
/*
@@ -244,9 +248,9 @@ class FunctionCallbackArguments
values[T::kReturnValueDefaultValueIndex] =
isolate->heap()->the_hole_value();
values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
ASSERT(values[T::kCalleeIndex]->IsJSFunction());
ASSERT(values[T::kHolderIndex]->IsHeapObject());
ASSERT(values[T::kIsolateIndex]->IsSmi());
DCHECK(values[T::kCalleeIndex]->IsJSFunction());
DCHECK(values[T::kHolderIndex]->IsHeapObject());
DCHECK(values[T::kIsolateIndex]->IsSmi());
}
/*
@@ -280,7 +284,7 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
Object* Name(int args_length, Object** args_object, Isolate* isolate)
#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name) \
static Type __RT_impl_##Name(Arguments args, Isolate* isolate); \
static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate)); \
Type Name(int args_length, Object** args_object, Isolate* isolate) { \
CLOBBER_DOUBLE_REGISTERS(); \
Arguments args(args_length, args_object); \

280
deps/v8/src/arm/assembler-arm-inl.h

@@ -37,16 +37,19 @@
#ifndef V8_ARM_ASSEMBLER_ARM_INL_H_
#define V8_ARM_ASSEMBLER_ARM_INL_H_
#include "arm/assembler-arm.h"
#include "src/arm/assembler-arm.h"
#include "cpu.h"
#include "debug.h"
#include "src/assembler.h"
#include "src/debug.h"
namespace v8 {
namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return IsSupported(VFP3); }
int Register::NumAllocatableRegisters() {
return kMaxNumAllocatableRegisters;
}
@@ -68,8 +71,8 @@ int DwVfpRegister::NumAllocatableRegisters() {
int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
ASSERT(!reg.is(kDoubleRegZero));
ASSERT(!reg.is(kScratchDoubleReg));
DCHECK(!reg.is(kDoubleRegZero));
DCHECK(!reg.is(kScratchDoubleReg));
if (reg.code() > kDoubleRegZero.code()) {
return reg.code() - kNumReservedRegisters;
}
@@ -78,8 +81,8 @@ int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
DwVfpRegister DwVfpRegister::FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < NumAllocatableRegisters());
ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
DCHECK(index >= 0 && index < NumAllocatableRegisters());
DCHECK(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
kNumReservedRegisters - 1);
if (index >= kDoubleRegZero.code()) {
return from_code(index + kNumReservedRegisters);
@@ -88,7 +91,7 @@ DwVfpRegister DwVfpRegister::FromAllocationIndex(int index) {
}
void RelocInfo::apply(intptr_t delta) {
void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
if (RelocInfo::IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
@@ -100,13 +103,13 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
if (FLAG_enable_ool_constant_pool ||
@@ -115,22 +118,15 @@ Address RelocInfo::target_address_address() {
// serializer and expects the address to reside within the code object.
return reinterpret_cast<Address>(pc_);
} else {
ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
return Assembler::target_pointer_address_at(pc_);
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
return constant_pool_entry_address();
}
}
Address RelocInfo::constant_pool_entry_address() {
ASSERT(IsInConstantPool());
if (FLAG_enable_ool_constant_pool) {
ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_)));
return Assembler::target_constant_pool_address_at(pc_,
host_->constant_pool());
} else {
ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
return Assembler::target_pointer_address_at(pc_);
}
DCHECK(IsInConstantPool());
return Assembler::constant_pool_entry_address(pc_, host_->constant_pool());
}
@@ -139,10 +135,13 @@ int RelocInfo::target_address_size() {
}
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
Assembler::set_target_address_at(pc_, host_, target);
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
@@ -151,24 +150,26 @@ void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<Object>(reinterpret_cast<Object**>(
Assembler::target_address_at(pc_, host_)));
}
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
void RelocInfo::set_target_object(Object* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, host_,
reinterpret_cast<Address>(target));
if (mode == UPDATE_WRITE_BARRIER &&
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
@@ -178,42 +179,46 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
Address RelocInfo::target_reference() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
DCHECK(rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
ASSERT(IsRuntimeEntry(rmode_));
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
}
void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode mode) {
ASSERT(IsRuntimeEntry(rmode_));
if (target_address() != target) set_target_address(target, mode);
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target)
set_target_address(target, write_barrier_mode, icache_flush_mode);
}
Handle<Cell> RelocInfo::target_cell_handle() {
ASSERT(rmode_ == RelocInfo::CELL);
DCHECK(rmode_ == RelocInfo::CELL);
Address address = Memory::Address_at(pc_);
return Handle<Cell>(reinterpret_cast<Cell**>(address));
}
Cell* RelocInfo::target_cell() {
ASSERT(rmode_ == RelocInfo::CELL);
DCHECK(rmode_ == RelocInfo::CELL);
return Cell::FromValueAddress(Memory::Address_at(pc_));
}
void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
ASSERT(rmode_ == RelocInfo::CELL);
void RelocInfo::set_target_cell(Cell* cell,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CELL);
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
// an evacuation candidate.
host()->GetHeap()->incremental_marking()->RecordWrite(
@@ -232,15 +237,16 @@ Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
Memory::Address_at(pc_ +
(kNoCodeAgeSequenceLength - Assembler::kInstrSize)));
}
void RelocInfo::set_code_age_stub(Code* stub) {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
Memory::Address_at(pc_ +
(kNoCodeAgeSequenceLength - Assembler::kInstrSize)) =
stub->instruction_start();
@@ -250,14 +256,14 @@ void RelocInfo::set_code_age_stub(Code* stub) {
Address RelocInfo::call_address() {
// The 2 instructions offset assumes patched debug break slot or return
// sequence.
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
}
void RelocInfo::set_call_address(Address target) {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
if (host() != NULL) {
@@ -279,14 +285,14 @@ void RelocInfo::set_call_object(Object* target) {
Object** RelocInfo::call_object_address() {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
}
void RelocInfo::WipeOut() {
ASSERT(IsEmbeddedObject(rmode_) ||
DCHECK(IsEmbeddedObject(rmode_) ||
IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) ||
IsExternalReference(rmode_));
@@ -300,8 +306,8 @@ bool RelocInfo::IsPatchedReturnSequence() {
// A patched return sequence is:
// ldr ip, [pc, #0]
// blx ip
return ((current_instr & kLdrPCMask) == kLdrPCPattern)
&& ((next_instr & kBlxRegMask) == kBlxRegPattern);
return Assembler::IsLdrPcImmediateOffset(current_instr) &&
Assembler::IsBlxReg(next_instr);
}
@@ -414,42 +420,6 @@ void Assembler::emit(Instr x) {
}
Address Assembler::target_pointer_address_at(Address pc) {
Instr instr = Memory::int32_at(pc);
return pc + GetLdrRegisterImmediateOffset(instr) + kPcLoadDelta;
}
Address Assembler::target_constant_pool_address_at(
Address pc, ConstantPoolArray* constant_pool) {
ASSERT(constant_pool != NULL);
ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
Instr instr = Memory::int32_at(pc);
return reinterpret_cast<Address>(constant_pool) +
GetLdrRegisterImmediateOffset(instr);
}
Address Assembler::target_address_at(Address pc,
ConstantPoolArray* constant_pool) {
if (IsMovW(Memory::int32_at(pc))) {
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
Instruction* instr = Instruction::At(pc);
Instruction* next_instr = Instruction::At(pc + kInstrSize);
return reinterpret_cast<Address>(
(next_instr->ImmedMovwMovtValue() << 16) |
instr->ImmedMovwMovtValue());
} else if (FLAG_enable_ool_constant_pool) {
ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
return Memory::Address_at(
target_constant_pool_address_at(pc, constant_pool));
} else {
ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
return Memory::Address_at(target_pointer_address_at(pc));
}
}
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
// be returned to after a call.
@@ -458,8 +428,15 @@ Address Assembler::target_address_from_return_address(Address pc) {
// movt ip, #... @ call address high 16
// blx ip
// @ return address
// Or pre-V7 or cases that need frequent patching:
// ldr ip, [pc, #...] @ call address
// Or pre-V7 or cases that need frequent patching, the address is in the
// constant pool. It could be a small constant pool load:
// ldr ip, [pc / pp, #...] @ call address
// blx ip
// @ return address
// Or an extended constant pool load:
// movw ip, #...
// movt ip, #...
// ldr ip, [pc, ip] @ call address
// blx ip
// @ return address
Address candidate = pc - 2 * Assembler::kInstrSize;
@@ -467,23 +444,41 @@ Address Assembler::target_address_from_return_address(Address pc) {
if (IsLdrPcImmediateOffset(candidate_instr) |
IsLdrPpImmediateOffset(candidate_instr)) {
return candidate;
}
} else if (IsLdrPpRegOffset(candidate_instr)) {
candidate = pc - 4 * Assembler::kInstrSize;
DCHECK(IsMovW(Memory::int32_at(candidate)) &&
IsMovT(Memory::int32_at(candidate + Assembler::kInstrSize)));
return candidate;
} else {
candidate = pc - 3 * Assembler::kInstrSize;
ASSERT(IsMovW(Memory::int32_at(candidate)) &&
DCHECK(IsMovW(Memory::int32_at(candidate)) &&
IsMovT(Memory::int32_at(candidate + kInstrSize)));
return candidate;
}
}
Address Assembler::break_address_from_return_address(Address pc) {
return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
}
Address Assembler::return_address_from_call_start(Address pc) {
if (IsLdrPcImmediateOffset(Memory::int32_at(pc)) |
IsLdrPpImmediateOffset(Memory::int32_at(pc))) {
// Load from constant pool, small section.
return pc + kInstrSize * 2;
} else {
ASSERT(IsMovW(Memory::int32_at(pc)));
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
DCHECK(IsMovW(Memory::int32_at(pc)));
DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
if (IsLdrPpRegOffset(Memory::int32_at(pc + kInstrSize))) {
// Load from constant pool, extended section.
return pc + kInstrSize * 4;
} else {
// A movw / movt load immediate.
return pc + kInstrSize * 3;
}
}
}
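The three offsets follow directly from the sequences in the comment above: two instructions for a small constant pool load, four for an extended one, three for a movw/movt immediate. A compact sketch of the mapping, assuming the ARM kInstrSize of 4:

#include <cstdint>
#include <cstdio>

const int kInstrSize = 4;  // ARM instruction width assumed

enum CallSequence { SMALL_POOL_LOAD, EXTENDED_POOL_LOAD, MOVW_MOVT_IMMEDIATE };

uintptr_t ReturnAddressFromCallStart(uintptr_t pc, CallSequence seq) {
  switch (seq) {
    case SMALL_POOL_LOAD:     return pc + 2 * kInstrSize;  // ldr; blx
    case EXTENDED_POOL_LOAD:  return pc + 4 * kInstrSize;  // movw; movt; ldr; blx
    case MOVW_MOVT_IMMEDIATE: return pc + 3 * kInstrSize;  // movw; movt; blx
  }
  return 0;
}

int main() {
  std::printf("%lx\n", static_cast<unsigned long>(
      ReturnAddressFromCallStart(0x1000, EXTENDED_POOL_LOAD)));
  return 0;
}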
@@ -497,45 +492,88 @@ void Assembler::deserialization_set_special_target_at(
}
static Instr EncodeMovwImmediate(uint32_t immediate) {
ASSERT(immediate < 0x10000);
return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
bool Assembler::is_constant_pool_load(Address pc) {
return !Assembler::IsMovW(Memory::int32_at(pc)) ||
(FLAG_enable_ool_constant_pool &&
Assembler::IsLdrPpRegOffset(
Memory::int32_at(pc + 2 * Assembler::kInstrSize)));
}
void Assembler::set_target_address_at(Address pc,
ConstantPoolArray* constant_pool,
Address target) {
Address Assembler::constant_pool_entry_address(
Address pc, ConstantPoolArray* constant_pool) {
if (FLAG_enable_ool_constant_pool) {
DCHECK(constant_pool != NULL);
int cp_offset;
if (IsMovW(Memory::int32_at(pc))) {
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
uint32_t immediate = reinterpret_cast<uint32_t>(target);
uint32_t intermediate = instr_ptr[0];
intermediate &= ~EncodeMovwImmediate(0xFFFF);
intermediate |= EncodeMovwImmediate(immediate & 0xFFFF);
instr_ptr[0] = intermediate;
intermediate = instr_ptr[1];
intermediate &= ~EncodeMovwImmediate(0xFFFF);
intermediate |= EncodeMovwImmediate(immediate >> 16);
instr_ptr[1] = intermediate;
ASSERT(IsMovW(Memory::int32_at(pc)));
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
CPU::FlushICache(pc, 2 * kInstrSize);
} else if (FLAG_enable_ool_constant_pool) {
ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
Memory::Address_at(
target_constant_pool_address_at(pc, constant_pool)) = target;
DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)) &&
IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize)));
// This is an extended constant pool lookup.
Instruction* movw_instr = Instruction::At(pc);
Instruction* movt_instr = Instruction::At(pc + kInstrSize);
cp_offset = (movt_instr->ImmedMovwMovtValue() << 16) |
movw_instr->ImmedMovwMovtValue();
} else {
// This is a small constant pool lookup.
DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc)));
cp_offset = GetLdrRegisterImmediateOffset(Memory::int32_at(pc));
}
return reinterpret_cast<Address>(constant_pool) + cp_offset;
} else {
ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
Memory::Address_at(target_pointer_address_at(pc)) = target;
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc)));
Instr instr = Memory::int32_at(pc);
return pc + GetLdrRegisterImmediateOffset(instr) + kPcLoadDelta;
}
}
Address Assembler::target_address_at(Address pc,
ConstantPoolArray* constant_pool) {
if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Return the value in the constant pool.
return Memory::Address_at(constant_pool_entry_address(pc, constant_pool));
} else {
// This is a movw_movt immediate load. Return the immediate.
DCHECK(IsMovW(Memory::int32_at(pc)) &&
IsMovT(Memory::int32_at(pc + kInstrSize)));
Instruction* movw_instr = Instruction::At(pc);
Instruction* movt_instr = Instruction::At(pc + kInstrSize);
return reinterpret_cast<Address>(
(movt_instr->ImmedMovwMovtValue() << 16) |
movw_instr->ImmedMovwMovtValue());
}
}
void Assembler::set_target_address_at(Address pc,
ConstantPoolArray* constant_pool,
Address target,
ICacheFlushMode icache_flush_mode) {
if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Update the entry in the constant pool.
Memory::Address_at(constant_pool_entry_address(pc, constant_pool)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
// CPU::FlushICache(pc, sizeof(target));
// CpuFeatures::FlushICache(pc, sizeof(target));
// However, on ARM, no instruction is actually patched in the case
// of embedded constants of the form:
// ldr ip, [pc, #...]
// ldr ip, [pp, #...]
// since the instruction accessing this address in the constant pool remains
// unchanged.
} else {
// This is a movw_movt immediate load. Patch the immediate embedded in the
// instructions.
DCHECK(IsMovW(Memory::int32_at(pc)));
DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
uint32_t immediate = reinterpret_cast<uint32_t>(target);
instr_ptr[0] = PatchMovwImmediate(instr_ptr[0], immediate & 0xFFFF);
instr_ptr[1] = PatchMovwImmediate(instr_ptr[1], immediate >> 16);
DCHECK(IsMovW(Memory::int32_at(pc)));
DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
CpuFeatures::FlushICache(pc, 2 * kInstrSize);
}
}
}
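The movw/movt patching path relies on the imm4:imm12 split visible in the removed EncodeMovwImmediate above, which scatters a 16-bit value into bits 19:16 and 11:0 of the instruction. A stand-alone sketch of PatchMovwImmediate built from that formula (the movw template encoding below is an assumption for the demo):

#include <cstdint>
#include <cstdio>

uint32_t EncodeMovwImmediate(uint32_t immediate) {
  // Callers must guarantee immediate < 0x10000, as the old ASSERT required.
  return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
}

uint32_t PatchMovwImmediate(uint32_t instruction, uint32_t immediate) {
  // Clear the old imm4:imm12 field, then merge in the new value: the same
  // two steps the removed set_target_address_at performed inline.
  instruction &= ~EncodeMovwImmediate(0xFFFF);
  return instruction | EncodeMovwImmediate(immediate);
}

int main() {
  uint32_t movw_r0 = 0xe3000000;  // assumed encoding of: movw r0, #0
  std::printf("%08x\n", PatchMovwImmediate(movw_r0, 0xbeef));  // e30b0eef
  return 0;
}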

1187
deps/v8/src/arm/assembler-arm.cc

File diff suppressed because it is too large

306
deps/v8/src/arm/assembler-arm.h

@@ -43,78 +43,13 @@
#include <stdio.h>
#include <vector>
#include "assembler.h"
#include "constants-arm.h"
#include "serialize.h"
#include "src/arm/constants-arm.h"
#include "src/assembler.h"
#include "src/serialize.h"
namespace v8 {
namespace internal {
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a CpuFeatureScope before use.
class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
static void Probe(bool serializer_enabled);
// Display target use when compiling.
static void PrintTarget();
// Display features.
static void PrintFeatures();
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
return Check(f, supported_);
}
static bool IsSafeForSnapshot(Isolate* isolate, CpuFeature f) {
return Check(f, cross_compile_) ||
(IsSupported(f) &&
!(Serializer::enabled(isolate) &&
Check(f, found_by_runtime_probing_only_)));
}
static unsigned cache_line_size() { return cache_line_size_; }
static bool VerifyCrossCompiling() {
return cross_compile_ == 0;
}
static bool VerifyCrossCompiling(CpuFeature f) {
unsigned mask = flag2set(f);
return cross_compile_ == 0 ||
(cross_compile_ & mask) == mask;
}
static bool SupportsCrankshaft() { return CpuFeatures::IsSupported(VFP3); }
private:
static bool Check(CpuFeature f, unsigned set) {
return (set & flag2set(f)) != 0;
}
static unsigned flag2set(CpuFeature f) {
return 1u << f;
}
#ifdef DEBUG
static bool initialized_;
#endif
static unsigned supported_;
static unsigned found_by_runtime_probing_only_;
static unsigned cache_line_size_;
static unsigned cross_compile_;
friend class ExternalReference;
friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -165,17 +100,17 @@ struct Register {
inline static int NumAllocatableRegisters();
static int ToAllocationIndex(Register reg) {
ASSERT(reg.code() < kMaxNumAllocatableRegisters);
DCHECK(reg.code() < kMaxNumAllocatableRegisters);
return reg.code();
}
static Register FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
return from_code(index);
}
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"r0",
"r1",
@@ -201,17 +136,17 @@ struct Register {
bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
bool is(Register reg) const { return code_ == reg.code_; }
int code() const {
ASSERT(is_valid());
DCHECK(is_valid());
return code_;
}
int bit() const {
ASSERT(is_valid());
DCHECK(is_valid());
return 1 << code_;
}
void set_code(int code) {
code_ = code;
ASSERT(is_valid());
DCHECK(is_valid());
}
// Unfortunately we can't make this private in a struct.
@@ -247,15 +182,15 @@ struct SwVfpRegister {
bool is_valid() const { return 0 <= code_ && code_ < 32; }
bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
int code() const {
ASSERT(is_valid());
DCHECK(is_valid());
return code_;
}
int bit() const {
ASSERT(is_valid());
DCHECK(is_valid());
return 1 << code_;
}
void split_code(int* vm, int* m) const {
ASSERT(is_valid());
DCHECK(is_valid());
*m = code_ & 0x1;
*vm = code_ >> 1;
}
@@ -297,15 +232,15 @@ struct DwVfpRegister {
}
bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
int code() const {
ASSERT(is_valid());
DCHECK(is_valid());
return code_;
}
int bit() const {
ASSERT(is_valid());
DCHECK(is_valid());
return 1 << code_;
}
void split_code(int* vm, int* m) const {
ASSERT(is_valid());
DCHECK(is_valid());
*m = (code_ & 0x10) >> 4;
*vm = code_ & 0x0F;
}
@@ -336,21 +271,21 @@ struct LowDwVfpRegister {
bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
bool is(LowDwVfpRegister reg) const { return code_ == reg.code_; }
int code() const {
ASSERT(is_valid());
DCHECK(is_valid());
return code_;
}
SwVfpRegister low() const {
SwVfpRegister reg;
reg.code_ = code_ * 2;
ASSERT(reg.is_valid());
DCHECK(reg.is_valid());
return reg;
}
SwVfpRegister high() const {
SwVfpRegister reg;
reg.code_ = (code_ * 2) + 1;
ASSERT(reg.is_valid());
DCHECK(reg.is_valid());
return reg;
}
@@ -372,11 +307,11 @@ struct QwNeonRegister {
}
bool is(QwNeonRegister reg) const { return code_ == reg.code_; }
int code() const {
ASSERT(is_valid());
DCHECK(is_valid());
return code_;
}
void split_code(int* vm, int* m) const {
ASSERT(is_valid());
DCHECK(is_valid());
int encoded_code = code_ << 1;
*m = (encoded_code & 0x10) >> 4;
*vm = encoded_code & 0x0F;
@@ -490,11 +425,11 @@ struct CRegister {
bool is_valid() const { return 0 <= code_ && code_ < 16; }
bool is(CRegister creg) const { return code_ == creg.code_; }
int code() const {
ASSERT(is_valid());
DCHECK(is_valid());
return code_;
}
int bit() const {
ASSERT(is_valid());
DCHECK(is_valid());
return 1 << code_;
}
@@ -583,19 +518,22 @@ class Operand BASE_EMBEDDED {
// Return true if this is a register operand.
INLINE(bool is_reg() const);
// Return true if this operand fits in one instruction so that no
// 2-instruction solution with a load into the ip register is necessary. If
// Return the number of actual instructions required to implement the given
// instruction for this particular operand. This can be a single instruction,
// if no load into the ip register is necessary, or anything between 2 and 4
// instructions when we need to load from the constant pool (depending upon
// whether the constant pool entry is in the small or extended section). If
// the instruction this operand is used for is a MOV or MVN instruction the
// actual instruction to use is required for this calculation. For other
// instructions instr is ignored.
bool is_single_instruction(Isolate* isolate,
const Assembler* assembler,
Instr instr = 0) const;
bool must_output_reloc_info(Isolate* isolate,
const Assembler* assembler) const;
//
// The value returned is only valid as long as no entries are added to the
// constant pool between this call and the actual instruction being emitted.
int instructions_required(const Assembler* assembler, Instr instr = 0) const;
bool must_output_reloc_info(const Assembler* assembler) const;
inline int32_t immediate() const {
ASSERT(!rm_.is_valid());
DCHECK(!rm_.is_valid());
return imm32_;
}
@@ -643,12 +581,12 @@ class MemOperand BASE_EMBEDDED {
}
void set_offset(int32_t offset) {
ASSERT(rm_.is(no_reg));
DCHECK(rm_.is(no_reg));
offset_ = offset;
}
uint32_t offset() const {
ASSERT(rm_.is(no_reg));
DCHECK(rm_.is(no_reg));
return offset_;
}
@@ -711,59 +649,48 @@ class NeonListOperand BASE_EMBEDDED {
// Class used to build a constant pool.
class ConstantPoolBuilder BASE_EMBEDDED {
public:
explicit ConstantPoolBuilder();
void AddEntry(Assembler* assm, const RelocInfo& rinfo);
ConstantPoolBuilder();
ConstantPoolArray::LayoutSection AddEntry(Assembler* assm,
const RelocInfo& rinfo);
void Relocate(int pc_delta);
bool IsEmpty();
Handle<ConstantPoolArray> New(Isolate* isolate);
void Populate(Assembler* assm, ConstantPoolArray* constant_pool);
inline int count_of_64bit() const { return count_of_64bit_; }
inline int count_of_code_ptr() const { return count_of_code_ptr_; }
inline int count_of_heap_ptr() const { return count_of_heap_ptr_; }
inline int count_of_32bit() const { return count_of_32bit_; }
inline ConstantPoolArray::LayoutSection current_section() const {
return current_section_;
}
private:
bool Is64BitEntry(RelocInfo::Mode rmode);
bool Is32BitEntry(RelocInfo::Mode rmode);
bool IsCodePtrEntry(RelocInfo::Mode rmode);
bool IsHeapPtrEntry(RelocInfo::Mode rmode);
// TODO(rmcilroy): This should ideally be a ZoneList, however that would mean
// RelocInfo would need to subclass ZoneObject which it currently doesn't.
std::vector<RelocInfo> entries_;
std::vector<int> merged_indexes_;
int count_of_64bit_;
int count_of_code_ptr_;
int count_of_heap_ptr_;
int count_of_32bit_;
};
inline ConstantPoolArray::NumberOfEntries* number_of_entries(
ConstantPoolArray::LayoutSection section) {
return &number_of_entries_[section];
}
inline ConstantPoolArray::NumberOfEntries* small_entries() {
return number_of_entries(ConstantPoolArray::SMALL_SECTION);
}
extern const Instr kMovLrPc;
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
extern const Instr kLdrPpMask;
extern const Instr kLdrPpPattern;
extern const Instr kBlxRegMask;
extern const Instr kBlxRegPattern;
extern const Instr kBlxIp;
inline ConstantPoolArray::NumberOfEntries* extended_entries() {
return number_of_entries(ConstantPoolArray::EXTENDED_SECTION);
}
extern const Instr kMovMvnMask;
extern const Instr kMovMvnPattern;
extern const Instr kMovMvnFlip;
private:
struct ConstantPoolEntry {
ConstantPoolEntry(RelocInfo rinfo, ConstantPoolArray::LayoutSection section,
int merged_index)
: rinfo_(rinfo), section_(section), merged_index_(merged_index) {}
RelocInfo rinfo_;
ConstantPoolArray::LayoutSection section_;
int merged_index_;
};
extern const Instr kMovLeaveCCMask;
extern const Instr kMovLeaveCCPattern;
extern const Instr kMovwMask;
extern const Instr kMovwPattern;
extern const Instr kMovwLeaveCCFlip;
ConstantPoolArray::Type GetConstantPoolType(RelocInfo::Mode rmode);
extern const Instr kCmpCmnMask;
extern const Instr kCmpCmnPattern;
extern const Instr kCmpCmnFlip;
extern const Instr kAddSubFlip;
extern const Instr kAndBicFlip;
std::vector<ConstantPoolEntry> entries_;
ConstantPoolArray::LayoutSection current_section_;
ConstantPoolArray::NumberOfEntries number_of_entries_[2];
};
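The rebuilt ConstantPoolBuilder tracks two layout sections: entries land in the small (ldr-reachable) section until it fills, then spill into the extended section that is reached through the movw/movt plus ldr sequence seen earlier. A minimal stand-alone sketch of that bookkeeping (the capacity rule is simplified; the real builder counts entries per type via NumberOfEntries):

#include <cstddef>
#include <vector>

enum LayoutSection { SMALL_SECTION = 0, EXTENDED_SECTION = 1 };

class ConstantPoolBuilderSketch {
 public:
  // Mirrors AddEntry's new return value: the section the entry landed in.
  LayoutSection Add(int value, size_t small_capacity) {
    if (current_ == SMALL_SECTION &&
        entries_[SMALL_SECTION].size() >= small_capacity) {
      current_ = EXTENDED_SECTION;  // small section exhausted; spill over
    }
    entries_[current_].push_back(value);
    return current_;
  }
  LayoutSection current_section() const { return current_; }

 private:
  std::vector<int> entries_[2];
  LayoutSection current_ = SMALL_SECTION;
};

int main() {
  ConstantPoolBuilderSketch builder;
  for (int i = 0; i < 5; i++) builder.Add(i, 3);  // 3 small, 2 extended
  return builder.current_section() == EXTENDED_SECTION ? 0 : 1;
}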
struct VmovIndex {
unsigned char index;
@@ -816,13 +743,13 @@ class Assembler : public AssemblerBase {
// Manages the jump elimination optimization if the second parameter is true.
int branch_offset(Label* L, bool jump_elimination_allowed);
// Return the address in the constant pool of the code target address used by
// the branch/call instruction at pc, or the object in a mov.
INLINE(static Address target_pointer_address_at(Address pc));
// Returns true if the given pc address is the start of a constant pool load
// instruction sequence.
INLINE(static bool is_constant_pool_load(Address pc));
// Return the address in the constant pool of the code target address used by
// the branch/call instruction at pc, or the object in a mov.
INLINE(static Address target_constant_pool_address_at(
INLINE(static Address constant_pool_entry_address(
Address pc, ConstantPoolArray* constant_pool));
// Read/Modify the code target address in the branch/call instruction at pc.
@@ -830,16 +757,20 @@ class Assembler : public AssemblerBase {
ConstantPoolArray* constant_pool));
INLINE(static void set_target_address_at(Address pc,
ConstantPoolArray* constant_pool,
Address target));
Address target,
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED));
INLINE(static Address target_address_at(Address pc, Code* code)) {
ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
INLINE(static void set_target_address_at(Address pc,
Code* code,
Address target)) {
Address target,
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED)) {
ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
set_target_address_at(pc, constant_pool, target);
set_target_address_at(pc, constant_pool, target, icache_flush_mode);
}
// Return the code target address at a call site from the return address
@@ -850,6 +781,9 @@ class Assembler : public AssemblerBase {
// in the instruction stream that the call will return from.
INLINE(static Address return_address_from_call_start(Address pc));
// Return the code target address of the patch debug break slot
INLINE(static Address break_address_from_return_address(Address pc));
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -981,10 +915,8 @@ class Assembler : public AssemblerBase {
void mov_label_offset(Register dst, Label* label);
// ARMv7 instructions for loading a 32 bit immediate in two instructions.
// This may actually emit a different mov instruction, but on an ARMv7 it
// is guaranteed to only emit one instruction.
// The constant for movw and movt should be in the range 0-0xffff.
void movw(Register reg, uint32_t immediate, Condition cond = al);
// The constant for movt should be in the range 0-0xffff.
void movt(Register reg, uint32_t immediate, Condition cond = al);
void bic(Register dst, Register src1, const Operand& src2,
@@ -993,6 +925,35 @@ class Assembler : public AssemblerBase {
void mvn(Register dst, const Operand& src,
SBit s = LeaveCC, Condition cond = al);
// Shift instructions
void asr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
Condition cond = al) {
if (src2.is_reg()) {
mov(dst, Operand(src1, ASR, src2.rm()), s, cond);
} else {
mov(dst, Operand(src1, ASR, src2.immediate()), s, cond);
}
}
void lsl(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
Condition cond = al) {
if (src2.is_reg()) {
mov(dst, Operand(src1, LSL, src2.rm()), s, cond);
} else {
mov(dst, Operand(src1, LSL, src2.immediate()), s, cond);
}
}
void lsr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
Condition cond = al) {
if (src2.is_reg()) {
mov(dst, Operand(src1, LSR, src2.rm()), s, cond);
} else {
mov(dst, Operand(src1, LSR, src2.immediate()), s, cond);
}
}
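These helpers are the standard ARM assembler aliases: a call such as asr(r0, r1, Operand(3)) is emitted as mov r0, r1, ASR #3, which is why each body simply forwards to mov with a shifted Operand rather than encoding a separate instruction.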
// Multiply instructions
void mla(Register dst, Register src1, Register src2, Register srcA,
@@ -1004,6 +965,8 @@ class Assembler : public AssemblerBase {
void sdiv(Register dst, Register src1, Register src2,
Condition cond = al);
void udiv(Register dst, Register src1, Register src2, Condition cond = al);
void mul(Register dst, Register src1, Register src2,
SBit s = LeaveCC, Condition cond = al);
@@ -1361,7 +1324,7 @@ class Assembler : public AssemblerBase {
}
// Check whether an immediate fits an addressing mode 1 instruction.
bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
static bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
// Check whether an immediate fits an addressing mode 2 instruction.
bool ImmediateFitsAddrMode2Instruction(int32_t imm32);
@@ -1393,12 +1356,12 @@ class Assembler : public AssemblerBase {
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
void SetRecordedAstId(TypeFeedbackId ast_id) {
ASSERT(recorded_ast_id_.IsNone());
DCHECK(recorded_ast_id_.IsNone());
recorded_ast_id_ = ast_id;
}
TypeFeedbackId RecordedAstId() {
ASSERT(!recorded_ast_id_.IsNone());
DCHECK(!recorded_ast_id_.IsNone());
return recorded_ast_id_;
}
@@ -1416,9 +1379,9 @@ class Assembler : public AssemblerBase {
// function, compiled with and without debugger support (see for example
// Debug::PrepareForBreakPoints()).
// Compiling functions with debugger support generates additional code
// (Debug::GenerateSlot()). This may affect the emission of the constant
// pools and cause the version of the code with debugger support to have
// constant pools generated in different places.
// (DebugCodegen::GenerateSlot()). This may affect the emission of the
// constant pools and cause the version of the code with debugger support to
// have constant pools generated in different places.
// Recording the position and size of emitted constant pools allows us to
// correctly compute the offset mappings between the different versions of a
// function in all situations.
@@ -1453,6 +1416,10 @@ class Assembler : public AssemblerBase {
static int GetBranchOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
static bool IsVldrDRegisterImmediate(Instr instr);
static Instr GetConsantPoolLoadPattern();
static Instr GetConsantPoolLoadMask();
static bool IsLdrPpRegOffset(Instr instr);
static Instr GetLdrPpRegOffsetPattern();
static bool IsLdrPpImmediateOffset(Instr instr);
static bool IsVldrDPpImmediateOffset(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr);
@@ -1474,6 +1441,8 @@ class Assembler : public AssemblerBase {
static bool IsLdrRegFpNegOffset(Instr instr);
static bool IsLdrPcImmediateOffset(Instr instr);
static bool IsVldrDPcImmediateOffset(Instr instr);
static bool IsBlxReg(Instr instr);
static bool IsBlxIp(Instr instr);
static bool IsTstImmediate(Instr instr);
static bool IsCmpRegister(Instr instr);
static bool IsCmpImmediate(Instr instr);
@@ -1481,7 +1450,11 @@ class Assembler : public AssemblerBase {
static int GetCmpImmediateRawImmediate(Instr instr);
static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
static bool IsMovT(Instr instr);
static Instr GetMovTPattern();
static bool IsMovW(Instr instr);
static Instr GetMovWPattern();
static Instr EncodeMovwImmediate(uint32_t immediate);
static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate);
// Constants in pools are accessed via pc relative addressing, which can
// reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
@@ -1506,14 +1479,14 @@ class Assembler : public AssemblerBase {
// Generate the constant pool for the generated code.
void PopulateConstantPool(ConstantPoolArray* constant_pool);
bool can_use_constant_pool() const {
return is_constant_pool_available() && !constant_pool_full_;
}
bool is_constant_pool_available() const { return constant_pool_available_; }
void set_constant_pool_full() {
constant_pool_full_ = true;
bool use_extended_constant_pool() const {
return constant_pool_builder_.current_section() ==
ConstantPoolArray::EXTENDED_SECTION;
}
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
@@ -1547,10 +1520,10 @@ class Assembler : public AssemblerBase {
// Max pool start (if we need a jump and an alignment).
int start = pc_offset() + kInstrSize + 2 * kPointerSize;
// Check the constant pool hasn't been blocked for too long.
ASSERT((num_pending_32_bit_reloc_info_ == 0) ||
DCHECK((num_pending_32_bit_reloc_info_ == 0) ||
(start + num_pending_64_bit_reloc_info_ * kDoubleSize <
(first_const_pool_32_use_ + kMaxDistToIntPool)));
ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
DCHECK((num_pending_64_bit_reloc_info_ == 0) ||
(start < (first_const_pool_64_use_ + kMaxDistToFPPool)));
#endif
// Two cases:
@@ -1567,10 +1540,6 @@ class Assembler : public AssemblerBase {
(pc_offset() < no_const_pool_before_);
}
bool is_constant_pool_available() const {
return constant_pool_available_;
}
void set_constant_pool_available(bool available) {
constant_pool_available_ = available;
}
@@ -1640,9 +1609,6 @@ class Assembler : public AssemblerBase {
// Indicates whether the constant pool can be accessed, which is only possible
// if the pp register points to the current code object's constant pool.
bool constant_pool_available_;
// Indicates whether the constant pool is too full to accept new entries due
// to the ldr instruction's limitted immediate offset range.
bool constant_pool_full_;
// Code emission
inline void CheckBuffer();
@@ -1674,7 +1640,7 @@ class Assembler : public AssemblerBase {
// Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
void RecordRelocInfo(const RelocInfo& rinfo);
void ConstantPoolAddEntry(const RelocInfo& rinfo);
ConstantPoolArray::LayoutSection ConstantPoolAddEntry(const RelocInfo& rinfo);
friend class RelocInfo;
friend class CodePatcher;

138
deps/v8/src/arm/builtins-arm.cc

@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h"
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "codegen.h"
#include "debug.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "runtime.h"
#include "stub-cache.h"
#include "src/codegen.h"
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
#include "src/runtime.h"
#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@@ -40,7 +40,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
num_extra_args = 1;
__ push(r1);
} else {
ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
}
// JumpToExternalReference expects r0 to contain the number of arguments
@@ -303,7 +303,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
@@ -313,7 +313,6 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool count_constructions,
bool create_memento) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -323,14 +322,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- sp[...]: constructor arguments
// -----------------------------------
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
// Should never create mementos for api functions.
ASSERT(!is_api_function || !create_memento);
// Should never create mementos before slack tracking is finished.
ASSERT(!count_constructions || !create_memento);
DCHECK(!is_api_function || !create_memento);
Isolate* isolate = masm->isolate();
@@ -375,22 +368,24 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
__ b(eq, &rt_call);
if (count_constructions) {
if (!is_api_function) {
Label allocate;
MemOperand bit_field3 = FieldMemOperand(r2, Map::kBitField3Offset);
// Check if slack tracking is enabled.
__ ldr(r4, bit_field3);
__ DecodeField<Map::ConstructionCount>(r3, r4);
__ cmp(r3, Operand(JSFunction::kNoSlackTracking));
__ b(eq, &allocate);
// Decrease generous allocation count.
__ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
MemOperand constructor_count =
FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
__ ldrb(r4, constructor_count);
__ sub(r4, r4, Operand(1), SetCC);
__ strb(r4, constructor_count);
__ sub(r4, r4, Operand(1 << Map::ConstructionCount::kShift));
__ str(r4, bit_field3);
__ cmp(r3, Operand(JSFunction::kFinishSlackTracking));
__ b(ne, &allocate);
__ push(r1);
__ Push(r2, r1); // r1 = constructor
// The call will replace the stub, so the countdown is only done once.
__ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
__ pop(r2);
__ pop(r1);
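The slack-tracking countdown moves from a dedicated byte on the SharedFunctionInfo into the ConstructionCount bit field of the map's bit_field3 word, so the assembly above decodes, compares, and decrements it in place. A stand-alone sketch of that bit-field pattern (the field's shift and width here are assumptions for illustration):

#include <cstdint>
#include <cstdio>

// Hypothetical placement of ConstructionCount inside bit_field3.
const uint32_t kShift = 29;
const uint32_t kMask = 0x7u << kShift;

uint32_t DecodeConstructionCount(uint32_t bit_field3) {
  return (bit_field3 & kMask) >> kShift;  // what DecodeField<> extracts
}

uint32_t DecrementConstructionCount(uint32_t bit_field3) {
  // Subtracting 1 << kShift decrements the field without unpacking it,
  // matching "sub r4, r4, Operand(1 << Map::ConstructionCount::kShift)".
  return bit_field3 - (1u << kShift);
}

int main() {
  uint32_t bf3 = 5u << kShift;
  bf3 = DecrementConstructionCount(bf3);
  std::printf("%u\n", DecodeConstructionCount(bf3));  // prints 4
  return 0;
}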
@@ -416,11 +411,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r4: JSObject (not tagged)
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
__ mov(r5, r4);
ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
__ str(r2, MemOperand(r5, kPointerSize, PostIndex));
ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
DCHECK_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
DCHECK_EQ(2 * kPointerSize, JSObject::kElementsOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
// Fill all the in-object properties with the appropriate filler.
@@ -429,10 +424,19 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: object size (in words, including memento if create_memento)
// r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged)
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
if (count_constructions) {
DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
__ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
if (!is_api_function) {
Label no_inobject_slack_tracking;
// Check if slack tracking is enabled.
__ ldr(ip, FieldMemOperand(r2, Map::kBitField3Offset));
__ DecodeField<Map::ConstructionCount>(ip);
__ cmp(ip, Operand(JSFunction::kNoSlackTracking));
__ b(eq, &no_inobject_slack_tracking);
// Allocate object with a slack.
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
__ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
kBitsPerByte);
@@ -446,25 +450,26 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ InitializeFieldsWithFiller(r5, r0, r6);
// To allow for truncation.
__ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
__ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
__ InitializeFieldsWithFiller(r5, r0, r6);
} else if (create_memento) {
__ sub(r6, r3, Operand(AllocationMemento::kSize / kPointerSize));
__ add(r0, r4, Operand(r6, LSL, kPointerSizeLog2)); // End of object.
__ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
// Fill the remaining fields with one pointer filler map.
__ bind(&no_inobject_slack_tracking);
}
if (create_memento) {
__ sub(ip, r3, Operand(AllocationMemento::kSize / kPointerSize));
__ add(r0, r4, Operand(ip, LSL, kPointerSizeLog2)); // End of object.
__ InitializeFieldsWithFiller(r5, r0, r6);
// Fill in memento fields.
// r5: points to the allocated but uninitialized memento.
__ LoadRoot(r6, Heap::kAllocationMementoMapRootIndex);
ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
// Load the AllocationSite
__ ldr(r6, MemOperand(sp, 2 * kPointerSize));
ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
} else {
__ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
__ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
__ InitializeFieldsWithFiller(r5, r0, r6);
}
@@ -517,9 +522,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r5: FixedArray (not tagged)
__ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
__ mov(r2, r5);
ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
__ str(r6, MemOperand(r2, kPointerSize, PostIndex));
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
DCHECK_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ SmiTag(r0, r3);
__ str(r0, MemOperand(r2, kPointerSize, PostIndex));
@@ -530,7 +535,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r4: JSObject
// r5: FixedArray (not tagged)
__ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
{ Label loop, entry;
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(&entry);
@@ -573,9 +578,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(r1); // argument for Runtime_NewObject
if (create_memento) {
__ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
__ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
} else {
__ CallRuntime(Runtime::kHiddenNewObject, 1);
__ CallRuntime(Runtime::kNewObject, 1);
}
__ mov(r4, r0);
@ -655,7 +660,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
if (!is_api_function && !count_constructions) {
if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
@ -707,18 +712,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true, false, false);
Generate_JSConstructStubHelper(masm, true, false);
}
@ -809,7 +809,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
GenerateTailCallToReturnedCode(masm);
}
@ -823,7 +823,7 @@ static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
// Whether to compile in a background thread.
__ Push(masm->isolate()->factory()->ToBoolean(concurrent));
__ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
__ CallRuntime(Runtime::kCompileOptimized, 2);
// Restore receiver.
__ pop(r1);
}
@ -918,7 +918,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// registers.
__ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
__ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
__ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
__ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
}
@ -944,7 +944,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the function and deoptimization type to the runtime system.
__ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(r0);
__ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
__ CallRuntime(Runtime::kNotifyDeoptimized, 1);
}
// Get the full codegen state from the stack and untag it -> r6.
@ -1035,7 +1035,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ b(hs, &ok);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kHiddenStackGuard, 0);
__ CallRuntime(Runtime::kStackGuard, 0);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@ -1071,7 +1071,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// r1: function
Label shift_arguments;
__ mov(r4, Operand::Zero()); // indicate regular JS_FUNCTION
{ Label convert_to_object, use_global_receiver, patch_receiver;
{ Label convert_to_object, use_global_proxy, patch_receiver;
// Change context eagerly in case we need the global receiver.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
@ -1096,10 +1096,10 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
__ cmp(r2, r3);
__ b(eq, &use_global_receiver);
__ b(eq, &use_global_proxy);
__ LoadRoot(r3, Heap::kNullValueRootIndex);
__ cmp(r2, r3);
__ b(eq, &use_global_receiver);
__ b(eq, &use_global_proxy);
STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
__ CompareObjectType(r2, r3, r3, FIRST_SPEC_OBJECT_TYPE);
@ -1128,9 +1128,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ mov(r4, Operand::Zero());
__ jmp(&patch_receiver);
__ bind(&use_global_receiver);
__ bind(&use_global_proxy);
__ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
__ bind(&patch_receiver);
__ add(r3, sp, Operand(r0, LSL, kPointerSizeLog2));
@ -1284,7 +1284,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Compute the receiver.
// Do not transform the receiver for strict mode functions.
Label call_to_object, use_global_receiver;
Label call_to_object, use_global_proxy;
__ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
__ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
kSmiTagSize)));
@ -1298,10 +1298,10 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ JumpIfSmi(r0, &call_to_object);
__ LoadRoot(r1, Heap::kNullValueRootIndex);
__ cmp(r0, r1);
__ b(eq, &use_global_receiver);
__ b(eq, &use_global_proxy);
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
__ cmp(r0, r1);
__ b(eq, &use_global_receiver);
__ b(eq, &use_global_proxy);
// Check if the receiver is already a JavaScript object.
// r0: receiver
@ -1316,9 +1316,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ b(&push_receiver);
__ bind(&use_global_receiver);
__ bind(&use_global_proxy);
__ ldr(r0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalProxyOffset));
// Push the receiver.
// r0: receiver

860
deps/v8/src/arm/code-stubs-arm.cc

File diff suppressed because it is too large

69
deps/v8/src/arm/code-stubs-arm.h

@ -5,7 +5,7 @@
#ifndef V8_ARM_CODE_STUBS_ARM_H_
#define V8_ARM_CODE_STUBS_ARM_H_
#include "ic-inl.h"
#include "src/ic-inl.h"
namespace v8 {
namespace internal {
@ -27,8 +27,8 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
private:
SaveFPRegsMode save_doubles_;
Major MajorKey() { return StoreBufferOverflow; }
int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
Major MajorKey() const { return StoreBufferOverflow; }
int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};
@ -38,15 +38,12 @@ class StringHelper : public AllStatic {
// is allowed to spend extra time setting up conditions to make copying
// faster. Copying of overlapping regions is not supported.
// Dest register ends at the position after the last character written.
static void GenerateCopyCharactersLong(MacroAssembler* masm,
static void GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
int flags);
Register scratch,
String::Encoding encoding);
// Generate string hash.
@ -71,8 +68,8 @@ class SubStringStub: public PlatformCodeStub {
explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
private:
Major MajorKey() { return SubString; }
int MinorKey() { return 0; }
Major MajorKey() const { return SubString; }
int MinorKey() const { return 0; }
void Generate(MacroAssembler* masm);
};
@ -102,8 +99,8 @@ class StringCompareStub: public PlatformCodeStub {
Register scratch3);
private:
virtual Major MajorKey() { return StringCompare; }
virtual int MinorKey() { return 0; }
virtual Major MajorKey() const { return StringCompare; }
virtual int MinorKey() const { return 0; }
virtual void Generate(MacroAssembler* masm);
static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
@ -142,8 +139,8 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
class ScratchRegisterBits: public BitField<int, 8, 4> {};
Major MajorKey() { return WriteInt32ToHeapNumber; }
int MinorKey() {
Major MajorKey() const { return WriteInt32ToHeapNumber; }
int MinorKey() const {
// Encode the parameters in a unique 16 bit value.
return IntRegisterBits::encode(the_int_.code())
| HeapNumberRegisterBits::encode(the_heap_number_.code())
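For context on the MinorKey() bodies being constified above: they pack stub parameters with V8's BitField template. A minimal sketch of that pattern, using a hand-rolled stand-in rather than V8's actual header:

#include <cstdint>

template <typename T, int shift, int size>
struct BitField {  // illustrative stand-in, not v8::internal::BitField
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t bits) {
    return static_cast<T>((bits & kMask) >> shift);
  }
};

// WriteInt32ToHeapNumberStub packs three 4-bit register codes this way:
// IntRegisterBits (bits 0-3) | HeapNumberRegisterBits (bits 4-7) |
// ScratchRegisterBits (bits 8-11), yielding the unique 16-bit MinorKey.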
@ -183,12 +180,12 @@ class RecordWriteStub: public PlatformCodeStub {
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
DCHECK(Assembler::IsTstImmediate(masm->instr_at(pos)));
}
static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
DCHECK(Assembler::IsBranch(masm->instr_at(pos)));
}
static Mode GetMode(Code* stub) {
@ -200,13 +197,13 @@ class RecordWriteStub: public PlatformCodeStub {
return INCREMENTAL;
}
ASSERT(Assembler::IsTstImmediate(first_instruction));
DCHECK(Assembler::IsTstImmediate(first_instruction));
if (Assembler::IsBranch(second_instruction)) {
return INCREMENTAL_COMPACTION;
}
ASSERT(Assembler::IsTstImmediate(second_instruction));
DCHECK(Assembler::IsTstImmediate(second_instruction));
return STORE_BUFFER_ONLY;
}
@ -217,22 +214,23 @@ class RecordWriteStub: public PlatformCodeStub {
stub->instruction_size());
switch (mode) {
case STORE_BUFFER_ONLY:
ASSERT(GetMode(stub) == INCREMENTAL ||
DCHECK(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
PatchBranchIntoNop(&masm, 0);
PatchBranchIntoNop(&masm, Assembler::kInstrSize);
break;
case INCREMENTAL:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
PatchNopIntoBranch(&masm, 0);
break;
case INCREMENTAL_COMPACTION:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
PatchNopIntoBranch(&masm, Assembler::kInstrSize);
break;
}
ASSERT(GetMode(stub) == mode);
CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
DCHECK(GetMode(stub) == mode);
CpuFeatures::FlushICache(stub->instruction_start(),
2 * Assembler::kInstrSize);
}
private:
@ -247,12 +245,12 @@ class RecordWriteStub: public PlatformCodeStub {
: object_(object),
address_(address),
scratch0_(scratch0) {
ASSERT(!AreAliased(scratch0, object, address, no_reg));
DCHECK(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
}
void Save(MacroAssembler* masm) {
ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
// We don't have to save scratch0_ because it was given to us as
// a scratch register.
masm->push(scratch1_);
@ -307,9 +305,9 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }
Major MajorKey() const { return RecordWrite; }
int MinorKey() {
int MinorKey() const {
return ObjectBits::encode(object_.code()) |
ValueBits::encode(value_.code()) |
AddressBits::encode(address_.code()) |
@ -349,8 +347,8 @@ class DirectCEntryStub: public PlatformCodeStub {
void GenerateCall(MacroAssembler* masm, Register target);
private:
Major MajorKey() { return DirectCEntry; }
int MinorKey() { return 0; }
Major MajorKey() const { return DirectCEntry; }
int MinorKey() const { return 0; }
bool NeedsImmovableCode() { return true; }
};
@ -395,11 +393,9 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
Major MajorKey() { return NameDictionaryLookup; }
Major MajorKey() const { return NameDictionaryLookup; }
int MinorKey() {
return LookupModeBits::encode(mode_);
}
int MinorKey() const { return LookupModeBits::encode(mode_); }
class LookupModeBits: public BitField<LookupMode, 0, 1> {};
@ -407,8 +403,9 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
};
struct PlatformCallInterfaceDescriptor {
explicit PlatformCallInterfaceDescriptor(
class PlatformInterfaceDescriptor {
public:
explicit PlatformInterfaceDescriptor(
TargetAddressStorageMode storage_mode)
: storage_mode_(storage_mode) { }

376
deps/v8/src/arm/codegen-arm.cc

@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h"
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "codegen.h"
#include "macro-assembler.h"
#include "simulator-arm.h"
#include "src/arm/simulator-arm.h"
#include "src/codegen.h"
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
@ -29,7 +29,8 @@ double fast_exp_simulator(double x) {
UnaryMathFunction CreateExpFunction() {
if (!FLAG_fast_math) return &std::exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
@ -64,10 +65,10 @@ UnaryMathFunction CreateExpFunction() {
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
DCHECK(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
CpuFeatures::FlushICache(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunction>(buffer);
@ -78,14 +79,14 @@ UnaryMathFunction CreateExpFunction() {
}
#if defined(V8_HOST_ARCH_ARM)
OS::MemCopyUint8Function CreateMemCopyUint8Function(
OS::MemCopyUint8Function stub) {
MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return stub;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
@ -224,24 +225,25 @@ OS::MemCopyUint8Function CreateMemCopyUint8Function(
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
DCHECK(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<OS::MemCopyUint8Function>(buffer);
CpuFeatures::FlushICache(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
// Convert 8 to 16. The number of characters to copy must be at least 8.

OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
OS::MemCopyUint16Uint8Function stub) {
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
MemCopyUint16Uint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return stub;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
@ -312,10 +314,10 @@ OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
CodeDesc desc;
masm.GetCode(&desc);
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
CpuFeatures::FlushICache(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<OS::MemCopyUint16Uint8Function>(buffer);
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
#endif
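Every helper in this file follows the same allocate/emit/flush/protect shape; an outline in the diff's own names (a paraphrase of the code above, not additional source):

// 1. base::OS::Allocate(1 * KB, &actual_size, true)   -- writable buffer
// 2. emit code via a MacroAssembler bound to that buffer
// 3. masm.GetCode(&desc)                               -- finalize CodeDesc
// 4. CpuFeatures::FlushICache(buffer, actual_size)     -- i-cache coherence
// 5. base::OS::ProtectCode(buffer, actual_size)        -- make it executable
// 6. FUNCTION_CAST<...>(buffer)                        -- call the new code
// On failure (buffer == NULL) each helper falls back to the C++ routine
// (std::exp, std::sqrt, or the stub argument).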
@ -325,7 +327,8 @@ UnaryMathFunction CreateSqrtFunction() {
return &std::sqrt;
#else
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &std::sqrt;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
@ -337,10 +340,10 @@ UnaryMathFunction CreateSqrtFunction() {
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
DCHECK(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
CpuFeatures::FlushICache(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}
@ -353,14 +356,14 @@ UnaryMathFunction CreateSqrtFunction() {
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterFrame(StackFrame::INTERNAL);
ASSERT(!masm->has_frame());
DCHECK(!masm->has_frame());
masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveFrame(StackFrame::INTERNAL);
ASSERT(masm->has_frame());
DCHECK(masm->has_frame());
masm->set_has_frame(false);
}
@ -371,26 +374,28 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ ACCESS_MASM(masm)
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm, AllocationSiteMode mode,
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* allocation_memento_found) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- r3 : target map, scratch for subsequent call
// -- r4 : scratch (elements)
// -----------------------------------
Register scratch_elements = r4;
DCHECK(!AreAliased(receiver, key, value, target_map,
scratch_elements));
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
__ JumpIfJSArrayHasAllocationMemento(r2, r4, allocation_memento_found);
DCHECK(allocation_memento_found != NULL);
__ JumpIfJSArrayHasAllocationMemento(
receiver, scratch_elements, allocation_memento_found);
}
// Set transitioned map.
__ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
__ RecordWriteField(r2,
__ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
r3,
target_map,
r9,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
@ -400,87 +405,103 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- r3 : target map, scratch for subsequent call
// -- r4 : scratch (elements)
// -----------------------------------
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
// Register lr contains the return address.
Label loop, entry, convert_hole, gc_required, only_change_map, done;
Register elements = r4;
Register length = r5;
Register array = r6;
Register array_end = array;
// target_map parameter can be clobbered.
Register scratch1 = target_map;
Register scratch2 = r9;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map,
elements, length, array, scratch2));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
__ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
__ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ b(eq, &only_change_map);
__ push(lr);
__ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
// r5: number of elements (smi-tagged)
__ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
// length: number of elements (smi-tagged)
// Allocate new FixedDoubleArray.
// Use lr as a temporary register.
__ mov(lr, Operand(r5, LSL, 2));
__ mov(lr, Operand(length, LSL, 2));
__ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
__ Allocate(lr, r6, r4, r9, &gc_required, DOUBLE_ALIGNMENT);
// r6: destination FixedDoubleArray, not tagged as heap object.
__ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
__ Allocate(lr, array, elements, scratch2, &gc_required, DOUBLE_ALIGNMENT);
// array: destination FixedDoubleArray, not tagged as heap object.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
// r4: source FixedArray.
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
__ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
__ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
__ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
// Update receiver's map.
__ str(r9, MemOperand(r6, HeapObject::kMapOffset));
__ str(scratch2, MemOperand(array, HeapObject::kMapOffset));
__ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
__ RecordWriteField(r2,
__ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
r3,
r9,
target_map,
scratch2,
kLRHasBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
__ add(r3, r6, Operand(kHeapObjectTag));
__ str(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
__ RecordWriteField(r2,
__ add(scratch1, array, Operand(kHeapObjectTag));
__ str(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver,
JSObject::kElementsOffset,
r3,
r9,
scratch1,
scratch2,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Prepare for conversion loop.
__ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(r9, r6, Operand(FixedDoubleArray::kHeaderSize));
__ add(r6, r9, Operand(r5, LSL, 2));
__ mov(r4, Operand(kHoleNanLower32));
__ mov(r5, Operand(kHoleNanUpper32));
// r3: begin of source FixedArray element fields, not tagged
// r4: kHoleNanLower32
// r5: kHoleNanUpper32
// r6: end of destination FixedDoubleArray, not tagged
// r9: begin of FixedDoubleArray element fields, not tagged
__ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
__ add(array_end, scratch2, Operand(length, LSL, 2));
// Repurpose registers no longer in use.
Register hole_lower = elements;
Register hole_upper = length;
__ mov(hole_lower, Operand(kHoleNanLower32));
__ mov(hole_upper, Operand(kHoleNanUpper32));
// scratch1: begin of source FixedArray element fields, not tagged
// hole_lower: kHoleNanLower32
// hole_upper: kHoleNanUpper32
// array_end: end of destination FixedDoubleArray, not tagged
// scratch2: begin of FixedDoubleArray element fields, not tagged
__ b(&entry);
__ bind(&only_change_map);
__ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
__ RecordWriteField(r2,
__ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
r3,
r9,
target_map,
scratch2,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
@ -494,15 +515,15 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Convert and copy elements.
__ bind(&loop);
__ ldr(lr, MemOperand(r3, 4, PostIndex));
__ ldr(lr, MemOperand(scratch1, 4, PostIndex));
// lr: current element
__ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);
// Normal smi, convert to double and store.
__ vmov(s0, lr);
__ vcvt_f64_s32(d0, s0);
__ vstr(d0, r9, 0);
__ add(r9, r9, Operand(8));
__ vstr(d0, scratch2, 0);
__ add(scratch2, scratch2, Operand(8));
__ b(&entry);
// Hole found, store the-hole NaN.
@ -514,10 +535,10 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
__ Assert(eq, kObjectFoundInSmiOnlyArray);
}
__ Strd(r4, r5, MemOperand(r9, 8, PostIndex));
__ Strd(hole_lower, hole_upper, MemOperand(scratch2, 8, PostIndex));
__ bind(&entry);
__ cmp(r9, r6);
__ cmp(scratch2, array_end);
__ b(lt, &loop);
__ pop(lr);
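The hole store above writes a specific NaN bit pattern as two 32-bit halves (Strd puts the lower word at the lower address on little-endian ARM). A self-contained sketch of how such a marker is assembled; the constants are parameterized because kHoleNanLower32/kHoleNanUpper32 are defined elsewhere in V8:

#include <cstdint>
#include <cstring>

double MakeHoleNaN(uint32_t lower, uint32_t upper) {
  uint64_t bits = (static_cast<uint64_t>(upper) << 32) | lower;
  double d;
  std::memcpy(&d, &bits, sizeof d);  // reinterpret the bit pattern
  return d;  // a NaN the runtime treats as "no element here"
}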
@ -526,80 +547,104 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- r3 : target map, scratch for subsequent call
// -- r4 : scratch (elements)
// -----------------------------------
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
// Register lr contains the return address.
Label entry, loop, convert_hole, gc_required, only_change_map;
Register elements = r4;
Register array = r6;
Register length = r5;
Register scratch = r9;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map,
elements, array, length, scratch));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
__ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
__ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ b(eq, &only_change_map);
__ push(lr);
__ Push(r3, r2, r1, r0);
__ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
// r4: source FixedDoubleArray
// r5: number of elements (smi-tagged)
__ Push(target_map, receiver, key, value);
__ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
// elements: source FixedDoubleArray
// length: number of elements (smi-tagged)
// Allocate new FixedArray.
__ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
__ add(r0, r0, Operand(r5, LSL, 1));
__ Allocate(r0, r6, r3, r9, &gc_required, NO_ALLOCATION_FLAGS);
// r6: destination FixedArray, not tagged as heap object
// Re-use value and target_map registers, as they have been saved on the
// stack.
Register array_size = value;
Register allocate_scratch = target_map;
__ mov(array_size, Operand(FixedDoubleArray::kHeaderSize));
__ add(array_size, array_size, Operand(length, LSL, 1));
__ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
NO_ALLOCATION_FLAGS);
// array: destination FixedArray, not tagged as heap object
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
__ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
__ str(r9, MemOperand(r6, HeapObject::kMapOffset));
__ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
__ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
__ str(scratch, MemOperand(array, HeapObject::kMapOffset));
// Prepare for conversion loop.
__ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
__ add(r3, r6, Operand(FixedArray::kHeaderSize));
__ add(r6, r6, Operand(kHeapObjectTag));
__ add(r5, r3, Operand(r5, LSL, 1));
__ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
// Using offsetted addresses in r4 to fully take advantage of post-indexing.
// r3: begin of destination FixedArray element fields, not tagged
// r4: begin of source FixedDoubleArray element fields, not tagged, +4
// r5: end of destination FixedArray, not tagged
// r6: destination FixedArray
// r9: heap number map
Register src_elements = elements;
Register dst_elements = target_map;
Register dst_end = length;
Register heap_number_map = scratch;
__ add(src_elements, elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
__ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
__ add(array, array, Operand(kHeapObjectTag));
__ add(dst_end, dst_elements, Operand(length, LSL, 1));
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
// Using offsetted addresses in src_elements to fully take advantage of
// post-indexing.
// dst_elements: begin of destination FixedArray element fields, not tagged
// src_elements: begin of source FixedDoubleArray element fields,
// not tagged, +4
// dst_end: end of destination FixedArray, not tagged
// array: destination FixedArray
// heap_number_map: heap number map
__ b(&entry);
// Call into runtime if GC is required.
__ bind(&gc_required);
__ Pop(r3, r2, r1, r0);
__ Pop(target_map, receiver, key, value);
__ pop(lr);
__ b(fail);
__ bind(&loop);
__ ldr(r1, MemOperand(r4, 8, PostIndex));
// r1: current element's upper 32 bit
// r4: address of next element's upper 32 bit
__ cmp(r1, Operand(kHoleNanUpper32));
Register upper_bits = key;
__ ldr(upper_bits, MemOperand(src_elements, 8, PostIndex));
// upper_bits: current element's upper 32 bit
// src_elements: address of next element's upper 32 bit
__ cmp(upper_bits, Operand(kHoleNanUpper32));
__ b(eq, &convert_hole);
// Non-hole double, copy value into a heap number.
__ AllocateHeapNumber(r2, r0, lr, r9, &gc_required);
// r2: new heap number
__ ldr(r0, MemOperand(r4, 12, NegOffset));
__ Strd(r0, r1, FieldMemOperand(r2, HeapNumber::kValueOffset));
__ mov(r0, r3);
__ str(r2, MemOperand(r3, 4, PostIndex));
__ RecordWrite(r6,
r0,
r2,
Register heap_number = receiver;
Register scratch2 = value;
__ AllocateHeapNumber(heap_number, scratch2, lr, heap_number_map,
&gc_required);
// heap_number: new heap number
__ ldr(scratch2, MemOperand(src_elements, 12, NegOffset));
__ Strd(scratch2, upper_bits,
FieldMemOperand(heap_number, HeapNumber::kValueOffset));
__ mov(scratch2, dst_elements);
__ str(heap_number, MemOperand(dst_elements, 4, PostIndex));
__ RecordWrite(array,
scratch2,
heap_number,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
@ -608,20 +653,20 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
__ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
__ str(r0, MemOperand(r3, 4, PostIndex));
__ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
__ str(scratch2, MemOperand(dst_elements, 4, PostIndex));
__ bind(&entry);
__ cmp(r3, r5);
__ cmp(dst_elements, dst_end);
__ b(lt, &loop);
__ Pop(r3, r2, r1, r0);
__ Pop(target_map, receiver, key, value);
// Replace receiver's backing store with newly created and filled FixedArray.
__ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
__ RecordWriteField(r2,
__ str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver,
JSObject::kElementsOffset,
r6,
r9,
array,
scratch,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
@ -630,11 +675,11 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ bind(&only_change_map);
// Update receiver's map.
__ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
__ RecordWriteField(r2,
__ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
r3,
r9,
target_map,
scratch,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
@ -709,7 +754,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Assert(eq, kExternalStringExpectedButNotFound);
}
// Rule out short external strings.
STATIC_CHECK(kShortExternalStringTag != 0);
STATIC_ASSERT(kShortExternalStringTag != 0);
__ tst(result, Operand(kShortExternalStringMask));
__ b(ne, call_runtime);
__ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
@ -742,16 +787,17 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
Register temp1,
Register temp2,
Register temp3) {
ASSERT(!input.is(result));
ASSERT(!input.is(double_scratch1));
ASSERT(!input.is(double_scratch2));
ASSERT(!result.is(double_scratch1));
ASSERT(!result.is(double_scratch2));
ASSERT(!double_scratch1.is(double_scratch2));
ASSERT(!temp1.is(temp2));
ASSERT(!temp1.is(temp3));
ASSERT(!temp2.is(temp3));
ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
DCHECK(!input.is(result));
DCHECK(!input.is(double_scratch1));
DCHECK(!input.is(double_scratch2));
DCHECK(!result.is(double_scratch1));
DCHECK(!result.is(double_scratch2));
DCHECK(!double_scratch1.is(double_scratch2));
DCHECK(!temp1.is(temp2));
DCHECK(!temp1.is(temp3));
DCHECK(!temp2.is(temp3));
DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
DCHECK(!masm->serializer_enabled()); // External references not serializable.
Label zero, infinity, done;
@ -782,7 +828,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ vmul(result, result, double_scratch2);
__ vsub(result, result, double_scratch1);
// Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1.
ASSERT(*reinterpret_cast<double*>
DCHECK(*reinterpret_cast<double*>
(ExternalReference::math_exp_constants(8).address()) == 1);
__ vmov(double_scratch2, 1);
__ vadd(result, result, double_scratch2);
@ -823,7 +869,7 @@ static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
#endif
CodeAgingHelper::CodeAgingHelper() {
ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// Since patcher is a large object, allocate it dynamically when needed,
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
@ -849,7 +895,7 @@ bool CodeAgingHelper::IsOld(byte* candidate) const {
bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
bool result = isolate->code_aging_helper()->IsYoung(sequence);
ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
return result;
}
@ -875,7 +921,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
CPU::FlushICache(sequence, young_length);
CpuFeatures::FlushICache(sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);

4
deps/v8/src/arm/codegen-arm.h

@ -5,8 +5,8 @@
#ifndef V8_ARM_CODEGEN_ARM_H_
#define V8_ARM_CODEGEN_ARM_H_
#include "ast.h"
#include "ic-inl.h"
#include "src/ast.h"
#include "src/ic-inl.h"
namespace v8 {
namespace internal {

8
deps/v8/src/arm/constants-arm.cc

@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h"
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "constants-arm.h"
#include "src/arm/constants-arm.h"
namespace v8 {
@ -28,7 +28,7 @@ double Instruction::DoubleImmedVmov() const {
uint64_t imm = high16 << 48;
double d;
OS::MemCopy(&d, &imm, 8);
memcpy(&d, &imm, 8);
return d;
}
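The switch from OS::MemCopy to plain memcpy above keeps the standard idiom for reinterpreting a 64-bit pattern as a double without violating strict aliasing; a self-contained sketch:

#include <cstdint>
#include <cstring>

double BitsToDouble(uint64_t bits) {
  double d;
  std::memcpy(&d, &bits, sizeof d);  // compilers lower this to a single move
  return d;
}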
@ -81,7 +81,7 @@ const char* VFPRegisters::names_[kNumVFPRegisters] = {
const char* VFPRegisters::Name(int reg, bool is_double) {
ASSERT((0 <= reg) && (reg < kNumVFPRegisters));
DCHECK((0 <= reg) && (reg < kNumVFPRegisters));
return names_[reg + (is_double ? kNumVFPSingleRegisters : 0)];
}

71
deps/v8/src/arm/constants-arm.h

@ -19,11 +19,11 @@ const int kConstantPoolMarkerMask = 0xfff000f0;
const int kConstantPoolMarker = 0xe7f000f0;
const int kConstantPoolLengthMaxMask = 0xffff;
inline int EncodeConstantPoolLength(int length) {
ASSERT((length & kConstantPoolLengthMaxMask) == length);
DCHECK((length & kConstantPoolLengthMaxMask) == length);
return ((length & 0xfff0) << 4) | (length & 0xf);
}
inline int DecodeConstantPoolLength(int instr) {
ASSERT((instr & kConstantPoolMarkerMask) == kConstantPoolMarker);
DCHECK((instr & kConstantPoolMarkerMask) == kConstantPoolMarker);
return ((instr >> 4) & 0xfff0) | (instr & 0xf);
}
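The encode/decode pair above splits the length around instruction bits owned by the marker: the low nibble stays in bits 0-3 while the remaining bits shift up to bits 8-19, skipping bits 4-7 of kConstantPoolMarker. A round-trip check, assuming lengths up to kConstantPoolLengthMaxMask (0xffff):

#include <cassert>

int Encode(int length) { return ((length & 0xfff0) << 4) | (length & 0xf); }
int Decode(int instr) { return ((instr >> 4) & 0xfff0) | (instr & 0xf); }

int main() {
  assert(Decode(Encode(0x1234)) == 0x1234);  // 0x1234 -> 0x12304 -> 0x1234
  return 0;
}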
@ -84,13 +84,13 @@ enum Condition {
inline Condition NegateCondition(Condition cond) {
ASSERT(cond != al);
DCHECK(cond != al);
return static_cast<Condition>(cond ^ ne);
}
// Corresponds to transposing the operands of a comparison.
inline Condition ReverseCondition(Condition cond) {
// Commute a condition such that {a cond b == b cond' a}.
inline Condition CommuteCondition(Condition cond) {
switch (cond) {
case lo:
return hi;
@ -110,7 +110,7 @@ inline Condition ReverseCondition(Condition cond) {
return ge;
default:
return cond;
};
}
}
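The rename from ReverseCondition to CommuteCondition above matches what the function actually does: it swaps the operands of the comparison rather than negating the predicate. An illustrative (non-V8) contrast:

// Commute swaps operands:   (a < b)  ==  (b > a)   => lt maps to gt
// Negate inverts the test:  !(a < b) ==  (a >= b)  => lt maps to ge
bool commuted_holds(int a, int b) { return (a < b) == (b > a); }   // always true
bool negated_holds(int a, int b) { return !(a < b) == (a >= b); }  // always true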
@ -405,64 +405,6 @@ enum Hint { no_hint };
inline Hint NegateHint(Hint ignored) { return no_hint; }
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
// These constants are declared in assembler-arm.cc, as they use named registers
// and other constants.
// add(sp, sp, 4) instruction (aka Pop())
extern const Instr kPopInstruction;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
extern const Instr kPushRegPattern;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
extern const Instr kPopRegPattern;
// mov lr, pc
extern const Instr kMovLrPc;
// ldr rd, [pc, #offset]
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
// vldr dd, [pc, #offset]
extern const Instr kVldrDPCMask;
extern const Instr kVldrDPCPattern;
// blxcc rm
extern const Instr kBlxRegMask;
extern const Instr kBlxRegPattern;
extern const Instr kMovMvnMask;
extern const Instr kMovMvnPattern;
extern const Instr kMovMvnFlip;
extern const Instr kMovLeaveCCMask;
extern const Instr kMovLeaveCCPattern;
extern const Instr kMovwMask;
extern const Instr kMovwPattern;
extern const Instr kMovwLeaveCCFlip;
extern const Instr kCmpCmnMask;
extern const Instr kCmpCmnPattern;
extern const Instr kCmpCmnFlip;
extern const Instr kAddSubFlip;
extern const Instr kAndBicFlip;
// A mask for the Rd register for push, pop, ldr, str instructions.
extern const Instr kLdrRegFpOffsetPattern;
extern const Instr kStrRegFpOffsetPattern;
extern const Instr kLdrRegFpNegOffsetPattern;
extern const Instr kStrRegFpNegOffsetPattern;
extern const Instr kLdrStrInstrTypeMask;
extern const Instr kLdrStrInstrArgumentMask;
extern const Instr kLdrStrOffsetMask;
// -----------------------------------------------------------------------------
// Instruction abstraction.
@ -626,6 +568,7 @@ class Instruction {
inline int Immed4Value() const { return Bits(19, 16); }
inline int ImmedMovwMovtValue() const {
return Immed4Value() << 12 | Offset12Value(); }
DECLARE_STATIC_ACCESSOR(ImmedMovwMovtValue);
// Fields used in Load/Store instructions
inline int PUValue() const { return Bits(24, 23); }
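ImmedMovwMovtValue() above reassembles the 16-bit movw/movt immediate, which ARM encodes split as imm4 (bits 19-16) and imm12 (bits 11-0). A standalone decoding sketch:

#include <cstdint>

int ImmedMovwMovt(uint32_t instr) {
  int imm4 = (instr >> 16) & 0xf;  // Immed4Value(): Bits(19, 16)
  int imm12 = instr & 0xfff;       // Offset12Value(): Bits(11, 0)
  return (imm4 << 12) | imm12;     // the full 16-bit immediate
}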

72
deps/v8/src/arm/cpu-arm.cc

@ -12,22 +12,20 @@
#endif
#endif
#include "v8.h"
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "cpu.h"
#include "macro-assembler.h"
#include "simulator.h" // for cache flushing.
#include "src/assembler.h"
#include "src/macro-assembler.h"
#include "src/simulator.h" // for cache flushing.
namespace v8 {
namespace internal {
void CPU::FlushICache(void* start, size_t size) {
// Nothing to do flushing no instructions.
if (size == 0) {
return;
}
void CpuFeatures::FlushICache(void* start, size_t size) {
if (size == 0) return;
#if defined(USE_SIMULATOR)
// Not generating ARM instructions for C-code. This means that we are
@ -36,47 +34,31 @@ void CPU::FlushICache(void* start, size_t size) {
// None of this code ends up in the snapshot so there are no issues
// around whether or not to generate the code when building snapshots.
Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
#elif V8_OS_QNX
msync(start, size, MS_SYNC | MS_INVALIDATE_ICACHE);
#else
// Ideally, we would call
// syscall(__ARM_NR_cacheflush, start,
// reinterpret_cast<intptr_t>(start) + size, 0);
// however, syscall(int, ...) is not supported on all platforms, especially
// not when using EABI, so we call the __ARM_NR_cacheflush syscall directly.
register uint32_t beg asm("r0") = reinterpret_cast<uint32_t>(start);
register uint32_t end asm("r1") = beg + size;
register uint32_t flg asm("r2") = 0;
register uint32_t beg asm("a1") = reinterpret_cast<uint32_t>(start);
register uint32_t end asm("a2") =
reinterpret_cast<uint32_t>(start) + size;
register uint32_t flg asm("a3") = 0;
#if defined (__arm__) && !defined(__thumb__)
// __arm__ may be defined in thumb mode.
register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
asm volatile(
"svc 0x0"
: "=r" (beg)
: "0" (beg), "r" (end), "r" (flg), "r" (scno));
#else
// r7 is reserved by the EABI in thumb mode.
asm volatile(
"@ Enter ARM Mode \n\t"
"adr r3, 1f \n\t"
"bx r3 \n\t"
".ALIGN 4 \n\t"
".ARM \n"
"1: push {r7} \n\t"
"mov r7, %4 \n\t"
"svc 0x0 \n\t"
"pop {r7} \n\t"
"@ Enter THUMB Mode\n\t"
"adr r3, 2f+1 \n\t"
"bx r3 \n\t"
".THUMB \n"
"2: \n\t"
: "=r" (beg)
: "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
: "r3");
#endif
// This assembly works for both ARM and Thumb targets.
// Preserve r7; it is callee-saved, and GCC uses it as a frame pointer for
// Thumb targets.
" push {r7}\n"
// r0 = beg
// r1 = end
// r2 = flags (0)
" ldr r7, =%c[scno]\n" // r7 = syscall number
" svc 0\n"
" pop {r7}\n"
:
: "r" (beg), "r" (end), "r" (flg), [scno] "i" (__ARM_NR_cacheflush)
: "memory");
#endif
}
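The rewritten FlushICache body hand-rolls the cacheflush system call so the same assembly works for ARM and Thumb, since (as the surrounding comment notes) syscall(int, ...) is not usable everywhere. As a rough libc-level equivalent, valid only on ARM Linux where __ARM_NR_cacheflush exists:

#include <cstddef>
#include <cstdint>
#include <unistd.h>
#include <sys/syscall.h>  // pulls in __ARM_NR_cacheflush on ARM Linux

void FlushRange(void* start, size_t size) {
  syscall(__ARM_NR_cacheflush,
          reinterpret_cast<uintptr_t>(start),
          reinterpret_cast<uintptr_t>(start) + size,
          0);  // flags: must be zero
}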

146
deps/v8/src/arm/debug-arm.cc

@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h"
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "codegen.h"
#include "debug.h"
#include "src/codegen.h"
#include "src/debug.h"
namespace v8 {
namespace internal {
@ -27,7 +27,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
// ldr ip, [pc, #0]
// blx ip
// <debug break return code entry point address>
// bktp 0
// bkpt 0
CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
@ -47,20 +47,20 @@ void BreakLocationIterator::ClearDebugBreakAtReturn() {
// A debug break in the frame exit code is identified by the JS frame exit code
// having been patched with a call instruction.
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
return rinfo->IsPatchedReturnSequence();
}
bool BreakLocationIterator::IsDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
DCHECK(IsDebugBreakSlot());
// Check whether the debug break slot instructions have been patched.
return rinfo()->IsPatchedDebugBreakSlotSequence();
}
void BreakLocationIterator::SetDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
DCHECK(IsDebugBreakSlot());
// Patch the code changing the debug break slot code from
// mov r2, r2
// mov r2, r2
@ -78,13 +78,11 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
void BreakLocationIterator::ClearDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
DCHECK(IsDebugBreakSlot());
rinfo()->PatchCode(original_rinfo()->pc(),
Assembler::kDebugBreakSlotInstructions);
}
const bool Debug::FramePaddingLayout::kIsSupported = false;
#define __ ACCESS_MASM(masm)
@ -95,12 +93,20 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Load padding words on stack.
__ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue)));
for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
__ push(ip);
}
__ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
__ push(ip);
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non-object values
// are stored as smis, which the GC leaves untouched.
ASSERT((object_regs & ~kJSCallerSaved) == 0);
ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
ASSERT((object_regs & non_object_regs) == 0);
DCHECK((object_regs & ~kJSCallerSaved) == 0);
DCHECK((non_object_regs & ~kJSCallerSaved) == 0);
DCHECK((object_regs & non_object_regs) == 0);
if ((object_regs | non_object_regs) != 0) {
for (int i = 0; i < kNumJSCallerSaved; i++) {
int r = JSCallerSavedCode(i);
@ -141,6 +147,9 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
}
}
// Don't bother removing padding bytes pushed on the stack
// as the frame is going to be restored right away.
// Leave the internal frame.
}
@ -148,14 +157,14 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
ExternalReference after_break_target =
ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
ExternalReference::debug_after_break_target_address(masm->isolate());
__ mov(ip, Operand(after_break_target));
__ ldr(ip, MemOperand(ip));
__ Jump(ip);
}
void Debug::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
// Register state for CallICStub
// ----------- S t a t e -------------
// -- r1 : function
@ -165,54 +174,41 @@ void Debug::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
}
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-arm.cc).
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -- r0 : receiver
// -- [sp] : receiver
// -----------------------------------
// Registers r0 and r2 contain objects that need to be pushed on the
// expression stack of the fake JS frame.
Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit(), 0);
Register receiver = LoadIC::ReceiverRegister();
Register name = LoadIC::NameRegister();
Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0);
}
void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC store (from ic-arm.cc).
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
// -- r2 : name
// -- lr : return address
// -----------------------------------
// Registers r0, r1, and r2 contain objects that need to be pushed on the
// expression stack of the fake JS frame.
Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0);
Register receiver = StoreIC::ReceiverRegister();
Register name = StoreIC::NameRegister();
Register value = StoreIC::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.bit() | name.bit() | value.bit(), 0);
}
void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit(), 0);
void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for keyed IC load (from ic-arm.cc).
GenerateLoadICDebugBreak(masm);
}
void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0);
void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC keyed store call (from ic-arm.cc).
Register receiver = KeyedStoreIC::ReceiverRegister();
Register name = KeyedStoreIC::NameRegister();
Register value = KeyedStoreIC::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.bit() | name.bit() | value.bit(), 0);
}
void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
// Register state for CompareNil IC
// ----------- S t a t e -------------
// -- r0 : value
@ -221,7 +217,7 @@ void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
}
void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that r0 is TOS which
// is an object - this is not generally the case so this should be used with
// care.
@ -229,7 +225,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
}
void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-arm.cc).
// ----------- S t a t e -------------
// -- r1 : function
@ -238,7 +234,7 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
}
void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-arm.cc)
// ----------- S t a t e -------------
// -- r0 : number of arguments (not smi)
@ -248,7 +244,8 @@ void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
}
void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-arm.cc)
// ----------- S t a t e -------------
// -- r0 : number of arguments (not smi)
@ -260,7 +257,7 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
}
void Debug::GenerateSlot(MacroAssembler* masm) {
void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
// Generate enough nop's to make space for a call instruction. Avoid emitting
// the constant pool in the debug break slot code.
Assembler::BlockConstPoolScope block_const_pool(masm);
@ -270,28 +267,55 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
__ nop(MacroAssembler::DEBUG_BREAK_NOP);
}
ASSERT_EQ(Assembler::kDebugBreakSlotInstructions,
DCHECK_EQ(Assembler::kDebugBreakSlotInstructions,
masm->InstructionsGeneratedSince(&check_codesize));
}
void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted no registers can contain
// object pointers.
Generate_DebugBreakCallHelper(masm, 0, 0);
}
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnArm);
void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
__ Ret();
}
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnArm);
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
ExternalReference restarter_frame_function_slot =
ExternalReference::debug_restarter_frame_function_pointer_address(
masm->isolate());
__ mov(ip, Operand(restarter_frame_function_slot));
__ mov(r1, Operand::Zero());
__ str(r1, MemOperand(ip, 0));
// Load the function pointer off of our current stack frame.
__ ldr(r1, MemOperand(fp,
StandardFrameConstants::kConstantPoolOffset - kPointerSize));
// Pop return address, frame and constant pool pointer (if
// FLAG_enable_ool_constant_pool).
__ LeaveFrame(StackFrame::INTERNAL);
{ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
// Load context from the function.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Get function code.
__ ldr(ip, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
__ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
// Re-run JSFunction, r1 is function, cp is context.
__ Jump(ip);
}
}
const bool Debug::kFrameDropperSupported = false;
const bool LiveEdit::kFrameDropperSupported = true;
#undef __

42
deps/v8/src/arm/deoptimizer-arm.cc

@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h"
#include "src/v8.h"
#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "safepoint-table.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
#include "src/safepoint-table.h"
namespace v8 {
namespace internal {
const int Deoptimizer::table_entry_size_ = 12;
const int Deoptimizer::table_entry_size_ = 8;
int Deoptimizer::patch_size() {
@ -49,9 +49,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
SharedFunctionInfo* shared =
SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
@ -68,13 +65,13 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
deopt_entry,
RelocInfo::NONE32);
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
ASSERT(call_size_in_bytes <= patch_size());
DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
DCHECK(call_size_in_bytes <= patch_size());
CodePatcher patcher(call_address, call_size_in_words);
patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
ASSERT(prev_call_address == NULL ||
DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
ASSERT(call_address + patch_size() <= code->instruction_end());
DCHECK(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
prev_call_address = call_address;
#endif
@ -105,7 +102,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler_);
ApiFunction function(descriptor->deoptimization_handler());
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
int params = descriptor->GetHandlerParameterCount();
@ -128,11 +125,6 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
}
Code* Deoptimizer::NotifyStubFailureBuiltin() {
return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
}
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
@ -150,8 +142,8 @@ void Deoptimizer::EntryGenerator::Generate() {
kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;
// Save all allocatable VFP registers before messing with them.
ASSERT(kDoubleRegZero.code() == 14);
ASSERT(kScratchDoubleReg.code() == 15);
DCHECK(kDoubleRegZero.code() == 14);
DCHECK(kScratchDoubleReg.code() == 15);
// Check CPU flags for number of registers, setting the Z condition flag.
__ CheckFor32DRegs(ip);
@ -202,7 +194,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
ASSERT(Register::kNumRegisters == kNumberOfRegisters);
DCHECK(Register::kNumRegisters == kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ ldr(r2, MemOperand(sp, i * kPointerSize));
@ -333,11 +325,11 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
int start = masm()->pc_offset();
USE(start);
__ mov(ip, Operand(i));
__ push(ip);
__ b(&done);
ASSERT(masm()->pc_offset() - start == table_entry_size_);
DCHECK(masm()->pc_offset() - start == table_entry_size_);
}
__ bind(&done);
__ push(ip);
}
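This entry-table change is why table_entry_size_ dropped from 12 to 8 earlier in this file's diff: the per-entry push is hoisted past the shared branch target, so each entry shrinks from three instructions to two. Schematically:

// Before (12 bytes/entry):      After (8 bytes/entry):
//   mov ip, #i                    mov ip, #i
//   push {ip}                     b done
//   b done                        ...
//   ...                         done:
// done:                           push {ip}   ; one shared push
//   ...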
@ -352,7 +344,7 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
ASSERT(FLAG_enable_ool_constant_pool);
DCHECK(FLAG_enable_ool_constant_pool);
SetFrameSlot(offset, value);
}

141
deps/v8/src/arm/disasm-arm.cc

@ -24,18 +24,18 @@
#include <assert.h>
#include <stdio.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include "v8.h"
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "constants-arm.h"
#include "disasm.h"
#include "macro-assembler.h"
#include "platform.h"
#include "src/arm/constants-arm.h"
#include "src/base/platform/platform.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
namespace v8 {
@ -207,14 +207,14 @@ void Decoder::PrintShiftRm(Instruction* instr) {
} else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
shift_amount = 32;
}
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
", %s #%d",
shift_names[shift_index],
shift_amount);
} else {
// by register
int rs = instr->RsValue();
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
", %s ", shift_names[shift_index]);
PrintRegister(rs);
}
@ -227,8 +227,7 @@ void Decoder::PrintShiftImm(Instruction* instr) {
int rotate = instr->RotateValue() * 2;
int immed8 = instr->Immed8Value();
int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"#%d", imm);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "#%d", imm);
}
@ -236,7 +235,7 @@ void Decoder::PrintShiftImm(Instruction* instr) {
void Decoder::PrintShiftSat(Instruction* instr) {
int shift = instr->Bits(11, 7);
if (shift > 0) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
", %s #%d",
shift_names[instr->Bit(6) * 2],
instr->Bits(11, 7));
@ -283,12 +282,12 @@ void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
return;
default:
if (svc >= kStopCode) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%d - 0x%x",
svc & kStopCodeMask,
svc & kStopCodeMask);
} else {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%d",
svc);
}
@ -300,7 +299,7 @@ void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
// Handle all register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatRegister(Instruction* instr, const char* format) {
ASSERT(format[0] == 'r');
DCHECK(format[0] == 'r');
if (format[1] == 'n') { // 'rn: Rn register
int reg = instr->RnValue();
PrintRegister(reg);
@ -323,7 +322,7 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
return 2;
} else if (format[1] == 'l') {
// 'rlist: register list for load and store multiple instructions
ASSERT(STRING_STARTS_WITH(format, "rlist"));
DCHECK(STRING_STARTS_WITH(format, "rlist"));
int rlist = instr->RlistValue();
int reg = 0;
Print("{");
@ -349,7 +348,7 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
// Handle all VFP register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
ASSERT((format[0] == 'S') || (format[0] == 'D'));
DCHECK((format[0] == 'S') || (format[0] == 'D'));
VFPRegPrecision precision =
format[0] == 'D' ? kDoublePrecision : kSinglePrecision;
@ -399,26 +398,26 @@ int Decoder::FormatVFPinstruction(Instruction* instr, const char* format) {
void Decoder::FormatNeonList(int Vd, int type) {
if (type == nlt_1) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"{d%d}", Vd);
} else if (type == nlt_2) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"{d%d, d%d}", Vd, Vd + 1);
} else if (type == nlt_3) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"{d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2);
} else if (type == nlt_4) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"{d%d, d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2, Vd + 3);
}
}
void Decoder::FormatNeonMemory(int Rn, int align, int Rm) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"[r%d", Rn);
if (align != 0) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
":%d", (1 << align) << 6);
}
if (Rm == 15) {
@ -426,7 +425,7 @@ void Decoder::FormatNeonMemory(int Rn, int align, int Rm) {
} else if (Rm == 13) {
Print("]!");
} else {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"], r%d", Rm);
}
}
@ -437,8 +436,7 @@ void Decoder::PrintMovwMovt(Instruction* instr) {
int imm = instr->ImmedMovwMovtValue();
int rd = instr->RdValue();
PrintRegister(rd);
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
", #%d", imm);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, ", #%d", imm);
}
@ -464,14 +462,13 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return 1;
}
case 'c': { // 'cond: conditional execution
ASSERT(STRING_STARTS_WITH(format, "cond"));
DCHECK(STRING_STARTS_WITH(format, "cond"));
PrintCondition(instr);
return 4;
}
case 'd': { // 'd: vmov double immediate.
double d = instr->DoubleImmedVmov();
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"#%g", d);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "#%g", d);
return 1;
}
case 'f': { // 'f: bitfield instructions - v7 and above.
@ -481,10 +478,10 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
// BFC/BFI:
// Bits 20-16 represent most-significant bit. Convert to width.
width -= lsbit;
ASSERT(width > 0);
DCHECK(width > 0);
}
ASSERT((width + lsbit) <= 32);
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
DCHECK((width + lsbit) <= 32);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"#%d, #%d", lsbit, width);
return 1;
}
@ -501,11 +498,11 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
int width = (format[3] - '0') * 10 + (format[4] - '0');
int lsb = (format[6] - '0') * 10 + (format[7] - '0');
ASSERT((width >= 1) && (width <= 32));
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width + lsb) <= 32);
DCHECK((width >= 1) && (width <= 32));
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width + lsb) <= 32);
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%d",
instr->Bits(width + lsb - 1, lsb));
return 8;
@ -523,7 +520,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return 2;
}
if (format[1] == 'e') { // 'memop: load/store instructions.
ASSERT(STRING_STARTS_WITH(format, "memop"));
DCHECK(STRING_STARTS_WITH(format, "memop"));
if (instr->HasL()) {
Print("ldr");
} else {
@ -541,38 +538,37 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return 5;
}
// 'msg: for simulator break instructions
ASSERT(STRING_STARTS_WITH(format, "msg"));
DCHECK(STRING_STARTS_WITH(format, "msg"));
byte* str =
reinterpret_cast<byte*>(instr->InstructionBits() & 0x0fffffff);
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s", converter_.NameInCode(str));
return 3;
}
case 'o': {
if ((format[3] == '1') && (format[4] == '2')) {
// 'off12: 12-bit offset for load and store instructions
ASSERT(STRING_STARTS_WITH(format, "off12"));
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
DCHECK(STRING_STARTS_WITH(format, "off12"));
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", instr->Offset12Value());
return 5;
} else if (format[3] == '0') {
// 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19"));
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
DCHECK(STRING_STARTS_WITH(format, "off0to3and8to19"));
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%d",
(instr->Bits(19, 8) << 4) +
instr->Bits(3, 0));
return 15;
}
// 'off8: 8-bit offset for extra load and store instructions
ASSERT(STRING_STARTS_WITH(format, "off8"));
DCHECK(STRING_STARTS_WITH(format, "off8"));
int offs8 = (instr->ImmedHValue() << 4) | instr->ImmedLValue();
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", offs8);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs8);
return 4;
}
case 'p': { // 'pu: P and U bits for load and store instructions
ASSERT(STRING_STARTS_WITH(format, "pu"));
DCHECK(STRING_STARTS_WITH(format, "pu"));
PrintPU(instr);
return 2;
}
@ -582,29 +578,29 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
case 's': {
if (format[1] == 'h') { // 'shift_op or 'shift_rm or 'shift_sat.
if (format[6] == 'o') { // 'shift_op
ASSERT(STRING_STARTS_WITH(format, "shift_op"));
DCHECK(STRING_STARTS_WITH(format, "shift_op"));
if (instr->TypeValue() == 0) {
PrintShiftRm(instr);
} else {
ASSERT(instr->TypeValue() == 1);
DCHECK(instr->TypeValue() == 1);
PrintShiftImm(instr);
}
return 8;
} else if (format[6] == 's') { // 'shift_sat.
ASSERT(STRING_STARTS_WITH(format, "shift_sat"));
DCHECK(STRING_STARTS_WITH(format, "shift_sat"));
PrintShiftSat(instr);
return 9;
} else { // 'shift_rm
ASSERT(STRING_STARTS_WITH(format, "shift_rm"));
DCHECK(STRING_STARTS_WITH(format, "shift_rm"));
PrintShiftRm(instr);
return 8;
}
} else if (format[1] == 'v') { // 'svc
ASSERT(STRING_STARTS_WITH(format, "svc"));
DCHECK(STRING_STARTS_WITH(format, "svc"));
PrintSoftwareInterrupt(instr->SvcValue());
return 3;
} else if (format[1] == 'i') { // 'sign: signed extra loads and stores
ASSERT(STRING_STARTS_WITH(format, "sign"));
DCHECK(STRING_STARTS_WITH(format, "sign"));
if (instr->HasSign()) {
Print("s");
}
@ -617,9 +613,9 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return 1;
}
case 't': { // 'target: target of branch instructions
ASSERT(STRING_STARTS_WITH(format, "target"));
DCHECK(STRING_STARTS_WITH(format, "target"));
int off = (instr->SImmed24Value() << 2) + 8;
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%+d -> %s",
off,
converter_.NameOfAddress(
@ -1101,13 +1097,16 @@ void Decoder::DecodeType3(Instruction* instr) {
}
case db_x: {
if (FLAG_enable_sudiv) {
if (!instr->HasW()) {
if (instr->Bits(5, 4) == 0x1) {
if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
if (instr->Bit(21) == 0x1) {
// UDIV (in V8 notation matching ARM ISA format) rn = rm/rs
Format(instr, "udiv'cond'b 'rn, 'rm, 'rs");
} else {
// SDIV (in V8 notation matching ARM ISA format) rn = rm/rs
Format(instr, "sdiv'cond'b 'rn, 'rm, 'rs");
break;
}
break;
}
}
}
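
The restructured branch decodes the integer divide instructions: within this encoding space, bit 21 of the instruction word distinguishes UDIV from SDIV, and the repositioned break makes both mnemonics exit the switch. A small sketch of the selection logic:

    #include <cstdint>
    #include <cstdio>

    // Bit 21 set selects the unsigned divide, clear the signed one,
    // matching the Bit(21) test in DecodeType3 above.
    const char* DivMnemonic(uint32_t instr) {
      return ((instr >> 21) & 1) ? "udiv" : "sdiv";
    }

    int main() {
      printf("%s\n", DivMnemonic(0u));        // sdiv
      printf("%s\n", DivMnemonic(1u << 21));  // udiv
      return 0;
    }
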
@ -1184,11 +1183,11 @@ int Decoder::DecodeType7(Instruction* instr) {
Format(instr, "stop'cond 'svc");
// Also print the stop message. Its address is encoded
// in the following 4 bytes.
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"\n %p %08x stop message: %s",
reinterpret_cast<int32_t*>(instr
reinterpret_cast<void*>(instr
+ Instruction::kInstrSize),
*reinterpret_cast<char**>(instr
*reinterpret_cast<uint32_t*>(instr
+ Instruction::kInstrSize),
*reinterpret_cast<char**>(instr
+ Instruction::kInstrSize));
@ -1251,7 +1250,7 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
// vcvt.f64.s32 Dd, Dd, #<fbits>
int fraction_bits = 32 - ((instr->Bits(3, 0) << 1) | instr->Bit(5));
Format(instr, "vcvt'cond.f64.s32 'Dd, 'Dd");
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
", #%d", fraction_bits);
} else if (((instr->Opc2Value() >> 1) == 0x6) &&
(instr->Opc3Value() & 0x1)) {
@ -1547,7 +1546,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmovl.s%d q%d, d%d", imm3*8, Vd, Vm);
} else {
Unknown(instr);
@ -1561,7 +1560,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmovl.u%d q%d, d%d", imm3*8, Vd, Vm);
} else {
Unknown(instr);
@ -1576,7 +1575,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int size = instr->Bits(7, 6);
int align = instr->Bits(5, 4);
int Rm = instr->VmValue();
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vst1.%d ", (1 << size) << 3);
FormatNeonList(Vd, type);
Print(", ");
@ -1589,7 +1588,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int size = instr->Bits(7, 6);
int align = instr->Bits(5, 4);
int Rm = instr->VmValue();
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vld1.%d ", (1 << size) << 3);
FormatNeonList(Vd, type);
Print(", ");
@ -1604,13 +1603,13 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int Rn = instr->Bits(19, 16);
int offset = instr->Bits(11, 0);
if (offset == 0) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"pld [r%d]", Rn);
} else if (instr->Bit(23) == 0) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"pld [r%d, #-%d]", Rn, offset);
} else {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"pld [r%d, #+%d]", Rn, offset);
}
} else {
@ -1645,7 +1644,7 @@ int Decoder::ConstantPoolSizeAt(byte* instr_ptr) {
int Decoder::InstructionDecode(byte* instr_ptr) {
Instruction* instr = Instruction::At(instr_ptr);
// Print raw instruction bytes.
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%08x ",
instr->InstructionBits());
if (instr->ConditionField() == kSpecialCondition) {
@ -1654,7 +1653,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
}
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"constant pool begin (length %d)",
DecodeConstantPoolLength(instruction_bits));
return Instruction::kInstrSize;
@ -1663,7 +1662,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
// instruction.
Instruction* target = Instruction::At(instr_ptr + Instruction::kInstrSize);
DecodeType2(instr);
OS::SNPrintF(out_buffer_ + out_buffer_pos_,
SNPrintF(out_buffer_ + out_buffer_pos_,
" (0x%08x)", target->InstructionBits());
return 2 * Instruction::kInstrSize;
}
@ -1716,7 +1715,7 @@ namespace disasm {
const char* NameConverter::NameOfAddress(byte* addr) const {
v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
return tmp_buffer_.start();
}
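
Every OS::SNPrintF call in this file becomes plain SNPrintF: the commit moves the bounded formatter out of the OS class into a free function in v8::internal. The callers' pattern is unchanged, appending into a fixed buffer by offsetting with the running position and adding the return value. The same pattern against standard vsnprintf, for illustration only; V8's SNPrintF takes a Vector<char> rather than a raw pointer:

    #include <cstdarg>
    #include <cstdio>

    static char out_buffer[256];
    static int out_buffer_pos = 0;

    // Append formatted text at the current position, advancing it by
    // the number of characters written.
    static void Append(const char* format, ...) {
      va_list args;
      va_start(args, format);
      out_buffer_pos += vsnprintf(out_buffer + out_buffer_pos,
                                  sizeof(out_buffer) - out_buffer_pos,
                                  format, args);
      va_end(args);
    }

    int main() {
      Append("%08x  ", 0xe0810002u);
      Append("add r0, r1, r2");
      puts(out_buffer);
      return 0;
    }
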

21
deps/v8/src/arm/frames-arm.cc

@ -2,16 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h"
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "assembler.h"
#include "assembler-arm.h"
#include "assembler-arm-inl.h"
#include "frames.h"
#include "macro-assembler.h"
#include "macro-assembler-arm.h"
#include "src/assembler.h"
#include "src/frames.h"
#include "src/macro-assembler.h"
#include "src/arm/assembler-arm-inl.h"
#include "src/arm/assembler-arm.h"
#include "src/arm/macro-assembler-arm.h"
namespace v8 {
namespace internal {
@ -20,7 +21,7 @@ namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
ASSERT(FLAG_enable_ool_constant_pool);
DCHECK(FLAG_enable_ool_constant_pool);
return pp;
}
@ -28,13 +29,13 @@ Register JavaScriptFrame::constant_pool_pointer_register() {
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
ASSERT(FLAG_enable_ool_constant_pool);
DCHECK(FLAG_enable_ool_constant_pool);
return pp;
}
Object*& ExitFrame::constant_pool_slot() const {
ASSERT(FLAG_enable_ool_constant_pool);
DCHECK(FLAG_enable_ool_constant_pool);
const int offset = ExitFrameConstants::kConstantPoolOffset;
return Memory::Object_at(fp() + offset);
}
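
The ASSERT family used throughout these files (ASSERT, ASSERT_EQ, SLOW_ASSERT, ENABLE_SLOW_ASSERTS) is renamed wholesale to DCHECK, DCHECK_EQ, SLOW_DCHECK and ENABLE_SLOW_DCHECKS, matching the naming of Chromium's base library for debug-only checks. A minimal sketch of the semantics; V8's real macro also reports file, line and a message before aborting:

    #include <cassert>
    #include <cstdio>

    // Debug-only check: active in debug builds, compiled out in release.
    #ifdef DEBUG
    #define DCHECK(condition) assert(condition)
    #else
    #define DCHECK(condition) ((void)0)
    #endif

    int main() {
      DCHECK(1 + 1 == 2);  // no code is emitted for this in release builds
      puts("ok");
      return 0;
    }
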

2
deps/v8/src/arm/frames-arm.h

@ -29,8 +29,6 @@ const RegList kJSCallerSaved =
const int kNumJSCallerSaved = 4;
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
// Return the code of the n-th caller-saved register available to JavaScript
// e.g. JSCallerSavedReg(0) returns r0.code() == 0
int JSCallerSavedCode(int n);

701
deps/v8/src/arm/full-codegen-arm.cc

File diff suppressed because it is too large

471
deps/v8/src/arm/ic-arm.cc

@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h"
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "assembler-arm.h"
#include "code-stubs.h"
#include "codegen.h"
#include "disasm.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"
#include "src/arm/assembler-arm.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/disasm.h"
#include "src/ic-inl.h"
#include "src/runtime.h"
#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@ -39,48 +39,6 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
}
// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
Register receiver,
Register elements,
Register t0,
Register t1,
Label* miss) {
// Register usage:
// receiver: holds the receiver on entry and is unchanged.
// elements: holds the property dictionary on fall through.
// Scratch registers:
// t0: used to hold the receiver map.
// t1: used to hold the receiver instance type, receiver bit mask and
// elements map.
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
// Check that the receiver is a valid JS object.
__ CompareObjectType(receiver, t0, t1, FIRST_SPEC_OBJECT_TYPE);
__ b(lt, miss);
// If this assert fails, we have to check upper bound too.
STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
GenerateGlobalInstanceTypeCheck(masm, t1, miss);
// Check that the global object does not require access checks.
__ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
__ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
(1 << Map::kHasNamedInterceptor)));
__ b(ne, miss);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(t1, ip);
__ b(ne, miss);
}
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
@ -211,7 +169,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing into string
// objects works as intended.
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ cmp(scratch, Operand(JS_OBJECT_TYPE));
__ b(lt, slow);
@ -311,16 +269,17 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -- r0 : receiver
// -----------------------------------
// The return address is in lr.
Register receiver = ReceiverRegister();
Register name = NameRegister();
DCHECK(receiver.is(r1));
DCHECK(name.is(r2));
// Probe the stub cache.
Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5, r6);
masm, flags, receiver, name, r3, r4, r5, r6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@ -328,37 +287,35 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
void LoadIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -- r0 : receiver
// -----------------------------------
Label miss;
Register dictionary = r0;
DCHECK(!dictionary.is(ReceiverRegister()));
DCHECK(!dictionary.is(NameRegister()));
GenerateNameDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss);
Label slow;
// r1: elements
GenerateDictionaryLoad(masm, &miss, r1, r2, r0, r3, r4);
__ ldr(dictionary,
FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), r0, r3, r4);
__ Ret();
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm);
// Dictionary load failed, go slow (but don't miss).
__ bind(&slow);
GenerateRuntimeGetProperty(masm);
}
// A register that isn't one of the parameters to the load ic.
static const Register LoadIC_TempRegister() { return r3; }
void LoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -- r0 : receiver
// -----------------------------------
// The return address is in lr.
Isolate* isolate = masm->isolate();
__ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);
__ mov(r3, r0);
__ Push(r3, r2);
__ mov(LoadIC_TempRegister(), ReceiverRegister());
__ Push(LoadIC_TempRegister(), NameRegister());
// Perform tail call to the entry.
ExternalReference ref =
@ -368,14 +325,10 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r2 : name
// -- lr : return address
// -- r0 : receiver
// -----------------------------------
// The return address is in lr.
__ mov(r3, r0);
__ Push(r3, r2);
__ mov(LoadIC_TempRegister(), ReceiverRegister());
__ Push(LoadIC_TempRegister(), NameRegister());
__ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}
@ -467,25 +420,26 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
// The return address is in lr.
Register receiver = ReceiverRegister();
Register key = NameRegister();
DCHECK(receiver.is(r1));
DCHECK(key.is(r2));
Label slow, notin;
MemOperand mapped_location =
GenerateMappedArgumentsLookup(masm, r1, r0, r2, r3, r4, &notin, &slow);
GenerateMappedArgumentsLookup(
masm, receiver, key, r0, r3, r4, &notin, &slow);
__ ldr(r0, mapped_location);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in r2.
// The unmapped lookup expects that the parameter map is in r0.
MemOperand unmapped_location =
GenerateUnmappedArgumentsLookup(masm, r0, r2, r3, &slow);
__ ldr(r2, unmapped_location);
GenerateUnmappedArgumentsLookup(masm, key, r0, r3, &slow);
__ ldr(r0, unmapped_location);
__ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
__ cmp(r2, r3);
__ cmp(r0, r3);
__ b(eq, &slow);
__ mov(r0, r2);
__ Ret();
__ bind(&slow);
GenerateMiss(masm);
@ -493,27 +447,28 @@ void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
Register receiver = ReceiverRegister();
Register key = NameRegister();
Register value = ValueRegister();
DCHECK(receiver.is(r1));
DCHECK(key.is(r2));
DCHECK(value.is(r0));
Label slow, notin;
MemOperand mapped_location =
GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
__ str(r0, mapped_location);
MemOperand mapped_location = GenerateMappedArgumentsLookup(
masm, receiver, key, r3, r4, r5, &notin, &slow);
__ str(value, mapped_location);
__ add(r6, r3, r5);
__ mov(r9, r0);
__ mov(r9, value);
__ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in r3.
MemOperand unmapped_location =
GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
__ str(r0, unmapped_location);
GenerateUnmappedArgumentsLookup(masm, key, r3, r4, &slow);
__ str(value, unmapped_location);
__ add(r6, r3, r4);
__ mov(r9, r0);
__ mov(r9, value);
__ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ Ret();
__ bind(&slow);
@ -522,16 +477,12 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
// The return address is in lr.
Isolate* isolate = masm->isolate();
__ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);
__ Push(r1, r0);
__ Push(ReceiverRegister(), NameRegister());
// Perform tail call to the entry.
ExternalReference ref =
@ -541,30 +492,51 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
}
// IC register specifications
const Register LoadIC::ReceiverRegister() { return r1; }
const Register LoadIC::NameRegister() { return r2; }
const Register LoadIC::SlotRegister() {
DCHECK(FLAG_vector_ics);
return r0;
}
const Register LoadIC::VectorRegister() {
DCHECK(FLAG_vector_ics);
return r3;
}
const Register StoreIC::ReceiverRegister() { return r1; }
const Register StoreIC::NameRegister() { return r2; }
const Register StoreIC::ValueRegister() { return r0; }
const Register KeyedStoreIC::MapRegister() {
return r3;
}
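
This block is the point of the ic-arm.cc changes: the receiver, name and value registers for each IC flavor are now stated once, as accessors, instead of being hard-coded as r0/r1/r2 at every call site, and SlotRegister/VectorRegister reserve operands for the type-feedback vector behind --vector-ics. The DCHECK(receiver.is(r1))-style assertions elsewhere in the file pin the old assignments while callers migrate. A toy model of the pattern, with names invented for illustration:

    #include <cassert>
    #include <cstdio>

    enum Register { r0, r1, r2, r3 };

    // One place defines which register plays which role in the
    // load-IC calling convention.
    struct LoadConvention {
      static Register Receiver() { return r1; }
      static Register Name() { return r2; }
    };

    int main() {
      Register receiver = LoadConvention::Receiver();
      assert(receiver == r1);  // analogous to DCHECK(receiver.is(r1))
      printf("receiver in r%d, name in r%d\n",
             LoadConvention::Receiver(), LoadConvention::Name());
      return 0;
    }
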
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
// The return address is in lr.
__ Push(r1, r0);
__ Push(ReceiverRegister(), NameRegister());
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
// The return address is in lr.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
Register key = r0;
Register receiver = r1;
Register key = NameRegister();
Register receiver = ReceiverRegister();
DCHECK(key.is(r2));
DCHECK(receiver.is(r1));
Isolate* isolate = masm->isolate();
@ -575,14 +547,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// where a numeric string is converted to a smi.
GenerateKeyedLoadReceiverCheck(
masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);
masm, receiver, r0, r3, Map::kHasIndexedInterceptor, &slow);
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(r2, r3, &check_number_dictionary);
__ CheckFastElements(r0, r3, &check_number_dictionary);
GenerateFastArrayLoad(
masm, receiver, key, r4, r3, r2, r0, NULL, &slow);
__ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r2, r3);
masm, receiver, key, r0, r3, r4, r0, NULL, &slow);
__ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r4, r3);
__ Ret();
__ bind(&check_number_dictionary);
@ -590,31 +562,30 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));
// Check whether the elements object is a number dictionary.
// r0: key
// r3: elements map
// r4: elements
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &slow);
__ SmiUntag(r2, r0);
__ LoadFromNumberDictionary(&slow, r4, r0, r0, r2, r3, r5);
__ SmiUntag(r0, key);
__ LoadFromNumberDictionary(&slow, r4, key, r0, r0, r3, r5);
__ Ret();
// Slow case, key and receiver still in r0 and r1.
// Slow case, key and receiver still in r2 and r1.
__ bind(&slow);
__ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
1, r2, r3);
1, r4, r3);
GenerateRuntimeGetProperty(masm);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, r2, r3, &index_name, &slow);
GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(
masm, receiver, r2, r3, Map::kHasNamedInterceptor, &slow);
masm, receiver, r0, r3, Map::kHasNamedInterceptor, &slow);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary.
__ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
__ ldr(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r4, ip);
@ -622,9 +593,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load the map of the receiver, compute the keyed lookup cache hash
// based on 32 bits of the map pointer and the name hash.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
__ ldr(r4, FieldMemOperand(r0, Name::kHashFieldOffset));
__ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ mov(r3, Operand(r0, ASR, KeyedLookupCache::kMapHashShift));
__ ldr(r4, FieldMemOperand(key, Name::kHashFieldOffset));
__ eor(r3, r3, Operand(r4, ASR, Name::kHashShift));
int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
__ And(r3, r3, Operand(mask));
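
The probe above computes the keyed-lookup-cache index by mixing the high bits of the receiver's map pointer with the name's hash and masking to the cache capacity. The same arithmetic stands alone as follows; the shift and mask constants here are placeholders, since V8 derives them from the cache layout:

    #include <cstdint>
    #include <cstdio>

    const uint32_t kMapHashShift = 5;    // placeholder values
    const uint32_t kHashShift = 2;
    const uint32_t kCapacityMask = 64 - 1;

    // Index = ((map >> kMapHashShift) ^ (hash >> kHashShift)) & mask.
    uint32_t CacheIndex(uintptr_t map_ptr, uint32_t name_hash_field) {
      uint32_t h = static_cast<uint32_t>(map_ptr) >> kMapHashShift;
      h ^= name_hash_field >> kHashShift;
      return h & kCapacityMask;
    }

    int main() {
      printf("index = %u\n", CacheIndex(0x2a531c40u, 0x1f3a9u));
      return 0;
    }
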
@ -644,26 +615,24 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Label try_next_entry;
// Load map and move r4 to next entry.
__ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
__ cmp(r2, r5);
__ cmp(r0, r5);
__ b(ne, &try_next_entry);
__ ldr(r5, MemOperand(r4, -kPointerSize)); // Load name
__ cmp(r0, r5);
__ cmp(key, r5);
__ b(eq, &hit_on_nth_entry[i]);
__ bind(&try_next_entry);
}
// Last entry: Load map and move r4 to name.
__ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
__ cmp(r2, r5);
__ cmp(r0, r5);
__ b(ne, &slow);
__ ldr(r5, MemOperand(r4));
__ cmp(r0, r5);
__ cmp(key, r5);
__ b(ne, &slow);
// Get field offset.
// r0 : key
// r1 : receiver
// r2 : receiver's map
// r0 : receiver's map
// r3 : lookup cache index
ExternalReference cache_field_offsets =
ExternalReference::keyed_lookup_cache_field_offsets(isolate);
@ -676,7 +645,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ add(r3, r3, Operand(i));
}
__ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
__ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
__ ldrb(r6, FieldMemOperand(r0, Map::kInObjectPropertiesOffset));
__ sub(r5, r5, r6, SetCC);
__ b(ge, &property_array_property);
if (i != 0) {
@ -686,36 +655,34 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load in-object property.
__ bind(&load_in_object_property);
__ ldrb(r6, FieldMemOperand(r2, Map::kInstanceSizeOffset));
__ ldrb(r6, FieldMemOperand(r0, Map::kInstanceSizeOffset));
__ add(r6, r6, r5); // Index from start of object.
__ sub(r1, r1, Operand(kHeapObjectTag)); // Remove the heap tag.
__ ldr(r0, MemOperand(r1, r6, LSL, kPointerSizeLog2));
__ sub(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag.
__ ldr(r0, MemOperand(receiver, r6, LSL, kPointerSizeLog2));
__ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
1, r2, r3);
1, r4, r3);
__ Ret();
// Load property array property.
__ bind(&property_array_property);
__ ldr(r1, FieldMemOperand(r1, JSObject::kPropertiesOffset));
__ add(r1, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r0, MemOperand(r1, r5, LSL, kPointerSizeLog2));
__ ldr(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ add(receiver, receiver, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r0, MemOperand(receiver, r5, LSL, kPointerSizeLog2));
__ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
1, r2, r3);
1, r4, r3);
__ Ret();
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
__ bind(&probe_dictionary);
// r1: receiver
// r0: key
// r3: elements
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
__ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
GenerateGlobalInstanceTypeCheck(masm, r0, &slow);
// Load the property to r0.
GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4);
GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4);
__ IncrementCounter(
isolate->counters()->keyed_load_generic_symbol(), 1, r2, r3);
isolate->counters()->keyed_load_generic_symbol(), 1, r4, r3);
__ Ret();
__ bind(&index_name);
@ -726,17 +693,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key (index)
// -- r1 : receiver
// -----------------------------------
// Return address is in lr.
Label miss;
Register receiver = r1;
Register index = r0;
Register receiver = ReceiverRegister();
Register index = NameRegister();
Register scratch = r3;
Register result = r0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
StringCharAtGenerator char_at_generator(receiver,
index,
@ -758,39 +722,41 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
// Return address is in lr.
Label slow;
Register receiver = ReceiverRegister();
Register key = NameRegister();
Register scratch1 = r3;
Register scratch2 = r4;
DCHECK(!scratch1.is(receiver) && !scratch1.is(key));
DCHECK(!scratch2.is(receiver) && !scratch2.is(key));
// Check that the receiver isn't a smi.
__ JumpIfSmi(r1, &slow);
__ JumpIfSmi(receiver, &slow);
// Check that the key is an array index, that is Uint32.
__ NonNegativeSmiTst(r0);
__ NonNegativeSmiTst(key);
__ b(ne, &slow);
// Get the map of the receiver.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that it has indexed interceptor and access checks
// are not enabled for this object.
__ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
__ and_(r3, r3, Operand(kSlowCaseBitFieldMask));
__ cmp(r3, Operand(1 << Map::kHasIndexedInterceptor));
__ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
__ and_(scratch2, scratch2, Operand(kSlowCaseBitFieldMask));
__ cmp(scratch2, Operand(1 << Map::kHasIndexedInterceptor));
__ b(ne, &slow);
// Everything is fine, call runtime.
__ Push(r1, r0); // Receiver, key.
__ Push(receiver, key); // Receiver, key.
// Perform tail call to the entry.
__ TailCallExternalReference(
ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
ExternalReference(IC_Utility(kLoadElementWithInterceptor),
masm->isolate()),
2,
1);
2, 1);
__ bind(&slow);
GenerateMiss(masm);
@ -798,15 +764,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
// Push receiver, key and value for runtime call.
__ Push(r2, r1, r0);
__ Push(ReceiverRegister(), NameRegister(), ValueRegister());
ExternalReference ref =
ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
@ -815,15 +774,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
void StoreIC::GenerateSlow(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r2 : key
// -- r1 : receiver
// -- lr : return address
// -----------------------------------
// Push receiver, key and value for runtime call.
__ Push(r1, r2, r0);
__ Push(ReceiverRegister(), NameRegister(), ValueRegister());
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
@ -834,15 +786,8 @@ void StoreIC::GenerateSlow(MacroAssembler* masm) {
void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
// Push receiver, key and value for runtime call.
__ Push(r2, r1, r0);
__ Push(ReceiverRegister(), NameRegister(), ValueRegister());
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
@ -854,21 +799,13 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
// Push receiver, key and value for runtime call.
__ Push(r2, r1, r0);
__ Push(ReceiverRegister(), NameRegister(), ValueRegister());
__ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes
__ mov(r0, Operand(Smi::FromInt(strict_mode))); // Strict mode.
__ Push(r1, r0);
__ Push(r0);
__ TailCallRuntime(Runtime::kSetProperty, 5, 1);
__ TailCallRuntime(Runtime::kSetProperty, 4, 1);
}
@ -998,10 +935,10 @@ static void KeyedStoreGenerateGenericHelper(
receiver_map,
r4,
slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
ElementsTransitionGenerator::GenerateSmiToDouble(
masm, receiver, key, value, receiver_map, mode, slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
@ -1012,10 +949,9 @@ static void KeyedStoreGenerateGenericHelper(
receiver_map,
r4,
slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
slow);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, receiver, key, value, receiver_map, mode, slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@ -1028,9 +964,9 @@ static void KeyedStoreGenerateGenericHelper(
receiver_map,
r4,
slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
ElementsTransitionGenerator::GenerateDoubleToObject(
masm, receiver, key, value, receiver_map, mode, slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@ -1049,9 +985,12 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Label array, extra, check_if_double_array;
// Register usage.
Register value = r0;
Register key = r1;
Register receiver = r2;
Register value = ValueRegister();
Register key = NameRegister();
Register receiver = ReceiverRegister();
DCHECK(receiver.is(r1));
DCHECK(key.is(r2));
DCHECK(value.is(r0));
Register receiver_map = r3;
Register elements_map = r6;
Register elements = r9; // Elements array of the receiver.
@ -1137,18 +1076,18 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
// -- r2 : name
// -- lr : return address
// -----------------------------------
Register receiver = ReceiverRegister();
Register name = NameRegister();
DCHECK(receiver.is(r1));
DCHECK(name.is(r2));
DCHECK(ValueRegister().is(r0));
// Probe the stub cache.
Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
masm, flags, receiver, name, r3, r4, r5, r6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@ -1156,14 +1095,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
void StoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
// -- r2 : name
// -- lr : return address
// -----------------------------------
__ Push(r1, r2, r0);
__ Push(ReceiverRegister(), NameRegister(), ValueRegister());
// Perform tail call to the entry.
ExternalReference ref =
@ -1173,17 +1105,18 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
void StoreIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
// -- r2 : name
// -- lr : return address
// -----------------------------------
Label miss;
Register receiver = ReceiverRegister();
Register name = NameRegister();
Register value = ValueRegister();
Register dictionary = r3;
DCHECK(receiver.is(r1));
DCHECK(name.is(r2));
DCHECK(value.is(r0));
GenerateNameDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss);
__ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5);
GenerateDictionaryStore(masm, &miss, dictionary, name, value, r4, r5);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->store_normal_hit(),
1, r4, r5);
@ -1197,21 +1130,13 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
// -- r2 : name
// -- lr : return address
// -----------------------------------
__ Push(r1, r2, r0);
__ Push(ReceiverRegister(), NameRegister(), ValueRegister());
__ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes
__ mov(r0, Operand(Smi::FromInt(strict_mode)));
__ Push(r1, r0);
__ Push(r0);
// Do tail-call to runtime routine.
__ TailCallRuntime(Runtime::kSetProperty, 5, 1);
__ TailCallRuntime(Runtime::kSetProperty, 4, 1);
}
@ -1293,20 +1218,20 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
CodePatcher patcher(patch_address, 2);
Register reg = Assembler::GetRn(instr_at_patch);
if (check == ENABLE_INLINED_SMI_CHECK) {
ASSERT(Assembler::IsCmpRegister(instr_at_patch));
ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
DCHECK(Assembler::IsCmpRegister(instr_at_patch));
DCHECK_EQ(Assembler::GetRn(instr_at_patch).code(),
Assembler::GetRm(instr_at_patch).code());
patcher.masm()->tst(reg, Operand(kSmiTagMask));
} else {
ASSERT(check == DISABLE_INLINED_SMI_CHECK);
ASSERT(Assembler::IsTstImmediate(instr_at_patch));
DCHECK(check == DISABLE_INLINED_SMI_CHECK);
DCHECK(Assembler::IsTstImmediate(instr_at_patch));
patcher.masm()->cmp(reg, reg);
}
ASSERT(Assembler::IsBranch(branch_instr));
DCHECK(Assembler::IsBranch(branch_instr));
if (Assembler::GetCondition(branch_instr) == eq) {
patcher.EmitCondition(ne);
} else {
ASSERT(Assembler::GetCondition(branch_instr) == ne);
DCHECK(Assembler::GetCondition(branch_instr) == ne);
patcher.EmitCondition(eq);
}
}
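
PatchInlinedSmiCode toggles an inlined check between "tst reg, #kSmiTagMask" and an always-equal "cmp reg, reg", flipping the branch condition to match. The tst works because of V8's pointer tagging on 32-bit targets: small integers carry a low tag bit of 0, heap pointers a low bit of 1. A standalone sketch of that invariant:

    #include <cstdint>
    #include <cstdio>

    // 32-bit V8 smi tagging: the value is stored shifted left by one,
    // so bit 0 is 0 for smis and 1 for heap object pointers.
    const intptr_t kSmiTagMask = 1;

    bool IsSmi(intptr_t tagged) { return (tagged & kSmiTagMask) == 0; }
    intptr_t TagSmi(int32_t value) { return static_cast<intptr_t>(value) << 1; }

    int main() {
      printf("%d\n", IsSmi(TagSmi(42)));  // 1: a tagged small integer
      printf("%d\n", IsSmi(0x1003));      // 0: odd, so a heap pointer
      return 0;
    }
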

438
deps/v8/src/arm/lithium-arm.cc

File diff suppressed because it is too large

117
deps/v8/src/arm/lithium-arm.h

@ -5,11 +5,11 @@
#ifndef V8_ARM_LITHIUM_ARM_H_
#define V8_ARM_LITHIUM_ARM_H_
#include "hydrogen.h"
#include "lithium-allocator.h"
#include "lithium.h"
#include "safepoint-table.h"
#include "utils.h"
#include "src/hydrogen.h"
#include "src/lithium.h"
#include "src/lithium-allocator.h"
#include "src/safepoint-table.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
@ -21,6 +21,7 @@ class LCodeGen;
V(AccessArgumentsAt) \
V(AddI) \
V(Allocate) \
V(AllocateBlockContext) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@ -110,6 +111,7 @@ class LCodeGen;
V(MathClz32) \
V(MathExp) \
V(MathFloor) \
V(MathFround) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
@ -139,6 +141,7 @@ class LCodeGen;
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
V(StoreGlobalCell) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
@ -171,7 +174,7 @@ class LCodeGen;
return mnemonic; \
} \
static L##type* cast(LInstruction* instr) { \
ASSERT(instr->Is##type()); \
DCHECK(instr->Is##type()); \
return reinterpret_cast<L##type*>(instr); \
}
@ -220,6 +223,9 @@ class LInstruction : public ZoneObject {
virtual bool IsControl() const { return false; }
// Try deleting this instruction if possible.
virtual bool TryDelete() { return false; }
void set_environment(LEnvironment* env) { environment_ = env; }
LEnvironment* environment() const { return environment_; }
bool HasEnvironment() const { return environment_ != NULL; }
@ -258,11 +264,12 @@ class LInstruction : public ZoneObject {
void VerifyCall();
#endif
virtual int InputCount() = 0;
virtual LOperand* InputAt(int i) = 0;
private:
// Iterator support.
friend class InputIterator;
virtual int InputCount() = 0;
virtual LOperand* InputAt(int i) = 0;
friend class TempIterator;
virtual int TempCount() = 0;
@ -327,7 +334,7 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
virtual bool IsGap() const V8_OVERRIDE { return true; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
static LGap* cast(LInstruction* instr) {
ASSERT(instr->IsGap());
DCHECK(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
}
@ -407,7 +414,7 @@ class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
public:
explicit LDummy() { }
LDummy() {}
DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
};
@ -423,6 +430,7 @@ class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
virtual bool IsControl() const V8_OVERRIDE { return true; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
};
@ -872,6 +880,16 @@ class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
class LMathFround V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathFround(LOperand* value) { inputs_[0] = value; }
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
};
class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LMathAbs(LOperand* context, LOperand* value) {
@ -1560,7 +1578,7 @@ class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
return parameter_count()->IsConstantOperand();
}
LConstantOperand* constant_parameter_count() {
ASSERT(has_constant_parameter_count());
DCHECK(has_constant_parameter_count());
return LConstantOperand::cast(parameter_count());
}
LOperand* parameter_count() { return inputs_[2]; }
@ -1582,15 +1600,17 @@ class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadNamedGeneric(LOperand* context, LOperand* object) {
LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = object;
temps_[0] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* temp_vector() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
@ -1647,23 +1667,27 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
uint32_t additional_index() const { return hydrogen()->index_offset(); }
uint32_t base_offset() const { return hydrogen()->base_offset(); }
};
class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 1> {
public:
LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) {
LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
LOperand* vector) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = key;
temps_[0] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
LOperand* temp_vector() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
};
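
LLoadNamedGeneric, LLoadKeyedGeneric and LLoadGlobalGeneric each move from T = 0 to T = 1 in LTemplateInstruction<R, I, T>, whose parameters fix the number of results, inputs and temporaries at compile time; the new temp slot carries the type-feedback vector when --vector-ics is enabled. A reduced sketch of what the arity encodes, with LOperand* slots shrunk to void* placeholders:

    #include <array>
    #include <cstdio>

    // R results, I inputs, T temps, sized at compile time.
    template <int R, int I, int T>
    struct TemplateInstruction {
      std::array<void*, R> results_;
      std::array<void*, I> inputs_;
      std::array<void*, T> temps_;
    };

    // LLoadKeyedGeneric after this change: one result, three inputs
    // (context, object, key), one temp (the feedback vector).
    using LoadKeyedGenericShape = TemplateInstruction<1, 3, 1>;

    int main() {
      printf("%zu pointer slots\n",
             sizeof(LoadKeyedGenericShape) / sizeof(void*));  // 5
      return 0;
    }
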
@ -1674,15 +1698,18 @@ class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
LOperand* vector) {
inputs_[0] = context;
inputs_[1] = global_object;
temps_[0] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* global_object() { return inputs_[1]; }
LOperand* temp_vector() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
@ -1768,15 +1795,15 @@ class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
public:
LStoreCodeEntry(LOperand* function, LOperand* code_object) {
inputs_[0] = function;
temps_[0] = code_object;
inputs_[1] = code_object;
}
LOperand* function() { return inputs_[0]; }
LOperand* code_object() { return temps_[0]; }
LOperand* code_object() { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream);
@ -1847,18 +1874,18 @@ class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
public:
LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
ZoneList<LOperand*>& operands,
LCallWithDescriptor(const InterfaceDescriptor* descriptor,
const ZoneList<LOperand*>& operands,
Zone* zone)
: descriptor_(descriptor),
inputs_(descriptor->environment_length() + 1, zone) {
ASSERT(descriptor->environment_length() + 1 == operands.length());
inputs_(descriptor->GetRegisterParameterCount() + 1, zone) {
DCHECK(descriptor->GetRegisterParameterCount() + 1 == operands.length());
inputs_.AddAll(operands, zone);
}
LOperand* target() const { return inputs_[0]; }
const CallInterfaceDescriptor* descriptor() { return descriptor_; }
const InterfaceDescriptor* descriptor() { return descriptor_; }
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
@ -1868,7 +1895,7 @@ class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
int arity() const { return hydrogen()->argument_count() - 1; }
const CallInterfaceDescriptor* descriptor_;
const InterfaceDescriptor* descriptor_;
ZoneList<LOperand*> inputs_;
// Iterator support.
@ -2222,7 +2249,7 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
}
return hydrogen()->NeedsCanonicalization();
}
uint32_t additional_index() const { return hydrogen()->index_offset(); }
uint32_t base_offset() const { return hydrogen()->base_offset(); }
};
@ -2668,6 +2695,35 @@ class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
};
class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
public:
explicit LStoreFrameContext(LOperand* context) {
inputs_[0] = context;
}
LOperand* context() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
};
class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
public:
LAllocateBlockContext(LOperand* context, LOperand* function) {
inputs_[0] = context;
inputs_[1] = function;
}
LOperand* context() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
};
class LChunkBuilder;
class LPlatformChunk V8_FINAL : public LChunk {
public:
@ -2697,8 +2753,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
// Build the sequence for the graph.
LPlatformChunk* Build();
LInstruction* CheckElideControlInstruction(HControlInstruction* instr);
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@ -2712,6 +2766,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoMathFloor(HUnaryMathOperation* instr);
LInstruction* DoMathRound(HUnaryMathOperation* instr);
LInstruction* DoMathFround(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
LInstruction* DoMathLog(HUnaryMathOperation* instr);
LInstruction* DoMathExp(HUnaryMathOperation* instr);
@ -2792,6 +2847,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
MUST_USE_RESULT LUnallocated* TempDoubleRegister();
MUST_USE_RESULT LOperand* FixedTemp(Register reg);
MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
@ -2821,6 +2877,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
void VisitInstruction(HInstruction* current);
void AddInstruction(LInstruction* instr, HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);

867
deps/v8/src/arm/lithium-codegen-arm.cc

File diff suppressed because it is too large

53
deps/v8/src/arm/lithium-codegen-arm.h

@ -5,14 +5,14 @@
#ifndef V8_ARM_LITHIUM_CODEGEN_ARM_H_
#define V8_ARM_LITHIUM_CODEGEN_ARM_H_
#include "arm/lithium-arm.h"
#include "src/arm/lithium-arm.h"
#include "arm/lithium-gap-resolver-arm.h"
#include "deoptimizer.h"
#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "utils.h"
#include "src/arm/lithium-gap-resolver-arm.h"
#include "src/deoptimizer.h"
#include "src/lithium-codegen.h"
#include "src/safepoint-table.h"
#include "src/scopes.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
@ -116,7 +116,7 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
Label* map_check, Label* bool_load);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
Register result,
@ -133,8 +133,7 @@ class LCodeGen: public LCodeGenBase {
int constant_key,
int element_size,
int shift_size,
int additional_index,
int additional_offset);
int base_offset);
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
@ -271,9 +270,6 @@ class LCodeGen: public LCodeGenBase {
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
void RecordAndWritePosition(int position) V8_OVERRIDE;
@ -357,38 +353,17 @@ class LCodeGen: public LCodeGenBase {
class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->expected_safepoint_kind_ = kind;
switch (codegen_->expected_safepoint_kind_) {
case Safepoint::kWithRegisters:
DCHECK(codegen_->info()->is_calling());
DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
codegen_->masm_->PushSafepointRegisters();
break;
case Safepoint::kWithRegistersAndDoubles:
codegen_->masm_->PushSafepointRegistersAndDoubles();
break;
default:
UNREACHABLE();
}
}
~PushSafepointRegistersScope() {
Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
ASSERT((kind & Safepoint::kWithRegisters) != 0);
switch (kind) {
case Safepoint::kWithRegisters:
DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
codegen_->masm_->PopSafepointRegisters();
break;
case Safepoint::kWithRegistersAndDoubles:
codegen_->masm_->PopSafepointRegistersAndDoubles();
break;
default:
UNREACHABLE();
}
codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
}
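
With the kWithRegistersAndDoubles variant gone, PushSafepointRegistersScope reduces to a plain RAII pair: the constructor pushes the safepoint registers and the destructor pops them, so every early exit from the scope stays balanced. The general shape, with V8's masm calls replaced by stand-ins:

    #include <cassert>
    #include <cstdio>

    class PushSafepointRegistersScope {
     public:
      explicit PushSafepointRegistersScope(int* depth) : depth_(depth) {
        assert(*depth_ == 0);  // like the DCHECK against nested scopes
        ++*depth_;
        puts("PushSafepointRegisters");
      }
      ~PushSafepointRegistersScope() {
        --*depth_;
        puts("PopSafepointRegisters");
      }
     private:
      int* depth_;
    };

    int main() {
      int depth = 0;
      {
        PushSafepointRegistersScope scope(&depth);
        // ... emit the runtime call here ...
      }  // registers popped automatically on scope exit
      return depth;  // 0: balanced
    }
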

44
deps/v8/src/arm/lithium-gap-resolver-arm.cc

@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h"
#include "src/v8.h"
#include "arm/lithium-gap-resolver-arm.h"
#include "arm/lithium-codegen-arm.h"
#include "src/arm/lithium-codegen-arm.h"
#include "src/arm/lithium-gap-resolver-arm.h"
namespace v8 {
namespace internal {
@ -29,7 +29,7 @@ LGapResolver::LGapResolver(LCodeGen* owner)
void LGapResolver::Resolve(LParallelMove* parallel_move) {
ASSERT(moves_.is_empty());
DCHECK(moves_.is_empty());
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
@ -50,13 +50,13 @@ void LGapResolver::Resolve(LParallelMove* parallel_move) {
// Perform the moves with constant sources.
for (int i = 0; i < moves_.length(); ++i) {
if (!moves_[i].IsEliminated()) {
ASSERT(moves_[i].source()->IsConstantOperand());
DCHECK(moves_[i].source()->IsConstantOperand());
EmitMove(i);
}
}
if (need_to_restore_root_) {
ASSERT(kSavedValueRegister.is(kRootRegister));
DCHECK(kSavedValueRegister.is(kRootRegister));
__ InitializeRootRegister();
need_to_restore_root_ = false;
}
@ -94,13 +94,13 @@ void LGapResolver::PerformMove(int index) {
// An additional complication is that moves to MemOperands with large
// offsets (more than 1K or 4K) require us to spill this spilled value to
// the stack, to free up the register.
ASSERT(!moves_[index].IsPending());
ASSERT(!moves_[index].IsRedundant());
DCHECK(!moves_[index].IsPending());
DCHECK(!moves_[index].IsRedundant());
// Clear this move's destination to indicate a pending move. The actual
// destination is saved in a stack allocated local. Multiple moves can
// be pending because this function is recursive.
ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated.
LOperand* destination = moves_[index].destination();
moves_[index].set_destination(NULL);
@ -127,7 +127,7 @@ void LGapResolver::PerformMove(int index) {
// a scratch register to break it.
LMoveOperands other_move = moves_[root_index_];
if (other_move.Blocks(destination)) {
ASSERT(other_move.IsPending());
DCHECK(other_move.IsPending());
BreakCycle(index);
return;
}
@ -138,12 +138,12 @@ void LGapResolver::PerformMove(int index) {
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
#ifdef ENABLE_SLOW_DCHECKS
// No operand should be the destination for more than one move.
for (int i = 0; i < moves_.length(); ++i) {
LOperand* destination = moves_[i].destination();
for (int j = i + 1; j < moves_.length(); ++j) {
SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
}
}
#endif
@ -154,8 +154,8 @@ void LGapResolver::BreakCycle(int index) {
// We save in a register the source of that move and we remember its
// destination. Then we mark this move as resolved so the cycle is
// broken and we can perform the other moves.
ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
ASSERT(!in_cycle_);
DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
DCHECK(!in_cycle_);
in_cycle_ = true;
LOperand* source = moves_[index].source();
saved_destination_ = moves_[index].destination();
@ -178,8 +178,8 @@ void LGapResolver::BreakCycle(int index) {
void LGapResolver::RestoreValue() {
ASSERT(in_cycle_);
ASSERT(saved_destination_ != NULL);
DCHECK(in_cycle_);
DCHECK(saved_destination_ != NULL);
if (saved_destination_->IsRegister()) {
__ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
@ -210,7 +210,7 @@ void LGapResolver::EmitMove(int index) {
if (destination->IsRegister()) {
__ mov(cgen_->ToRegister(destination), source_register);
} else {
ASSERT(destination->IsStackSlot());
DCHECK(destination->IsStackSlot());
__ str(source_register, cgen_->ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
@ -218,7 +218,7 @@ void LGapResolver::EmitMove(int index) {
if (destination->IsRegister()) {
__ ldr(cgen_->ToRegister(destination), source_operand);
} else {
ASSERT(destination->IsStackSlot());
DCHECK(destination->IsStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (!destination_operand.OffsetIsUint12Encodable()) {
// ip is overwritten while saving the value to the destination.
@ -248,8 +248,8 @@ void LGapResolver::EmitMove(int index) {
double v = cgen_->ToDouble(constant_source);
__ Vmov(result, v, ip);
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
DCHECK(destination->IsStackSlot());
DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone.
need_to_restore_root_ = true;
Representation r = cgen_->IsSmi(constant_source)
? Representation::Smi() : Representation::Integer32();
@ -267,7 +267,7 @@ void LGapResolver::EmitMove(int index) {
if (destination->IsDoubleRegister()) {
__ vmov(cgen_->ToDoubleRegister(destination), source_register);
} else {
ASSERT(destination->IsDoubleStackSlot());
DCHECK(destination->IsDoubleStackSlot());
__ vstr(source_register, cgen_->ToMemOperand(destination));
}
@ -276,7 +276,7 @@ void LGapResolver::EmitMove(int index) {
if (destination->IsDoubleRegister()) {
__ vldr(cgen_->ToDoubleRegister(destination), source_operand);
} else {
ASSERT(destination->IsDoubleStackSlot());
DCHECK(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
// kScratchDoubleReg was used to break the cycle.
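
The BreakCycle/RestoreValue pair in this file resolves a cycle in a parallel move by parking one source in a scratch location (kSavedValueRegister or kScratchDoubleReg) until the rest of the cycle has been performed. The same idea on plain integers, as an illustrative sketch:

#include <cassert>

// Resolve the two-move cycle (a -> b, b -> a) the way the resolver does:
// BreakCycle parks one source in a scratch slot, the remaining move is
// then unblocked, and RestoreValue completes the cycle from the scratch.
void ResolveSwapCycle(int* a, int* b) {
  int scratch = *a;  // BreakCycle: save a's value
  *a = *b;           // the other move no longer blocks anything
  *b = scratch;      // RestoreValue: finish the parked move
}

int main() {
  int r0 = 1, r1 = 2;
  ResolveSwapCycle(&r0, &r1);
  assert(r0 == 2 && r1 == 1);
}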

4
deps/v8/src/arm/lithium-gap-resolver-arm.h

@ -5,9 +5,9 @@
#ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
#define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
#include "v8.h"
#include "src/v8.h"
#include "lithium.h"
#include "src/lithium.h"
namespace v8 {
namespace internal {

647
deps/v8/src/arm/macro-assembler-arm.cc

File diff suppressed because it is too large

160
deps/v8/src/arm/macro-assembler-arm.h

@ -5,9 +5,9 @@
#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
#include "assembler.h"
#include "frames.h"
#include "v8globals.h"
#include "src/assembler.h"
#include "src/frames.h"
#include "src/globals.h"
namespace v8 {
namespace internal {
@ -37,6 +37,10 @@ enum TaggingMode {
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
kPointersToHereMaybeInteresting,
kPointersToHereAreAlwaysInteresting
};
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
@ -54,7 +58,9 @@ bool AreAliased(Register reg1,
Register reg3 = no_reg,
Register reg4 = no_reg,
Register reg5 = no_reg,
Register reg6 = no_reg);
Register reg6 = no_reg,
Register reg7 = no_reg,
Register reg8 = no_reg);
#endif
@ -72,12 +78,11 @@ class MacroAssembler: public Assembler {
// macro assembler.
MacroAssembler(Isolate* isolate, void* buffer, int size);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
// Returns the size of a call in instructions. Note, the value returned is
// only valid as long as no entries are added to the constant pool between
// checking the call size and emitting the actual call.
static int CallSize(Register target, Condition cond = al);
void Call(Register target, Condition cond = al);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
int CallStubSize(CodeStub* stub,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
@ -86,6 +91,12 @@ class MacroAssembler: public Assembler {
Address target,
RelocInfo::Mode rmode,
Condition cond = al);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Call(Register target, Condition cond = al);
void Call(Address target, RelocInfo::Mode rmode,
Condition cond = al,
TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
@ -113,7 +124,8 @@ class MacroAssembler: public Assembler {
Register scratch = no_reg,
Condition cond = al);
void Mls(Register dst, Register src1, Register src2, Register srcA,
Condition cond = al);
void And(Register dst, Register src1, const Operand& src2,
Condition cond = al);
void Ubfx(Register dst, Register src, int lsb, int width,
@ -140,6 +152,9 @@ class MacroAssembler: public Assembler {
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Handle<Object> value);
void Move(Register dst, Register src, Condition cond = al);
void Move(Register dst, const Operand& src, Condition cond = al) {
if (!src.is_reg() || !src.rm().is(dst)) mov(dst, src, LeaveCC, cond);
}
void Move(DwVfpRegister dst, DwVfpRegister src);
void Load(Register dst, const MemOperand& src, Representation r);
@ -244,7 +259,9 @@ class MacroAssembler: public Assembler {
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
// As above, but the offset has the tag presubtracted. For use with
// MemOperand(reg, off).
@ -256,7 +273,9 @@ class MacroAssembler: public Assembler {
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK) {
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting) {
RecordWriteField(context,
offset + kHeapObjectTag,
value,
@ -264,9 +283,17 @@ class MacroAssembler: public Assembler {
lr_status,
save_fp,
remembered_set_action,
smi_check);
smi_check,
pointers_to_here_check_for_value);
}
void RecordWriteForMap(
Register object,
Register map,
Register dst,
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
// address registers are clobbered by the operation.
@ -277,7 +304,9 @@ class MacroAssembler: public Assembler {
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
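
Each new default parameter on the RecordWrite family lets a call site drop a piece of the write barrier it can prove redundant. A hedged sketch of that shape in plain C++ (the barrier body is illustrative, not V8's generated code):

#include <cstdio>

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};

// Illustrative barrier skeleton: each parameter lets the caller omit a
// test it can prove redundant at this call site.
void RecordWriteSketch(
    RememberedSetAction remembered_set = EMIT_REMEMBERED_SET,
    SmiCheck smi_check = INLINE_SMI_CHECK,
    PointersToHereCheck here_check = kPointersToHereMaybeInteresting) {
  if (smi_check == INLINE_SMI_CHECK) std::puts("emit: skip barrier for smis");
  if (here_check == kPointersToHereMaybeInteresting)
    std::puts("emit: check whether the stored value needs tracking");
  if (remembered_set == EMIT_REMEMBERED_SET)
    std::puts("emit: remembered-set update");
}

int main() {
  // Storing a value known to be a heap object: drop the smi filter.
  RecordWriteSketch(EMIT_REMEMBERED_SET, OMIT_SMI_CHECK,
                    kPointersToHereAreAlwaysInteresting);
}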
// Push a handle.
void Push(Handle<Object> handle);
@ -285,7 +314,7 @@ class MacroAssembler: public Assembler {
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
ASSERT(!src1.is(src2));
DCHECK(!src1.is(src2));
if (src1.code() > src2.code()) {
stm(db_w, sp, src1.bit() | src2.bit(), cond);
} else {
@ -296,9 +325,9 @@ class MacroAssembler: public Assembler {
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Condition cond = al) {
ASSERT(!src1.is(src2));
ASSERT(!src2.is(src3));
ASSERT(!src1.is(src3));
DCHECK(!src1.is(src2));
DCHECK(!src2.is(src3));
DCHECK(!src1.is(src3));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
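
These Push overloads fold several registers into a single stm only when the register codes are strictly descending, because stm stores the lowest-numbered register at the lowest address; "leftmost register first (to highest address)" is therefore only expressible in one instruction for descending codes. A small check of that ordering rule, with register codes as plain ints:

#include <cassert>
#include <cstdint>

// stm db_w stores the lowest-numbered register at the lowest address, so
// "leftmost register to the highest address" fits one instruction only
// when the codes are strictly descending.
bool CanFoldIntoOneStm(int code1, int code2, int code3) {
  return code1 > code2 && code2 > code3;
}

uint32_t RegListBits(int code1, int code2, int code3) {
  return (1u << code1) | (1u << code2) | (1u << code3);
}

int main() {
  assert(CanFoldIntoOneStm(5, 3, 1));    // Push(r5, r3, r1): single stm
  assert(!CanFoldIntoOneStm(1, 3, 5));   // cannot use a single stm
  assert(RegListBits(5, 3, 1) == 0b101010u);
}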
@ -318,12 +347,12 @@ class MacroAssembler: public Assembler {
Register src3,
Register src4,
Condition cond = al) {
ASSERT(!src1.is(src2));
ASSERT(!src2.is(src3));
ASSERT(!src1.is(src3));
ASSERT(!src1.is(src4));
ASSERT(!src2.is(src4));
ASSERT(!src3.is(src4));
DCHECK(!src1.is(src2));
DCHECK(!src2.is(src3));
DCHECK(!src1.is(src3));
DCHECK(!src1.is(src4));
DCHECK(!src2.is(src4));
DCHECK(!src3.is(src4));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
@ -347,7 +376,7 @@ class MacroAssembler: public Assembler {
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Condition cond = al) {
ASSERT(!src1.is(src2));
DCHECK(!src1.is(src2));
if (src1.code() > src2.code()) {
ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
} else {
@ -358,9 +387,9 @@ class MacroAssembler: public Assembler {
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
ASSERT(!src1.is(src2));
ASSERT(!src2.is(src3));
ASSERT(!src1.is(src3));
DCHECK(!src1.is(src2));
DCHECK(!src2.is(src3));
DCHECK(!src1.is(src3));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
@ -380,12 +409,12 @@ class MacroAssembler: public Assembler {
Register src3,
Register src4,
Condition cond = al) {
ASSERT(!src1.is(src2));
ASSERT(!src2.is(src3));
ASSERT(!src1.is(src3));
ASSERT(!src1.is(src4));
ASSERT(!src2.is(src4));
ASSERT(!src3.is(src4));
DCHECK(!src1.is(src2));
DCHECK(!src2.is(src3));
DCHECK(!src1.is(src3));
DCHECK(!src1.is(src4));
DCHECK(!src2.is(src4));
DCHECK(!src3.is(src4));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
@ -417,12 +446,9 @@ class MacroAssembler: public Assembler {
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
void PopSafepointRegisters();
void PushSafepointRegistersAndDoubles();
void PopSafepointRegistersAndDoubles();
// Store value in register src in the safepoint stack slot for
// register dst.
void StoreToSafepointRegisterSlot(Register src, Register dst);
void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
// Load the value of the src register from its safepoint stack slot
// into register dst.
void LoadFromSafepointRegisterSlot(Register dst, Register src);
@ -519,7 +545,8 @@ class MacroAssembler: public Assembler {
Label* not_int32);
// Generates function and stub prologue code.
void Prologue(PrologueFrameMode frame_mode);
void StubPrologue();
void Prologue(bool code_pre_aging);
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
@ -630,12 +657,6 @@ class MacroAssembler: public Assembler {
// handler chain.
void ThrowUncatchable(Register value);
// Throw a message string as an exception.
void Throw(BailoutReason reason);
// Throw a message string as an exception if a condition is not true.
void ThrowIf(Condition cc, BailoutReason reason);
// ---------------------------------------------------------------------------
// Inline caching support
@ -666,7 +687,7 @@ class MacroAssembler: public Assembler {
// These instructions are generated to mark special location in the code,
// like some special IC code.
static inline bool IsMarkedCode(Instr instr, int type) {
ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
return IsNop(instr, type);
}
@ -686,7 +707,7 @@ class MacroAssembler: public Assembler {
(FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
? src_reg
: -1;
ASSERT((type == -1) ||
DCHECK((type == -1) ||
((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
return type;
}
@ -764,7 +785,8 @@ class MacroAssembler: public Assembler {
Register scratch2,
Register heap_number_map,
Label* gc_required,
TaggingMode tagging_mode = TAG_RESULT);
TaggingMode tagging_mode = TAG_RESULT,
MutableMode mode = IMMUTABLE);
void AllocateHeapNumberWithValue(Register result,
DwVfpRegister value,
Register scratch1,
@ -925,7 +947,7 @@ class MacroAssembler: public Assembler {
ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
tst(type, Operand(kIsNotStringMask), cond);
ASSERT_EQ(0, kStringTag);
DCHECK_EQ(0, kStringTag);
return eq;
}
@ -1122,7 +1144,7 @@ class MacroAssembler: public Assembler {
void GetBuiltinFunction(Register target, Builtins::JavaScript id);
Handle<Object> CodeObject() {
ASSERT(!code_object_.is_null());
DCHECK(!code_object_.is_null());
return code_object_;
}
@ -1166,7 +1188,7 @@ class MacroAssembler: public Assembler {
// EABI variant for double arguments in use.
bool use_eabi_hardfloat() {
#ifdef __arm__
return OS::ArmUsingHardFloat();
return base::OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
return true;
#else
@ -1339,8 +1361,8 @@ class MacroAssembler: public Assembler {
// Get the location of a relocated constant (its address in the constant pool)
// from its load site.
void GetRelocatedValueLocation(Register ldr_location,
Register result);
void GetRelocatedValueLocation(Register ldr_location, Register result,
Register scratch);
void ClampUint8(Register output_reg, Register input_reg);
@ -1354,12 +1376,36 @@ class MacroAssembler: public Assembler {
void EnumLength(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
template<typename Field>
void DecodeField(Register dst, Register src) {
Ubfx(dst, src, Field::kShift, Field::kSize);
}
template<typename Field>
void DecodeField(Register reg) {
DecodeField<Field>(reg, reg);
}
template<typename Field>
void DecodeFieldToSmi(Register dst, Register src) {
static const int shift = Field::kShift;
static const int mask = (Field::kMask >> shift) << kSmiTagSize;
mov(reg, Operand(reg, LSR, shift));
and_(reg, reg, Operand(mask));
static const int mask = Field::kMask >> shift << kSmiTagSize;
STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
STATIC_ASSERT(kSmiTag == 0);
if (shift < kSmiTagSize) {
mov(dst, Operand(src, LSL, kSmiTagSize - shift));
and_(dst, dst, Operand(mask));
} else if (shift > kSmiTagSize) {
mov(dst, Operand(src, LSR, shift - kSmiTagSize));
and_(dst, dst, Operand(mask));
} else {
and_(dst, src, Operand(mask));
}
}
template<typename Field>
void DecodeFieldToSmi(Register reg) {
DecodeField<Field>(reg, reg);
}
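
The rewritten DecodeFieldToSmi merges field extraction and Smi tagging into one shift and one mask instead of a shift right by kShift followed by a shift left by kSmiTagSize. A worked check of the arithmetic (the field layout and kSmiTagSize = 1 are assumptions for illustration):

#include <cassert>
#include <cstdint>

int main() {
  // Assumed field: 4 bits starting at bit 3; assumed Smi tag size 1.
  const int kShift = 3, kSize = 4, kSmiTagSize = 1;
  const uint32_t kMask = ((1u << kSize) - 1) << kShift;
  const uint32_t src = 0xABCDEF12u;

  // Reference: extract the field, then tag it as a Smi.
  uint32_t ref = ((src & kMask) >> kShift) << kSmiTagSize;

  // One-shift form from the diff: since kShift > kSmiTagSize here,
  // shift right by the difference and mask with the retagged mask.
  uint32_t mask = kMask >> kShift << kSmiTagSize;
  uint32_t fast = (src >> (kShift - kSmiTagSize)) & mask;

  assert(fast == ref);
}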
// Activation support.
@ -1501,7 +1547,7 @@ class FrameAndConstantPoolScope {
old_constant_pool_available_(masm->is_constant_pool_available()) {
// We only want to enable constant pool access for non-manual frame scopes
// to ensure the constant pool pointer is valid throughout the scope.
ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
masm->set_has_frame(true);
masm->set_constant_pool_available(true);
masm->EnterFrame(type, !old_constant_pool_available_);
@ -1519,7 +1565,7 @@ class FrameAndConstantPoolScope {
// scope, the MacroAssembler is still marked as being in a frame scope, and
// the code will be generated again when it goes out of scope.
void GenerateLeaveFrame() {
ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
masm_->LeaveFrame(type_);
}

72
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@ -2,18 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h"
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "cpu-profiler.h"
#include "unicode.h"
#include "log.h"
#include "code-stubs.h"
#include "regexp-stack.h"
#include "macro-assembler.h"
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#include "src/code-stubs.h"
#include "src/cpu-profiler.h"
#include "src/log.h"
#include "src/macro-assembler.h"
#include "src/regexp-macro-assembler.h"
#include "src/regexp-stack.h"
#include "src/unicode.h"
#include "src/arm/regexp-macro-assembler-arm.h"
namespace v8 {
namespace internal {
@ -109,7 +110,7 @@ RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
success_label_(),
backtrack_label_(),
exit_label_() {
ASSERT_EQ(0, registers_to_save % 2);
DCHECK_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
__ bind(&start_label_); // And then continue from here.
}
@ -142,8 +143,8 @@ void RegExpMacroAssemblerARM::AdvanceCurrentPosition(int by) {
void RegExpMacroAssemblerARM::AdvanceRegister(int reg, int by) {
ASSERT(reg >= 0);
ASSERT(reg < num_registers_);
DCHECK(reg >= 0);
DCHECK(reg < num_registers_);
if (by != 0) {
__ ldr(r0, register_location(reg));
__ add(r0, r0, Operand(by));
@ -286,7 +287,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
// Compute new value of character position after the matched part.
__ sub(current_input_offset(), r2, end_of_input_address());
} else {
ASSERT(mode_ == UC16);
DCHECK(mode_ == UC16);
int argument_count = 4;
__ PrepareCallCFunction(argument_count, r2);
@ -357,7 +358,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReference(
__ ldrb(r3, MemOperand(r0, char_size(), PostIndex));
__ ldrb(r4, MemOperand(r2, char_size(), PostIndex));
} else {
ASSERT(mode_ == UC16);
DCHECK(mode_ == UC16);
__ ldrh(r3, MemOperand(r0, char_size(), PostIndex));
__ ldrh(r4, MemOperand(r2, char_size(), PostIndex));
}
@ -410,7 +411,7 @@ void RegExpMacroAssemblerARM::CheckNotCharacterAfterMinusAnd(
uc16 minus,
uc16 mask,
Label* on_not_equal) {
ASSERT(minus < String::kMaxUtf16CodeUnit);
DCHECK(minus < String::kMaxUtf16CodeUnit);
__ sub(r0, current_character(), Operand(minus));
__ and_(r0, r0, Operand(mask));
__ cmp(r0, Operand(c));
@ -709,7 +710,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ add(r1, r1, Operand(r2));
// r1 is length of string in characters.
ASSERT_EQ(0, num_saved_registers_ % 2);
DCHECK_EQ(0, num_saved_registers_ % 2);
// Always an even number of capture registers. This allows us to
// unroll the loop once to add an operation between a load of a register
// and the following use of that register.
@ -894,8 +895,8 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds,
int characters) {
ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
if (check_bounds) {
CheckPosition(cp_offset + characters - 1, on_end_of_input);
}
@ -960,7 +961,7 @@ void RegExpMacroAssemblerARM::SetCurrentPositionFromEnd(int by) {
void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) {
ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
DCHECK(register_index >= num_saved_registers_); // Reserved for positions!
__ mov(r0, Operand(to));
__ str(r0, register_location(register_index));
}
@ -984,7 +985,7 @@ void RegExpMacroAssemblerARM::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerARM::ClearRegisters(int reg_from, int reg_to) {
ASSERT(reg_from <= reg_to);
DCHECK(reg_from <= reg_to);
__ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ str(r0, register_location(reg));
@ -1010,8 +1011,8 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
__ mov(r1, Operand(masm_->CodeObject()));
// We need to make room for the return address on the stack.
int stack_alignment = OS::ActivationFrameAlignment();
ASSERT(IsAligned(stack_alignment, kPointerSize));
int stack_alignment = base::OS::ActivationFrameAlignment();
DCHECK(IsAligned(stack_alignment, kPointerSize));
__ sub(sp, sp, Operand(stack_alignment));
// r0 will point to the return address, placed by DirectCEntry.
@ -1026,7 +1027,7 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
// Drop the return address from the stack.
__ add(sp, sp, Operand(stack_alignment));
ASSERT(stack_alignment != 0);
DCHECK(stack_alignment != 0);
__ ldr(sp, MemOperand(sp, 0));
__ mov(code_pointer(), Operand(masm_->CodeObject()));
@ -1044,7 +1045,8 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
if (isolate->stack_guard()->IsStackOverflow()) {
StackLimitCheck check(isolate);
if (check.JsHasOverflowed()) {
isolate->StackOverflow();
return EXCEPTION;
}
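
CheckStackGuardState now asks a StackLimitCheck whether the JS stack has actually overflowed instead of reading the stack guard's flag. An illustrative approximation of such a probe (the address-of-a-local trick and the class name are a sketch, not V8's implementation):

#include <cstdint>

// Illustrative probe: treat the address of a local as an approximation
// of the current stack pointer and compare it against a stored limit.
// Assumes a downward-growing stack.
class StackLimitCheckSketch {
 public:
  explicit StackLimitCheckSketch(uintptr_t js_stack_limit)
      : limit_(js_stack_limit) {}
  bool JsHasOverflowed() const {
    char probe;
    return reinterpret_cast<uintptr_t>(&probe) < limit_;
  }
 private:
  uintptr_t limit_;
};

int main() {
  StackLimitCheckSketch check(0);  // a limit of 0 can never be crossed
  return check.JsHasOverflowed() ? 1 : 0;
}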
@ -1067,11 +1069,11 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
// Current string.
bool is_ascii = subject->IsOneByteRepresentationUnderneath();
ASSERT(re_code->instruction_start() <= *return_address);
ASSERT(*return_address <=
DCHECK(re_code->instruction_start() <= *return_address);
DCHECK(*return_address <=
re_code->instruction_start() + re_code->instruction_size());
Object* result = Execution::HandleStackGuardInterrupt(isolate);
Object* result = isolate->stack_guard()->HandleInterrupts();
if (*code_handle != re_code) { // Return address no longer valid
int delta = code_handle->address() - re_code->address();
@ -1107,7 +1109,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
// be a sequential or external string with the same content.
// Update the start and end pointers in the stack frame to the current
// location (whether it has actually moved or not).
ASSERT(StringShape(*subject_tmp).IsSequential() ||
DCHECK(StringShape(*subject_tmp).IsSequential() ||
StringShape(*subject_tmp).IsExternal());
// The original start address of the characters to match.
@ -1139,7 +1141,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
MemOperand RegExpMacroAssemblerARM::register_location(int register_index) {
ASSERT(register_index < (1<<30));
DCHECK(register_index < (1<<30));
if (num_registers_ <= register_index) {
num_registers_ = register_index + 1;
}
@ -1192,14 +1194,14 @@ void RegExpMacroAssemblerARM::SafeCallTarget(Label* name) {
void RegExpMacroAssemblerARM::Push(Register source) {
ASSERT(!source.is(backtrack_stackpointer()));
DCHECK(!source.is(backtrack_stackpointer()));
__ str(source,
MemOperand(backtrack_stackpointer(), kPointerSize, NegPreIndex));
}
void RegExpMacroAssemblerARM::Pop(Register target) {
ASSERT(!target.is(backtrack_stackpointer()));
DCHECK(!target.is(backtrack_stackpointer()));
__ ldr(target,
MemOperand(backtrack_stackpointer(), kPointerSize, PostIndex));
}
@ -1244,7 +1246,7 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
// If unaligned load/stores are not supported then this function must only
// be used to load a single character at a time.
if (!CanReadUnaligned()) {
ASSERT(characters == 1);
DCHECK(characters == 1);
}
if (mode_ == ASCII) {
@ -1253,15 +1255,15 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
} else if (characters == 2) {
__ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
} else {
ASSERT(characters == 1);
DCHECK(characters == 1);
__ ldrb(current_character(), MemOperand(end_of_input_address(), offset));
}
} else {
ASSERT(mode_ == UC16);
DCHECK(mode_ == UC16);
if (characters == 2) {
__ ldr(current_character(), MemOperand(end_of_input_address(), offset));
} else {
ASSERT(characters == 1);
DCHECK(characters == 1);
__ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
}
}
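
LoadCurrentCharacterUnchecked picks the load width from bytes-per-character (ASCII = 1, UC16 = 2) times the number of characters fetched at once, hence the ldrb/ldrh/ldr split above. The same selection on a byte buffer, with memcpy standing in for the load instructions (little-endian host assumed for the multi-byte checks):

#include <cassert>
#include <cstdint>
#include <cstring>

enum Mode { ASCII = 1, UC16 = 2 };  // bytes per character (assumed names)

// Fetch `characters` characters at `offset` in one access, mirroring the
// ldrb/ldrh/ldr choice: width = bytes-per-char * characters.
uint32_t LoadUnchecked(const uint8_t* buf, int offset, Mode mode,
                       int characters) {
  int width = static_cast<int>(mode) * characters;
  assert(width == 1 || width == 2 || width == 4);
  uint32_t value = 0;
  std::memcpy(&value, buf + offset, width);  // little-endian host assumed
  return value;
}

int main() {
  const uint8_t text[] = {'a', 'b', 'c', 'd'};
  assert(LoadUnchecked(text, 0, ASCII, 1) == 'a');               // ldrb
  assert(LoadUnchecked(text, 0, ASCII, 2) == ('b' << 8 | 'a'));  // ldrh
  assert(LoadUnchecked(text, 0, UC16, 2) != 0);                  // ldr
}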

6
deps/v8/src/arm/regexp-macro-assembler-arm.h

@ -5,9 +5,9 @@
#ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
#include "arm/assembler-arm.h"
#include "arm/assembler-arm-inl.h"
#include "macro-assembler.h"
#include "src/arm/assembler-arm.h"
#include "src/arm/assembler-arm-inl.h"
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {

229
deps/v8/src/arm/simulator-arm.cc

@ -6,15 +6,15 @@
#include <stdlib.h>
#include <cmath>
#include "v8.h"
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "disasm.h"
#include "assembler.h"
#include "codegen.h"
#include "arm/constants-arm.h"
#include "arm/simulator-arm.h"
#include "src/arm/constants-arm.h"
#include "src/arm/simulator-arm.h"
#include "src/assembler.h"
#include "src/codegen.h"
#include "src/disasm.h"
#if defined(USE_SIMULATOR)
@ -87,7 +87,7 @@ void ArmDebugger::Stop(Instruction* instr) {
char** msg_address =
reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
char* msg = *msg_address;
ASSERT(msg != NULL);
DCHECK(msg != NULL);
// Update this stop description.
if (isWatchedStop(code) && !watched_stops_[code].desc) {
@ -342,17 +342,18 @@ void ArmDebugger::Debug() {
|| (strcmp(cmd, "printobject") == 0)) {
if (argc == 2) {
int32_t value;
OFStream os(stdout);
if (GetValue(arg1, &value)) {
Object* obj = reinterpret_cast<Object*>(value);
PrintF("%s: \n", arg1);
os << arg1 << ": \n";
#ifdef DEBUG
obj->PrintLn();
obj->Print(os);
os << "\n";
#else
obj->ShortPrint();
PrintF("\n");
os << Brief(obj) << "\n";
#endif
} else {
PrintF("%s unrecognized\n", arg1);
os << arg1 << " unrecognized\n";
}
} else {
PrintF("printobject <value>\n");
@ -451,7 +452,7 @@ void ArmDebugger::Debug() {
}
} else if (strcmp(cmd, "gdb") == 0) {
PrintF("relinquishing control to gdb\n");
v8::internal::OS::DebugBreak();
v8::base::OS::DebugBreak();
PrintF("regaining control from gdb\n");
} else if (strcmp(cmd, "break") == 0) {
if (argc == 2) {
@ -607,8 +608,8 @@ void ArmDebugger::Debug() {
static bool ICacheMatch(void* one, void* two) {
ASSERT((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
ASSERT((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
DCHECK((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
DCHECK((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
return one == two;
}
@ -645,7 +646,7 @@ void Simulator::FlushICache(v8::internal::HashMap* i_cache,
FlushOnePage(i_cache, start, bytes_to_flush);
start += bytes_to_flush;
size -= bytes_to_flush;
ASSERT_EQ(0, start & CachePage::kPageMask);
DCHECK_EQ(0, start & CachePage::kPageMask);
offset = 0;
}
if (size != 0) {
@ -670,10 +671,10 @@ CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
intptr_t start,
int size) {
ASSERT(size <= CachePage::kPageSize);
ASSERT(AllOnOnePage(start, size - 1));
ASSERT((start & CachePage::kLineMask) == 0);
ASSERT((size & CachePage::kLineMask) == 0);
DCHECK(size <= CachePage::kPageSize);
DCHECK(AllOnOnePage(start, size - 1));
DCHECK((start & CachePage::kLineMask) == 0);
DCHECK((size & CachePage::kLineMask) == 0);
void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
int offset = (start & CachePage::kPageMask);
CachePage* cache_page = GetCachePage(i_cache, page);
@ -694,12 +695,12 @@ void Simulator::CheckICache(v8::internal::HashMap* i_cache,
char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
if (cache_hit) {
// Check that the data in memory matches the contents of the I-cache.
CHECK(memcmp(reinterpret_cast<void*>(instr),
cache_page->CachedData(offset),
Instruction::kInstrSize) == 0);
CHECK_EQ(0,
memcmp(reinterpret_cast<void*>(instr),
cache_page->CachedData(offset), Instruction::kInstrSize));
} else {
// Cache miss. Load memory into the cache.
OS::MemCopy(cached_line, line, CachePage::kLineLength);
memcpy(cached_line, line, CachePage::kLineLength);
*cache_valid_byte = CachePage::LINE_VALID;
}
}
@ -813,7 +814,7 @@ class Redirection {
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) {
ASSERT_EQ(current->type(), type);
DCHECK_EQ(current->type(), type);
return current;
}
}
@ -852,7 +853,7 @@ void* Simulator::RedirectExternalReference(void* external_function,
Simulator* Simulator::current(Isolate* isolate) {
v8::internal::Isolate::PerIsolateThreadData* isolate_data =
isolate->FindOrAllocatePerThreadDataForThisThread();
ASSERT(isolate_data != NULL);
DCHECK(isolate_data != NULL);
Simulator* sim = isolate_data->simulator();
if (sim == NULL) {
@ -867,7 +868,7 @@ Simulator* Simulator::current(Isolate* isolate) {
// Sets the register in the architecture state. It will also deal with updating
// Simulator internal state for special registers such as PC.
void Simulator::set_register(int reg, int32_t value) {
ASSERT((reg >= 0) && (reg < num_registers));
DCHECK((reg >= 0) && (reg < num_registers));
if (reg == pc) {
pc_modified_ = true;
}
@ -878,7 +879,7 @@ void Simulator::set_register(int reg, int32_t value) {
// Get the register from the architecture state. This function does handle
// the special case of accessing the PC register.
int32_t Simulator::get_register(int reg) const {
ASSERT((reg >= 0) && (reg < num_registers));
DCHECK((reg >= 0) && (reg < num_registers));
// Stupid code added to avoid bug in GCC.
// See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
if (reg >= num_registers) return 0;
@ -888,75 +889,75 @@ int32_t Simulator::get_register(int reg) const {
double Simulator::get_double_from_register_pair(int reg) {
ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
DCHECK((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
double dm_val = 0.0;
// Read the bits from the unsigned integer register_[] array
// into the double precision floating point value and return it.
char buffer[2 * sizeof(vfp_registers_[0])];
OS::MemCopy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
OS::MemCopy(&dm_val, buffer, 2 * sizeof(registers_[0]));
memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
return(dm_val);
}
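
Beyond dropping the OS:: wrapper, this function shows the portable way to move bits between the integer register file and a double: copy the bytes with memcpy rather than cast the pointer, which would violate strict aliasing. A standalone round-trip check:

#include <cassert>
#include <cstdint>
#include <cstring>

// Reassemble a double from two 32-bit register halves the way
// get_double_from_register_pair does: copy the raw bits, don't cast.
double DoubleFromRegisterPair(uint32_t lo, uint32_t hi) {
  uint32_t regs[2] = {lo, hi};
  double d;
  static_assert(sizeof(regs) == sizeof(d), "two words per double");
  std::memcpy(&d, regs, sizeof(d));  // well-defined, unlike *(double*)regs
  return d;
}

int main() {
  double d = 1.5;
  uint32_t regs[2];
  std::memcpy(regs, &d, sizeof(d));                       // split into halves
  assert(DoubleFromRegisterPair(regs[0], regs[1]) == d);  // round-trips
}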
void Simulator::set_register_pair_from_double(int reg, double* value) {
ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
DCHECK((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
memcpy(registers_ + reg, value, sizeof(*value));
}
void Simulator::set_dw_register(int dreg, const int* dbl) {
ASSERT((dreg >= 0) && (dreg < num_d_registers));
DCHECK((dreg >= 0) && (dreg < num_d_registers));
registers_[dreg] = dbl[0];
registers_[dreg + 1] = dbl[1];
}
void Simulator::get_d_register(int dreg, uint64_t* value) {
ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value));
}
void Simulator::set_d_register(int dreg, const uint64_t* value) {
ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value));
}
void Simulator::get_d_register(int dreg, uint32_t* value) {
ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value) * 2);
}
void Simulator::set_d_register(int dreg, const uint32_t* value) {
ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value) * 2);
}
void Simulator::get_q_register(int qreg, uint64_t* value) {
ASSERT((qreg >= 0) && (qreg < num_q_registers));
DCHECK((qreg >= 0) && (qreg < num_q_registers));
memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 2);
}
void Simulator::set_q_register(int qreg, const uint64_t* value) {
ASSERT((qreg >= 0) && (qreg < num_q_registers));
DCHECK((qreg >= 0) && (qreg < num_q_registers));
memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 2);
}
void Simulator::get_q_register(int qreg, uint32_t* value) {
ASSERT((qreg >= 0) && (qreg < num_q_registers));
DCHECK((qreg >= 0) && (qreg < num_q_registers));
memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 4);
}
void Simulator::set_q_register(int qreg, const uint32_t* value) {
ASSERT((qreg >= 0) && (qreg < num_q_registers));
DCHECK((qreg >= 0) && (qreg < num_q_registers));
memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 4);
}
@ -981,41 +982,41 @@ int32_t Simulator::get_pc() const {
// Getting from and setting into VFP registers.
void Simulator::set_s_register(int sreg, unsigned int value) {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
DCHECK((sreg >= 0) && (sreg < num_s_registers));
vfp_registers_[sreg] = value;
}
unsigned int Simulator::get_s_register(int sreg) const {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
DCHECK((sreg >= 0) && (sreg < num_s_registers));
return vfp_registers_[sreg];
}
template<class InputType, int register_size>
void Simulator::SetVFPRegister(int reg_index, const InputType& value) {
ASSERT(reg_index >= 0);
if (register_size == 1) ASSERT(reg_index < num_s_registers);
if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
DCHECK(reg_index >= 0);
if (register_size == 1) DCHECK(reg_index < num_s_registers);
if (register_size == 2) DCHECK(reg_index < DwVfpRegister::NumRegisters());
char buffer[register_size * sizeof(vfp_registers_[0])];
OS::MemCopy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
OS::MemCopy(&vfp_registers_[reg_index * register_size], buffer,
memcpy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
memcpy(&vfp_registers_[reg_index * register_size], buffer,
register_size * sizeof(vfp_registers_[0]));
}
template<class ReturnType, int register_size>
ReturnType Simulator::GetFromVFPRegister(int reg_index) {
ASSERT(reg_index >= 0);
if (register_size == 1) ASSERT(reg_index < num_s_registers);
if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
DCHECK(reg_index >= 0);
if (register_size == 1) DCHECK(reg_index < num_s_registers);
if (register_size == 2) DCHECK(reg_index < DwVfpRegister::NumRegisters());
ReturnType value = 0;
char buffer[register_size * sizeof(vfp_registers_[0])];
OS::MemCopy(buffer, &vfp_registers_[register_size * reg_index],
memcpy(buffer, &vfp_registers_[register_size * reg_index],
register_size * sizeof(vfp_registers_[0]));
OS::MemCopy(&value, buffer, register_size * sizeof(vfp_registers_[0]));
memcpy(&value, buffer, register_size * sizeof(vfp_registers_[0]));
return value;
}
@ -1044,14 +1045,14 @@ void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
void Simulator::SetFpResult(const double& result) {
if (use_eabi_hardfloat()) {
char buffer[2 * sizeof(vfp_registers_[0])];
OS::MemCopy(buffer, &result, sizeof(buffer));
memcpy(buffer, &result, sizeof(buffer));
// Copy result to d0.
OS::MemCopy(vfp_registers_, buffer, sizeof(buffer));
memcpy(vfp_registers_, buffer, sizeof(buffer));
} else {
char buffer[2 * sizeof(registers_[0])];
OS::MemCopy(buffer, &result, sizeof(buffer));
memcpy(buffer, &result, sizeof(buffer));
// Copy result to r0 and r1.
OS::MemCopy(registers_, buffer, sizeof(buffer));
memcpy(registers_, buffer, sizeof(buffer));
}
}
@ -1429,7 +1430,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
*carry_out = (result & 1) == 1;
result >>= 1;
} else {
ASSERT(shift_amount >= 32);
DCHECK(shift_amount >= 32);
if (result < 0) {
*carry_out = true;
result = 0xffffffff;
@ -1452,7 +1453,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
*carry_out = (result & 1) == 1;
result = 0;
} else {
ASSERT(shift_amount > 32);
DCHECK(shift_amount > 32);
*carry_out = false;
result = 0;
}
@ -1574,7 +1575,7 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
// Catch null pointers a little earlier.
ASSERT(start_address > 8191 || start_address < 0);
DCHECK(start_address > 8191 || start_address < 0);
int reg = 0;
while (rlist != 0) {
if ((rlist & 1) != 0) {
@ -1588,7 +1589,7 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
reg++;
rlist >>= 1;
}
ASSERT(end_address == ((intptr_t)address) - 4);
DCHECK(end_address == ((intptr_t)address) - 4);
if (instr->HasW()) {
set_register(instr->RnValue(), rn_val);
}
@ -1635,19 +1636,19 @@ void Simulator::HandleVList(Instruction* instr) {
ReadW(reinterpret_cast<int32_t>(address + 1), instr)
};
double d;
OS::MemCopy(&d, data, 8);
memcpy(&d, data, 8);
set_d_register_from_double(reg, d);
} else {
int32_t data[2];
double d = get_double_from_d_register(reg);
OS::MemCopy(data, &d, 8);
memcpy(data, &d, 8);
WriteW(reinterpret_cast<int32_t>(address), data[0], instr);
WriteW(reinterpret_cast<int32_t>(address + 1), data[1], instr);
}
address += 2;
}
}
ASSERT(reinterpret_cast<intptr_t>(address) - operand_size == end_address);
DCHECK(reinterpret_cast<intptr_t>(address) - operand_size == end_address);
if (instr->HasW()) {
set_register(instr->RnValue(), rn_val);
}
@ -1852,7 +1853,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
target(arg0, arg1, Redirection::ReverseRedirection(arg2));
} else {
// builtin call.
ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
@ -1928,13 +1929,13 @@ bool Simulator::isStopInstruction(Instruction* instr) {
bool Simulator::isWatchedStop(uint32_t code) {
ASSERT(code <= kMaxStopCode);
DCHECK(code <= kMaxStopCode);
return code < kNumOfWatchedStops;
}
bool Simulator::isEnabledStop(uint32_t code) {
ASSERT(code <= kMaxStopCode);
DCHECK(code <= kMaxStopCode);
// Unwatched stops are always enabled.
return !isWatchedStop(code) ||
!(watched_stops_[code].count & kStopDisabledBit);
@ -1942,7 +1943,7 @@ bool Simulator::isEnabledStop(uint32_t code) {
void Simulator::EnableStop(uint32_t code) {
ASSERT(isWatchedStop(code));
DCHECK(isWatchedStop(code));
if (!isEnabledStop(code)) {
watched_stops_[code].count &= ~kStopDisabledBit;
}
@ -1950,7 +1951,7 @@ void Simulator::EnableStop(uint32_t code) {
void Simulator::DisableStop(uint32_t code) {
ASSERT(isWatchedStop(code));
DCHECK(isWatchedStop(code));
if (isEnabledStop(code)) {
watched_stops_[code].count |= kStopDisabledBit;
}
@ -1958,8 +1959,8 @@ void Simulator::DisableStop(uint32_t code) {
void Simulator::IncreaseStopCounter(uint32_t code) {
ASSERT(code <= kMaxStopCode);
ASSERT(isWatchedStop(code));
DCHECK(code <= kMaxStopCode);
DCHECK(isWatchedStop(code));
if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
PrintF("Stop counter for code %i has overflowed.\n"
"Enabling this code and reseting the counter to 0.\n", code);
@ -1973,7 +1974,7 @@ void Simulator::IncreaseStopCounter(uint32_t code) {
// Print a stop status.
void Simulator::PrintStopInfo(uint32_t code) {
ASSERT(code <= kMaxStopCode);
DCHECK(code <= kMaxStopCode);
if (!isWatchedStop(code)) {
PrintF("Stop not watched.");
} else {
@ -2091,7 +2092,7 @@ void Simulator::DecodeType01(Instruction* instr) {
switch (instr->PUField()) {
case da_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
ASSERT(!instr->HasW());
DCHECK(!instr->HasW());
addr = rn_val;
rn_val -= rm_val;
set_register(rn, rn_val);
@ -2099,7 +2100,7 @@ void Simulator::DecodeType01(Instruction* instr) {
}
case ia_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
ASSERT(!instr->HasW());
DCHECK(!instr->HasW());
addr = rn_val;
rn_val += rm_val;
set_register(rn, rn_val);
@ -2134,7 +2135,7 @@ void Simulator::DecodeType01(Instruction* instr) {
switch (instr->PUField()) {
case da_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
ASSERT(!instr->HasW());
DCHECK(!instr->HasW());
addr = rn_val;
rn_val -= imm_val;
set_register(rn, rn_val);
@ -2142,7 +2143,7 @@ void Simulator::DecodeType01(Instruction* instr) {
}
case ia_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
ASSERT(!instr->HasW());
DCHECK(!instr->HasW());
addr = rn_val;
rn_val += imm_val;
set_register(rn, rn_val);
@ -2174,7 +2175,7 @@ void Simulator::DecodeType01(Instruction* instr) {
}
}
if (((instr->Bits(7, 4) & 0xd) == 0xd) && (instr->Bit(20) == 0)) {
ASSERT((rd % 2) == 0);
DCHECK((rd % 2) == 0);
if (instr->HasH()) {
// The strd instruction.
int32_t value1 = get_register(rd);
@ -2205,8 +2206,8 @@ void Simulator::DecodeType01(Instruction* instr) {
}
} else {
// signed byte loads
ASSERT(instr->HasSign());
ASSERT(instr->HasL());
DCHECK(instr->HasSign());
DCHECK(instr->HasL());
int8_t val = ReadB(addr);
set_register(rd, val);
}
@ -2270,7 +2271,7 @@ void Simulator::DecodeType01(Instruction* instr) {
if (type == 0) {
shifter_operand = GetShiftRm(instr, &shifter_carry_out);
} else {
ASSERT(instr->TypeValue() == 1);
DCHECK(instr->TypeValue() == 1);
shifter_operand = GetImm(instr, &shifter_carry_out);
}
int32_t alu_out;
@ -2493,7 +2494,7 @@ void Simulator::DecodeType2(Instruction* instr) {
switch (instr->PUField()) {
case da_x: {
// Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
ASSERT(!instr->HasW());
DCHECK(!instr->HasW());
addr = rn_val;
rn_val -= im_val;
set_register(rn, rn_val);
@ -2501,7 +2502,7 @@ void Simulator::DecodeType2(Instruction* instr) {
}
case ia_x: {
// Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
ASSERT(!instr->HasW());
DCHECK(!instr->HasW());
addr = rn_val;
rn_val += im_val;
set_register(rn, rn_val);
@ -2557,7 +2558,7 @@ void Simulator::DecodeType3(Instruction* instr) {
int32_t addr = 0;
switch (instr->PUField()) {
case da_x: {
ASSERT(!instr->HasW());
DCHECK(!instr->HasW());
Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
UNIMPLEMENTED();
break;
@ -2710,18 +2711,21 @@ void Simulator::DecodeType3(Instruction* instr) {
}
case db_x: {
if (FLAG_enable_sudiv) {
if (!instr->HasW()) {
if (instr->Bits(5, 4) == 0x1) {
if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
// sdiv (in V8 notation matching ARM ISA format) rn = rm/rs
// Format(instr, "'sdiv'cond'b 'rn, 'rm, 'rs);
// (s/u)div (in V8 notation matching ARM ISA format) rn = rm/rs
// Format(instr, "'(s/u)div'cond'b 'rn, 'rm, 'rs);
int rm = instr->RmValue();
int32_t rm_val = get_register(rm);
int rs = instr->RsValue();
int32_t rs_val = get_register(rs);
int32_t ret_val = 0;
ASSERT(rs_val != 0);
if ((rm_val == kMinInt) && (rs_val == -1)) {
DCHECK(rs_val != 0);
// udiv
if (instr->Bit(21) == 0x1) {
ret_val = static_cast<int32_t>(static_cast<uint32_t>(rm_val) /
static_cast<uint32_t>(rs_val));
} else if ((rm_val == kMinInt) && (rs_val == -1)) {
ret_val = kMinInt;
} else {
ret_val = rm_val / rs_val;
@ -2731,7 +2735,6 @@ void Simulator::DecodeType3(Instruction* instr) {
}
}
}
}
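
The new branch adds udiv next to sdiv and keeps the one signed corner case: ARM defines kMinInt / -1 as kMinInt, while that division is undefined behavior in C++, so the simulator must special-case it before dividing natively. A sketch of the same rule:

#include <cassert>
#include <cstdint>
#include <limits>

const int32_t kMinInt = std::numeric_limits<int32_t>::min();

// Mirrors the (s/u)div handling above: udiv divides the bit patterns as
// unsigned; sdiv special-cases kMinInt / -1, which is undefined for
// native int32_t division but defined (result kMinInt) on ARM.
int32_t SimulatedDiv(int32_t rm, int32_t rs, bool is_udiv) {
  assert(rs != 0);  // the DCHECK above; hardware would yield 0 here
  if (is_udiv) {
    return static_cast<int32_t>(static_cast<uint32_t>(rm) /
                                static_cast<uint32_t>(rs));
  }
  if (rm == kMinInt && rs == -1) return kMinInt;  // avoid C++ UB
  return rm / rs;
}

int main() {
  assert(SimulatedDiv(kMinInt, -1, false) == kMinInt);  // sdiv wraps
  assert(SimulatedDiv(-2, 2, true) == 0x7FFFFFFF);      // udiv: 0xFFFFFFFE / 2
}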
// Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
addr = rn_val - shifter_operand;
if (instr->HasW()) {
@ -2771,7 +2774,7 @@ void Simulator::DecodeType3(Instruction* instr) {
uint32_t rd_val =
static_cast<uint32_t>(get_register(instr->RdValue()));
uint32_t bitcount = msbit - lsbit + 1;
uint32_t mask = (1 << bitcount) - 1;
uint32_t mask = 0xffffffffu >> (32 - bitcount);
rd_val &= ~(mask << lsbit);
if (instr->RmValue() != 15) {
// bfi - bitfield insert.
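
The mask rewrite just above matters only at bitcount == 32: shifting a 32-bit 1 left by 32 is undefined behavior, while shifting 0xffffffffu right by 0 is fine, so the new form covers full-width bitfields. A boundary check (bitcount in [1, 32], as bfi/ubfx guarantee):

#include <cassert>
#include <cstdint>

// Safe all-ones mask for bitcount in [1, 32]. The old form,
// (1 << bitcount) - 1, shifts by 32 when bitcount == 32, which is
// undefined behavior for a 32-bit operand.
uint32_t MaskForBitcount(uint32_t bitcount) {
  assert(bitcount >= 1 && bitcount <= 32);
  return 0xffffffffu >> (32 - bitcount);
}

int main() {
  assert(MaskForBitcount(1) == 0x1u);
  assert(MaskForBitcount(12) == 0xfffu);
  assert(MaskForBitcount(32) == 0xffffffffu);  // the case the fix enables
}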
@ -2818,7 +2821,7 @@ void Simulator::DecodeType3(Instruction* instr) {
void Simulator::DecodeType4(Instruction* instr) {
ASSERT(instr->Bit(22) == 0); // only allowed to be set in privileged mode
DCHECK(instr->Bit(22) == 0); // only allowed to be set in privileged mode
if (instr->HasL()) {
// Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
HandleRList(instr, true);
@ -2872,8 +2875,8 @@ void Simulator::DecodeType7(Instruction* instr) {
// vmrs
// Dd = vsqrt(Dm)
void Simulator::DecodeTypeVFP(Instruction* instr) {
ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
ASSERT(instr->Bits(11, 9) == 0x5);
DCHECK((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
DCHECK(instr->Bits(11, 9) == 0x5);
// Obtain double precision register codes.
int vm = instr->VFPMRegValue(kDoublePrecision);
@ -3020,9 +3023,9 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
int vd = instr->Bits(19, 16) | (instr->Bit(7) << 4);
double dd_value = get_double_from_d_register(vd);
int32_t data[2];
OS::MemCopy(data, &dd_value, 8);
memcpy(data, &dd_value, 8);
data[instr->Bit(21)] = get_register(instr->RtValue());
OS::MemCopy(&dd_value, data, 8);
memcpy(&dd_value, data, 8);
set_d_register_from_double(vd, dd_value);
} else if ((instr->VLValue() == 0x1) &&
(instr->VCValue() == 0x1) &&
@ -3031,7 +3034,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
int vn = instr->Bits(19, 16) | (instr->Bit(7) << 4);
double dn_value = get_double_from_d_register(vn);
int32_t data[2];
OS::MemCopy(data, &dn_value, 8);
memcpy(data, &dn_value, 8);
set_register(instr->RtValue(), data[instr->Bit(21)]);
} else if ((instr->VLValue() == 0x1) &&
(instr->VCValue() == 0x0) &&
@ -3088,7 +3091,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
Instruction* instr) {
ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
DCHECK((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x0));
int t = instr->RtValue();
@ -3106,8 +3109,8 @@ void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
void Simulator::DecodeVCMP(Instruction* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
DCHECK((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
DCHECK(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
(instr->Opc3Value() & 0x1));
// Comparison.
@ -3144,8 +3147,8 @@ void Simulator::DecodeVCMP(Instruction* instr) {
void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
DCHECK((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
DCHECK((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
VFPRegPrecision dst_precision = kDoublePrecision;
VFPRegPrecision src_precision = kSinglePrecision;
@ -3169,7 +3172,7 @@ void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
bool get_inv_op_vfp_flag(VFPRoundingMode mode,
double val,
bool unsigned_) {
ASSERT((mode == RN) || (mode == RM) || (mode == RZ));
DCHECK((mode == RN) || (mode == RM) || (mode == RZ));
double max_uint = static_cast<double>(0xffffffffu);
double max_int = static_cast<double>(kMaxInt);
double min_int = static_cast<double>(kMinInt);
@ -3222,9 +3225,9 @@ int VFPConversionSaturate(double val, bool unsigned_res) {
void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7) &&
DCHECK((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7) &&
(instr->Bits(27, 23) == 0x1D));
ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
DCHECK(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
(((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
// Conversion between floating-point and integer.
@ -3248,7 +3251,7 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
// mode or the default Round to Zero mode.
VFPRoundingMode mode = (instr->Bit(7) != 1) ? FPSCR_rounding_mode_
: RZ;
ASSERT((mode == RM) || (mode == RZ) || (mode == RN));
DCHECK((mode == RM) || (mode == RZ) || (mode == RN));
bool unsigned_integer = (instr->Bit(16) == 0);
bool double_precision = (src_precision == kDoublePrecision);
@ -3332,7 +3335,7 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
// Ddst = MEM(Rbase + 4*offset).
// MEM(Rbase + 4*offset) = Dsrc.
void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
ASSERT((instr->TypeValue() == 6));
DCHECK((instr->TypeValue() == 6));
if (instr->CoprocessorValue() == 0xA) {
switch (instr->OpcodeValue()) {
@ -3382,13 +3385,13 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
if (instr->HasL()) {
int32_t data[2];
double d = get_double_from_d_register(vm);
OS::MemCopy(data, &d, 8);
memcpy(data, &d, 8);
set_register(rt, data[0]);
set_register(rn, data[1]);
} else {
int32_t data[] = { get_register(rt), get_register(rn) };
double d;
OS::MemCopy(&d, data, 8);
memcpy(&d, data, 8);
set_d_register_from_double(vm, d);
}
}
@ -3411,13 +3414,13 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
ReadW(address + 4, instr)
};
double val;
OS::MemCopy(&val, data, 8);
memcpy(&val, data, 8);
set_d_register_from_double(vd, val);
} else {
// Store double to memory: vstr.
int32_t data[2];
double val = get_double_from_d_register(vd);
OS::MemCopy(data, &val, 8);
memcpy(data, &val, 8);
WriteW(address, data[0], instr);
WriteW(address + 4, data[1], instr);
}
@ -3753,7 +3756,7 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
// Set up arguments
// First four arguments passed in registers.
ASSERT(argument_count >= 4);
DCHECK(argument_count >= 4);
set_register(r0, va_arg(parameters, int32_t));
set_register(r1, va_arg(parameters, int32_t));
set_register(r2, va_arg(parameters, int32_t));
@ -3763,8 +3766,8 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
int original_stack = get_register(sp);
// Compute position of stack on entry to generated code.
int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
if (OS::ActivationFrameAlignment() != 0) {
entry_stack &= -OS::ActivationFrameAlignment();
if (base::OS::ActivationFrameAlignment() != 0) {
entry_stack &= -base::OS::ActivationFrameAlignment();
}
// Store remaining arguments on stack, from low to high memory.
intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);

17
deps/v8/src/arm/simulator-arm.h

@ -13,7 +13,7 @@
#ifndef V8_ARM_SIMULATOR_ARM_H_
#define V8_ARM_SIMULATOR_ARM_H_
#include "allocation.h"
#include "src/allocation.h"
#if !defined(USE_SIMULATOR)
// Running without a simulator on a native arm platform.
@ -37,9 +37,6 @@ typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
(FUNCTION_CAST<arm_regexp_matcher>(entry)( \
p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on arm uses the C stack, we
// just use the C stack limit.
@ -63,9 +60,9 @@ class SimulatorStack : public v8::internal::AllStatic {
#else // !defined(USE_SIMULATOR)
// Running with a simulator.
#include "constants-arm.h"
#include "hashmap.h"
#include "assembler.h"
#include "src/arm/constants-arm.h"
#include "src/assembler.h"
#include "src/hashmap.h"
namespace v8 {
namespace internal {
@ -265,7 +262,7 @@ class Simulator {
inline int GetCarry() {
return c_flag_ ? 1 : 0;
};
}
// Support for VFP.
void Compute_FPSCR_Flags(double val1, double val2);
@ -436,10 +433,6 @@ class Simulator {
Simulator::current(Isolate::Current())->Call( \
entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \
NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code. Setting the c_limit to indicate a very small

926
deps/v8/src/arm/stub-cache-arm.cc

File diff suppressed because it is too large

456
deps/v8/src/arm64/assembler-arm64-inl.h

File diff suppressed because it is too large

1146
deps/v8/src/arm64/assembler-arm64.cc

File diff suppressed because it is too large

384
deps/v8/src/arm64/assembler-arm64.h

@ -7,13 +7,13 @@
#include <list>
#include <map>
#include <vector>
#include "globals.h"
#include "utils.h"
#include "assembler.h"
#include "serialize.h"
#include "arm64/instructions-arm64.h"
#include "arm64/cpu-arm64.h"
#include "src/arm64/instructions-arm64.h"
#include "src/assembler.h"
#include "src/globals.h"
#include "src/serialize.h"
#include "src/utils.h"
namespace v8 {
@ -66,6 +66,7 @@ struct CPURegister {
bool IsValidFPRegister() const;
bool IsNone() const;
bool Is(const CPURegister& other) const;
bool Aliases(const CPURegister& other) const;
bool IsZero() const;
bool IsSP() const;
@ -105,18 +106,18 @@ struct Register : public CPURegister {
reg_code = r.reg_code;
reg_size = r.reg_size;
reg_type = r.reg_type;
ASSERT(IsValidOrNone());
DCHECK(IsValidOrNone());
}
Register(const Register& r) { // NOLINT(runtime/explicit)
reg_code = r.reg_code;
reg_size = r.reg_size;
reg_type = r.reg_type;
ASSERT(IsValidOrNone());
DCHECK(IsValidOrNone());
}
bool IsValid() const {
ASSERT(IsRegister() || IsNone());
DCHECK(IsRegister() || IsNone());
return IsValidRegister();
}
@ -168,7 +169,7 @@ struct Register : public CPURegister {
}
static Register FromAllocationIndex(unsigned index) {
ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
// cp is the last allocatable register.
if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) {
return from_code(kAllocatableContext);
@ -181,8 +182,8 @@ struct Register : public CPURegister {
}
static const char* AllocationIndexToString(int index) {
ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
ASSERT((kAllocatableLowRangeBegin == 0) &&
DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
DCHECK((kAllocatableLowRangeBegin == 0) &&
(kAllocatableLowRangeEnd == 15) &&
(kAllocatableHighRangeBegin == 18) &&
(kAllocatableHighRangeEnd == 24) &&
@ -198,7 +199,7 @@ struct Register : public CPURegister {
}
static int ToAllocationIndex(Register reg) {
ASSERT(reg.IsAllocatable());
DCHECK(reg.IsAllocatable());
unsigned code = reg.code();
if (code == kAllocatableContext) {
return NumAllocatableRegisters() - 1;
@ -234,18 +235,18 @@ struct FPRegister : public CPURegister {
reg_code = r.reg_code;
reg_size = r.reg_size;
reg_type = r.reg_type;
ASSERT(IsValidOrNone());
DCHECK(IsValidOrNone());
}
FPRegister(const FPRegister& r) { // NOLINT(runtime/explicit)
reg_code = r.reg_code;
reg_size = r.reg_size;
reg_type = r.reg_type;
ASSERT(IsValidOrNone());
DCHECK(IsValidOrNone());
}
bool IsValid() const {
ASSERT(IsFPRegister() || IsNone());
DCHECK(IsFPRegister() || IsNone());
return IsValidFPRegister();
}
@ -281,7 +282,7 @@ struct FPRegister : public CPURegister {
}
static FPRegister FromAllocationIndex(unsigned int index) {
ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
return (index <= kAllocatableLowRangeEnd)
? from_code(index)
@ -289,8 +290,8 @@ struct FPRegister : public CPURegister {
}
static const char* AllocationIndexToString(int index) {
ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
ASSERT((kAllocatableLowRangeBegin == 0) &&
DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
DCHECK((kAllocatableLowRangeBegin == 0) &&
(kAllocatableLowRangeEnd == 14) &&
(kAllocatableHighRangeBegin == 16) &&
(kAllocatableHighRangeEnd == 28));
@ -304,7 +305,7 @@ struct FPRegister : public CPURegister {
}
static int ToAllocationIndex(FPRegister reg) {
ASSERT(reg.IsAllocatable());
DCHECK(reg.IsAllocatable());
unsigned code = reg.code();
return (code <= kAllocatableLowRangeEnd)
@ -450,40 +451,40 @@ class CPURegList {
CPURegister reg4 = NoCPUReg)
: list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
size_(reg1.SizeInBits()), type_(reg1.type()) {
ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
ASSERT(IsValid());
DCHECK(AreSameSizeAndType(reg1, reg2, reg3, reg4));
DCHECK(IsValid());
}
CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
: list_(list), size_(size), type_(type) {
ASSERT(IsValid());
DCHECK(IsValid());
}
CPURegList(CPURegister::RegisterType type, unsigned size,
unsigned first_reg, unsigned last_reg)
: size_(size), type_(type) {
ASSERT(((type == CPURegister::kRegister) &&
DCHECK(((type == CPURegister::kRegister) &&
(last_reg < kNumberOfRegisters)) ||
((type == CPURegister::kFPRegister) &&
(last_reg < kNumberOfFPRegisters)));
ASSERT(last_reg >= first_reg);
DCHECK(last_reg >= first_reg);
list_ = (1UL << (last_reg + 1)) - 1;
list_ &= ~((1UL << first_reg) - 1);
ASSERT(IsValid());
DCHECK(IsValid());
}
CPURegister::RegisterType type() const {
ASSERT(IsValid());
DCHECK(IsValid());
return type_;
}
RegList list() const {
ASSERT(IsValid());
DCHECK(IsValid());
return list_;
}
inline void set_list(RegList new_list) {
ASSERT(IsValid());
DCHECK(IsValid());
list_ = new_list;
}
@ -528,7 +529,7 @@ class CPURegList {
static CPURegList GetSafepointSavedRegisters();
bool IsEmpty() const {
ASSERT(IsValid());
DCHECK(IsValid());
return list_ == 0;
}
@ -536,7 +537,7 @@ class CPURegList {
const CPURegister& other2 = NoCPUReg,
const CPURegister& other3 = NoCPUReg,
const CPURegister& other4 = NoCPUReg) const {
ASSERT(IsValid());
DCHECK(IsValid());
RegList list = 0;
if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit();
if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
@ -546,21 +547,26 @@ class CPURegList {
}
int Count() const {
ASSERT(IsValid());
DCHECK(IsValid());
return CountSetBits(list_, kRegListSizeInBits);
}
unsigned RegisterSizeInBits() const {
ASSERT(IsValid());
DCHECK(IsValid());
return size_;
}
unsigned RegisterSizeInBytes() const {
int size_in_bits = RegisterSizeInBits();
ASSERT((size_in_bits % kBitsPerByte) == 0);
DCHECK((size_in_bits % kBitsPerByte) == 0);
return size_in_bits / kBitsPerByte;
}
unsigned TotalSizeInBytes() const {
DCHECK(IsValid());
return RegisterSizeInBytes() * Count();
}
private:
RegList list_;
unsigned size_;
@ -593,6 +599,31 @@ class CPURegList {
#define kCallerSaved CPURegList::GetCallerSaved()
#define kCallerSavedFP CPURegList::GetCallerSavedFP()
// -----------------------------------------------------------------------------
// Immediates.
class Immediate {
public:
template<typename T>
inline explicit Immediate(Handle<T> handle);
// This is allowed to be an implicit constructor because Immediate is
// a wrapper class that doesn't normally perform any type conversion.
template<typename T>
inline Immediate(T value); // NOLINT(runtime/explicit)
template<typename T>
inline Immediate(T value, RelocInfo::Mode rmode);
int64_t value() const { return value_; }
RelocInfo::Mode rmode() const { return rmode_; }
private:
void InitializeHandle(Handle<Object> value);
int64_t value_;
RelocInfo::Mode rmode_;
};
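Aside: the non-explicit templated constructor above is what lets assembler call sites pass plain integers where an Immediate is expected. A minimal sketch of the pattern, with simplified stand-in types (not V8's):

#include <cstdint>

// Stand-in for the wrapper pattern: the non-explicit templated constructor
// lets any integral value convert to an immediate implicitly at call sites.
class ToyImmediate {
 public:
  template <typename T>
  ToyImmediate(T value)  // NOLINT(runtime/explicit)
      : value_(static_cast<int64_t>(value)) {}
  int64_t value() const { return value_; }

 private:
  int64_t value_;
};

int64_t Twice(const ToyImmediate& imm) { return 2 * imm.value(); }

int main() {
  // The int literal converts implicitly, as at assembler call sites.
  return Twice(21) == 42 ? 0 : 1;
}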
// -----------------------------------------------------------------------------
// Operands.
@ -628,8 +659,8 @@ class Operand {
inline Operand(T t); // NOLINT(runtime/explicit)
// Implicit constructor for int types.
template<typename int_t>
inline Operand(int_t t, RelocInfo::Mode rmode);
template<typename T>
inline Operand(T t, RelocInfo::Mode rmode);
inline bool IsImmediate() const;
inline bool IsShiftedRegister() const;
@ -640,36 +671,33 @@ class Operand {
// which helps in the encoding of instructions that use the stack pointer.
inline Operand ToExtendedRegister() const;
inline int64_t immediate() const;
inline Immediate immediate() const;
inline int64_t ImmediateValue() const;
inline Register reg() const;
inline Shift shift() const;
inline Extend extend() const;
inline unsigned shift_amount() const;
// Relocation information.
RelocInfo::Mode rmode() const { return rmode_; }
void set_rmode(RelocInfo::Mode rmode) { rmode_ = rmode; }
bool NeedsRelocation(Isolate* isolate) const;
bool NeedsRelocation(const Assembler* assembler) const;
// Helpers
inline static Operand UntagSmi(Register smi);
inline static Operand UntagSmiAndScale(Register smi, int scale);
private:
void initialize_handle(Handle<Object> value);
int64_t immediate_;
Immediate immediate_;
Register reg_;
Shift shift_;
Extend extend_;
unsigned shift_amount_;
RelocInfo::Mode rmode_;
};
// MemOperand represents a memory operand in a load or store instruction.
class MemOperand {
public:
inline explicit MemOperand();
inline MemOperand();
inline explicit MemOperand(Register base,
ptrdiff_t offset = 0,
AddrMode addrmode = Offset);
@ -701,6 +729,16 @@ class MemOperand {
// handle indexed modes.
inline Operand OffsetAsOperand() const;
enum PairResult {
kNotPair, // Can't use a pair instruction.
kPairAB, // Can use a pair instruction (operandA has lower address).
kPairBA // Can use a pair instruction (operandB has lower address).
};
// Check if two MemOperands are consistent for stp/ldp use.
static PairResult AreConsistentForPair(const MemOperand& operandA,
const MemOperand& operandB,
int access_size_log2 = kXRegSizeLog2);
private:
Register base_;
Register regoffset_;
@ -712,6 +750,55 @@ class MemOperand {
};
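Aside: a simplified model of what AreConsistentForPair checks, assuming both operands use plain base+offset addressing. The real implementation must also respect addressing modes and the scaled 7-bit immediate range of ldp/stp; this sketch only captures the adjacency test:

#include <cassert>
#include <cstdint>

enum PairResult { kNotPair, kPairAB, kPairBA };

// Simplified model of a base+offset memory operand.
struct SimpleMemOperand {
  int base;        // base register code
  int64_t offset;  // byte offset
};

// Two accesses of (1 << access_size_log2) bytes can merge into one ldp/stp
// when they share a base register and are exactly adjacent.
PairResult AreConsistentForPair(const SimpleMemOperand& a,
                                const SimpleMemOperand& b,
                                int access_size_log2) {
  if (a.base != b.base) return kNotPair;
  int64_t size = static_cast<int64_t>(1) << access_size_log2;
  if (b.offset == a.offset + size) return kPairAB;  // a has the lower address
  if (a.offset == b.offset + size) return kPairBA;  // b has the lower address
  return kNotPair;
}

int main() {
  SimpleMemOperand lo = {29, 16};  // e.g. [fp, #16]
  SimpleMemOperand hi = {29, 24};  // e.g. [fp, #24]
  assert(AreConsistentForPair(lo, hi, 3) == kPairAB);   // adjacent x-reg slots
  assert(AreConsistentForPair(hi, lo, 3) == kPairBA);
  assert(AreConsistentForPair(lo, hi, 2) == kNotPair);  // not adjacent w slots
  return 0;
}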
class ConstPool {
public:
explicit ConstPool(Assembler* assm)
: assm_(assm),
first_use_(-1),
shared_entries_count(0) {}
void RecordEntry(intptr_t data, RelocInfo::Mode mode);
int EntryCount() const {
return shared_entries_count + unique_entries_.size();
}
bool IsEmpty() const {
return shared_entries_.empty() && unique_entries_.empty();
}
// Distance in bytes between the current pc and the first instruction
// using the pool. If there are no pending entries, return kMaxInt.
int DistanceToFirstUse();
// Offset after which instructions using the pool will be out of range.
int MaxPcOffset();
// Maximum size the constant pool can be with current entries. It always
// includes alignment padding and the branch over the pool.
int WorstCaseSize();
// Size in bytes of the literal pool *if* it is emitted at the current
// pc. The size will include the branch over the pool if it was requested.
int SizeIfEmittedAtCurrentPc(bool require_jump);
// Emit the literal pool at the current pc with a branch over the pool if
// requested.
void Emit(bool require_jump);
// Discard any pending pool entries.
void Clear();
private:
bool CanBeShared(RelocInfo::Mode mode);
void EmitMarker();
void EmitGuard();
void EmitEntries();
Assembler* assm_;
// Keep track of the first instruction requiring a constant pool entry
// since the previous constant pool was emitted.
int first_use_;
// Values and pc offsets of entries which can be shared.
std::multimap<uint64_t, int> shared_entries_;
// Number of distinct literals in shared entries.
int shared_entries_count;
// Values and pc offsets of entries which cannot be shared.
std::vector<std::pair<uint64_t, int> > unique_entries_;
};
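Aside: a toy model of the bookkeeping above, showing why EntryCount() is the shared-entry count plus the unique-entry count. Shareable values are deduplicated in the multimap; values that cannot be shared (e.g. relocated addresses) each get their own slot. Types simplified, names hypothetical:

#include <cstddef>
#include <cstdint>
#include <map>
#include <utility>
#include <vector>

class ToyConstPool {
 public:
  ToyConstPool() : shared_entries_count_(0) {}

  void RecordEntry(uint64_t data, bool shareable, int pc_offset) {
    if (shareable) {
      if (shared_entries_.count(data) == 0) ++shared_entries_count_;
      shared_entries_.insert(std::make_pair(data, pc_offset));
    } else {
      unique_entries_.push_back(std::make_pair(data, pc_offset));
    }
  }

  // One emitted slot per distinct shared value, one per unique entry --
  // exactly the EntryCount() computation above.
  size_t EntryCount() const {
    return shared_entries_count_ + unique_entries_.size();
  }

 private:
  std::multimap<uint64_t, int> shared_entries_;
  size_t shared_entries_count_;
  std::vector<std::pair<uint64_t, int> > unique_entries_;
};

int main() {
  ToyConstPool pool;
  pool.RecordEntry(0x1234, true, 0);    // shareable constant
  pool.RecordEntry(0x1234, true, 8);    // same value: reuses the slot
  pool.RecordEntry(0x5678, false, 16);  // e.g. an address that gets relocated
  return pool.EntryCount() == 2 ? 0 : 1;
}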
// -----------------------------------------------------------------------------
// Assembler.
@ -735,14 +822,14 @@ class Assembler : public AssemblerBase {
virtual ~Assembler();
virtual void AbortedCodeGeneration() {
num_pending_reloc_info_ = 0;
constpool_.Clear();
}
// System functions ---------------------------------------------------------
// Start generating code from the beginning of the buffer, discarding any code
// and data that has already been emitted into the buffer.
//
// In order to avoid any accidental transfer of state, Reset ASSERTs that the
// In order to avoid any accidental transfer of state, Reset DCHECKs that the
// constant pool is not blocked.
void Reset();
@ -782,11 +869,15 @@ class Assembler : public AssemblerBase {
ConstantPoolArray* constant_pool);
inline static void set_target_address_at(Address pc,
ConstantPoolArray* constant_pool,
Address target);
Address target,
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED);
static inline Address target_address_at(Address pc, Code* code);
static inline void set_target_address_at(Address pc,
Code* code,
Address target);
Address target,
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address of
// that call in the instruction stream.
@ -796,6 +887,9 @@ class Assembler : public AssemblerBase {
// instruction stream that call will return from.
inline static Address return_address_from_call_start(Address pc);
// Return the code target address of the patch debug break slot
inline static Address break_address_from_return_address(Address pc);
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@ -822,15 +916,15 @@ class Assembler : public AssemblerBase {
// Size of the generated code in bytes
uint64_t SizeOfGeneratedCode() const {
ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
DCHECK((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
return pc_ - buffer_;
}
// Return the code size generated from label to the current position.
uint64_t SizeOfCodeGeneratedSince(const Label* label) {
ASSERT(label->is_bound());
ASSERT(pc_offset() >= label->pos());
ASSERT(pc_offset() < buffer_size_);
DCHECK(label->is_bound());
DCHECK(pc_offset() >= label->pos());
DCHECK(pc_offset() < buffer_size_);
return pc_offset() - label->pos();
}
@ -840,8 +934,8 @@ class Assembler : public AssemblerBase {
// TODO(jbramley): Work out what sign to use for these things and if possible,
// change things to be consistent.
void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
ASSERT(size >= 0);
ASSERT(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
DCHECK(size >= 0);
DCHECK(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
}
// Return the number of instructions generated from label to the
@ -859,7 +953,8 @@ class Assembler : public AssemblerBase {
static const int kPatchDebugBreakSlotAddressOffset = 0;
// Number of instructions necessary to be able to later patch it to a call.
// See Debug::GenerateSlot() and BreakLocationIterator::SetDebugBreakAtSlot().
// See DebugCodegen::GenerateSlot() and
// BreakLocationIterator::SetDebugBreakAtSlot().
static const int kDebugBreakSlotInstructions = 4;
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstructionSize;
@ -879,9 +974,7 @@ class Assembler : public AssemblerBase {
static bool IsConstantPoolAt(Instruction* instr);
static int ConstantPoolSizeAt(Instruction* instr);
// See Assembler::CheckConstPool for more info.
void ConstantPoolMarker(uint32_t size);
void EmitPoolGuard();
void ConstantPoolGuard();
// Prevent veneer pool emission until EndBlockVeneerPool is called.
// Call to this function can be nested but must be followed by an equal
@ -925,9 +1018,9 @@ class Assembler : public AssemblerBase {
// function, compiled with and without debugger support (see for example
// Debug::PrepareForBreakPoints()).
// Compiling functions with debugger support generates additional code
// (Debug::GenerateSlot()). This may affect the emission of the pools and
// cause the version of the code with debugger support to have pools generated
// in different places.
// (DebugCodegen::GenerateSlot()). This may affect the emission of the pools
// and cause the version of the code with debugger support to have pools
// generated in different places.
// Recording the position and size of emitted pools allows us to correctly
// compute the offset mappings between the different versions of a function in
// all situations.
@ -1124,8 +1217,8 @@ class Assembler : public AssemblerBase {
const Register& rn,
unsigned lsb,
unsigned width) {
ASSERT(width >= 1);
ASSERT(lsb + width <= rn.SizeInBits());
DCHECK(width >= 1);
DCHECK(lsb + width <= rn.SizeInBits());
bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
}
@ -1134,15 +1227,15 @@ class Assembler : public AssemblerBase {
const Register& rn,
unsigned lsb,
unsigned width) {
ASSERT(width >= 1);
ASSERT(lsb + width <= rn.SizeInBits());
DCHECK(width >= 1);
DCHECK(lsb + width <= rn.SizeInBits());
bfm(rd, rn, lsb, lsb + width - 1);
}
// Sbfm aliases.
// Arithmetic shift right.
void asr(const Register& rd, const Register& rn, unsigned shift) {
ASSERT(shift < rd.SizeInBits());
DCHECK(shift < rd.SizeInBits());
sbfm(rd, rn, shift, rd.SizeInBits() - 1);
}
@ -1151,8 +1244,8 @@ class Assembler : public AssemblerBase {
const Register& rn,
unsigned lsb,
unsigned width) {
ASSERT(width >= 1);
ASSERT(lsb + width <= rn.SizeInBits());
DCHECK(width >= 1);
DCHECK(lsb + width <= rn.SizeInBits());
sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
}
@ -1161,8 +1254,8 @@ class Assembler : public AssemblerBase {
const Register& rn,
unsigned lsb,
unsigned width) {
ASSERT(width >= 1);
ASSERT(lsb + width <= rn.SizeInBits());
DCHECK(width >= 1);
DCHECK(lsb + width <= rn.SizeInBits());
sbfm(rd, rn, lsb, lsb + width - 1);
}
@ -1185,13 +1278,13 @@ class Assembler : public AssemblerBase {
// Logical shift left.
void lsl(const Register& rd, const Register& rn, unsigned shift) {
unsigned reg_size = rd.SizeInBits();
ASSERT(shift < reg_size);
DCHECK(shift < reg_size);
ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
}
// Logical shift right.
void lsr(const Register& rd, const Register& rn, unsigned shift) {
ASSERT(shift < rd.SizeInBits());
DCHECK(shift < rd.SizeInBits());
ubfm(rd, rn, shift, rd.SizeInBits() - 1);
}
@ -1200,8 +1293,8 @@ class Assembler : public AssemblerBase {
const Register& rn,
unsigned lsb,
unsigned width) {
ASSERT(width >= 1);
ASSERT(lsb + width <= rn.SizeInBits());
DCHECK(width >= 1);
DCHECK(lsb + width <= rn.SizeInBits());
ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
}
@ -1210,8 +1303,8 @@ class Assembler : public AssemblerBase {
const Register& rn,
unsigned lsb,
unsigned width) {
ASSERT(width >= 1);
ASSERT(lsb + width <= rn.SizeInBits());
DCHECK(width >= 1);
DCHECK(lsb + width <= rn.SizeInBits());
ubfm(rd, rn, lsb, lsb + width - 1);
}
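Aside: the shift aliases above only compute the ubfm immediate fields. A self-contained check of that arithmetic for a 64-bit register (helper names are illustrative):

#include <cassert>

struct UbfmFields {
  unsigned immr;
  unsigned imms;
};

UbfmFields LslFields(unsigned shift, unsigned reg_size) {
  // From lsl(): ubfm(rd, rn, (reg_size - shift) % reg_size,
  //                          reg_size - shift - 1)
  UbfmFields f = {(reg_size - shift) % reg_size, reg_size - shift - 1};
  return f;
}

UbfmFields LsrFields(unsigned shift, unsigned reg_size) {
  // From lsr(): ubfm(rd, rn, shift, reg_size - 1)
  UbfmFields f = {shift, reg_size - 1};
  return f;
}

int main() {
  // lsl x0, x1, #8 encodes as ubfm x0, x1, #56, #55.
  UbfmFields lsl8 = LslFields(8, 64);
  assert(lsl8.immr == 56 && lsl8.imms == 55);
  // lsr x0, x1, #8 encodes as ubfm x0, x1, #8, #63.
  UbfmFields lsr8 = LsrFields(8, 64);
  assert(lsr8.immr == 8 && lsr8.imms == 63);
  return 0;
}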
@ -1358,9 +1451,6 @@ class Assembler : public AssemblerBase {
// Memory instructions.
// Load literal from pc + offset_from_pc.
void LoadLiteral(const CPURegister& rt, int offset_from_pc);
// Load integer or FP register.
void ldr(const CPURegister& rt, const MemOperand& src);
@ -1407,12 +1497,11 @@ class Assembler : public AssemblerBase {
void stnp(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& dst);
// Load literal to register.
void ldr(const Register& rt, uint64_t imm);
// Load literal to register from a pc relative address.
void ldr_pcrel(const CPURegister& rt, int imm19);
// Load literal to FP register.
void ldr(const FPRegister& ft, double imm);
void ldr(const FPRegister& ft, float imm);
// Load literal to register.
void ldr(const CPURegister& rt, const Immediate& imm);
// Move instructions. The default shift of -1 indicates that the move
// instruction will calculate an appropriate 16-bit immediate and left shift
@ -1485,7 +1574,7 @@ class Assembler : public AssemblerBase {
};
void nop(NopMarkerTypes n) {
ASSERT((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
}
@ -1646,7 +1735,7 @@ class Assembler : public AssemblerBase {
// subsequent instructions.
void EmitStringData(const char * string) {
size_t len = strlen(string) + 1;
ASSERT(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
EmitData(string, len);
// Pad with NULL characters until pc_ is aligned.
const char pad[] = {'\0', '\0', '\0', '\0'};
@ -1666,7 +1755,9 @@ class Assembler : public AssemblerBase {
// Code generation helpers --------------------------------------------------
unsigned num_pending_reloc_info() const { return num_pending_reloc_info_; }
bool IsConstPoolEmpty() const { return constpool_.IsEmpty(); }
Instruction* pc() const { return Instruction::Cast(pc_); }
Instruction* InstructionAt(int offset) const {
return reinterpret_cast<Instruction*>(buffer_ + offset);
@ -1678,44 +1769,44 @@ class Assembler : public AssemblerBase {
// Register encoding.
static Instr Rd(CPURegister rd) {
ASSERT(rd.code() != kSPRegInternalCode);
DCHECK(rd.code() != kSPRegInternalCode);
return rd.code() << Rd_offset;
}
static Instr Rn(CPURegister rn) {
ASSERT(rn.code() != kSPRegInternalCode);
DCHECK(rn.code() != kSPRegInternalCode);
return rn.code() << Rn_offset;
}
static Instr Rm(CPURegister rm) {
ASSERT(rm.code() != kSPRegInternalCode);
DCHECK(rm.code() != kSPRegInternalCode);
return rm.code() << Rm_offset;
}
static Instr Ra(CPURegister ra) {
ASSERT(ra.code() != kSPRegInternalCode);
DCHECK(ra.code() != kSPRegInternalCode);
return ra.code() << Ra_offset;
}
static Instr Rt(CPURegister rt) {
ASSERT(rt.code() != kSPRegInternalCode);
DCHECK(rt.code() != kSPRegInternalCode);
return rt.code() << Rt_offset;
}
static Instr Rt2(CPURegister rt2) {
ASSERT(rt2.code() != kSPRegInternalCode);
DCHECK(rt2.code() != kSPRegInternalCode);
return rt2.code() << Rt2_offset;
}
// These encoding functions allow the stack pointer to be encoded, and
// disallow the zero register.
static Instr RdSP(Register rd) {
ASSERT(!rd.IsZero());
DCHECK(!rd.IsZero());
return (rd.code() & kRegCodeMask) << Rd_offset;
}
static Instr RnSP(Register rn) {
ASSERT(!rn.IsZero());
DCHECK(!rn.IsZero());
return (rn.code() & kRegCodeMask) << Rn_offset;
}
@ -1830,7 +1921,6 @@ class Assembler : public AssemblerBase {
void CheckVeneerPool(bool force_emit, bool require_jump,
int margin = kVeneerDistanceMargin);
class BlockPoolsScope {
public:
explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
@ -1846,10 +1936,6 @@ class Assembler : public AssemblerBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
};
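Aside: BlockPoolsScope follows the usual RAII nesting-counter pattern; a stripped-down sketch with hypothetical stand-in types:

class ToyAssembler {
 public:
  ToyAssembler() : pools_blocked_nesting_(0) {}
  void StartBlockPools() { ++pools_blocked_nesting_; }
  void EndBlockPools() { --pools_blocked_nesting_; }
  // Pool emission is only allowed when no scope is active.
  bool ArePoolsBlocked() const { return pools_blocked_nesting_ > 0; }

 private:
  int pools_blocked_nesting_;
};

class ToyBlockPoolsScope {
 public:
  explicit ToyBlockPoolsScope(ToyAssembler* assem) : assem_(assem) {
    assem_->StartBlockPools();
  }
  ~ToyBlockPoolsScope() { assem_->EndBlockPools(); }

 private:
  ToyAssembler* assem_;
};

int main() {
  ToyAssembler masm;
  {
    ToyBlockPoolsScope outer(&masm);
    ToyBlockPoolsScope inner(&masm);  // nesting is fine
    if (!masm.ArePoolsBlocked()) return 1;
  }
  return masm.ArePoolsBlocked() ? 1 : 0;  // unblocked once scopes close
}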
// Available for constrained code generation scopes. Prefer
// MacroAssembler::Mov() when possible.
inline void LoadRelocated(const CPURegister& rt, const Operand& operand);
protected:
inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;
@ -1859,6 +1945,10 @@ class Assembler : public AssemblerBase {
static bool IsImmLSUnscaled(ptrdiff_t offset);
static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size);
void LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& addr, LoadStorePairOp op);
static bool IsImmLSPair(ptrdiff_t offset, LSDataSize size);
void Logical(const Register& rd,
const Register& rn,
const Operand& operand,
@ -1916,6 +2006,7 @@ class Assembler : public AssemblerBase {
const CPURegister& rt, const CPURegister& rt2);
static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
const CPURegister& rt, const CPURegister& rt2);
static inline LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);
// Remove the specified branch from the unbound label link chain.
// If available, a veneer for this label can be used for other branches in the
@ -1940,19 +2031,10 @@ class Assembler : public AssemblerBase {
const Operand& operand,
FlagsUpdate S,
Instr op);
void LoadStorePair(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& addr,
LoadStorePairOp op);
void LoadStorePairNonTemporal(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& addr,
LoadStorePairNonTemporalOp op);
// Register the relocation information for the operand and load its value
// into rt.
void LoadRelocatedValue(const CPURegister& rt,
const Operand& operand,
LoadLiteralOp op);
void ConditionalSelect(const Register& rd,
const Register& rn,
const Register& rm,
@ -1999,11 +2081,16 @@ class Assembler : public AssemblerBase {
// instructions.
void BlockConstPoolFor(int instructions);
// Set how far from current pc the next constant pool check will be.
void SetNextConstPoolCheckIn(int instructions) {
next_constant_pool_check_ = pc_offset() + instructions * kInstructionSize;
}
// Emit the instruction at pc_.
void Emit(Instr instruction) {
STATIC_ASSERT(sizeof(*pc_) == 1);
STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
ASSERT((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
DCHECK((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
memcpy(pc_, &instruction, sizeof(instruction));
pc_ += sizeof(instruction);
@ -2012,8 +2099,8 @@ class Assembler : public AssemblerBase {
// Emit data inline in the instruction stream.
void EmitData(void const * data, unsigned size) {
ASSERT(sizeof(*pc_) == 1);
ASSERT((pc_ + size) <= (buffer_ + buffer_size_));
DCHECK(sizeof(*pc_) == 1);
DCHECK((pc_ + size) <= (buffer_ + buffer_size_));
// TODO(all): Somehow register we have some data here. Then we can
// disassemble it correctly.
@ -2030,12 +2117,13 @@ class Assembler : public AssemblerBase {
int next_constant_pool_check_;
// Constant pool generation
// Pools are emitted in the instruction stream, preferably after unconditional
// jumps or after returns from functions (in dead code locations).
// If a long code sequence does not contain unconditional jumps, it is
// necessary to emit the constant pool before the pool gets too far from the
// location it is accessed from. In this case, we emit a jump over the emitted
// constant pool.
// Pools are emitted in the instruction stream. They are emitted when:
// * the distance to the first use is above a pre-defined distance or
// * the number of entries in the pool is above a pre-defined size or
// * code generation is finished
// If a pool needs to be emitted before code generation is finished, a branch
// over the emitted pool will be inserted.
// Constants in the pool may be addresses of functions that get relocated;
// if so, a relocation info entry is associated to the constant pool entry.
@ -2043,34 +2131,22 @@ class Assembler : public AssemblerBase {
// expensive. By default we only check again once a number of instructions
// has been generated. That also means that the sizing of the buffers is not
// an exact science, and that we rely on some slop to not overrun buffers.
static const int kCheckConstPoolIntervalInst = 128;
static const int kCheckConstPoolInterval =
kCheckConstPoolIntervalInst * kInstructionSize;
// Constants in pools are accessed via pc relative addressing, which can
// reach +/-4KB thereby defining a maximum distance between the instruction
// and the accessed constant.
static const int kMaxDistToConstPool = 4 * KB;
static const int kMaxNumPendingRelocInfo =
kMaxDistToConstPool / kInstructionSize;
// Average distance between a constant pool and the first instruction
// accessing the constant pool. Longer distance should result in less I-cache
// pollution.
// In practice the distance will be smaller since constant pool emission is
// forced after function return and sometimes after unconditional branches.
static const int kAvgDistToConstPool =
kMaxDistToConstPool - kCheckConstPoolInterval;
static const int kCheckConstPoolInterval = 128;
// Distance to first use after which a pool will be emitted. Pool entries
// are accessed with a pc-relative load, therefore this cannot be more than
// 1 * MB. Since constant pool emission checks are interval-based, this value
// is an approximation.
static const int kApproxMaxDistToConstPool = 64 * KB;
// Number of pool entries after which a pool will be emitted. Since constant
// pool emission checks are interval-based, this value is an approximation.
static const int kApproxMaxPoolEntryCount = 512;
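Aside: a hedged sketch of the emission decision these two approximations imply; the real Assembler::CheckConstPool additionally handles forced emission and the blocked scopes tracked below. Names here are illustrative:

// Emit the pool when the first pending entry is drifting out of range, when
// the pool has grown large, or unconditionally at the end of code generation.
const int kApproxMaxDistToConstPoolBytes = 64 * 1024;
const int kApproxMaxPoolEntryCountSketch = 512;

bool ShouldEmitConstPool(int distance_to_first_use_bytes, int entry_count,
                         bool code_generation_finished) {
  if (code_generation_finished) return entry_count > 0;
  return distance_to_first_use_bytes >= kApproxMaxDistToConstPoolBytes ||
         entry_count >= kApproxMaxPoolEntryCountSketch;
}

int main() {
  return ShouldEmitConstPool(70 * 1024, 3, false) ? 0 : 1;
}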
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
int no_const_pool_before_; // Block emission before this pc offset.
// Keep track of the first instruction requiring a constant pool entry
// since the previous constant pool was emitted.
int first_const_pool_use_;
// Emission of the veneer pools may be blocked in some code sequences.
int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
@ -2086,10 +2162,8 @@ class Assembler : public AssemblerBase {
// If every instruction in a long sequence is accessing the pool, we need one
// pending relocation entry per instruction.
// the buffer of pending relocation info
RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
// number of pending reloc info entries in the buffer
int num_pending_reloc_info_;
// The pending constant pool.
ConstPool constpool_;
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
@ -2103,7 +2177,7 @@ class Assembler : public AssemblerBase {
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
void SetRecordedAstId(TypeFeedbackId ast_id) {
ASSERT(recorded_ast_id_.IsNone());
DCHECK(recorded_ast_id_.IsNone());
recorded_ast_id_ = ast_id;
}
@ -2151,7 +2225,7 @@ class Assembler : public AssemblerBase {
static const int kVeneerDistanceCheckMargin =
kVeneerNoProtectionFactor * kVeneerDistanceMargin;
int unresolved_branches_first_limit() const {
ASSERT(!unresolved_branches_.empty());
DCHECK(!unresolved_branches_.empty());
return unresolved_branches_.begin()->first;
}
// This is similar to next_constant_pool_check_ and helps reduce the overhead
@ -2176,6 +2250,7 @@ class Assembler : public AssemblerBase {
PositionsRecorder positions_recorder_;
friend class PositionsRecorder;
friend class EnsureSpace;
friend class ConstPool;
};
class PatchingAssembler : public Assembler {
@ -2203,24 +2278,21 @@ class PatchingAssembler : public Assembler {
~PatchingAssembler() {
// Const pool should still be blocked.
ASSERT(is_const_pool_blocked());
DCHECK(is_const_pool_blocked());
EndBlockPools();
// Verify we have generated the number of instruction we expected.
ASSERT((pc_offset() + kGap) == buffer_size_);
DCHECK((pc_offset() + kGap) == buffer_size_);
// Verify no relocation information has been emitted.
ASSERT(num_pending_reloc_info() == 0);
DCHECK(IsConstPoolEmpty());
// Flush the Instruction cache.
size_t length = buffer_size_ - kGap;
CPU::FlushICache(buffer_, length);
CpuFeatures::FlushICache(buffer_, length);
}
static const int kMovInt64NInstrs = 4;
void MovInt64(const Register& rd, int64_t imm);
// See definition of PatchAdrFar() for details.
static const int kAdrFarPatchableNNops = kMovInt64NInstrs - 1;
static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 3;
void PatchAdrFar(Instruction* target);
static const int kAdrFarPatchableNNops = 2;
static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
void PatchAdrFar(ptrdiff_t target_offset);
};

148
deps/v8/src/arm64/builtins-arm64.cc

@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h"
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#include "codegen.h"
#include "debug.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "runtime.h"
#include "stub-cache.h"
#include "src/codegen.h"
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
#include "src/runtime.h"
#include "src/stub-cache.h"
namespace v8 {
namespace internal {
@ -66,7 +66,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
num_extra_args = 1;
__ Push(x1);
} else {
ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
}
// JumpToExternalReference expects x0 to contain the number of arguments
@ -294,7 +294,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
__ B(hs, &ok);
CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
GenerateTailCallToReturnedCode(masm);
__ Bind(&ok);
@ -304,7 +304,6 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool count_constructions,
bool create_memento) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
@ -315,12 +314,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -----------------------------------
ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
// Should never create mementos for api functions.
ASSERT(!is_api_function || !create_memento);
// Should never create mementos before slack tracking is finished.
ASSERT(!count_constructions || !create_memento);
DCHECK(!is_api_function || !create_memento);
Isolate* isolate = masm->isolate();
@ -366,24 +361,28 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ CompareInstanceType(init_map, x10, JS_FUNCTION_TYPE);
__ B(eq, &rt_call);
if (count_constructions) {
Register constructon_count = x14;
if (!is_api_function) {
Label allocate;
MemOperand bit_field3 =
FieldMemOperand(init_map, Map::kBitField3Offset);
// Check if slack tracking is enabled.
__ Ldr(x4, bit_field3);
__ DecodeField<Map::ConstructionCount>(constructon_count, x4);
__ Cmp(constructon_count, Operand(JSFunction::kNoSlackTracking));
__ B(eq, &allocate);
// Decrease generous allocation count.
__ Ldr(x3, FieldMemOperand(constructor,
JSFunction::kSharedFunctionInfoOffset));
MemOperand constructor_count =
FieldMemOperand(x3, SharedFunctionInfo::kConstructionCountOffset);
__ Ldrb(x4, constructor_count);
__ Subs(x4, x4, 1);
__ Strb(x4, constructor_count);
__ Subs(x4, x4, Operand(1 << Map::ConstructionCount::kShift));
__ Str(x4, bit_field3);
__ Cmp(constructon_count, Operand(JSFunction::kFinishSlackTracking));
__ B(ne, &allocate);
// Push the constructor and map to the stack, and the constructor again
// as argument to the runtime call.
__ Push(constructor, init_map, constructor);
// The call will replace the stub, so the countdown is only done once.
__ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
__ Pop(init_map, constructor);
__ Mov(constructon_count, Operand(JSFunction::kNoSlackTracking));
__ Bind(&allocate);
}
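Aside: the Subs above decrements the ConstructionCount bitfield in place by subtracting 1 << kShift, without decoding and re-encoding it. A minimal sketch of the trick (field position hypothetical); it is safe only because the generated code has already branched away for counts that must not be decremented:

#include <cassert>
#include <cstdint>

// A counter packed at bits [kShift, kShift + 3) of a word is decremented by
// subtracting (1 << kShift), leaving neighbouring bits untouched.
const unsigned kShift = 29;                                 // hypothetical
const uint32_t kMask = static_cast<uint32_t>(7) << kShift;  // 3-bit field

uint32_t DecodeField(uint32_t word) { return (word & kMask) >> kShift; }

uint32_t DecrementField(uint32_t word) {
  assert(DecodeField(word) != 0);  // would otherwise borrow into lower bits
  return word - (static_cast<uint32_t>(1) << kShift);
}

int main() {
  uint32_t bit_field3 = (static_cast<uint32_t>(4) << kShift) | 0x12345;
  bit_field3 = DecrementField(bit_field3);
  assert(DecodeField(bit_field3) == 3);      // count went 4 -> 3
  assert((bit_field3 & ~kMask) == 0x12345);  // other bits untouched
  return 0;
}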
@ -413,8 +412,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Add(first_prop, new_obj, JSObject::kHeaderSize);
// Fill all of the in-object properties with the appropriate filler.
Register undef = x7;
__ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
Register filler = x7;
__ LoadRoot(filler, Heap::kUndefinedValueRootIndex);
// Obtain number of pre-allocated property fields and in-object
// properties.
@ -432,48 +431,50 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Register prop_fields = x6;
__ Sub(prop_fields, obj_size, JSObject::kHeaderSize / kPointerSize);
if (count_constructions) {
if (!is_api_function) {
Label no_inobject_slack_tracking;
// Check if slack tracking is enabled.
__ Cmp(constructon_count, Operand(JSFunction::kNoSlackTracking));
__ B(eq, &no_inobject_slack_tracking);
constructon_count = NoReg;
// Fill the pre-allocated fields with undef.
__ FillFields(first_prop, prealloc_fields, undef);
__ FillFields(first_prop, prealloc_fields, filler);
// Register first_non_prealloc is the offset of the first field after
// Update first_prop register to be the offset of the first field after
// pre-allocated fields.
Register first_non_prealloc = x12;
__ Add(first_non_prealloc, first_prop,
__ Add(first_prop, first_prop,
Operand(prealloc_fields, LSL, kPointerSizeLog2));
first_prop = NoReg;
if (FLAG_debug_code) {
Register obj_end = x5;
Register obj_end = x14;
__ Add(obj_end, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
__ Cmp(first_non_prealloc, obj_end);
__ Cmp(first_prop, obj_end);
__ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
}
// Fill the remaining fields with one pointer filler map.
Register one_pointer_filler = x5;
Register non_prealloc_fields = x6;
__ LoadRoot(one_pointer_filler, Heap::kOnePointerFillerMapRootIndex);
__ Sub(non_prealloc_fields, prop_fields, prealloc_fields);
__ FillFields(first_non_prealloc, non_prealloc_fields,
one_pointer_filler);
prop_fields = NoReg;
} else if (create_memento) {
__ LoadRoot(filler, Heap::kOnePointerFillerMapRootIndex);
__ Sub(prop_fields, prop_fields, prealloc_fields);
__ bind(&no_inobject_slack_tracking);
}
if (create_memento) {
// Fill the pre-allocated fields with undef.
__ FillFields(first_prop, prop_fields, undef);
__ FillFields(first_prop, prop_fields, filler);
__ Add(first_prop, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
__ LoadRoot(x14, Heap::kAllocationMementoMapRootIndex);
ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
__ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
// Load the AllocationSite
__ Peek(x14, 2 * kXRegSize);
ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
__ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
first_prop = NoReg;
} else {
// Fill all of the property fields with undef.
__ FillFields(first_prop, prop_fields, undef);
__ FillFields(first_prop, prop_fields, filler);
first_prop = NoReg;
prop_fields = NoReg;
}
@ -516,7 +517,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Initialize the fields to undefined.
Register elements = x10;
__ Add(elements, new_array, FixedArray::kHeaderSize);
__ FillFields(elements, element_count, undef);
__ FillFields(elements, element_count, filler);
// Store the initialized FixedArray into the properties field of the
// JSObject.
@ -541,7 +542,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Peek(x4, 2 * kXRegSize);
__ Push(x4);
__ Push(constructor); // Argument for Runtime_NewObject.
__ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
__ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
__ Mov(x4, x0);
// If we ended up using the runtime, and we want a memento, then the
// runtime call made it for us, and we shouldn't do create count
@ -549,7 +550,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ jmp(&count_incremented);
} else {
__ Push(constructor); // Argument for Runtime_NewObject.
__ CallRuntime(Runtime::kHiddenNewObject, 1);
__ CallRuntime(Runtime::kNewObject, 1);
__ Mov(x4, x0);
}
@ -624,7 +625,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
if (!is_api_function && !count_constructions) {
if (!is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
@ -675,18 +676,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true, false, false);
Generate_JSConstructStubHelper(masm, true, false);
}
@ -786,7 +782,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
GenerateTailCallToReturnedCode(masm);
}
@ -796,11 +792,11 @@ static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
Register function = x1;
// Preserve function. At the same time, push arguments for
// kHiddenCompileOptimized.
// kCompileOptimized.
__ LoadObject(x10, masm->isolate()->factory()->ToBoolean(concurrent));
__ Push(function, function, x10);
__ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
__ CallRuntime(Runtime::kCompileOptimized, 2);
// Restore receiver.
__ Pop(function);
@ -910,7 +906,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// preserve the registers with parameters.
__ PushXRegList(kSafepointSavedRegisters);
// Pass the function and deoptimization type to the runtime system.
__ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
__ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
__ PopXRegList(kSafepointSavedRegisters);
}
@ -940,7 +936,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the deoptimization type to the runtime system.
__ Mov(x0, Smi::FromInt(static_cast<int>(type)));
__ Push(x0);
__ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
__ CallRuntime(Runtime::kNotifyDeoptimized, 1);
}
// Get the full codegen state from the stack and untag it.
@ -1025,7 +1021,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ B(hs, &ok);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kHiddenStackGuard, 0);
__ CallRuntime(Runtime::kStackGuard, 0);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@ -1069,7 +1065,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 3a. Patch the first argument if necessary when calling a function.
Label shift_arguments;
__ Mov(call_type, static_cast<int>(call_type_JS_func));
{ Label convert_to_object, use_global_receiver, patch_receiver;
{ Label convert_to_object, use_global_proxy, patch_receiver;
// Change context eagerly in case we need the global receiver.
__ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
@ -1093,8 +1089,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ JumpIfSmi(receiver, &convert_to_object);
__ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex,
&use_global_receiver);
__ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_receiver);
&use_global_proxy);
__ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_proxy);
STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
__ JumpIfObjectType(receiver, scratch1, scratch2,
@ -1122,10 +1118,10 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Mov(call_type, static_cast<int>(call_type_JS_func));
__ B(&patch_receiver);
__ Bind(&use_global_receiver);
__ Bind(&use_global_proxy);
__ Ldr(receiver, GlobalObjectMemOperand());
__ Ldr(receiver,
FieldMemOperand(receiver, GlobalObject::kGlobalReceiverOffset));
FieldMemOperand(receiver, GlobalObject::kGlobalProxyOffset));
__ Bind(&patch_receiver);
@ -1250,7 +1246,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// TODO(jbramley): Check that the stack usage here is safe.
__ Sub(x10, jssp, x10);
// Check if the arguments will overflow the stack.
__ Cmp(x10, Operand(argc, LSR, kSmiShift - kPointerSizeLog2));
__ Cmp(x10, Operand::UntagSmiAndScale(argc, kPointerSizeLog2));
__ B(gt, &enough_stack_space);
// There is not enough stack space, so use a builtin to throw an appropriate
// error.
@ -1282,7 +1278,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Compute and push the receiver.
// Do not transform the receiver for strict mode functions.
Label convert_receiver_to_object, use_global_receiver;
Label convert_receiver_to_object, use_global_proxy;
__ Ldr(w10, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
__ Tbnz(x10, SharedFunctionInfo::kStrictModeFunction, &push_receiver);
// Do not transform the receiver for native functions.
@ -1290,9 +1286,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Compute the receiver in sloppy mode.
__ JumpIfSmi(receiver, &convert_receiver_to_object);
__ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_receiver);
__ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_proxy);
__ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex,
&use_global_receiver);
&use_global_proxy);
// Check if the receiver is already a JavaScript object.
STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
@ -1306,9 +1302,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ Mov(receiver, x0);
__ B(&push_receiver);
__ Bind(&use_global_receiver);
__ Bind(&use_global_proxy);
__ Ldr(x10, GlobalObjectMemOperand());
__ Ldr(receiver, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
__ Ldr(receiver, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
// Push the receiver
__ Bind(&push_receiver);

788
deps/v8/src/arm64/code-stubs-arm64.cc

File diff suppressed because it is too large

77
deps/v8/src/arm64/code-stubs-arm64.h

@ -5,7 +5,7 @@
#ifndef V8_ARM64_CODE_STUBS_ARM64_H_
#define V8_ARM64_CODE_STUBS_ARM64_H_
#include "ic-inl.h"
#include "src/ic-inl.h"
namespace v8 {
namespace internal {
@ -27,8 +27,8 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
private:
SaveFPRegsMode save_doubles_;
Major MajorKey() { return StoreBufferOverflow; }
int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
Major MajorKey() const { return StoreBufferOverflow; }
int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};
@ -56,15 +56,14 @@ class StringHelper : public AllStatic {
class StoreRegistersStateStub: public PlatformCodeStub {
public:
StoreRegistersStateStub(Isolate* isolate, SaveFPRegsMode with_fp)
: PlatformCodeStub(isolate), save_doubles_(with_fp) {}
explicit StoreRegistersStateStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
static Register to_be_pushed_lr() { return ip0; }
static void GenerateAheadOfTime(Isolate* isolate);
private:
Major MajorKey() { return StoreRegistersState; }
int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
SaveFPRegsMode save_doubles_;
Major MajorKey() const { return StoreRegistersState; }
int MinorKey() const { return 0; }
void Generate(MacroAssembler* masm);
};
@ -72,14 +71,13 @@ class StoreRegistersStateStub: public PlatformCodeStub {
class RestoreRegistersStateStub: public PlatformCodeStub {
public:
RestoreRegistersStateStub(Isolate* isolate, SaveFPRegsMode with_fp)
: PlatformCodeStub(isolate), save_doubles_(with_fp) {}
explicit RestoreRegistersStateStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
static void GenerateAheadOfTime(Isolate* isolate);
private:
Major MajorKey() { return RestoreRegistersState; }
int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
SaveFPRegsMode save_doubles_;
Major MajorKey() const { return RestoreRegistersState; }
int MinorKey() const { return 0; }
void Generate(MacroAssembler* masm);
};
@ -122,17 +120,17 @@ class RecordWriteStub: public PlatformCodeStub {
Instruction* instr2 = instr1->following();
if (instr1->IsUncondBranchImm()) {
ASSERT(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
DCHECK(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
return INCREMENTAL;
}
ASSERT(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
DCHECK(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
if (instr2->IsUncondBranchImm()) {
return INCREMENTAL_COMPACTION;
}
ASSERT(instr2->IsPCRelAddressing());
DCHECK(instr2->IsPCRelAddressing());
return STORE_BUFFER_ONLY;
}
@ -151,31 +149,31 @@ class RecordWriteStub: public PlatformCodeStub {
Instruction* instr1 = patcher.InstructionAt(0);
Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
// Instructions must be either 'adr' or 'b'.
ASSERT(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
ASSERT(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
DCHECK(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
DCHECK(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
// Retrieve the offsets to the labels.
int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();
switch (mode) {
case STORE_BUFFER_ONLY:
ASSERT(GetMode(stub) == INCREMENTAL ||
DCHECK(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
patcher.adr(xzr, offset_to_incremental_noncompacting);
patcher.adr(xzr, offset_to_incremental_compacting);
break;
case INCREMENTAL:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
patcher.adr(xzr, offset_to_incremental_compacting);
break;
case INCREMENTAL_COMPACTION:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
patcher.adr(xzr, offset_to_incremental_noncompacting);
patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
break;
}
ASSERT(GetMode(stub) == mode);
DCHECK(GetMode(stub) == mode);
}
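Aside: a model of the two-instruction mode encoding that GetMode() reads and Patch() rewrites. Each of the stub's first two instructions is either 'adr xzr, ...' (an effective no-op) or an unconditional branch, and the combination encodes the write-barrier mode (enum name simplified):

#include <cassert>

enum ToyMode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

ToyMode DecodeMode(bool instr1_is_branch, bool instr2_is_branch) {
  if (instr1_is_branch) return INCREMENTAL;
  if (instr2_is_branch) return INCREMENTAL_COMPACTION;
  return STORE_BUFFER_ONLY;  // both instructions are 'adr xzr' no-ops
}

int main() {
  assert(DecodeMode(false, false) == STORE_BUFFER_ONLY);
  assert(DecodeMode(true, false) == INCREMENTAL);
  assert(DecodeMode(false, true) == INCREMENTAL_COMPACTION);
  return 0;
}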
private:
@ -191,7 +189,7 @@ class RecordWriteStub: public PlatformCodeStub {
scratch0_(scratch),
saved_regs_(kCallerSaved),
saved_fp_regs_(kCallerSavedFP) {
ASSERT(!AreAliased(scratch, object, address));
DCHECK(!AreAliased(scratch, object, address));
// The SaveCallerSaveRegisters method needs to save caller-saved
// registers, but we don't bother saving MacroAssembler scratch registers.
@ -303,9 +301,9 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }
Major MajorKey() const { return RecordWrite; }
int MinorKey() {
int MinorKey() const {
return MinorKeyFor(object_, value_, address_, remembered_set_action_,
save_fp_regs_mode_);
}
@ -315,9 +313,9 @@ class RecordWriteStub: public PlatformCodeStub {
Register address,
RememberedSetAction action,
SaveFPRegsMode fp_mode) {
ASSERT(object.Is64Bits());
ASSERT(value.Is64Bits());
ASSERT(address.Is64Bits());
DCHECK(object.Is64Bits());
DCHECK(value.Is64Bits());
DCHECK(address.Is64Bits());
return ObjectBits::encode(object.code()) |
ValueBits::encode(value.code()) |
AddressBits::encode(address.code()) |
@ -354,8 +352,8 @@ class DirectCEntryStub: public PlatformCodeStub {
void GenerateCall(MacroAssembler* masm, Register target);
private:
Major MajorKey() { return DirectCEntry; }
int MinorKey() { return 0; }
Major MajorKey() const { return DirectCEntry; }
int MinorKey() const { return 0; }
bool NeedsImmovableCode() { return true; }
};
@ -400,11 +398,9 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
Major MajorKey() { return NameDictionaryLookup; }
Major MajorKey() const { return NameDictionaryLookup; }
int MinorKey() {
return LookupModeBits::encode(mode_);
}
int MinorKey() const { return LookupModeBits::encode(mode_); }
class LookupModeBits: public BitField<LookupMode, 0, 1> {};
@ -417,8 +413,8 @@ class SubStringStub: public PlatformCodeStub {
explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
private:
Major MajorKey() { return SubString; }
int MinorKey() { return 0; }
Major MajorKey() const { return SubString; }
int MinorKey() const { return 0; }
void Generate(MacroAssembler* masm);
};
@ -447,8 +443,8 @@ class StringCompareStub: public PlatformCodeStub {
Register scratch3);
private:
virtual Major MajorKey() { return StringCompare; }
virtual int MinorKey() { return 0; }
virtual Major MajorKey() const { return StringCompare; }
virtual int MinorKey() const { return 0; }
virtual void Generate(MacroAssembler* masm);
static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
@ -461,8 +457,9 @@ class StringCompareStub: public PlatformCodeStub {
};
struct PlatformCallInterfaceDescriptor {
explicit PlatformCallInterfaceDescriptor(
class PlatformInterfaceDescriptor {
public:
explicit PlatformInterfaceDescriptor(
TargetAddressStorageMode storage_mode)
: storage_mode_(storage_mode) { }

144
deps/v8/src/arm64/codegen-arm64.cc

@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h"
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#include "codegen.h"
#include "macro-assembler.h"
#include "simulator-arm64.h"
#include "src/arm64/simulator-arm64.h"
#include "src/codegen.h"
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
@ -35,7 +35,8 @@ UnaryMathFunction CreateExpFunction() {
// an AAPCS64-compliant exp() function. This will be faster than the C
// library's exp() function, but probably less accurate.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
@ -61,10 +62,10 @@ UnaryMathFunction CreateExpFunction() {
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
DCHECK(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
CpuFeatures::FlushICache(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunction>(buffer);
@ -85,14 +86,14 @@ UnaryMathFunction CreateSqrtFunction() {
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterFrame(StackFrame::INTERNAL);
ASSERT(!masm->has_frame());
DCHECK(!masm->has_frame());
masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveFrame(StackFrame::INTERNAL);
ASSERT(masm->has_frame());
DCHECK(masm->has_frame());
masm->set_has_frame(false);
}
@ -101,26 +102,28 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// Code generators
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm, AllocationSiteMode mode,
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* allocation_memento_found) {
// ----------- S t a t e -------------
// -- x2 : receiver
// -- x3 : target map
// -----------------------------------
Register receiver = x2;
Register map = x3;
ASM_LOCATION(
"ElementsTransitionGenerator::GenerateMapChangeElementsTransition");
DCHECK(!AreAliased(receiver, key, value, target_map));
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
DCHECK(allocation_memento_found != NULL);
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
allocation_memento_found);
}
// Set transitioned map.
__ Str(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
map,
target_map,
x10,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
@ -130,19 +133,25 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
// ----------- S t a t e -------------
// -- lr : return address
// -- x0 : value
// -- x1 : key
// -- x2 : receiver
// -- x3 : target map, scratch for subsequent call
// -----------------------------------
Register receiver = x2;
Register target_map = x3;
Label gc_required, only_change_map;
Register elements = x4;
Register length = x5;
Register array_size = x6;
Register array = x7;
Register scratch = x6;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map,
elements, length, array_size, array));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
@ -150,32 +159,28 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
Register elements = x4;
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
__ Push(lr);
Register length = x5;
__ Ldrsw(length, UntagSmiFieldMemOperand(elements,
FixedArray::kLengthOffset));
// Allocate new FixedDoubleArray.
Register array_size = x6;
Register array = x7;
__ Lsl(array_size, length, kDoubleSizeLog2);
__ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
__ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
// Register array is non-tagged heap object.
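Aside: the Lsl/Add pair above computes the backing-store size as 8 bytes per element (kDoubleSizeLog2 == 3) plus the array header. In plain arithmetic (header size illustrative: map word plus length word):

#include <cassert>
#include <cstdint>

const unsigned kDoubleSizeLog2 = 3;
const uint64_t kFixedDoubleArrayHeaderSize = 16;

uint64_t FixedDoubleArraySize(uint64_t length) {
  return (length << kDoubleSizeLog2) + kFixedDoubleArrayHeaderSize;
}

int main() {
  assert(FixedDoubleArraySize(0) == 16);  // empty array: header only
  assert(FixedDoubleArraySize(4) == 48);  // 4 doubles + header
  return 0;
}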
// Set the destination FixedDoubleArray's length and map.
Register map_root = x6;
Register map_root = array_size;
__ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
__ SmiTag(x11, length);
__ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
__ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@ -183,7 +188,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ Add(x10, array, kHeapObjectTag);
__ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver, JSObject::kElementsOffset, x10,
x6, kLRHasBeenSaved, kDontSaveFPRegs,
scratch, kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Prepare for conversion loop.
@ -202,7 +207,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ Bind(&only_change_map);
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ B(&done);
@ -234,20 +239,22 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
// ----------- S t a t e -------------
// -- x0 : value
// -- x1 : key
// -- x2 : receiver
// -- lr : return address
// -- x3 : target map, scratch for subsequent call
// -- x4 : scratch (elements)
// -----------------------------------
Register value = x0;
Register key = x1;
Register receiver = x2;
Register target_map = x3;
Register elements = x4;
Register array_size = x6;
Register array = x7;
Register length = x5;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map,
elements, array_size, array, length));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
@ -256,7 +263,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
Label only_change_map;
Register elements = x4;
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
@ -264,20 +271,16 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// TODO(all): These registers may not need to be pushed. Examine
// RecordWriteStub and check whether it's needed.
__ Push(target_map, receiver, key, value);
Register length = x5;
__ Ldrsw(length, UntagSmiFieldMemOperand(elements,
FixedArray::kLengthOffset));
// Allocate new FixedArray.
Register array_size = x6;
Register array = x7;
Label gc_required;
__ Mov(array_size, FixedDoubleArray::kHeaderSize);
__ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
__ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);
// Set destination FixedDoubleArray's length and map.
Register map_root = x6;
Register map_root = array_size;
__ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
__ SmiTag(x11, length);
__ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
@ -315,8 +318,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ B(eq, &convert_hole);
// Non-hole double, copy value into a heap number.
Register heap_num = x5;
__ AllocateHeapNumber(heap_num, &gc_required, x6, x4,
Register heap_num = length;
Register scratch = array_size;
Register scratch2 = elements;
__ AllocateHeapNumber(heap_num, &gc_required, scratch, scratch2,
x13, heap_num_map);
__ Mov(x13, dst_elements);
__ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
@ -351,7 +356,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
CodeAgingHelper::CodeAgingHelper() {
ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// The sequence of instructions that is patched out for aging code is the
// following boilerplate stack-building prologue that is found both in
// FUNCTION and OPTIMIZED_FUNCTION code:
@ -363,7 +368,7 @@ CodeAgingHelper::CodeAgingHelper() {
#ifdef DEBUG
const int length = kCodeAgeStubEntryOffset / kInstructionSize;
ASSERT(old_sequence_.length() >= kCodeAgeStubEntryOffset);
DCHECK(old_sequence_.length() >= kCodeAgeStubEntryOffset);
PatchingAssembler patcher_old(old_sequence_.start(), length);
MacroAssembler::EmitCodeAgeSequence(&patcher_old, NULL);
#endif
@ -415,7 +420,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register index,
Register result,
Label* call_runtime) {
ASSERT(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
DCHECK(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
// Fetch the instance type of the receiver into result register.
__ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
@ -473,7 +478,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Assert(eq, kExternalStringExpectedButNotFound);
}
// Rule out short external strings.
STATIC_CHECK(kShortExternalStringTag != 0);
STATIC_ASSERT(kShortExternalStringTag != 0);
// TestAndBranchIfAnySet can emit Tbnz. Do not use it because call_runtime
// can be bound far away in deferred code.
__ Tst(result, kShortExternalStringMask);
@@ -511,10 +516,11 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
// instead of fmul and fsub. Doing this changes the result, but since this is
// an estimation anyway, does it matter?
ASSERT(!AreAliased(input, result,
DCHECK(!AreAliased(input, result,
double_temp1, double_temp2,
temp1, temp2, temp3));
ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
DCHECK(!masm->serializer_enabled()); // External references not serializable.
Label done;
DoubleRegister double_temp3 = result;
@@ -534,7 +540,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
Label result_is_finite_non_zero;
// Assert that we can load offset 0 (the small input threshold) and offset 1
// (the large input threshold) with a single ldp.
ASSERT(kDRegSize == (ExpConstant(constants, 1).offset() -
DCHECK(kDRegSize == (ExpConstant(constants, 1).offset() -
ExpConstant(constants, 0).offset()));
__ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));
@@ -564,7 +570,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ Bind(&result_is_finite_non_zero);
// Assert that we can load offset 3 and offset 4 with a single ldp.
ASSERT(kDRegSize == (ExpConstant(constants, 4).offset() -
DCHECK(kDRegSize == (ExpConstant(constants, 4).offset() -
ExpConstant(constants, 3).offset()));
__ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
__ Fmadd(double_temp1, double_temp1, input, double_temp3);
@@ -572,7 +578,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ Fsub(double_temp1, double_temp1, double_temp3);
// Assert that we can load offset 5 and offset 6 with a single ldp.
ASSERT(kDRegSize == (ExpConstant(constants, 6).offset() -
DCHECK(kDRegSize == (ExpConstant(constants, 6).offset() -
ExpConstant(constants, 5).offset()));
__ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
// TODO(jbramley): Consider using Fnmsub here.

4
deps/v8/src/arm64/codegen-arm64.h

@@ -5,8 +5,8 @@
#ifndef V8_ARM64_CODEGEN_ARM64_H_
#define V8_ARM64_CODEGEN_ARM64_H_
#include "ast.h"
#include "ic-inl.h"
#include "src/ast.h"
#include "src/ic-inl.h"
namespace v8 {
namespace internal {

17
deps/v8/src/arm64/constants-arm64.h

@@ -15,7 +15,9 @@ STATIC_ASSERT(sizeof(1L) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
// Get the standard printf format macros for C99 stdint types.
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
@@ -25,8 +27,7 @@ namespace internal {
const unsigned kInstructionSize = 4;
const unsigned kInstructionSizeLog2 = 2;
const unsigned kLiteralEntrySize = 4;
const unsigned kLiteralEntrySizeLog2 = 2;
const unsigned kLoadLiteralScaleLog2 = 2;
const unsigned kMaxLoadLiteralRange = 1 * MB;
const unsigned kNumberOfRegisters = 32;
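Note on the renamed constants: ldr (literal) encodes its PC-relative offset in 4-byte units, so byte offsets are shifted right by kLoadLiteralScaleLog2 before encoding, giving the +/-1MB kMaxLoadLiteralRange. A minimal standalone sketch of that scaling (the helper name and explicit range check are illustrative, not V8 API):
#include <cassert>
#include <cstdint>
constexpr unsigned kLoadLiteralScaleLog2 = 2;
// Convert a byte offset into the word-scaled immediate of a load-literal
// instruction. Offsets must be word aligned and within +/-1MB.
int32_t EncodeLiteralOffset(int64_t byte_offset) {
  assert((byte_offset & 3) == 0);                                // word aligned
  assert(byte_offset >= -(1 << 20) && byte_offset < (1 << 20));  // +/-1MB range
  return static_cast<int32_t>(byte_offset >> kLoadLiteralScaleLog2);
}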
@@ -258,15 +259,15 @@ enum Condition {
nv = 15 // Behaves as always/al.
};
inline Condition InvertCondition(Condition cond) {
inline Condition NegateCondition(Condition cond) {
// Conditions al and nv behave identically, as "always true". They can't be
// inverted, because there is no never condition.
ASSERT((cond != al) && (cond != nv));
DCHECK((cond != al) && (cond != nv));
return static_cast<Condition>(cond ^ 1);
}
// Corresponds to transposing the operands of a comparison.
inline Condition ReverseConditionForCmp(Condition cond) {
// Commute a condition such that {a cond b == b cond' a}.
inline Condition CommuteCondition(Condition cond) {
switch (cond) {
case lo:
return hi;
@@ -293,7 +294,7 @@ inline Condition ReverseConditionForCmp(Condition cond) {
// 'mi' for instance).
UNREACHABLE();
return nv;
};
}
}
enum FlagsUpdate {
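The two helpers above differ: negation flips the sense of the test on the same operands, while commuting rewrites {a cond b} as {b cond' a}. A self-contained sketch of both mappings (illustrative code using the standard ARM64 condition encoding, not the V8 sources):
#include <cassert>
enum Cond { eq = 0, ne = 1, hs = 2, lo = 3, mi = 4, pl = 5, vs = 6, vc = 7,
            hi = 8, ls = 9, ge = 10, lt = 11, gt = 12, le = 13, al = 14, nv = 15 };
// Conditions are encoded so that flipping bit 0 yields the negation
// (eq<->ne, lo<->hs, ...); al/nv have no negation and are excluded.
Cond Negate(Cond c) {
  assert(c != al && c != nv);
  return static_cast<Cond>(c ^ 1);
}
// Commuting only changes the ordered comparisons; eq/ne are symmetric,
// and flag-specific conditions (mi, vs, ...) cannot be commuted.
Cond Commute(Cond c) {
  switch (c) {
    case lo: return hi;  case hi: return lo;
    case ls: return hs;  case hs: return ls;
    case lt: return gt;  case gt: return lt;
    case le: return ge;  case ge: return le;
    case eq: case ne: return c;
    default: assert(false); return nv;
  }
}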
@@ -399,7 +400,7 @@ enum SystemRegister {
//
// The enumerations can be used like this:
//
// ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
// DCHECK(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
// switch(instr->Mask(PCRelAddressingMask)) {
// case ADR: Format("adr 'Xd, 'AddrPCRelByte"); break;
// case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break;

37
deps/v8/src/arm64/cpu-arm64.cc

@@ -4,23 +4,16 @@
// CPU specific code for arm independent of OS goes here.
#include "v8.h"
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#include "arm64/cpu-arm64.h"
#include "arm64/utils-arm64.h"
#include "src/arm64/utils-arm64.h"
#include "src/assembler.h"
namespace v8 {
namespace internal {
#ifdef DEBUG
bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::cross_compile_ = 0;
class CacheLineSizes {
public:
CacheLineSizes() {
@@ -31,22 +24,23 @@ class CacheLineSizes {
__asm__ __volatile__ ("mrs %[ctr], ctr_el0" // NOLINT
: [ctr] "=r" (cache_type_register_));
#endif
};
}
uint32_t icache_line_size() const { return ExtractCacheLineSize(0); }
uint32_t dcache_line_size() const { return ExtractCacheLineSize(16); }
private:
uint32_t ExtractCacheLineSize(int cache_line_size_shift) const {
// The cache type register holds the size of the caches as a power of two.
return 1 << ((cache_type_register_ >> cache_line_size_shift) & 0xf);
// The cache type register holds the size of cache lines in words as a
// power of two.
return 4 << ((cache_type_register_ >> cache_line_size_shift) & 0xf);
}
uint32_t cache_type_register_;
};
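The corrected comment matters: CTR_EL0 reports line sizes in 4-byte words, so the old `1 <<` under-reported them by a factor of four. A standalone sketch of the decode (the example CTR value below is made up):
#include <cstdint>
#include <cstdio>
// Bits [3:0] of the cache type register hold log2(icache line size in
// words), bits [19:16] the same for the dcache; hence 4 << field = bytes.
uint32_t CacheLineBytes(uint64_t ctr, int shift) {
  return 4u << ((ctr >> shift) & 0xf);
}
int main() {
  uint64_t ctr = 0x00048004;  // hypothetical value: 64-byte I and D lines
  printf("icache %u B, dcache %u B\n",
         CacheLineBytes(ctr, 0), CacheLineBytes(ctr, 16));
  return 0;
}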
void CPU::FlushICache(void* address, size_t length) {
void CpuFeatures::FlushICache(void* address, size_t length) {
if (length == 0) return;
#ifdef USE_SIMULATOR
@@ -65,8 +59,8 @@ void CPU::FlushICache(void* address, size_t length) {
uintptr_t dsize = sizes.dcache_line_size();
uintptr_t isize = sizes.icache_line_size();
// Cache line sizes are always a power of 2.
ASSERT(CountSetBits(dsize, 64) == 1);
ASSERT(CountSetBits(isize, 64) == 1);
DCHECK(CountSetBits(dsize, 64) == 1);
DCHECK(CountSetBits(isize, 64) == 1);
uintptr_t dstart = start & ~(dsize - 1);
uintptr_t istart = start & ~(isize - 1);
uintptr_t end = start + length;
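The dstart/istart computation rounds the flushed range down to cache-line boundaries before the per-line maintenance loop. The same alignment arithmetic in isolation (op stands in for the dc/ic instructions; names are illustrative):
#include <cstddef>
#include <cstdint>
// Visit every cache line overlapping [start, start + length); line_size
// must be a power of two, as the original code asserts.
void ForEachCacheLine(uintptr_t start, size_t length, uintptr_t line_size,
                      void (*op)(uintptr_t)) {
  uintptr_t p = start & ~(line_size - 1);  // round down to a line boundary
  uintptr_t end = start + length;
  for (; p < end; p += line_size) op(p);
}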
@@ -124,17 +118,6 @@ void CPU::FlushICache(void* address, size_t length) {
#endif
}
void CpuFeatures::Probe(bool serializer_enabled) {
// AArch64 has no configuration options, no further probing is required.
supported_ = 0;
#ifdef DEBUG
initialized_ = true;
#endif
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM64

71
deps/v8/src/arm64/cpu-arm64.h

@@ -1,71 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM64_CPU_ARM64_H_
#define V8_ARM64_CPU_ARM64_H_
#include <stdio.h>
#include "serialize.h"
#include "cpu.h"
namespace v8 {
namespace internal {
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a CpuFeatureScope before use.
class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
static void Probe(bool serializer_enabled);
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
// There are no optional features for ARM64.
return false;
};
// There are no optional features for ARM64.
static bool IsSafeForSnapshot(Isolate* isolate, CpuFeature f) {
return IsSupported(f);
}
// I and D cache line size in bytes.
static unsigned dcache_line_size();
static unsigned icache_line_size();
static unsigned supported_;
static bool VerifyCrossCompiling() {
// There are no optional features for ARM64.
ASSERT(cross_compile_ == 0);
return true;
}
static bool VerifyCrossCompiling(CpuFeature f) {
// There are no optional features for ARM64.
USE(f);
ASSERT(cross_compile_ == 0);
return true;
}
static bool SupportsCrankshaft() { return true; }
private:
#ifdef DEBUG
static bool initialized_;
#endif
// This isn't used (and is always 0), but it is required by V8.
static unsigned cross_compile_;
friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
} } // namespace v8::internal
#endif // V8_ARM64_CPU_ARM64_H_

162
deps/v8/src/arm64/debug-arm64.cc

@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h"
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#include "codegen.h"
#include "debug.h"
#include "src/codegen.h"
#include "src/debug.h"
namespace v8 {
namespace internal {
@@ -46,7 +46,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
// The first instruction of a patched return sequence must be a load literal
// loading the address of the debug break return code.
patcher.LoadLiteral(ip0, 3 * kInstructionSize);
patcher.ldr_pcrel(ip0, (3 * kInstructionSize) >> kLoadLiteralScaleLog2);
// TODO(all): check the following is correct.
// The debug break return code will push a frame and call statically compiled
// code. By using blr, even though control will not return after the branch,
@@ -67,21 +67,21 @@
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
return rinfo->IsPatchedReturnSequence();
}
bool BreakLocationIterator::IsDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
DCHECK(IsDebugBreakSlot());
// Check whether the debug break slot instructions have been patched.
return rinfo()->IsPatchedDebugBreakSlotSequence();
}
void BreakLocationIterator::SetDebugBreakAtSlot() {
// Patch the code emitted by Debug::GenerateSlots, changing the debug break
// slot code from
// Patch the code emitted by DebugCodegen::GenerateSlots, changing the debug
// break slot code from
// mov x0, x0 @ nop DEBUG_BREAK_NOP
// mov x0, x0 @ nop DEBUG_BREAK_NOP
// mov x0, x0 @ nop DEBUG_BREAK_NOP
@@ -105,7 +105,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
// The first instruction of a patched debug break slot must be a load literal
// loading the address of the debug break slot code.
patcher.LoadLiteral(ip0, 2 * kInstructionSize);
patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
// TODO(all): check the following is correct.
// The debug break slot code will push a frame and call statically compiled
// code. By using blr, even though control will not return after the branch,
@@ -118,12 +118,11 @@
void BreakLocationIterator::ClearDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
DCHECK(IsDebugBreakSlot());
rinfo()->PatchCode(original_rinfo()->pc(),
Assembler::kDebugBreakSlotInstructions);
}
const bool Debug::FramePaddingLayout::kIsSupported = false;
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
@@ -132,6 +131,12 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Load padding words on stack.
__ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingValue));
__ PushMultipleTimes(scratch, LiveEdit::kFramePaddingInitialSize);
__ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
__ Push(scratch);
// Any live values (object_regs and non_object_regs) in caller-saved
// registers (or lr) need to be stored on the stack so that their values are
// safely preserved for a call into C code.
@@ -145,12 +150,12 @@
// collector doesn't try to interpret them as pointers.
//
// TODO(jbramley): Why can't this handle callee-saved registers?
ASSERT((~kCallerSaved.list() & object_regs) == 0);
ASSERT((~kCallerSaved.list() & non_object_regs) == 0);
ASSERT((object_regs & non_object_regs) == 0);
ASSERT((scratch.Bit() & object_regs) == 0);
ASSERT((scratch.Bit() & non_object_regs) == 0);
ASSERT((masm->TmpList()->list() & (object_regs | non_object_regs)) == 0);
DCHECK((~kCallerSaved.list() & object_regs) == 0);
DCHECK((~kCallerSaved.list() & non_object_regs) == 0);
DCHECK((object_regs & non_object_regs) == 0);
DCHECK((scratch.Bit() & object_regs) == 0);
DCHECK((scratch.Bit() & non_object_regs) == 0);
DCHECK((masm->TmpList()->list() & (object_regs | non_object_regs)) == 0);
STATIC_ASSERT(kSmiValueSize == 32);
CPURegList non_object_list =
@@ -158,15 +163,16 @@
while (!non_object_list.IsEmpty()) {
// Store each non-object register as two SMIs.
Register reg = Register(non_object_list.PopLowestIndex());
__ Push(reg);
__ Poke(wzr, 0);
__ Push(reg.W(), wzr);
__ Lsr(scratch, reg, 32);
__ SmiTagAndPush(scratch, reg);
// Stack:
// jssp[12]: reg[63:32]
// jssp[8]: 0x00000000 (SMI tag & padding)
// jssp[4]: reg[31:0]
// jssp[0]: 0x00000000 (SMI tag & padding)
STATIC_ASSERT((kSmiTag == 0) && (kSmiShift == 32));
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(static_cast<unsigned>(kSmiShift) == kWRegSizeInBits);
}
if (object_regs != 0) {
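The stack-layout comment above describes how a raw 64-bit register survives GC: each 32-bit half is stored as a valid smi whose payload sits in the upper word (kSmiShift == 32, tag 0), so the collector only ever sees smis. A host-side sketch of that encoding (struct and function names are illustrative):
#include <cstdint>
// Split a raw 64-bit value into two smis; the payload occupies the upper
// 32 bits, and the zero low bits are the tag plus padding.
struct SmiPair { uint64_t low_half, high_half; };
SmiPair SplitToSmis(uint64_t raw) {
  return { (raw & 0xffffffffull) << 32, (raw >> 32) << 32 };
}
uint64_t JoinFromSmis(const SmiPair& p) {
  return (p.low_half >> 32) | ((p.high_half >> 32) << 32);
}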
@@ -201,21 +207,24 @@
__ Bfxil(reg, scratch, 32, 32);
}
// Don't bother removing padding bytes pushed on the stack
// as the frame is going to be restored right away.
// Leave the internal frame.
}
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
ExternalReference after_break_target(Debug_Address::AfterBreakTarget(),
masm->isolate());
ExternalReference after_break_target =
ExternalReference::debug_after_break_target_address(masm->isolate());
__ Mov(scratch, after_break_target);
__ Ldr(scratch, MemOperand(scratch));
__ Br(scratch);
}
void Debug::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
// Register state for CallICStub
// ----------- S t a t e -------------
// -- x1 : function
@@ -225,54 +234,41 @@
}
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-arm.cc).
// ----------- S t a t e -------------
// -- x2 : name
// -- lr : return address
// -- x0 : receiver
// -- [sp] : receiver
// -----------------------------------
// Registers x0 and x2 contain objects that need to be pushed on the
// expression stack of the fake JS frame.
Generate_DebugBreakCallHelper(masm, x0.Bit() | x2.Bit(), 0, x10);
Register receiver = LoadIC::ReceiverRegister();
Register name = LoadIC::NameRegister();
Generate_DebugBreakCallHelper(masm, receiver.Bit() | name.Bit(), 0, x10);
}
void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC store (from ic-arm.cc).
// ----------- S t a t e -------------
// -- x0 : value
// -- x1 : receiver
// -- x2 : name
// -- lr : return address
// -----------------------------------
// Registers x0, x1, and x2 contain objects that need to be pushed on the
// expression stack of the fake JS frame.
Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC store (from ic-arm64.cc).
Register receiver = StoreIC::ReceiverRegister();
Register name = StoreIC::NameRegister();
Register value = StoreIC::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
}
void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- x0 : key
// -- x1 : receiver
Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit(), 0, x10);
void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for keyed IC load (from ic-arm.cc).
GenerateLoadICDebugBreak(masm);
}
void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- x0 : value
// -- x1 : key
// -- x2 : receiver
// -- lr : return address
Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC keyed store call (from ic-arm64.cc).
Register receiver = KeyedStoreIC::ReceiverRegister();
Register name = KeyedStoreIC::NameRegister();
Register value = KeyedStoreIC::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
}
void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
// Register state for CompareNil IC
// ----------- S t a t e -------------
// -- r0 : value
@@ -281,7 +277,7 @@ void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
}
void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that r0 is TOS which
// is an object - this is not generally the case so this should be used with
// care.
@@ -289,7 +285,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
}
void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x1 : function
@@ -298,7 +294,7 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
}
void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x0 : number of arguments (not smi)
@@ -308,7 +304,8 @@ void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
}
void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x0 : number of arguments (not smi)
@@ -321,7 +318,7 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
}
void Debug::GenerateSlot(MacroAssembler* masm) {
void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
// Generate enough nop's to make space for a call instruction. Avoid emitting
// the constant pool in the debug break slot code.
InstructionAccurateScope scope(masm, Assembler::kDebugBreakSlotInstructions);
@@ -333,23 +330,48 @@
}
void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted no registers can contain
// object pointers.
Generate_DebugBreakCallHelper(masm, 0, 0, x10);
}
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnARM64);
void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
__ Ret();
}
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnARM64);
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
ExternalReference restarter_frame_function_slot =
ExternalReference::debug_restarter_frame_function_pointer_address(
masm->isolate());
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
__ Mov(scratch, restarter_frame_function_slot);
__ Str(xzr, MemOperand(scratch));
// We do not know our frame height, but set sp based on fp.
__ Sub(masm->StackPointer(), fp, kPointerSize);
__ AssertStackConsistency();
__ Pop(x1, fp, lr); // Function, Frame, Return address.
// Load context from the function.
__ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
// Get function code.
__ Ldr(scratch, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(scratch, FieldMemOperand(scratch, SharedFunctionInfo::kCodeOffset));
__ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
// Re-run JSFunction, x1 is function, cp is context.
__ Br(scratch);
}
const bool Debug::kFrameDropperSupported = false;
const bool LiveEdit::kFrameDropperSupported = true;
} } // namespace v8::internal

30
deps/v8/src/arm64/decoder-arm64-inl.h

@@ -5,9 +5,9 @@
#ifndef V8_ARM64_DECODER_ARM64_INL_H_
#define V8_ARM64_DECODER_ARM64_INL_H_
#include "arm64/decoder-arm64.h"
#include "globals.h"
#include "utils.h"
#include "src/arm64/decoder-arm64.h"
#include "src/globals.h"
#include "src/utils.h"
namespace v8 {
@@ -96,17 +96,17 @@ void Decoder<V>::Decode(Instruction *instr) {
template<typename V>
void Decoder<V>::DecodePCRelAddressing(Instruction* instr) {
ASSERT(instr->Bits(27, 24) == 0x0);
DCHECK(instr->Bits(27, 24) == 0x0);
// We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
// decode.
ASSERT(instr->Bit(28) == 0x1);
DCHECK(instr->Bit(28) == 0x1);
V::VisitPCRelAddressing(instr);
}
template<typename V>
void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
ASSERT((instr->Bits(27, 24) == 0x4) ||
DCHECK((instr->Bits(27, 24) == 0x4) ||
(instr->Bits(27, 24) == 0x5) ||
(instr->Bits(27, 24) == 0x6) ||
(instr->Bits(27, 24) == 0x7) );
@@ -208,7 +208,7 @@ void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeLoadStore(Instruction* instr) {
ASSERT((instr->Bits(27, 24) == 0x8) ||
DCHECK((instr->Bits(27, 24) == 0x8) ||
(instr->Bits(27, 24) == 0x9) ||
(instr->Bits(27, 24) == 0xC) ||
(instr->Bits(27, 24) == 0xD) );
@@ -328,7 +328,7 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeLogical(Instruction* instr) {
ASSERT(instr->Bits(27, 24) == 0x2);
DCHECK(instr->Bits(27, 24) == 0x2);
if (instr->Mask(0x80400000) == 0x00400000) {
V::VisitUnallocated(instr);
@@ -348,7 +348,7 @@ void Decoder<V>::DecodeLogical(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeBitfieldExtract(Instruction* instr) {
ASSERT(instr->Bits(27, 24) == 0x3);
DCHECK(instr->Bits(27, 24) == 0x3);
if ((instr->Mask(0x80400000) == 0x80000000) ||
(instr->Mask(0x80400000) == 0x00400000) ||
@@ -374,7 +374,7 @@ void Decoder<V>::DecodeBitfieldExtract(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeAddSubImmediate(Instruction* instr) {
ASSERT(instr->Bits(27, 24) == 0x1);
DCHECK(instr->Bits(27, 24) == 0x1);
if (instr->Bit(23) == 1) {
V::VisitUnallocated(instr);
} else {
@@ -385,7 +385,7 @@ void Decoder<V>::DecodeAddSubImmediate(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
ASSERT((instr->Bits(27, 24) == 0xA) ||
DCHECK((instr->Bits(27, 24) == 0xA) ||
(instr->Bits(27, 24) == 0xB) );
if (instr->Bit(24) == 0) {
@@ -501,7 +501,7 @@ void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeFP(Instruction* instr) {
ASSERT((instr->Bits(27, 24) == 0xE) ||
DCHECK((instr->Bits(27, 24) == 0xE) ||
(instr->Bits(27, 24) == 0xF) );
if (instr->Bit(28) == 0) {
@@ -614,7 +614,7 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
}
} else {
// Bit 30 == 1 has been handled earlier.
ASSERT(instr->Bit(30) == 0);
DCHECK(instr->Bit(30) == 0);
if (instr->Mask(0xA0800000) != 0) {
V::VisitUnallocated(instr);
} else {
@@ -630,7 +630,7 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeAdvSIMDLoadStore(Instruction* instr) {
// TODO(all): Implement Advanced SIMD load/store instruction decode.
ASSERT(instr->Bits(29, 25) == 0x6);
DCHECK(instr->Bits(29, 25) == 0x6);
V::VisitUnimplemented(instr);
}
@@ -638,7 +638,7 @@
template<typename V>
void Decoder<V>::DecodeAdvSIMDDataProcessing(Instruction* instr) {
// TODO(all): Implement Advanced SIMD data processing instruction decode.
ASSERT(instr->Bits(27, 25) == 0x7);
DCHECK(instr->Bits(27, 25) == 0x7);
V::VisitUnimplemented(instr);
}

14
deps/v8/src/arm64/decoder-arm64.cc

@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h"
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#include "globals.h"
#include "utils.h"
#include "arm64/decoder-arm64.h"
#include "src/arm64/decoder-arm64.h"
#include "src/globals.h"
#include "src/utils.h"
namespace v8 {
@@ -39,7 +39,7 @@ void DispatchingDecoderVisitor::InsertVisitorBefore(
}
// We reached the end of the list. The last element must be
// registered_visitor.
ASSERT(*it == registered_visitor);
DCHECK(*it == registered_visitor);
visitors_.insert(it, new_visitor);
}
@@ -57,7 +57,7 @@ void DispatchingDecoderVisitor::InsertVisitorAfter(
}
// We reached the end of the list. The last element must be
// registered_visitor.
ASSERT(*it == registered_visitor);
DCHECK(*it == registered_visitor);
visitors_.push_back(new_visitor);
}
@@ -70,7 +70,7 @@ void DispatchingDecoderVisitor::RemoveVisitor(DecoderVisitor* visitor) {
#define DEFINE_VISITOR_CALLERS(A) \
void DispatchingDecoderVisitor::Visit##A(Instruction* instr) { \
if (!(instr->Mask(A##FMask) == A##Fixed)) { \
ASSERT(instr->Mask(A##FMask) == A##Fixed); \
DCHECK(instr->Mask(A##FMask) == A##Fixed); \
} \
std::list<DecoderVisitor*>::iterator it; \
for (it = visitors_.begin(); it != visitors_.end(); it++) { \
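The macro expands into one forwarding method per instruction class: validate the fixed opcode bits, then hand the instruction to every registered visitor in order. A miniature of that dispatch shape (toy types, not the V8 decoder):
#include <list>
struct Instruction { unsigned bits; };
struct DecoderVisitor {
  virtual ~DecoderVisitor() = default;
  virtual void VisitAddSub(Instruction* instr) = 0;
};
struct DispatchingVisitor {
  std::list<DecoderVisitor*> visitors;
  void VisitAddSub(Instruction* instr) {
    // The real code first DCHECKs the instruction's fixed bits here.
    for (DecoderVisitor* v : visitors) v->VisitAddSub(instr);
  }
};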

4
deps/v8/src/arm64/decoder-arm64.h

@@ -7,8 +7,8 @@
#include <list>
#include "globals.h"
#include "arm64/instructions-arm64.h"
#include "src/arm64/instructions-arm64.h"
#include "src/globals.h"
namespace v8 {
namespace internal {

55
deps/v8/src/arm64/delayed-masm-arm64-inl.h

@@ -0,0 +1,55 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM64_DELAYED_MASM_ARM64_INL_H_
#define V8_ARM64_DELAYED_MASM_ARM64_INL_H_
#include "src/arm64/delayed-masm-arm64.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
void DelayedMasm::EndDelayedUse() {
EmitPending();
DCHECK(!scratch_register_acquired_);
ResetSavedValue();
}
void DelayedMasm::Mov(const Register& rd,
const Operand& operand,
DiscardMoveMode discard_mode) {
EmitPending();
DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_);
__ Mov(rd, operand, discard_mode);
}
void DelayedMasm::Fmov(FPRegister fd, FPRegister fn) {
EmitPending();
__ Fmov(fd, fn);
}
void DelayedMasm::Fmov(FPRegister fd, double imm) {
EmitPending();
__ Fmov(fd, imm);
}
void DelayedMasm::LoadObject(Register result, Handle<Object> object) {
EmitPending();
DCHECK(!IsScratchRegister(result) || scratch_register_acquired_);
__ LoadObject(result, object);
}
#undef __
} } // namespace v8::internal
#endif // V8_ARM64_DELAYED_MASM_ARM64_INL_H_

198
deps/v8/src/arm64/delayed-masm-arm64.cc

@@ -0,0 +1,198 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/delayed-masm-arm64.h"
#include "src/arm64/lithium-codegen-arm64.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
void DelayedMasm::StackSlotMove(LOperand* src, LOperand* dst) {
DCHECK(src->IsStackSlot());
DCHECK(dst->IsStackSlot());
MemOperand src_operand = cgen_->ToMemOperand(src);
MemOperand dst_operand = cgen_->ToMemOperand(dst);
if (pending_ == kStackSlotMove) {
DCHECK(pending_pc_ == masm_->pc_offset());
UseScratchRegisterScope scope(masm_);
DoubleRegister temp1 = scope.AcquireD();
DoubleRegister temp2 = scope.AcquireD();
switch (MemOperand::AreConsistentForPair(pending_address_src_,
src_operand)) {
case MemOperand::kNotPair:
__ Ldr(temp1, pending_address_src_);
__ Ldr(temp2, src_operand);
break;
case MemOperand::kPairAB:
__ Ldp(temp1, temp2, pending_address_src_);
break;
case MemOperand::kPairBA:
__ Ldp(temp2, temp1, src_operand);
break;
}
switch (MemOperand::AreConsistentForPair(pending_address_dst_,
dst_operand)) {
case MemOperand::kNotPair:
__ Str(temp1, pending_address_dst_);
__ Str(temp2, dst_operand);
break;
case MemOperand::kPairAB:
__ Stp(temp1, temp2, pending_address_dst_);
break;
case MemOperand::kPairBA:
__ Stp(temp2, temp1, dst_operand);
break;
}
ResetPending();
return;
}
EmitPending();
pending_ = kStackSlotMove;
pending_address_src_ = src_operand;
pending_address_dst_ = dst_operand;
#ifdef DEBUG
pending_pc_ = masm_->pc_offset();
#endif
}
void DelayedMasm::StoreConstant(uint64_t value, const MemOperand& operand) {
DCHECK(!scratch_register_acquired_);
if ((pending_ == kStoreConstant) && (value == pending_value_)) {
MemOperand::PairResult result =
MemOperand::AreConsistentForPair(pending_address_dst_, operand);
if (result != MemOperand::kNotPair) {
const MemOperand& dst =
(result == MemOperand::kPairAB) ?
pending_address_dst_ :
operand;
DCHECK(pending_pc_ == masm_->pc_offset());
if (pending_value_ == 0) {
__ Stp(xzr, xzr, dst);
} else {
SetSavedValue(pending_value_);
__ Stp(ScratchRegister(), ScratchRegister(), dst);
}
ResetPending();
return;
}
}
EmitPending();
pending_ = kStoreConstant;
pending_address_dst_ = operand;
pending_value_ = value;
#ifdef DEBUG
pending_pc_ = masm_->pc_offset();
#endif
}
void DelayedMasm::Load(const CPURegister& rd, const MemOperand& operand) {
if ((pending_ == kLoad) &&
pending_register_.IsSameSizeAndType(rd)) {
switch (MemOperand::AreConsistentForPair(pending_address_src_, operand)) {
case MemOperand::kNotPair:
break;
case MemOperand::kPairAB:
DCHECK(pending_pc_ == masm_->pc_offset());
DCHECK(!IsScratchRegister(pending_register_) ||
scratch_register_acquired_);
DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_);
__ Ldp(pending_register_, rd, pending_address_src_);
ResetPending();
return;
case MemOperand::kPairBA:
DCHECK(pending_pc_ == masm_->pc_offset());
DCHECK(!IsScratchRegister(pending_register_) ||
scratch_register_acquired_);
DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_);
__ Ldp(rd, pending_register_, operand);
ResetPending();
return;
}
}
EmitPending();
pending_ = kLoad;
pending_register_ = rd;
pending_address_src_ = operand;
#ifdef DEBUG
pending_pc_ = masm_->pc_offset();
#endif
}
void DelayedMasm::Store(const CPURegister& rd, const MemOperand& operand) {
if ((pending_ == kStore) &&
pending_register_.IsSameSizeAndType(rd)) {
switch (MemOperand::AreConsistentForPair(pending_address_dst_, operand)) {
case MemOperand::kNotPair:
break;
case MemOperand::kPairAB:
DCHECK(pending_pc_ == masm_->pc_offset());
__ Stp(pending_register_, rd, pending_address_dst_);
ResetPending();
return;
case MemOperand::kPairBA:
DCHECK(pending_pc_ == masm_->pc_offset());
__ Stp(rd, pending_register_, operand);
ResetPending();
return;
}
}
EmitPending();
pending_ = kStore;
pending_register_ = rd;
pending_address_dst_ = operand;
#ifdef DEBUG
pending_pc_ = masm_->pc_offset();
#endif
}
void DelayedMasm::EmitPending() {
DCHECK((pending_ == kNone) || (pending_pc_ == masm_->pc_offset()));
switch (pending_) {
case kNone:
return;
case kStoreConstant:
if (pending_value_ == 0) {
__ Str(xzr, pending_address_dst_);
} else {
SetSavedValue(pending_value_);
__ Str(ScratchRegister(), pending_address_dst_);
}
break;
case kLoad:
DCHECK(!IsScratchRegister(pending_register_) ||
scratch_register_acquired_);
__ Ldr(pending_register_, pending_address_src_);
break;
case kStore:
__ Str(pending_register_, pending_address_dst_);
break;
case kStackSlotMove: {
UseScratchRegisterScope scope(masm_);
DoubleRegister temp = scope.AcquireD();
__ Ldr(temp, pending_address_src_);
__ Str(temp, pending_address_dst_);
break;
}
}
ResetPending();
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM64
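The merges above all hinge on MemOperand::AreConsistentForPair, which classifies two operands as an adjacent pair (in either order) or not. The core test reduced to plain offsets (slot size and names are illustrative):
#include <cstdint>
enum PairResult { kNotPair, kPairAB, kPairBA };
// Two stack accesses can fuse into a single ldp/stp when they address
// adjacent 8-byte slots off the same base register.
PairResult ClassifyPair(int64_t offset_a, int64_t offset_b) {
  if (offset_b == offset_a + 8) return kPairAB;  // a is the lower slot
  if (offset_a == offset_b + 8) return kPairBA;  // b is the lower slot
  return kNotPair;
}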

164
deps/v8/src/arm64/delayed-masm-arm64.h

@@ -0,0 +1,164 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM64_DELAYED_MASM_ARM64_H_
#define V8_ARM64_DELAYED_MASM_ARM64_H_
#include "src/lithium.h"
namespace v8 {
namespace internal {
class LCodeGen;
// This class delays the generation of some instructions. This way, we have a
// chance to merge two instructions into one (using a load/store pair).
// Each instruction must either:
// - merge with the pending instruction and generate just one instruction.
// - emit the pending instruction and then generate the instruction (or set the
// pending instruction).
class DelayedMasm BASE_EMBEDDED {
public:
DelayedMasm(LCodeGen* owner,
MacroAssembler* masm,
const Register& scratch_register)
: cgen_(owner), masm_(masm), scratch_register_(scratch_register),
scratch_register_used_(false), pending_(kNone), saved_value_(0) {
#ifdef DEBUG
pending_register_ = no_reg;
pending_value_ = 0;
pending_pc_ = 0;
scratch_register_acquired_ = false;
#endif
}
~DelayedMasm() {
DCHECK(!scratch_register_acquired_);
DCHECK(!scratch_register_used_);
DCHECK(!pending());
}
inline void EndDelayedUse();
const Register& ScratchRegister() {
scratch_register_used_ = true;
return scratch_register_;
}
bool IsScratchRegister(const CPURegister& reg) {
return reg.Is(scratch_register_);
}
bool scratch_register_used() const { return scratch_register_used_; }
void reset_scratch_register_used() { scratch_register_used_ = false; }
// Acquire/Release scratch register for use outside this class.
void AcquireScratchRegister() {
EmitPending();
ResetSavedValue();
#ifdef DEBUG
DCHECK(!scratch_register_acquired_);
scratch_register_acquired_ = true;
#endif
}
void ReleaseScratchRegister() {
#ifdef DEBUG
DCHECK(scratch_register_acquired_);
scratch_register_acquired_ = false;
#endif
}
bool pending() { return pending_ != kNone; }
// Extra layer over the macro-assembler instructions (which emits the
// potential pending instruction).
inline void Mov(const Register& rd,
const Operand& operand,
DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
inline void Fmov(FPRegister fd, FPRegister fn);
inline void Fmov(FPRegister fd, double imm);
inline void LoadObject(Register result, Handle<Object> object);
// Instructions which try to merge with the pending instruction.
void StackSlotMove(LOperand* src, LOperand* dst);
// StoreConstant can only be used if the scratch register is not acquired.
void StoreConstant(uint64_t value, const MemOperand& operand);
void Load(const CPURegister& rd, const MemOperand& operand);
void Store(const CPURegister& rd, const MemOperand& operand);
// Emit the potential pending instruction.
void EmitPending();
// Reset the pending state.
void ResetPending() {
pending_ = kNone;
#ifdef DEBUG
pending_register_ = no_reg;
MemOperand tmp;
pending_address_src_ = tmp;
pending_address_dst_ = tmp;
pending_value_ = 0;
pending_pc_ = 0;
#endif
}
void InitializeRootRegister() {
masm_->InitializeRootRegister();
}
private:
// Set the saved value and load the ScratchRegister with it.
void SetSavedValue(uint64_t saved_value) {
DCHECK(saved_value != 0);
if (saved_value_ != saved_value) {
masm_->Mov(ScratchRegister(), saved_value);
saved_value_ = saved_value;
}
}
// Reset the saved value (i.e. the value of ScratchRegister is no longer
// known).
void ResetSavedValue() {
saved_value_ = 0;
}
LCodeGen* cgen_;
MacroAssembler* masm_;
// Register used to store a constant.
Register scratch_register_;
bool scratch_register_used_;
// Sometimes we store or load two values in two contiguous stack slots.
// In this case, we try to use the ldp/stp instructions to reduce code size.
// To do that, instead of emitting instructions directly, we record in the
// fields below that an instruction is pending. When the next instruction
// arrives, if it pairs with the pending one for ldp/stp we emit a single
// ldp/stp; otherwise we emit the pending instruction and record the new
// one (which becomes pending).
// Enumeration of instructions which can be pending.
enum Pending {
kNone,
kStoreConstant,
kLoad, kStore,
kStackSlotMove
};
// The pending instruction.
Pending pending_;
// For kLoad, kStore: register which must be loaded/stored.
CPURegister pending_register_;
// For kLoad, kStackSlotMove: address of the load.
MemOperand pending_address_src_;
// For kStoreConstant, kStore, kStackSlotMove: address of the store.
MemOperand pending_address_dst_;
// For kStoreConstant: value to be stored.
uint64_t pending_value_;
// Value held in the ScratchRegister if saved_value_ is not 0.
// For 0, we use xzr.
uint64_t saved_value_;
#ifdef DEBUG
// Address where the pending instruction must be generated. It's only used to
// check that nothing else has been generated since we set the pending
// instruction.
int pending_pc_;
// If true, the scratch register has been acquired outside this class. The
// scratch register can no longer be used for constants.
bool scratch_register_acquired_;
#endif
};
} } // namespace v8::internal
#endif // V8_ARM64_DELAYED_MASM_ARM64_H_

90
deps/v8/src/arm64/deoptimizer-arm64.cc

@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h"
#include "src/v8.h"
#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "safepoint-table.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
#include "src/safepoint-table.h"
namespace v8 {
@@ -32,9 +32,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
SharedFunctionInfo* shared =
SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
Address code_start_address = code->instruction_start();
#ifdef DEBUG
Address prev_call_address = NULL;
@@ -48,13 +45,13 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
PatchingAssembler patcher(call_address, patch_size() / kInstructionSize);
patcher.LoadLiteral(ip0, 2 * kInstructionSize);
patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
patcher.blr(ip0);
patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));
ASSERT((prev_call_address == NULL) ||
DCHECK((prev_call_address == NULL) ||
(call_address >= prev_call_address + patch_size()));
ASSERT(call_address + patch_size() <= code->instruction_end());
DCHECK(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
prev_call_address = call_address;
#endif
@@ -93,7 +90,7 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler_);
ApiFunction function(descriptor->deoptimization_handler());
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
int params = descriptor->GetHandlerParameterCount();
@@ -110,47 +107,6 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
Code* Deoptimizer::NotifyStubFailureBuiltin() {
return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
}
#define __ masm->
static void CopyRegisterDumpToFrame(MacroAssembler* masm,
Register frame,
CPURegList reg_list,
Register scratch1,
Register scratch2,
int src_offset,
int dst_offset) {
int offset0, offset1;
CPURegList copy_to_input = reg_list;
int reg_count = reg_list.Count();
int reg_size = reg_list.RegisterSizeInBytes();
for (int i = 0; i < (reg_count / 2); i++) {
__ PeekPair(scratch1, scratch2, src_offset + (i * reg_size * 2));
offset0 = (copy_to_input.PopLowestIndex().code() * reg_size) + dst_offset;
offset1 = (copy_to_input.PopLowestIndex().code() * reg_size) + dst_offset;
if ((offset0 + reg_size) == offset1) {
// Registers are adjacent: store in pairs.
__ Stp(scratch1, scratch2, MemOperand(frame, offset0));
} else {
// Registers are not adjacent: store individually.
__ Str(scratch1, MemOperand(frame, offset0));
__ Str(scratch2, MemOperand(frame, offset1));
}
}
if ((reg_count & 1) != 0) {
__ Peek(scratch1, src_offset + (reg_count - 1) * reg_size);
offset0 = (copy_to_input.PopLowestIndex().code() * reg_size) + dst_offset;
__ Str(scratch1, MemOperand(frame, offset0));
}
}
#undef __
#define __ masm()->
@@ -214,13 +170,23 @@ void Deoptimizer::EntryGenerator::Generate() {
__ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
// Copy core registers into the input frame.
CopyRegisterDumpToFrame(masm(), x1, saved_registers, x2, x4, 0,
FrameDescription::registers_offset());
CPURegList copy_to_input = saved_registers;
for (int i = 0; i < saved_registers.Count(); i++) {
__ Peek(x2, i * kPointerSize);
CPURegister current_reg = copy_to_input.PopLowestIndex();
int offset = (current_reg.code() * kPointerSize) +
FrameDescription::registers_offset();
__ Str(x2, MemOperand(x1, offset));
}
// Copy FP registers to the input frame.
CopyRegisterDumpToFrame(masm(), x1, saved_fp_registers, x2, x4,
kFPRegistersOffset,
FrameDescription::double_registers_offset());
for (int i = 0; i < saved_fp_registers.Count(); i++) {
int dst_offset = FrameDescription::double_registers_offset() +
(i * kDoubleSize);
int src_offset = kFPRegistersOffset + (i * kDoubleSize);
__ Peek(x2, src_offset);
__ Str(x2, MemOperand(x1, dst_offset));
}
// Remove the bailout id and the saved registers from the stack.
__ Drop(1 + (kSavedRegistersAreaSize / kXRegSize));
@@ -284,7 +250,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ B(lt, &outer_push_loop);
__ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
ASSERT(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
DCHECK(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
!saved_fp_registers.IncludesAliasOf(fp_zero) &&
!saved_fp_registers.IncludesAliasOf(fp_scratch));
int src_offset = FrameDescription::double_registers_offset();
@@ -311,7 +277,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Note that lr is not in the list of saved_registers and will be restored
// later. We can use it to hold the address of last output frame while
// reloading the other registers.
ASSERT(!saved_registers.IncludesAliasOf(lr));
DCHECK(!saved_registers.IncludesAliasOf(lr));
Register last_output_frame = lr;
__ Mov(last_output_frame, current_frame);
@@ -354,14 +320,14 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// The number of entry will never exceed kMaxNumberOfEntries.
// As long as kMaxNumberOfEntries is a valid 16 bits immediate you can use
// a movz instruction to load the entry id.
ASSERT(is_uint16(Deoptimizer::kMaxNumberOfEntries));
DCHECK(is_uint16(Deoptimizer::kMaxNumberOfEntries));
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
__ movz(entry_id, i);
__ b(&done);
ASSERT(masm()->pc_offset() - start == table_entry_size_);
DCHECK(masm()->pc_offset() - start == table_entry_size_);
}
}
__ Bind(&done);
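Because every table entry is exactly two instructions (movz; b), an entry's address is computable from the bailout id alone. A sketch of that arithmetic (constant and function names are illustrative):
#include <cassert>
#include <cstdint>
constexpr int kInstructionSize = 4;
constexpr int kTableEntrySize = 2 * kInstructionSize;  // movz + b
// The id must fit a 16-bit movz immediate, as the DCHECK above requires.
uintptr_t DeoptEntryAddress(uintptr_t table_base, int id) {
  assert(id >= 0 && id < (1 << 16));
  return table_base + static_cast<uintptr_t>(id) * kTableEntrySize;
}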

70
deps/v8/src/arm64/disasm-arm64.cc

@@ -3,19 +3,19 @@
// found in the LICENSE file.
#include <assert.h>
#include <stdio.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include "v8.h"
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#include "disasm.h"
#include "arm64/decoder-arm64-inl.h"
#include "arm64/disasm-arm64.h"
#include "macro-assembler.h"
#include "platform.h"
#include "src/arm64/decoder-arm64-inl.h"
#include "src/arm64/disasm-arm64.h"
#include "src/base/platform/platform.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
@@ -258,7 +258,7 @@ void Disassembler::VisitLogicalImmediate(Instruction* instr) {
bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
ASSERT((reg_size == kXRegSizeInBits) ||
DCHECK((reg_size == kXRegSizeInBits) ||
((reg_size == kWRegSizeInBits) && (value <= 0xffffffff)));
// Test for movz: 16-bits set at positions 0, 16, 32 or 48.
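IsMovzMovnImm answers whether a single instruction can materialize the value: movz needs at most one non-zero 16-bit halfword, movn the complement. A standalone equivalent for the 64-bit case (function names are illustrative):
#include <cstdint>
// True if v has set bits in at most one 16-bit slot (positions 0, 16,
// 32, 48), i.e. a single movz can produce it.
bool FitsSingleMovz(uint64_t v) {
  for (int shift = 0; shift < 64; shift += 16) {
    if ((v & ~(0xffffull << shift)) == 0) return true;
  }
  return false;
}
// movn writes ~(imm16 << shift), so the complement must fit movz.
bool FitsSingleMovn(uint64_t v) { return FitsSingleMovz(~v); }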
@@ -1176,7 +1176,7 @@ void Disassembler::VisitSystem(Instruction* instr) {
}
}
} else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
ASSERT(instr->Mask(SystemHintMask) == HINT);
DCHECK(instr->Mask(SystemHintMask) == HINT);
switch (instr->ImmHint()) {
case NOP: {
mnemonic = "nop";
@@ -1246,7 +1246,7 @@ void Disassembler::Format(Instruction* instr, const char* mnemonic,
const char* format) {
// TODO(mcapewel) don't think I can use the instr address here - there needs
// to be a base address too
ASSERT(mnemonic != NULL);
DCHECK(mnemonic != NULL);
ResetOutput();
Substitute(instr, mnemonic);
if (format != NULL) {
@@ -1364,7 +1364,7 @@ int Disassembler::SubstituteRegisterField(Instruction* instr,
int Disassembler::SubstituteImmediateField(Instruction* instr,
const char* format) {
ASSERT(format[0] == 'I');
DCHECK(format[0] == 'I');
switch (format[1]) {
case 'M': { // IMoveImm or IMoveLSL.
@@ -1372,7 +1372,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
AppendToOutput("#0x%" PRIx64, imm);
} else {
ASSERT(format[5] == 'L');
DCHECK(format[5] == 'L');
AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
if (instr->ShiftMoveWide() > 0) {
AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide());
@@ -1384,7 +1384,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
switch (format[2]) {
case 'L': { // ILLiteral - Immediate Load Literal.
AppendToOutput("pc%+" PRId64,
instr->ImmLLiteral() << kLiteralEntrySizeLog2);
instr->ImmLLiteral() << kLoadLiteralScaleLog2);
return 9;
}
case 'S': { // ILS - Immediate Load/Store.
@@ -1417,7 +1417,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
return 6;
}
case 'A': { // IAddSub.
ASSERT(instr->ShiftAddSub() <= 1);
DCHECK(instr->ShiftAddSub() <= 1);
int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub());
AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm);
return 7;
@@ -1474,7 +1474,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
const char* format) {
ASSERT((format[0] == 'I') && (format[1] == 'B'));
DCHECK((format[0] == 'I') && (format[1] == 'B'));
unsigned r = instr->ImmR();
unsigned s = instr->ImmS();
@@ -1488,13 +1488,13 @@ int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
AppendToOutput("#%d", s + 1);
return 5;
} else {
ASSERT(format[3] == '-');
DCHECK(format[3] == '-');
AppendToOutput("#%d", s - r + 1);
return 7;
}
}
case 'Z': { // IBZ-r.
ASSERT((format[3] == '-') && (format[4] == 'r'));
DCHECK((format[3] == '-') && (format[4] == 'r'));
unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits
: kWRegSizeInBits;
AppendToOutput("#%d", reg_size - r);
@@ -1510,7 +1510,7 @@ int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
int Disassembler::SubstituteLiteralField(Instruction* instr,
const char* format) {
ASSERT(strncmp(format, "LValue", 6) == 0);
DCHECK(strncmp(format, "LValue", 6) == 0);
USE(format);
switch (instr->Mask(LoadLiteralMask)) {
@@ -1526,12 +1526,12 @@ int Disassembler::SubstituteLiteralField(Instruction* instr,
int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
ASSERT(format[0] == 'H');
ASSERT(instr->ShiftDP() <= 0x3);
DCHECK(format[0] == 'H');
DCHECK(instr->ShiftDP() <= 0x3);
switch (format[1]) {
case 'D': { // HDP.
ASSERT(instr->ShiftDP() != ROR);
DCHECK(instr->ShiftDP() != ROR);
} // Fall through.
case 'L': { // HLo.
if (instr->ImmDPShift() != 0) {
@@ -1550,7 +1550,7 @@ int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
int Disassembler::SubstituteConditionField(Instruction* instr,
const char* format) {
ASSERT(format[0] == 'C');
DCHECK(format[0] == 'C');
const char* condition_code[] = { "eq", "ne", "hs", "lo",
"mi", "pl", "vs", "vc",
"hi", "ls", "ge", "lt",
@@ -1559,7 +1559,7 @@ int Disassembler::SubstituteConditionField(Instruction* instr,
switch (format[1]) {
case 'B': cond = instr->ConditionBranch(); break;
case 'I': {
cond = InvertCondition(static_cast<Condition>(instr->Condition()));
cond = NegateCondition(static_cast<Condition>(instr->Condition()));
break;
}
default: cond = instr->Condition();
@@ -1572,12 +1572,12 @@ int Disassembler::SubstituteConditionField(Instruction* instr,
int Disassembler::SubstitutePCRelAddressField(Instruction* instr,
const char* format) {
USE(format);
ASSERT(strncmp(format, "AddrPCRel", 9) == 0);
DCHECK(strncmp(format, "AddrPCRel", 9) == 0);
int offset = instr->ImmPCRel();
// Only ADR (AddrPCRelByte) is supported.
ASSERT(strcmp(format, "AddrPCRelByte") == 0);
DCHECK(strcmp(format, "AddrPCRelByte") == 0);
char sign = '+';
if (offset < 0) {
@@ -1592,7 +1592,7 @@ int Disassembler::SubstitutePCRelAddressField(Instruction* instr,
int Disassembler::SubstituteBranchTargetField(Instruction* instr,
const char* format) {
ASSERT(strncmp(format, "BImm", 4) == 0);
DCHECK(strncmp(format, "BImm", 4) == 0);
int64_t offset = 0;
switch (format[5]) {
@@ -1619,8 +1619,8 @@ int Disassembler::SubstituteBranchTargetField(Instruction* instr,
int Disassembler::SubstituteExtendField(Instruction* instr,
const char* format) {
ASSERT(strncmp(format, "Ext", 3) == 0);
ASSERT(instr->ExtendMode() <= 7);
DCHECK(strncmp(format, "Ext", 3) == 0);
DCHECK(instr->ExtendMode() <= 7);
USE(format);
const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx",
@@ -1646,7 +1646,7 @@ int Disassembler::SubstituteExtendField(Instruction* instr,
int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,
const char* format) {
ASSERT(strncmp(format, "Offsetreg", 9) == 0);
DCHECK(strncmp(format, "Offsetreg", 9) == 0);
const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
"undefined", "undefined", "sxtw", "sxtx" };
USE(format);
@@ -1675,7 +1675,7 @@ int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,
int Disassembler::SubstitutePrefetchField(Instruction* instr,
const char* format) {
ASSERT(format[0] == 'P');
DCHECK(format[0] == 'P');
USE(format);
int prefetch_mode = instr->PrefetchMode();
@@ -1690,7 +1690,7 @@ int Disassembler::SubstitutePrefetchField(Instruction* instr,
int Disassembler::SubstituteBarrierField(Instruction* instr,
const char* format) {
ASSERT(format[0] == 'M');
DCHECK(format[0] == 'M');
USE(format);
static const char* options[4][4] = {
@@ -1734,7 +1734,7 @@ namespace disasm {
const char* NameConverter::NameOfAddress(byte* addr) const {
v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
return tmp_buffer_.start();
}
@@ -1752,7 +1752,7 @@ const char* NameConverter::NameOfCPURegister(int reg) const {
if (ureg == v8::internal::kZeroRegCode) {
return "xzr";
}
v8::internal::OS::SNPrintF(tmp_buffer_, "x%u", ureg);
v8::internal::SNPrintF(tmp_buffer_, "x%u", ureg);
return tmp_buffer_.start();
}
@@ -1786,7 +1786,7 @@ class BufferDisassembler : public v8::internal::Disassembler {
~BufferDisassembler() { }
virtual void ProcessOutput(v8::internal::Instruction* instr) {
v8::internal::OS::SNPrintF(out_buffer_, "%s", GetOutput());
v8::internal::SNPrintF(out_buffer_, "%s", GetOutput());
}
private:
@@ -1797,7 +1797,7 @@ Disassembler::Disassembler(const NameConverter& converter)
: converter_(converter) {}
Disassembler::~Disassembler() {}
Disassembler::~Disassembler() { USE(converter_); }
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,

10
deps/v8/src/arm64/disasm-arm64.h

@@ -5,12 +5,12 @@
#ifndef V8_ARM64_DISASM_ARM64_H
#define V8_ARM64_DISASM_ARM64_H
#include "v8.h"
#include "src/v8.h"
#include "globals.h"
#include "utils.h"
#include "instructions-arm64.h"
#include "decoder-arm64.h"
#include "src/arm64/decoder-arm64.h"
#include "src/arm64/instructions-arm64.h"
#include "src/globals.h"
#include "src/utils.h"
namespace v8 {
namespace internal {

10
deps/v8/src/arm64/frames-arm64.cc

@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h"
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#include "assembler.h"
#include "assembler-arm64.h"
#include "assembler-arm64-inl.h"
#include "frames.h"
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/assembler-arm64.h"
#include "src/assembler.h"
#include "src/frames.h"
namespace v8 {
namespace internal {

5
deps/v8/src/arm64/frames-arm64.h

@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "arm64/constants-arm64.h"
#include "arm64/assembler-arm64.h"
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/constants-arm64.h"
#ifndef V8_ARM64_FRAMES_ARM64_H_
#define V8_ARM64_FRAMES_ARM64_H_
@@ -15,7 +15,6 @@ const int kNumRegs = kNumberOfRegisters;
// Registers x0-x17 are caller-saved.
const int kNumJSCallerSaved = 18;
const RegList kJSCallerSaved = 0x3ffff;
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of eight.

Some files were not shown because too many files changed in this diff
